Bug 1127498 - Share one buffer between all threads, improve marker lifetime management, some code cleanup. r=BenWa

This commit is contained in:
Markus Stange 2015-01-30 14:49:32 -05:00
parent 69c01f4e71
commit a56b456111
14 changed files with 396 additions and 458 deletions

View File

@ -155,17 +155,16 @@ void genPseudoBacktraceEntries(/*MODIFIED*/UnwinderThreadBuffer* utb,
// RUNS IN SIGHANDLER CONTEXT
static
void populateBuffer(UnwinderThreadBuffer* utb, TickSample* sample,
UTB_RELEASE_FUNC releaseFunction, bool jankOnly)
UTB_RELEASE_FUNC releaseFunction)
{
ThreadProfile& sampledThreadProfile = *sample->threadProfile;
PseudoStack* stack = sampledThreadProfile.GetPseudoStack();
stack->updateGeneration(sampledThreadProfile.GetGenerationID());
/* Manufacture the ProfileEntries that we will give to the unwinder
thread, and park them in |utb|. */
bool recordSample = true;
/* Don't process the PseudoStack's markers or honour jankOnly if we're
/* Don't process the PseudoStack's markers if we're
immediately sampling the current thread. */
if (!sample->isSamplingCurrentThread) {
// LinkedUWTBuffers before markers
@ -178,30 +177,9 @@ void populateBuffer(UnwinderThreadBuffer* utb, TickSample* sample,
ProfilerMarkerLinkedList* pendingMarkersList = stack->getPendingMarkers();
while (pendingMarkersList && pendingMarkersList->peek()) {
ProfilerMarker* marker = pendingMarkersList->popHead();
stack->addStoredMarker(marker);
sampledThreadProfile.addStoredMarker(marker);
utb__addEntry( utb, ProfileEntry('m', marker) );
}
if (jankOnly) {
// if we are on a different event we can discard any temporary samples
// we've kept around
if (sLastSampledEventGeneration != sCurrentEventGeneration) {
// XXX: we also probably want to add an entry to the profile to help
// distinguish which samples are part of the same event. That, or record
// the event generation in each sample
sampledThreadProfile.erase();
}
sLastSampledEventGeneration = sCurrentEventGeneration;
recordSample = false;
// only record the events when we haven't seen a tracer
// event for 100ms
if (!sLastTracerEvent.IsNull()) {
mozilla::TimeDuration delta = sample->timestamp - sLastTracerEvent;
if (delta.ToMilliseconds() > 100.0) {
recordSample = true;
}
}
}
}
// JRS 2012-Sept-27: this logic used to involve mUseStackWalk.
@ -318,7 +296,7 @@ void sampleCurrent(TickSample* sample)
return;
}
UnwinderThreadBuffer* utb = syncBuf->GetBuffer();
populateBuffer(utb, sample, &utb__finish_sync_buffer, false);
populateBuffer(utb, sample, &utb__finish_sync_buffer);
}
// RUNS IN SIGHANDLER CONTEXT
@ -344,7 +322,7 @@ void TableTicker::UnwinderTick(TickSample* sample)
if (!utb)
return;
populateBuffer(utb, sample, &uwt__release_full_buffer, mJankOnly);
populateBuffer(utb, sample, &uwt__release_full_buffer);
}
// END take samples

View File

@ -94,7 +94,7 @@ void ProfileEntry::log()
// mTagMarker (ProfilerMarker*) m
// mTagData (const char*) c,s
// mTagPtr (void*) d,l,L,B (immediate backtrace), S(start-of-stack)
// mTagInt (int) n,f,y
// mTagInt (int) n,f,y,T (thread id)
// mTagChar (char) h
// mTagFloat (double) r,t,p,R (resident memory), U (unshared memory)
switch (mTagName) {
@ -104,7 +104,7 @@ void ProfileEntry::log()
LOGF("%c \"%s\"", mTagName, mTagData); break;
case 'd': case 'l': case 'L': case 'B': case 'S':
LOGF("%c %p", mTagName, mTagPtr); break;
case 'n': case 'f': case 'y':
case 'n': case 'f': case 'y': case 'T':
LOGF("%c %d", mTagName, mTagInt); break;
case 'h':
LOGF("%c \'%c\'", mTagName, mTagChar); break;
@ -139,126 +139,50 @@ std::ostream& operator<<(std::ostream& stream, const ProfileEntry& entry)
////////////////////////////////////////////////////////////////////////
// BEGIN ThreadProfile
// BEGIN ProfileBuffer
#define DYNAMIC_MAX_STRING 512
ThreadProfile::ThreadProfile(ThreadInfo* aInfo, int aEntrySize)
: mThreadInfo(aInfo)
ProfileBuffer::ProfileBuffer(int aEntrySize)
: mEntries(MakeUnique<ProfileEntry[]>(aEntrySize))
, mWritePos(0)
, mLastFlushPos(0)
, mReadPos(0)
, mEntrySize(aEntrySize)
, mPseudoStack(aInfo->Stack())
, mMutex("ThreadProfile::mMutex")
, mThreadId(aInfo->ThreadId())
, mIsMainThread(aInfo->IsMainThread())
, mPlatformData(aInfo->GetPlatformData())
, mGeneration(0)
, mPendingGenerationFlush(0)
, mStackTop(aInfo->StackTop())
, mRespInfo(this)
#ifdef XP_LINUX
, mRssMemory(0)
, mUssMemory(0)
#endif
{
MOZ_COUNT_CTOR(ThreadProfile);
mEntries = new ProfileEntry[mEntrySize];
}
ThreadProfile::~ThreadProfile()
// Called from signal, call only reentrant functions
void ProfileBuffer::addTag(const ProfileEntry& aTag)
{
MOZ_COUNT_DTOR(ThreadProfile);
delete[] mEntries;
}
void ThreadProfile::addTag(ProfileEntry aTag)
{
// Called from signal, call only reentrant functions
mEntries[mWritePos] = aTag;
mWritePos = mWritePos + 1;
if (mWritePos >= mEntrySize) {
mPendingGenerationFlush++;
mWritePos = mWritePos % mEntrySize;
mEntries[mWritePos++] = aTag;
if (mWritePos == mEntrySize) {
mGeneration++;
mWritePos = 0;
}
if (mWritePos == mReadPos) {
// Keep one slot open
// Keep one slot open.
mEntries[mReadPos] = ProfileEntry();
mReadPos = (mReadPos + 1) % mEntrySize;
}
// we also need to move the flush pos to ensure we
// do not pass it
if (mWritePos == mLastFlushPos) {
mLastFlushPos = (mLastFlushPos + 1) % mEntrySize;
}
// Take ownership of a marker whose 'm' entry has been written into the
// buffer. The marker is stamped with the current buffer generation so it
// can be freed once the circular buffer has wrapped past the sample that
// references it (see deleteExpiredStoredMarkers).
void ProfileBuffer::addStoredMarker(ProfilerMarker *aStoredMarker) {
aStoredMarker->SetGeneration(mGeneration);
mStoredMarkers.insert(aStoredMarker);
}
// Free stored markers whose generation stamp shows the buffer has wrapped
// far enough that no live entry can still reference them.
// NOTE(review): stopping at the first non-expired marker assumes
// mStoredMarkers is ordered oldest-generation-first; markers are inserted
// in write order so this should hold — confirm if insertion order changes.
void ProfileBuffer::deleteExpiredStoredMarkers() {
// Delete markers of samples that have been overwritten due to circular
// buffer wraparound.
int generation = mGeneration;
while (mStoredMarkers.peek() &&
mStoredMarkers.peek()->HasExpired(generation)) {
delete mStoredMarkers.popHead();
}
}
// flush the new entries
void ThreadProfile::flush()
{
mLastFlushPos = mWritePos;
mGeneration += mPendingGenerationFlush;
mPendingGenerationFlush = 0;
}
#define DYNAMIC_MAX_STRING 512
// discards all of the entries since the last flush()
// NOTE: that if mWritePos happens to wrap around past
// mLastFlushPos we actually only discard mWritePos - mLastFlushPos entries
//
// r = mReadPos
// w = mWritePos
// f = mLastFlushPos
//
// r f w
// |-----------------------------|
// | abcdefghijklmnopq | -> 'abcdefghijklmnopq'
// |-----------------------------|
//
//
// mWritePos and mReadPos have passed mLastFlushPos
// f
// w r
// |-----------------------------|
// |ABCDEFGHIJKLMNOPQRSqrstuvwxyz|
// |-----------------------------|
// w
// r
// |-----------------------------|
// |ABCDEFGHIJKLMNOPQRSqrstuvwxyz| -> ''
// |-----------------------------|
//
//
// mWritePos will end up the same as mReadPos
// r
// w f
// |-----------------------------|
// |ABCDEFGHIJKLMklmnopqrstuvwxyz|
// |-----------------------------|
// r
// w
// |-----------------------------|
// |ABCDEFGHIJKLMklmnopqrstuvwxyz| -> ''
// |-----------------------------|
//
//
// mWritePos has moved past mReadPos
// w r f
// |-----------------------------|
// |ABCDEFdefghijklmnopqrstuvwxyz|
// |-----------------------------|
// r w
// |-----------------------------|
// |ABCDEFdefghijklmnopqrstuvwxyz| -> 'defghijkl'
// |-----------------------------|
void ThreadProfile::erase()
{
mWritePos = mLastFlushPos;
mPendingGenerationFlush = 0;
}
char* ThreadProfile::processDynamicTag(int readPos,
char* ProfileBuffer::processDynamicTag(int readPos,
int* tagsConsumed, char* tagBuff)
{
int readAheadPos = (readPos + 1) % mEntrySize;
@ -266,7 +190,7 @@ char* ThreadProfile::processDynamicTag(int readPos,
// Read the string stored in mTagData until the null character is seen
bool seenNullByte = false;
while (readAheadPos != mLastFlushPos && !seenNullByte) {
while (readAheadPos != mWritePos && !seenNullByte) {
(*tagsConsumed)++;
ProfileEntry readAheadEntry = mEntries[readAheadPos];
for (size_t pos = 0; pos < sizeof(void*); pos++) {
@ -283,16 +207,25 @@ char* ThreadProfile::processDynamicTag(int readPos,
return tagBuff;
}
void ThreadProfile::IterateTags(IterateTagsCallback aCallback)
void ProfileBuffer::IterateTagsForThread(IterateTagsCallback aCallback, int aThreadId)
{
MOZ_ASSERT(aCallback);
int readPos = mReadPos;
while (readPos != mLastFlushPos) {
// Number of tag consumed
int incBy = 1;
int currentThreadID = -1;
while (readPos != mWritePos) {
const ProfileEntry& entry = mEntries[readPos];
if (entry.mTagName == 'T') {
currentThreadID = entry.mTagInt;
readPos = (readPos + 1) % mEntrySize;
continue;
}
// Number of tags consumed
int incBy = 1;
// Read ahead to the next tag, if it's a 'd' tag process it now
const char* tagStringData = entry.mTagData;
int readAheadPos = (readPos + 1) % mEntrySize;
@ -300,47 +233,31 @@ void ThreadProfile::IterateTags(IterateTagsCallback aCallback)
// Make sure the string is always null terminated if it fills up DYNAMIC_MAX_STRING-2
tagBuff[DYNAMIC_MAX_STRING-1] = '\0';
if (readAheadPos != mLastFlushPos && mEntries[readAheadPos].mTagName == 'd') {
if (readAheadPos != mWritePos && mEntries[readAheadPos].mTagName == 'd') {
tagStringData = processDynamicTag(readPos, &incBy, tagBuff);
}
if (currentThreadID == aThreadId) {
aCallback(entry, tagStringData);
}
readPos = (readPos + incBy) % mEntrySize;
}
}
void ThreadProfile::ToStreamAsJSON(std::ostream& stream)
void ProfileBuffer::StreamSamplesToJSObject(JSStreamWriter& b, int aThreadId)
{
JSStreamWriter b(stream);
StreamJSObject(b);
}
void ThreadProfile::StreamJSObject(JSStreamWriter& b)
{
b.BeginObject();
// Thread meta data
if (XRE_GetProcessType() == GeckoProcessType_Plugin) {
// TODO Add the proper plugin name
b.NameValue("name", "Plugin");
} else if (XRE_GetProcessType() == GeckoProcessType_Content) {
// This isn't going to really help once we have multiple content
// processes, but it'll do for now.
b.NameValue("name", "Content");
} else {
b.NameValue("name", Name());
}
b.NameValue("tid", static_cast<int>(mThreadId));
b.Name("samples");
b.BeginArray();
bool sample = false;
int readPos = mReadPos;
while (readPos != mLastFlushPos) {
// Number of tag consumed
int currentThreadID = -1;
while (readPos != mWritePos) {
ProfileEntry entry = mEntries[readPos];
if (entry.mTagName == 'T') {
currentThreadID = entry.mTagInt;
}
if (currentThreadID == aThreadId) {
switch (entry.mTagName) {
case 'r':
{
@ -407,7 +324,7 @@ void ThreadProfile::StreamJSObject(JSStreamWriter& b)
int framePos = (readPos + 1) % mEntrySize;
ProfileEntry frame = mEntries[framePos];
while (framePos != mLastFlushPos && frame.mTagName != 's') {
while (framePos != mWritePos && frame.mTagName != 's' && frame.mTagName != 'T') {
int incBy = 1;
frame = mEntries[framePos];
@ -419,7 +336,7 @@ void ThreadProfile::StreamJSObject(JSStreamWriter& b)
// DYNAMIC_MAX_STRING-2
tagBuff[DYNAMIC_MAX_STRING-1] = '\0';
if (readAheadPos != mLastFlushPos && mEntries[readAheadPos].mTagName == 'd') {
if (readAheadPos != mWritePos && mEntries[readAheadPos].mTagName == 'd') {
tagStringData = processDynamicTag(framePos, &incBy, tagBuff);
}
@ -440,13 +357,13 @@ void ThreadProfile::StreamJSObject(JSStreamWriter& b)
b.BeginObject();
b.NameValue("location", tagStringData);
readAheadPos = (framePos + incBy) % mEntrySize;
if (readAheadPos != mLastFlushPos &&
if (readAheadPos != mWritePos &&
mEntries[readAheadPos].mTagName == 'n') {
b.NameValue("line", mEntries[readAheadPos].mTagInt);
incBy++;
}
readAheadPos = (framePos + incBy) % mEntrySize;
if (readAheadPos != mLastFlushPos &&
if (readAheadPos != mWritePos &&
mEntries[readAheadPos].mTagName == 'y') {
b.NameValue("category", mEntries[readAheadPos].mTagInt);
incBy++;
@ -459,24 +376,176 @@ void ThreadProfile::StreamJSObject(JSStreamWriter& b)
}
break;
}
}
readPos = (readPos + 1) % mEntrySize;
}
if (sample) {
b.EndObject();
}
b.EndArray();
}
b.Name("markers");
void ProfileBuffer::StreamMarkersToJSObject(JSStreamWriter& b, int aThreadId)
{
b.BeginArray();
readPos = mReadPos;
while (readPos != mLastFlushPos) {
int readPos = mReadPos;
int currentThreadID = -1;
while (readPos != mWritePos) {
ProfileEntry entry = mEntries[readPos];
if (entry.mTagName == 'm') {
if (entry.mTagName == 'T') {
currentThreadID = entry.mTagInt;
} else if (currentThreadID == aThreadId && entry.mTagName == 'm') {
entry.getMarker()->StreamJSObject(b);
}
readPos = (readPos + 1) % mEntrySize;
}
b.EndArray();
}
// Scan the buffer backwards (newest entry first) for the most recent 'T'
// (thread id) entry belonging to aThreadId — that entry marks the start of
// the thread's last sample.
// Returns the entry index, or -1 if the thread has no sample in the buffer.
int ProfileBuffer::FindLastSampleOfThread(int aThreadId)
{
// We search backwards from mWritePos-1 to mReadPos.
// Adding mEntrySize makes the result of the modulus positive.
for (int readPos = (mWritePos + mEntrySize - 1) % mEntrySize;
readPos != (mReadPos + mEntrySize - 1) % mEntrySize;
readPos = (readPos + mEntrySize - 1) % mEntrySize) {
ProfileEntry entry = mEntries[readPos];
if (entry.mTagName == 'T' && entry.mTagInt == aThreadId) {
return readPos;
}
}
return -1;
}
// Re-emit the last sample recorded for aThreadId, with a fresh timestamp
// and without its markers. No-op if the thread has no sample in the buffer.
// NOTE(review): addTag() advances mWritePos while this loop reads entries;
// if the buffer is nearly full the copy could overwrite the very sample
// being duplicated — confirm capacity assumptions.
void ProfileBuffer::DuplicateLastSample(int aThreadId)
{
int lastSampleStartPos = FindLastSampleOfThread(aThreadId);
if (lastSampleStartPos == -1) {
return;
}
MOZ_ASSERT(mEntries[lastSampleStartPos].mTagName == 'T');
// Re-emit the 'T' (thread id) entry that starts the sample.
addTag(mEntries[lastSampleStartPos]);
// Go through the whole entry and duplicate it, until we find the next one.
for (int readPos = (lastSampleStartPos + 1) % mEntrySize;
readPos != mWritePos;
readPos = (readPos + 1) % mEntrySize) {
switch (mEntries[readPos].mTagName) {
case 'T':
// We're done.
return;
case 't':
// Copy with new time
addTag(ProfileEntry('t', static_cast<float>((mozilla::TimeStamp::Now() - sStartTime).ToMilliseconds())));
break;
case 'm':
// Don't copy markers
break;
// Copy anything else we don't know about
// L, B, S, c, s, d, l, f, h, r, t, p
default:
addTag(mEntries[readPos]);
break;
}
}
}
// Stream every entry belonging to aThreadId to |stream| in buffer order.
// 'T' (thread id) entries are consumed only to track the current thread
// and are not themselves written out.
std::ostream&
ProfileBuffer::StreamToOStream(std::ostream& stream, int aThreadId) const
{
int readPos = mReadPos;
int currentThreadID = -1;
while (readPos != mWritePos) {
ProfileEntry entry = mEntries[readPos];
if (entry.mTagName == 'T') {
currentThreadID = entry.mTagInt;
} else if (currentThreadID == aThreadId) {
stream << mEntries[readPos];
}
readPos = (readPos + 1) % mEntrySize;
}
return stream;
}
// END ProfileBuffer
////////////////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////////////////
// BEGIN ThreadProfile
// A ThreadProfile no longer owns its own entry storage; it records into
// the shared, refcounted ProfileBuffer passed in (one buffer for all
// profiled threads).
ThreadProfile::ThreadProfile(ThreadInfo* aInfo, ProfileBuffer* aBuffer)
: mThreadInfo(aInfo)
, mBuffer(aBuffer)
, mPseudoStack(aInfo->Stack())
, mMutex("ThreadProfile::mMutex")
// Narrowing cast: buffer entries tag thread ids as int ('T' tag), so the
// native id must fit in an int — asserted below.
, mThreadId(int(aInfo->ThreadId()))
, mIsMainThread(aInfo->IsMainThread())
, mPlatformData(aInfo->GetPlatformData())
, mStackTop(aInfo->StackTop())
, mRespInfo(this)
#ifdef XP_LINUX
, mRssMemory(0)
, mUssMemory(0)
#endif
{
MOZ_COUNT_CTOR(ThreadProfile);
MOZ_ASSERT(aBuffer);
// I don't know if we can assert this. But we should warn.
MOZ_ASSERT(aInfo->ThreadId() >= 0, "native thread ID is < 0");
MOZ_ASSERT(aInfo->ThreadId() <= INT32_MAX, "native thread ID is > INT32_MAX");
}
// mBuffer is an nsRefPtr and drops its reference automatically; the shared
// buffer may outlive this profile.
ThreadProfile::~ThreadProfile()
{
MOZ_COUNT_DTOR(ThreadProfile);
}
// Forward an entry into the shared buffer.
void ThreadProfile::addTag(const ProfileEntry& aTag)
{
mBuffer->addTag(aTag);
}
// Hand a marker to the shared buffer for lifetime management; it is freed
// once its generation expires.
void ThreadProfile::addStoredMarker(ProfilerMarker *aStoredMarker) {
mBuffer->addStoredMarker(aStoredMarker);
}
// Iterate only the entries in the shared buffer belonging to this thread.
void ThreadProfile::IterateTags(IterateTagsCallback aCallback)
{
mBuffer->IterateTagsForThread(aCallback, mThreadId);
}
// Serialize this thread's profile as JSON to |stream|.
void ThreadProfile::ToStreamAsJSON(std::ostream& stream)
{
JSStreamWriter b(stream);
StreamJSObject(b);
}
// Write this thread's JSON object: metadata (name, tid) plus its samples
// and markers, filtered out of the shared buffer by thread id.
void ThreadProfile::StreamJSObject(JSStreamWriter& b)
{
b.BeginObject();
// Thread meta data
if (XRE_GetProcessType() == GeckoProcessType_Plugin) {
// TODO Add the proper plugin name
b.NameValue("name", "Plugin");
} else if (XRE_GetProcessType() == GeckoProcessType_Content) {
// This isn't going to really help once we have multiple content
// processes, but it'll do for now.
b.NameValue("name", "Content");
} else {
b.NameValue("name", Name());
}
b.NameValue("tid", static_cast<int>(mThreadId));
b.Name("samples");
mBuffer->StreamSamplesToJSObject(b, mThreadId);
b.Name("markers");
mBuffer->StreamMarkersToJSObject(b, mThreadId);
b.EndObject();
}
@ -516,46 +585,14 @@ mozilla::Mutex* ThreadProfile::GetMutex()
return &mMutex;
}
void ThreadProfile::DuplicateLastSample() {
// Scan the whole buffer (even unflushed parts)
// Adding mEntrySize makes the result of the modulus positive
// We search backwards from mWritePos-1 to mReadPos
for (int readPos = (mWritePos + mEntrySize - 1) % mEntrySize;
readPos != (mReadPos + mEntrySize - 1) % mEntrySize;
readPos = (readPos + mEntrySize - 1) % mEntrySize) {
if (mEntries[readPos].mTagName == 's') {
// Found the start of the last entry at position readPos
int copyEndIdx = mWritePos;
// Go through the whole entry and duplicate it
for (;readPos != copyEndIdx; readPos = (readPos + 1) % mEntrySize) {
switch (mEntries[readPos].mTagName) {
// Copy with new time
case 't':
addTag(ProfileEntry('t', static_cast<float>((mozilla::TimeStamp::Now() - sStartTime).ToMilliseconds())));
break;
// Don't copy markers
case 'm':
break;
// Copy anything else we don't know about
// L, B, S, c, s, d, l, f, h, r, t, p
default:
addTag(mEntries[readPos]);
break;
}
}
break;
}
}
// Ask the shared buffer to re-emit this thread's last sample.
void ThreadProfile::DuplicateLastSample()
{
mBuffer->DuplicateLastSample(mThreadId);
}
std::ostream& operator<<(std::ostream& stream, const ThreadProfile& profile)
{
int readPos = profile.mReadPos;
while (readPos != profile.mLastFlushPos) {
stream << profile.mEntries[readPos];
readPos = (readPos + 1) % profile.mEntrySize;
}
return stream;
return profile.mBuffer->StreamToOStream(stream, profile.mThreadId);
}
// END ThreadProfile

View File

@ -12,8 +12,10 @@
#include "platform.h"
#include "JSStreamWriter.h"
#include "ProfilerBacktrace.h"
#include "nsRefPtr.h"
#include "mozilla/Mutex.h"
#include "gtest/MozGtestFriend.h"
#include "mozilla/UniquePtr.h"
class ThreadProfile;
@ -52,7 +54,7 @@ private:
FRIEND_TEST(ThreadProfile, InsertTagsNoWrap);
FRIEND_TEST(ThreadProfile, InsertTagsWrap);
FRIEND_TEST(ThreadProfile, MemoryMeasure);
friend class ThreadProfile;
friend class ProfileBuffer;
union {
const char* mTagData;
char mTagChars[sizeof(void*)];
@ -71,15 +73,64 @@ private:
typedef void (*IterateTagsCallback)(const ProfileEntry& entry, const char* tagStringData);
// A single circular buffer of ProfileEntries shared by all profiled
// threads. Entries are attributed to a thread via 'T' (thread id) entries;
// readers filter by thread id. Refcounted (NS_INLINE_DECL_REFCOUNTING),
// so the destructor is protected and instances are heap-allocated.
class ProfileBuffer {
public:
NS_INLINE_DECL_REFCOUNTING(ProfileBuffer)
explicit ProfileBuffer(int aEntrySize);
// Append an entry at mWritePos. Called from the signal handler; must stay
// reentrant.
void addTag(const ProfileEntry& aTag);
// Walk the entries belonging to aThreadId, invoking aCallback per tag.
void IterateTagsForThread(IterateTagsCallback aCallback, int aThreadId);
void StreamSamplesToJSObject(JSStreamWriter& b, int aThreadId);
void StreamMarkersToJSObject(JSStreamWriter& b, int aThreadId);
// Re-emit the last sample of aThreadId with an updated timestamp.
void DuplicateLastSample(int aThreadId);
// Take ownership of a marker; freed by deleteExpiredStoredMarkers() once
// its generation has expired.
void addStoredMarker(ProfilerMarker* aStoredMarker);
void deleteExpiredStoredMarkers();
std::ostream& StreamToOStream(std::ostream& stream, int aThreadId) const;
protected:
char* processDynamicTag(int readPos, int* tagsConsumed, char* tagBuff);
int FindLastSampleOfThread(int aThreadId);
// Protected: lifetime is managed exclusively by the refcount.
~ProfileBuffer() {}
public:
// Circular buffer 'Keep One Slot Open' implementation for simplicity
mozilla::UniquePtr<ProfileEntry[]> mEntries;
// Points to the next entry we will write to, which is also the one at which
// we need to stop reading.
int mWritePos;
// Points to the entry at which we can start reading.
int mReadPos;
// The number of entries in our buffer.
int mEntrySize;
// How many times mWritePos has wrapped around.
int mGeneration;
// Markers that marker entries in the buffer might refer to.
ProfilerMarkerLinkedList mStoredMarkers;
};
class ThreadProfile
{
public:
ThreadProfile(ThreadInfo* aThreadInfo, int aEntrySize);
ThreadProfile(ThreadInfo* aThreadInfo, ProfileBuffer* aBuffer);
virtual ~ThreadProfile();
void addTag(ProfileEntry aTag);
void flush();
void erase();
char* processDynamicTag(int readPos, int* tagsConsumed, char* tagBuff);
void addTag(const ProfileEntry& aTag);
/**
* Track a marker which has been inserted into the ThreadProfile.
* This marker can safely be deleted once the generation has
* expired.
*/
void addStoredMarker(ProfilerMarker *aStoredMarker);
void IterateTags(IterateTagsCallback aCallback);
friend std::ostream& operator<<(std::ostream& stream,
const ThreadProfile& profile);
@ -94,13 +145,9 @@ public:
bool IsMainThread() const { return mIsMainThread; }
const char* Name() const { return mThreadInfo->Name(); }
Thread::tid_t ThreadId() const { return mThreadId; }
int ThreadId() const { return mThreadId; }
PlatformData* GetPlatformData() const { return mPlatformData; }
int GetGenerationID() const { return mGeneration; }
bool HasGenerationExpired(int aGenID) const {
return aGenID + 2 <= mGeneration;
}
void* GetStackTop() const { return mStackTop; }
void DuplicateLastSample();
@ -118,20 +165,14 @@ private:
FRIEND_TEST(ThreadProfile, InsertTagsWrap);
FRIEND_TEST(ThreadProfile, MemoryMeasure);
ThreadInfo* mThreadInfo;
// Circular buffer 'Keep One Slot Open' implementation
// for simplicity
ProfileEntry* mEntries;
int mWritePos; // points to the next entry we will write to
int mLastFlushPos; // points to the next entry since the last flush()
int mReadPos; // points to the next entry we will read to
int mEntrySize;
const nsRefPtr<ProfileBuffer> mBuffer;
PseudoStack* mPseudoStack;
mozilla::Mutex mMutex;
Thread::tid_t mThreadId;
int mThreadId;
bool mIsMainThread;
PlatformData* mPlatformData; // Platform specific data.
int mGeneration;
int mPendingGenerationFlush;
void* const mStackTop;
ThreadResponsiveness mRespInfo;

View File

@ -176,89 +176,61 @@ private:
typedef ProfilerLinkedList<ProfilerMarker> ProfilerMarkerLinkedList;
typedef ProfilerLinkedList<LinkedUWTBuffer> UWTBufferLinkedList;
class PendingMarkers {
template<typename T>
class ProfilerSignalSafeLinkedList {
public:
PendingMarkers()
ProfilerSignalSafeLinkedList()
: mSignalLock(false)
{}
~PendingMarkers();
void addMarker(ProfilerMarker *aMarker);
void updateGeneration(int aGenID);
/**
* Track a marker which has been inserted into the ThreadProfile.
* This marker can safely be deleted once the generation has
* expired.
*/
void addStoredMarker(ProfilerMarker *aStoredMarker);
// called within signal. Function must be reentrant
ProfilerMarkerLinkedList* getPendingMarkers()
~ProfilerSignalSafeLinkedList()
{
// if mSignalLock then the stack is inconsistent because it's being
// modified by the profiled thread. Postpone these markers
// for the next sample. The odds of a livelock are nearly impossible
// and would show up in a profile as many samples in 'addMarker' thus
// we ignore this scenario.
if (mSignalLock) {
return nullptr;
}
return &mPendingMarkers;
// Some thread is modifying the list. We should only be released on that
// thread.
abort();
}
void clearMarkers()
{
while (mPendingMarkers.peek()) {
delete mPendingMarkers.popHead();
}
while (mStoredMarkers.peek()) {
delete mStoredMarkers.popHead();
while (mList.peek()) {
delete mList.popHead();
}
}
private:
// Keep a list of active markers to be applied to the next sample taken
ProfilerMarkerLinkedList mPendingMarkers;
ProfilerMarkerLinkedList mStoredMarkers;
// If this is set then it's not safe to read mStackPointer from the signal handler
volatile bool mSignalLock;
// We don't want to modify _markers from within the signal so we allow
// it to queue a clear operation.
volatile mozilla::sig_safe_t mGenID;
};
// Insert an item into the list.
// Must only be called from the owning thread.
// Must not be called while the list from accessList() is being accessed.
// In the profiler, we ensure that by interrupting the profiled thread
// (which is the one that owns this list and calls insert() on it) until
// we're done reading the list from the signal handler.
void insert(T* aElement) {
MOZ_ASSERT(aElement);
class PendingUWTBuffers
{
public:
PendingUWTBuffers()
: mSignalLock(false)
{
}
void addLinkedUWTBuffer(LinkedUWTBuffer* aBuff)
{
MOZ_ASSERT(aBuff);
mSignalLock = true;
STORE_SEQUENCER();
mPendingUWTBuffers.insert(aBuff);
mList.insert(aElement);
STORE_SEQUENCER();
mSignalLock = false;
}
// called within signal. Function must be reentrant
UWTBufferLinkedList* getLinkedUWTBuffers()
// Called within signal, from any thread, possibly while insert() is in the
// middle of modifying the list (on the owning thread). Will return null if
// that is the case.
// Function must be reentrant.
ProfilerLinkedList<T>* accessList()
{
if (mSignalLock) {
return nullptr;
}
return &mPendingUWTBuffers;
return &mList;
}
private:
UWTBufferLinkedList mPendingUWTBuffers;
ProfilerLinkedList<T> mList;
// If this is set, then it's not safe to read the list because its contents
// are being changed.
volatile bool mSignalLock;
};
@ -285,32 +257,27 @@ public:
void addLinkedUWTBuffer(LinkedUWTBuffer* aBuff)
{
mPendingUWTBuffers.addLinkedUWTBuffer(aBuff);
mPendingUWTBuffers.insert(aBuff);
}
UWTBufferLinkedList* getLinkedUWTBuffers()
{
return mPendingUWTBuffers.getLinkedUWTBuffers();
return mPendingUWTBuffers.accessList();
}
void addMarker(const char *aMarkerStr, ProfilerMarkerPayload *aPayload, float aTime)
{
ProfilerMarker* marker = new ProfilerMarker(aMarkerStr, aPayload, aTime);
mPendingMarkers.addMarker(marker);
}
void addStoredMarker(ProfilerMarker *aStoredMarker) {
mPendingMarkers.addStoredMarker(aStoredMarker);
}
void updateGeneration(int aGenID) {
mPendingMarkers.updateGeneration(aGenID);
mPendingMarkers.insert(marker);
}
// called within signal. Function must be reentrant
ProfilerMarkerLinkedList* getPendingMarkers()
{
return mPendingMarkers.getPendingMarkers();
// The profiled thread is interrupted, so we can access the list safely.
// Unless the profiled thread was in the middle of changing the list when
// we interrupted it - in that case, accessList() will return null.
return mPendingMarkers.accessList();
}
void push(const char *aName, js::ProfileEntry::Category aCategory, uint32_t line)
@ -450,9 +417,9 @@ public:
// Keep a list of pending markers that must be moved
// to the circular buffer
PendingMarkers mPendingMarkers;
ProfilerSignalSafeLinkedList<ProfilerMarker> mPendingMarkers;
// List of LinkedUWTBuffers that must be processed on the next tick
PendingUWTBuffers mPendingUWTBuffers;
ProfilerSignalSafeLinkedList<LinkedUWTBuffer> mPendingUWTBuffers;
// This may exceed the length of mStack, so instead use the stackSize() method
// to determine the number of valid samples in mStack
mozilla::sig_safe_t mStackPointer;

View File

@ -8,7 +8,7 @@
#include "UnwinderThread2.h"
SyncProfile::SyncProfile(ThreadInfo* aInfo, int aEntrySize)
: ThreadProfile(aInfo, aEntrySize)
: ThreadProfile(aInfo, new ProfileBuffer(aEntrySize))
, mOwnerState(REFERENCED)
, mUtb(nullptr)
{
@ -57,7 +57,6 @@ SyncProfile::EndUnwind()
utb__end_sync_buffer_unwind(mUtb);
}
if (mOwnerState != ORPHANED) {
flush();
mOwnerState = OWNED;
}
// Save mOwnerState before we release the mutex

View File

@ -107,6 +107,10 @@ void TableTicker::HandleSaveRequest()
NS_DispatchToMainThread(runnable);
}
// Free markers in the shared buffer whose samples have been overwritten.
void TableTicker::DeleteExpiredMarkers()
{
mBuffer->deleteExpiredStoredMarkers();
}
void TableTicker::StreamTaskTracer(JSStreamWriter& b)
{
@ -153,7 +157,6 @@ void TableTicker::StreamMetaJSCustomObject(JSStreamWriter& b)
b.NameValue("version", 2);
b.NameValue("interval", interval());
b.NameValue("stackwalk", mUseStackWalk);
b.NameValue("jank", mJankOnly);
b.NameValue("processType", XRE_GetProcessType());
mozilla::TimeDuration delta = mozilla::TimeStamp::Now() - sStartTime;
@ -753,52 +756,9 @@ void TableTicker::InplaceTick(TickSample* sample)
{
ThreadProfile& currThreadProfile = *sample->threadProfile;
currThreadProfile.addTag(ProfileEntry('T', currThreadProfile.ThreadId()));
PseudoStack* stack = currThreadProfile.GetPseudoStack();
stack->updateGeneration(currThreadProfile.GetGenerationID());
bool recordSample = true;
#if defined(XP_WIN)
bool powerSample = false;
#endif
/* Don't process the PseudoStack's markers or honour jankOnly if we're
immediately sampling the current thread. */
if (!sample->isSamplingCurrentThread) {
// Marker(s) come before the sample
ProfilerMarkerLinkedList* pendingMarkersList = stack->getPendingMarkers();
while (pendingMarkersList && pendingMarkersList->peek()) {
ProfilerMarker* marker = pendingMarkersList->popHead();
stack->addStoredMarker(marker);
currThreadProfile.addTag(ProfileEntry('m', marker));
}
#if defined(XP_WIN)
if (mProfilePower) {
mIntelPowerGadget->TakeSample();
powerSample = true;
}
#endif
if (mJankOnly) {
// if we are on a different event we can discard any temporary samples
// we've kept around
if (sLastSampledEventGeneration != sCurrentEventGeneration) {
// XXX: we also probably want to add an entry to the profile to help
// distinguish which samples are part of the same event. That, or record
// the event generation in each sample
currThreadProfile.erase();
}
sLastSampledEventGeneration = sCurrentEventGeneration;
recordSample = false;
// only record the events when we haven't seen a tracer event for 100ms
if (!sLastTracerEvent.IsNull()) {
mozilla::TimeDuration delta = sample->timestamp - sLastTracerEvent;
if (delta.ToMilliseconds() > 100.0) {
recordSample = true;
}
}
}
}
#if defined(USE_NS_STACKWALK) || defined(USE_EHABI_STACKWALK)
if (mUseStackWalk) {
@ -810,8 +770,16 @@ void TableTicker::InplaceTick(TickSample* sample)
doSampleStackTrace(currThreadProfile, sample, mAddLeafAddresses);
#endif
if (recordSample)
currThreadProfile.flush();
// Don't process the PseudoStack's markers if we're
// synchronously sampling the current thread.
if (!sample->isSamplingCurrentThread) {
ProfilerMarkerLinkedList* pendingMarkersList = stack->getPendingMarkers();
while (pendingMarkersList && pendingMarkersList->peek()) {
ProfilerMarker* marker = pendingMarkersList->popHead();
currThreadProfile.addStoredMarker(marker);
currThreadProfile.addTag(ProfileEntry('m', marker));
}
}
if (sample && currThreadProfile.GetThreadResponsiveness()->HasData()) {
mozilla::TimeDuration delta = currThreadProfile.GetThreadResponsiveness()->GetUnresponsiveDuration(sample->timestamp);
@ -834,7 +802,8 @@ void TableTicker::InplaceTick(TickSample* sample)
}
#if defined(XP_WIN)
if (powerSample) {
if (mProfilePower) {
mIntelPowerGadget->TakeSample();
currThreadProfile.addTag(ProfileEntry('p', static_cast<float>(mIntelPowerGadget->GetTotalPackagePowerInWatts())));
}
#endif

View File

@ -42,8 +42,6 @@ threadSelected(ThreadInfo* aInfo, char** aThreadNameFilters, uint32_t aFeatureCo
extern mozilla::TimeStamp sLastTracerEvent;
extern int sFrameNumber;
extern int sLastFrameNumber;
extern unsigned int sCurrentEventGeneration;
extern unsigned int sLastSampledEventGeneration;
class BreakpadSampler;
@ -54,6 +52,7 @@ class TableTicker: public Sampler {
const char** aThreadNameFilters, uint32_t aFilterCount)
: Sampler(aInterval, true, aEntrySize)
, mPrimaryThreadProfile(nullptr)
, mBuffer(new ProfileBuffer(aEntrySize))
, mSaveRequested(false)
, mUnwinderThread(false)
, mFilterCount(aFilterCount)
@ -63,8 +62,6 @@ class TableTicker: public Sampler {
{
mUseStackWalk = hasFeature(aFeatures, aFeatureCount, "stackwalk");
//XXX: It's probably worth splitting the jank profiler out from the regular profiler at some point
mJankOnly = hasFeature(aFeatures, aFeatureCount, "jank");
mProfileJS = hasFeature(aFeatures, aFeatureCount, "js");
mProfileJava = hasFeature(aFeatures, aFeatureCount, "java");
mProfileGPU = hasFeature(aFeatures, aFeatureCount, "gpu");
@ -156,7 +153,7 @@ class TableTicker: public Sampler {
return;
}
ThreadProfile* profile = new ThreadProfile(aInfo, EntrySize());
ThreadProfile* profile = new ThreadProfile(aInfo, mBuffer);
aInfo->SetProfile(profile);
}
@ -178,6 +175,7 @@ class TableTicker: public Sampler {
}
virtual void HandleSaveRequest();
virtual void DeleteExpiredMarkers() MOZ_OVERRIDE;
ThreadProfile* GetPrimaryThreadProfile()
{
@ -227,10 +225,10 @@ protected:
// This represent the application's main thread (SAMPLER_INIT)
ThreadProfile* mPrimaryThreadProfile;
nsRefPtr<ProfileBuffer> mBuffer;
bool mSaveRequested;
bool mAddLeafAddresses;
bool mUseStackWalk;
bool mJankOnly;
bool mProfileJS;
bool mProfileGPU;
bool mProfileThreads;

View File

@ -1186,7 +1186,7 @@ static void process_buffer(UnwinderThreadBuffer* buff, int oldest_ix)
for (k = 0; k < buff->entsUsed; k++) {
ProfileEntry ent = utb_get_profent(buff, k);
// action flush-hints
if (ent.is_ent_hint('F')) { buff->aProfile->flush(); continue; }
if (ent.is_ent_hint('F')) { continue; }
// skip ones we can't copy
if (ent.is_ent_hint() || ent.is_ent('S')) { continue; }
// handle GetBacktrace()
@ -1223,7 +1223,7 @@ static void process_buffer(UnwinderThreadBuffer* buff, int oldest_ix)
continue;
}
// action flush-hints
if (ent.is_ent_hint('F')) { buff->aProfile->flush(); continue; }
if (ent.is_ent_hint('F')) { continue; }
// skip ones we can't copy
if (ent.is_ent_hint() || ent.is_ent('S')) { continue; }
// handle GetBacktrace()
@ -1249,7 +1249,7 @@ static void process_buffer(UnwinderThreadBuffer* buff, int oldest_ix)
buff->aProfile->addTag( ProfileEntry('s', "(root)") );
}
// action flush-hints
if (ent.is_ent_hint('F')) { buff->aProfile->flush(); continue; }
if (ent.is_ent_hint('F')) { continue; }
// skip ones we can't copy
if (ent.is_ent_hint() || ent.is_ent('S')) { continue; }
// handle GetBacktrace()
@ -1281,7 +1281,7 @@ static void process_buffer(UnwinderThreadBuffer* buff, int oldest_ix)
for (k = 0; k < ix_first_hP; k++) {
ProfileEntry ent = utb_get_profent(buff, k);
// action flush-hints
if (ent.is_ent_hint('F')) { buff->aProfile->flush(); continue; }
if (ent.is_ent_hint('F')) { continue; }
// skip ones we can't copy
if (ent.is_ent_hint() || ent.is_ent('S')) { continue; }
// handle GetBacktrace()
@ -1400,7 +1400,7 @@ static void process_buffer(UnwinderThreadBuffer* buff, int oldest_ix)
for (k = ix_last_hQ+1; k < buff->entsUsed; k++) {
ProfileEntry ent = utb_get_profent(buff, k);
// action flush-hints
if (ent.is_ent_hint('F')) { buff->aProfile->flush(); continue; }
if (ent.is_ent_hint('F')) { continue; }
// skip ones we can't copy
if (ent.is_ent_hint() || ent.is_ent('S')) { continue; }
// and copy everything else
@ -1418,11 +1418,7 @@ static void process_buffer(UnwinderThreadBuffer* buff, int oldest_ix)
for (k = 0; k < buff->entsUsed; k++) {
ProfileEntry ent = utb_get_profent(buff, k);
if (show) ent.log();
if (ent.is_ent_hint('F')) {
/* This is a flush-hint */
buff->aProfile->flush();
}
else if (ent.is_ent_hint('N')) {
if (ent.is_ent_hint('N')) {
/* This is a do-a-native-unwind-right-now hint */
MOZ_ASSERT(buff->haveNativeInfo);
PCandSP* pairs = nullptr;

View File

@ -294,6 +294,7 @@ static void* SignalSender(void* arg) {
while (SamplerRegistry::sampler->IsActive()) {
SamplerRegistry::sampler->HandleSaveRequest();
SamplerRegistry::sampler->DeleteExpiredMarkers();
if (!SamplerRegistry::sampler->IsPaused()) {
mozilla::MutexAutoLock lock(*Sampler::sRegisteredThreadsMutex);
@ -311,8 +312,6 @@ static void* SignalSender(void* arg) {
PseudoStack::SleepState sleeping = info->Stack()->observeSleeping();
if (sleeping == PseudoStack::SLEEPING_AGAIN) {
info->Profile()->DuplicateLastSample();
//XXX: This causes flushes regardless of jank-only mode
info->Profile()->flush();
continue;
}

View File

@ -203,6 +203,7 @@ class SamplerThread : public Thread {
// Implement Thread::Run().
virtual void Run() {
while (SamplerRegistry::sampler->IsActive()) {
SamplerRegistry::sampler->DeleteExpiredMarkers();
if (!SamplerRegistry::sampler->IsPaused()) {
mozilla::MutexAutoLock lock(*Sampler::sRegisteredThreadsMutex);
std::vector<ThreadInfo*> threads =
@ -218,8 +219,6 @@ class SamplerThread : public Thread {
PseudoStack::SleepState sleeping = info->Stack()->observeSleeping();
if (sleeping == PseudoStack::SLEEPING_AGAIN) {
info->Profile()->DuplicateLastSample();
//XXX: This causes flushes regardless of jank-only mode
info->Profile()->flush();
continue;
}

View File

@ -120,6 +120,8 @@ class SamplerThread : public Thread {
::timeBeginPeriod(interval_);
while (sampler_->IsActive()) {
sampler_->DeleteExpiredMarkers();
if (!sampler_->IsPaused()) {
mozilla::MutexAutoLock lock(*Sampler::sRegisteredThreadsMutex);
std::vector<ThreadInfo*> threads =
@ -135,8 +137,6 @@ class SamplerThread : public Thread {
PseudoStack::SleepState sleeping = info->Stack()->observeSleeping();
if (sleeping == PseudoStack::SLEEPING_AGAIN) {
info->Profile()->DuplicateLastSample();
//XXX: This causes flushes regardless of jank-only mode
info->Profile()->flush();
continue;
}

View File

@ -56,13 +56,6 @@ const char* PROFILER_ENTRIES = "MOZ_PROFILER_ENTRIES";
const char* PROFILER_STACK = "MOZ_PROFILER_STACK_SCAN";
const char* PROFILER_FEATURES = "MOZ_PROFILING_FEATURES";
/* used to keep track of the last event that we sampled during */
unsigned int sLastSampledEventGeneration = 0;
/* a counter that's incremented everytime we get responsiveness event
* note: it might also be worth trackplaing everytime we go around
* the event loop */
unsigned int sCurrentEventGeneration = 0;
/* we don't need to worry about overflow because we only treat the
* case of them being the same as special. i.e. we only run into
* a problem if 2^32 events happen between samples that we need
@ -203,45 +196,6 @@ void ProfilerMarker::StreamJSObject(JSStreamWriter& b) const {
b.EndObject();
}
PendingMarkers::~PendingMarkers() {
clearMarkers();
if (mSignalLock != false) {
// We're releasing the pseudostack while it's still in use.
// The label macros keep a non ref counted reference to the
// stack to avoid a TLS. If these are not all cleared we will
// get a use-after-free so better to crash now.
abort();
}
}
void
PendingMarkers::addMarker(ProfilerMarker *aMarker) {
mSignalLock = true;
STORE_SEQUENCER();
MOZ_ASSERT(aMarker);
mPendingMarkers.insert(aMarker);
// Clear markers that have been overwritten
while (mStoredMarkers.peek() &&
mStoredMarkers.peek()->HasExpired(mGenID)) {
delete mStoredMarkers.popHead();
}
STORE_SEQUENCER();
mSignalLock = false;
}
void
PendingMarkers::updateGeneration(int aGenID) {
mGenID = aGenID;
}
void
PendingMarkers::addStoredMarker(ProfilerMarker *aStoredMarker) {
aStoredMarker->SetGeneration(mGenID);
mStoredMarkers.insert(aStoredMarker);
}
bool sps_version2()
{
static int version = 0; // Raced on, potentially
@ -939,8 +893,6 @@ bool mozilla_sampler_is_active()
void mozilla_sampler_responsiveness(const mozilla::TimeStamp& aTime)
{
sCurrentEventGeneration++;
sLastTracerEvent = aTime;
}

View File

@ -309,6 +309,8 @@ class Sampler {
virtual void RequestSave() = 0;
// Process any outstanding request outside a signal handler.
virtual void HandleSaveRequest() = 0;
// Delete markers which are no longer part of the profile due to buffer wraparound.
virtual void DeleteExpiredMarkers() = 0;
// Start and stop sampler.
void Start();

View File

@ -12,7 +12,8 @@ TEST(ThreadProfile, Initialization) {
PseudoStack* stack = PseudoStack::create();
Thread::tid_t tid = 1000;
ThreadInfo info("testThread", tid, true, stack, nullptr);
ThreadProfile tp(&info, 10);
nsRefPtr<ProfileBuffer> pb = new ProfileBuffer(10);
ThreadProfile tp(&info, pb);
}
// Make sure we can record one tag and read it
@ -20,11 +21,11 @@ TEST(ThreadProfile, InsertOneTag) {
PseudoStack* stack = PseudoStack::create();
Thread::tid_t tid = 1000;
ThreadInfo info("testThread", tid, true, stack, nullptr);
ThreadProfile tp(&info, 10);
tp.addTag(ProfileEntry('t', 123.1f));
ASSERT_TRUE(tp.mEntries != nullptr);
ASSERT_TRUE(tp.mEntries[tp.mReadPos].mTagName == 't');
ASSERT_TRUE(tp.mEntries[tp.mReadPos].mTagFloat == 123.1f);
nsRefPtr<ProfileBuffer> pb = new ProfileBuffer(10);
pb->addTag(ProfileEntry('t', 123.1f));
ASSERT_TRUE(pb->mEntries != nullptr);
ASSERT_TRUE(pb->mEntries[pb->mReadPos].mTagName == 't');
ASSERT_TRUE(pb->mEntries[pb->mReadPos].mTagFloat == 123.1f);
}
// See if we can insert some tags
@ -32,17 +33,17 @@ TEST(ThreadProfile, InsertTagsNoWrap) {
PseudoStack* stack = PseudoStack::create();
Thread::tid_t tid = 1000;
ThreadInfo info("testThread", tid, true, stack, nullptr);
ThreadProfile tp(&info, 100);
nsRefPtr<ProfileBuffer> pb = new ProfileBuffer(100);
int test_size = 50;
for (int i = 0; i < test_size; i++) {
tp.addTag(ProfileEntry('t', i));
pb->addTag(ProfileEntry('t', i));
}
ASSERT_TRUE(tp.mEntries != nullptr);
int readPos = tp.mReadPos;
while (readPos != tp.mWritePos) {
ASSERT_TRUE(tp.mEntries[readPos].mTagName == 't');
ASSERT_TRUE(tp.mEntries[readPos].mTagInt == readPos);
readPos = (readPos + 1) % tp.mEntrySize;
ASSERT_TRUE(pb->mEntries != nullptr);
int readPos = pb->mReadPos;
while (readPos != pb->mWritePos) {
ASSERT_TRUE(pb->mEntries[readPos].mTagName == 't');
ASSERT_TRUE(pb->mEntries[readPos].mTagInt == readPos);
readPos = (readPos + 1) % pb->mEntrySize;
}
}
@ -54,20 +55,20 @@ TEST(ThreadProfile, InsertTagsWrap) {
int tags = 24;
int buffer_size = tags + 1;
ThreadInfo info("testThread", tid, true, stack, nullptr);
ThreadProfile tp(&info, buffer_size);
nsRefPtr<ProfileBuffer> pb = new ProfileBuffer(buffer_size);
int test_size = 43;
for (int i = 0; i < test_size; i++) {
tp.addTag(ProfileEntry('t', i));
pb->addTag(ProfileEntry('t', i));
}
ASSERT_TRUE(tp.mEntries != nullptr);
int readPos = tp.mReadPos;
ASSERT_TRUE(pb->mEntries != nullptr);
int readPos = pb->mReadPos;
int ctr = 0;
while (readPos != tp.mWritePos) {
ASSERT_TRUE(tp.mEntries[readPos].mTagName == 't');
while (readPos != pb->mWritePos) {
ASSERT_TRUE(pb->mEntries[readPos].mTagName == 't');
// the first few tags were discarded when we wrapped
ASSERT_TRUE(tp.mEntries[readPos].mTagInt == ctr + (test_size - tags));
ASSERT_TRUE(pb->mEntries[readPos].mTagInt == ctr + (test_size - tags));
ctr++;
readPos = (readPos + 1) % tp.mEntrySize;
readPos = (readPos + 1) % pb->mEntrySize;
}
}