Bug 1123237 - Part 11. Don't use STL in memory-profiler. r=BenWa,cervantes

Kan-Ru Chen 2015-08-26 18:55:55 +08:00
parent fa6350cc5c
commit 047aee244e
9 changed files with 292 additions and 335 deletions
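The recurring translation in this patch: u_string → nsCString, u_vector&lt;T&gt; → nsTArray&lt;T&gt;, and u_unordered_map&lt;K, V&gt; → nsDataHashtable&lt;KeyClass, V&gt;, with std::hash specializations replaced by Hash() methods or hash-key classes. A minimal sketch of the lookup idiom the patch adopts (illustrative only; it builds only inside mozilla-central, where nsDataHashtable.h is available):

#include "nsDataHashtable.h"

// STL version being removed:
//   auto i = map.insert(std::make_pair(key, map.size()));
//   return i.first->second;
// XPCOM version used throughout the patch:
static uint32_t
GetOrAssignIndex(nsDataHashtable<nsCStringHashKey, uint32_t>& aMap,
                 const nsACString& aKey)
{
  uint32_t index = aMap.Count();   // next free index if aKey is new
  if (!aMap.Get(aKey, &index)) {   // Get() copies the stored value out
    aMap.Put(aKey, index);
  }
  return index;
}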

CompactTraceTable.h

@@ -7,13 +7,11 @@
#ifndef memory_profiler_CompactTraceTable_h
#define memory_profiler_CompactTraceTable_h
#include "UncensoredAllocator.h"
#include <functional>
#include <utility>
#include "mozilla/HashFunctions.h"
#include "nsDataHashtable.h"
#include "nsTArray.h"
namespace mozilla {
struct TrieNode final
@@ -24,67 +22,49 @@ struct TrieNode final
{
return parentIdx == t.parentIdx && nameIdx == t.nameIdx;
}
};
} // namespace mozilla
namespace std {
template<>
struct hash<mozilla::TrieNode>
{
size_t operator()(const mozilla::TrieNode& v) const
uint32_t Hash() const
{
uint64_t k = static_cast<uint64_t>(v.parentIdx) << 32 | v.nameIdx;
return std::hash<uint64_t>()(k);
return HashGeneric(parentIdx, nameIdx);
}
};
#ifdef MOZ_REPLACE_MALLOC
template<>
struct hash<mozilla::u_string>
{
size_t operator()(const mozilla::u_string& v) const
{
return mozilla::HashString(v.c_str());
}
};
#endif
} // namespace std
namespace mozilla {
// This class maps a Node of type T to its index in the
// map. When serializing, the map is traversed and put into an ordered
// vector of Nodes.
template<typename T>
// array of Nodes.
template<typename KeyClass, typename T>
class NodeIndexMap final
{
public:
uint32_t Insert(const T& e)
{
auto i = mMap.insert(std::make_pair(e, mMap.size()));
return i.first->second;
uint32_t index = mMap.Count();
if (!mMap.Get(e, &index)) {
mMap.Put(e, index);
}
return index;
}
u_vector<T> Serialize() const
nsTArray<T> Serialize() const
{
u_vector<T> v(mMap.size());
for (auto i: mMap) {
v[i.second] = i.first;
nsTArray<T> v;
v.SetLength(mMap.Count());
for (auto iter = mMap.ConstIter(); !iter.Done(); iter.Next()) {
v[iter.Data()] = iter.Key();
}
return v;
}
uint32_t Size() const
{
return mMap.size();
return mMap.Count();
}
void Clear()
{
mMap.clear();
mMap.Clear();
}
private:
u_unordered_map<T, uint32_t> mMap;
nsDataHashtable<KeyClass, uint32_t> mMap;
};
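A usage sketch of the rewritten NodeIndexMap (assumed, not part of the patch): Insert() deduplicates and returns a stable index, and Serialize() inverts the hashtable into an index-ordered array.

NodeIndexMap<nsCStringHashKey, nsCString> names;
names.Insert(nsAutoCString("main"));   // returns 0
names.Insert(nsAutoCString("foo"));    // returns 1
names.Insert(nsAutoCString("main"));   // returns 0 again: deduplicated
nsTArray<nsCString> flat = names.Serialize();
// flat[0] == "main", flat[1] == "foo": positions match the returned indices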
// Backtraces are stored in a trie to save space.
@@ -97,22 +77,22 @@ class CompactTraceTable final
public:
CompactTraceTable()
{
mNames.Insert("(unknown)");
mNames.Insert(nsAutoCString("(unknown)"));
mTraces.Insert(TrieNode{0, 0});
}
u_vector<u_string> GetNames() const
nsTArray<nsCString> GetNames() const
{
return mNames.Serialize();
}
u_vector<TrieNode> GetTraces() const
nsTArray<TrieNode> GetTraces() const
{
return mTraces.Serialize();
}
// Returns an ID for a stacktrace.
uint32_t Insert(const u_vector<u_string>& aRawStacktrace)
uint32_t Insert(const nsTArray<nsCString>& aRawStacktrace)
{
uint32_t parent = 0;
for (auto& frame: aRawStacktrace) {
@@ -127,8 +107,8 @@ public:
mTraces.Clear();
}
private:
NodeIndexMap<u_string> mNames;
NodeIndexMap<TrieNode> mTraces;
NodeIndexMap<nsCStringHashKey, nsCString> mNames;
NodeIndexMap<nsGenericHashKey<TrieNode>, TrieNode> mTraces;
};
} // namespace mozilla
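A worked example of the trie encoding (hypothetical frame names, for illustration). Inserting the stacks [main, a, b] and [main, a, c] produces:

mNames:  0:"(unknown)"  1:"main"  2:"a"  3:"b"  4:"c"
mTraces: 0:{parentIdx 0, nameIdx 0}   root, added by the constructor
         1:{0, 1}   main
         2:{1, 2}   main > a
         3:{2, 3}   main > a > b   <- ID returned for the first stack
         4:{2, 4}   main > a > c   <- ID returned for the second stack

The shared prefix main > a is stored once, which is where the trie saves space.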

GCHeapProfilerImpl.cpp

@@ -6,9 +6,7 @@
#include "GCHeapProfilerImpl.h"
#include "mozilla/TimeStamp.h"
#include "prlock.h"
#include "UncensoredAllocator.h"
namespace mozilla {
@@ -25,19 +23,19 @@ GCHeapProfilerImpl::~GCHeapProfilerImpl()
}
}
u_vector<u_string>
nsTArray<nsCString>
GCHeapProfilerImpl::GetNames() const
{
return mTraceTable.GetNames();
}
u_vector<TrieNode>
nsTArray<TrieNode>
GCHeapProfilerImpl::GetTraces() const
{
return mTraceTable.GetTraces();
}
const u_vector<AllocEvent>&
const nsTArray<AllocEvent>&
GCHeapProfilerImpl::GetEvents() const
{
return mAllocEvents;
@@ -47,10 +45,10 @@ void
GCHeapProfilerImpl::reset()
{
mTraceTable.Reset();
mAllocEvents.clear();
mNurseryEntries.clear();
mTenuredEntriesFG.clear();
mTenuredEntriesBG.clear();
mAllocEvents.Clear();
mNurseryEntries.Clear();
mTenuredEntriesFG.Clear();
mTenuredEntriesBG.Clear();
}
void
@@ -68,22 +66,25 @@ GCHeapProfilerImpl::sampleNursery(void* addr, uint32_t size)
void
GCHeapProfilerImpl::markTenuredStart()
{
AutoUseUncensoredAllocator ua;
AutoMPLock lock(mLock);
if (!mMarking) {
mMarking = true;
Swap(mTenuredEntriesFG, mTenuredEntriesBG);
MOZ_ASSERT(mTenuredEntriesFG.empty());
mTenuredEntriesFG.SwapElements(mTenuredEntriesBG);
MOZ_ASSERT(mTenuredEntriesFG.Count() == 0);
}
}
void
GCHeapProfilerImpl::markTenured(void* addr)
{
AutoUseUncensoredAllocator ua;
AutoMPLock lock(mLock);
if (mMarking) {
auto res = mTenuredEntriesBG.find(addr);
if (res != mTenuredEntriesBG.end()) {
res->second.mMarked = true;
AllocEntry entry;
if (mTenuredEntriesBG.Get(addr, &entry)) {
entry.mMarked = true;
mTenuredEntriesBG.Put(addr, entry);
}
}
}
@@ -91,72 +92,76 @@ GCHeapProfilerImpl::markTenured(void* addr)
void
GCHeapProfilerImpl::sweepTenured()
{
AutoUseUncensoredAllocator ua;
AutoMPLock lock(mLock);
if (mMarking) {
mMarking = false;
for (auto& entry: mTenuredEntriesBG) {
if (entry.second.mMarked) {
entry.second.mMarked = false;
mTenuredEntriesFG.insert(entry);
for (auto iter = mTenuredEntriesBG.Iter(); !iter.Done(); iter.Next()) {
if (iter.Data().mMarked) {
iter.Data().mMarked = false;
mTenuredEntriesFG.Put(iter.Key(), iter.Data());
} else {
AllocEvent& oldEvent = mAllocEvents[entry.second.mEventIdx];
AllocEvent& oldEvent = mAllocEvents[iter.Data().mEventIdx];
AllocEvent newEvent(oldEvent.mTraceIdx, -oldEvent.mSize, TimeStamp::Now());
mAllocEvents.push_back(newEvent);
mAllocEvents.AppendElement(newEvent);
}
}
mTenuredEntriesBG.clear();
mTenuredEntriesBG.Clear();
}
}
void
GCHeapProfilerImpl::sweepNursery()
{
AutoUseUncensoredAllocator ua;
AutoMPLock lock(mLock);
for (auto& entry: mNurseryEntries) {
AllocEvent& oldEvent = mAllocEvents[entry.second.mEventIdx];
for (auto iter = mNurseryEntries.Iter(); !iter.Done(); iter.Next()) {
AllocEvent& oldEvent = mAllocEvents[iter.Data().mEventIdx];
AllocEvent newEvent(oldEvent.mTraceIdx, -oldEvent.mSize, TimeStamp::Now());
mAllocEvents.push_back(newEvent);
mAllocEvents.AppendElement(newEvent);
}
mNurseryEntries.clear();
mNurseryEntries.Clear();
}
void
GCHeapProfilerImpl::moveNurseryToTenured(void* addrOld, void* addrNew)
{
AutoUseUncensoredAllocator ua;
AutoMPLock lock(mLock);
auto iterOld = mNurseryEntries.find(addrOld);
if (iterOld == mNurseryEntries.end()) {
AllocEntry entryOld;
if (!mNurseryEntries.Get(addrOld, &entryOld)) {
return;
}
// Because the tenured heap is sampled, the address might already be there.
// If not, the address is inserted with the old event.
auto res = mTenuredEntriesFG.insert(
std::make_pair(addrNew, AllocEntry(iterOld->second.mEventIdx)));
auto iterNew = res.first;
// If it is already inserted, the insertion above will fail and the
// iterator of the already-inserted element is returned.
// We choose to ignore the new event by setting its size to zero and
// pointing the newly allocated address at the old event.
// An event of size zero will be skipped when reporting.
if (!res.second) {
mAllocEvents[iterNew->second.mEventIdx].mSize = 0;
iterNew->second.mEventIdx = iterOld->second.mEventIdx;
AllocEntry tenuredEntryOld;
if (!mTenuredEntriesFG.Get(addrNew, &tenuredEntryOld)) {
mTenuredEntriesFG.Put(addrNew, AllocEntry(entryOld.mEventIdx));
} else {
// The tenured heap is sampled, so an entry for the new address may
// already exist. In that case we ignore the new event by setting its
// size to zero and pointing the newly allocated address at the old
// event. An event of size zero will be skipped when reporting.
mAllocEvents[entryOld.mEventIdx].mSize = 0;
tenuredEntryOld.mEventIdx = entryOld.mEventIdx;
mTenuredEntriesFG.Put(addrNew, tenuredEntryOld);
}
mNurseryEntries.erase(iterOld);
mNurseryEntries.Remove(addrOld);
}
void
GCHeapProfilerImpl::SampleInternal(void* aAddr, uint32_t aSize, AllocMap& aTable)
{
AutoUseUncensoredAllocator ua;
AutoMPLock lock(mLock);
size_t nSamples = AddBytesSampled(aSize);
if (nSamples > 0) {
u_vector<u_string> trace = GetStacktrace();
nsTArray<nsCString> trace = GetStacktrace();
AllocEvent ai(mTraceTable.Insert(trace), nSamples * mSampleSize, TimeStamp::Now());
aTable.insert(std::make_pair(aAddr, AllocEntry(mAllocEvents.size())));
mAllocEvents.push_back(ai);
aTable.Put(aAddr, AllocEntry(mAllocEvents.Length()));
mAllocEvents.AppendElement(ai);
}
}
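Note the convention shared by sweepTenured(), sweepNursery(), and NativeProfilerImpl::removeNative(): an object's death is recorded as a fresh event carrying the original trace index and the negated size. A sketch, assuming AllocEvent as declared in MemoryProfiler.h (traceIdx is a placeholder):

uint32_t traceIdx = 7;  // hypothetical result of mTraceTable.Insert(trace)
AllocEvent born(traceIdx, 4096, TimeStamp::Now());
// Later, when the same object dies:
AllocEvent died(born.mTraceIdx, -born.mSize, TimeStamp::Now());
// Summing mSize per trace across events therefore yields live bytes.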

GCHeapProfilerImpl.h

@@ -21,9 +21,9 @@ public:
GCHeapProfilerImpl();
~GCHeapProfilerImpl() override;
u_vector<u_string> GetNames() const override;
u_vector<TrieNode> GetTraces() const override;
const u_vector<AllocEvent>& GetEvents() const override;
nsTArray<nsCString> GetNames() const override;
nsTArray<TrieNode> GetTraces() const override;
const nsTArray<AllocEvent>& GetEvents() const override;
void reset() override;
void sampleTenured(void* addr, uint32_t size) override;
@@ -44,7 +44,7 @@ private:
AllocMap mTenuredEntriesFG;
AllocMap mTenuredEntriesBG;
u_vector<AllocEvent> mAllocEvents;
nsTArray<AllocEvent> mAllocEvents;
CompactTraceTable mTraceTable;
};

MemoryProfiler.cpp

@@ -9,10 +9,9 @@
#include <cmath>
#include <cstdlib>
#include "mozilla/Compiler.h"
#include "mozilla/ClearOnShutdown.h"
#include "mozilla/Move.h"
#include "mozilla/TimeStamp.h"
#include "mozilla/unused.h"
#include "GCHeapProfilerImpl.h"
#include "GeckoProfiler.h"
@@ -27,19 +26,6 @@
struct JSRuntime;
#if MOZ_USING_STLPORT
namespace std {
template<class T>
struct hash<T*>
{
size_t operator()(T* v) const
{
return hash<void*>()(static_cast<void*>(v));
}
};
} // namespace std
#endif
namespace mozilla {
#define MEMORY_PROFILER_SAMPLE_SIZE 65536
@@ -52,18 +38,17 @@ ProfilerImpl::ProfilerImpl()
mRemainingBytes = std::floor(std::log(1.0 - DRandom()) / mLog1minusP);
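The line above draws the gap until the next sampled byte by inverting the geometric CDF: with per-byte sampling probability p, F(k) = 1 - (1 - p)^k, so k = floor(log(1 - U) / log(1 - p)) for uniform U in [0, 1). A standalone sketch (assumption: mLog1minusP caches std::log(1.0 - p) with p = 1.0 / MEMORY_PROFILER_SAMPLE_SIZE):

#include <cmath>
#include <cstddef>

static size_t
NextSampleGap(double aUniform /* in [0, 1) */, double aLogOneMinusP)
{
  // Expected value is ~1/p bytes, i.e. one sample per 64 KiB here.
  return static_cast<size_t>(std::floor(std::log(1.0 - aUniform) / aLogOneMinusP));
}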
}
u_vector<u_string>
nsTArray<nsCString>
ProfilerImpl::GetStacktrace()
{
u_vector<u_string> trace;
char* output = (char*)u_malloc(BACKTRACE_BUFFER_SIZE);
nsTArray<nsCString> trace;
nsAutoArrayPtr<char> output(new char[BACKTRACE_BUFFER_SIZE]);
profiler_get_backtrace_noalloc(output, BACKTRACE_BUFFER_SIZE);
for (const char* p = output; *p; p += strlen(p) + 1) {
trace.push_back(p);
trace.AppendElement(nsDependentCString(p));
}
u_free(output);
return trace;
}
@@ -92,8 +77,8 @@ NS_IMPL_ISUPPORTS(MemoryProfiler, nsIMemoryProfiler)
PRLock* MemoryProfiler::sLock;
uint32_t MemoryProfiler::sProfileRuntimeCount;
NativeProfilerImpl* MemoryProfiler::sNativeProfiler;
JSRuntimeProfilerMap* MemoryProfiler::sJSRuntimeProfilerMap;
StaticAutoPtr<NativeProfilerImpl> MemoryProfiler::sNativeProfiler;
StaticAutoPtr<JSRuntimeProfilerMap> MemoryProfiler::sJSRuntimeProfilerMap;
TimeStamp MemoryProfiler::sStartTime;
void
@@ -104,10 +89,12 @@ MemoryProfiler::InitOnce()
static bool initialized = false;
if (!initialized) {
InitializeMallocHook();
MallocHook::Initialize();
sLock = PR_NewLock();
sProfileRuntimeCount = 0;
sJSRuntimeProfilerMap = new JSRuntimeProfilerMap();
ClearOnShutdown(&sJSRuntimeProfilerMap);
ClearOnShutdown(&sNativeProfiler);
std::srand(PR_Now());
bool ignored;
sStartTime = TimeStamp::ProcessCreation(ignored);
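The raw static pointers become StaticAutoPtr singletons registered with ClearOnShutdown, so they are deleted during XPCOM shutdown rather than leaked or freed by hand. The general shape (sketch; FooService is hypothetical, and ClearOnShutdown must run on the main thread):

#include "mozilla/ClearOnShutdown.h"
#include "mozilla/StaticPtr.h"

class FooService {};
static mozilla::StaticAutoPtr<FooService> sService;

static void
EnsureService()
{
  if (!sService) {
    sService = new FooService();
    // Arranges for sService to be nulled (deleting the object) at shutdown.
    mozilla::ClearOnShutdown(&sService);
  }
}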
@@ -119,10 +106,12 @@ NS_IMETHODIMP
MemoryProfiler::StartProfiler()
{
InitOnce();
JSRuntime* runtime = XPCJSRuntime::Get()->Runtime();
AutoUseUncensoredAllocator ua;
AutoMPLock lock(sLock);
if (!(*sJSRuntimeProfilerMap)[runtime].mEnabled) {
(*sJSRuntimeProfilerMap)[runtime].mEnabled = true;
JSRuntime* runtime = XPCJSRuntime::Get()->Runtime();
ProfilerForJSRuntime profiler;
if (!sJSRuntimeProfilerMap->Get(runtime, &profiler) ||
!profiler.mEnabled) {
if (sProfileRuntimeCount == 0) {
js::EnableRuntimeProfilingStack(runtime, true);
if (!sNativeProfiler) {
@@ -131,10 +120,12 @@ MemoryProfiler::StartProfiler()
MemProfiler::SetNativeProfiler(sNativeProfiler);
}
GCHeapProfilerImpl* gp = new GCHeapProfilerImpl();
(*sJSRuntimeProfilerMap)[runtime].mProfiler = gp;
profiler.mEnabled = true;
profiler.mProfiler = gp;
sJSRuntimeProfilerMap->Put(runtime, profiler);
MemProfiler::GetMemProfiler(runtime)->start(gp);
if (sProfileRuntimeCount == 0) {
EnableMallocHook(sNativeProfiler);
MallocHook::Enable(sNativeProfiler);
}
sProfileRuntimeCount++;
}
@@ -145,16 +136,20 @@ NS_IMETHODIMP
MemoryProfiler::StopProfiler()
{
InitOnce();
JSRuntime* runtime = XPCJSRuntime::Get()->Runtime();
AutoUseUncensoredAllocator ua;
AutoMPLock lock(sLock);
if ((*sJSRuntimeProfilerMap)[runtime].mEnabled) {
JSRuntime* runtime = XPCJSRuntime::Get()->Runtime();
ProfilerForJSRuntime profiler;
if (sJSRuntimeProfilerMap->Get(runtime, &profiler) &&
profiler.mEnabled) {
MemProfiler::GetMemProfiler(runtime)->stop();
if (--sProfileRuntimeCount == 0) {
DisableMallocHook();
MallocHook::Disable();
MemProfiler::SetNativeProfiler(nullptr);
js::EnableRuntimeProfilingStack(runtime, false);
}
(*sJSRuntimeProfilerMap)[runtime].mEnabled = false;
profiler.mEnabled = false;
sJSRuntimeProfilerMap->Put(runtime, profiler);
}
return NS_OK;
}
@@ -163,14 +158,17 @@ NS_IMETHODIMP
MemoryProfiler::ResetProfiler()
{
InitOnce();
JSRuntime* runtime = XPCJSRuntime::Get()->Runtime();
AutoUseUncensoredAllocator ua;
AutoMPLock lock(sLock);
if (!(*sJSRuntimeProfilerMap)[runtime].mEnabled) {
delete (*sJSRuntimeProfilerMap)[runtime].mProfiler;
(*sJSRuntimeProfilerMap)[runtime].mProfiler = nullptr;
JSRuntime* runtime = XPCJSRuntime::Get()->Runtime();
ProfilerForJSRuntime profiler;
if (!sJSRuntimeProfilerMap->Get(runtime, &profiler) ||
!profiler.mEnabled) {
delete profiler.mProfiler;
profiler.mProfiler = nullptr;
sJSRuntimeProfilerMap->Put(runtime, profiler);
}
if (sProfileRuntimeCount == 0) {
delete sNativeProfiler;
sNativeProfiler = nullptr;
}
return NS_OK;
@@ -178,45 +176,45 @@ MemoryProfiler::ResetProfiler()
struct MergedTraces
{
u_vector<u_string> mNames;
u_vector<TrieNode> mTraces;
u_vector<AllocEvent> mEvents;
nsTArray<nsCString> mNames;
nsTArray<TrieNode> mTraces;
nsTArray<AllocEvent> mEvents;
};
// Merge events and corresponding traces and names.
static MergedTraces
MergeResults(u_vector<u_string> names0, u_vector<TrieNode> traces0, u_vector<AllocEvent> events0,
u_vector<u_string> names1, u_vector<TrieNode> traces1, u_vector<AllocEvent> events1)
MergeResults(const nsTArray<nsCString>& names0,
const nsTArray<TrieNode>& traces0,
const nsTArray<AllocEvent>& events0,
const nsTArray<nsCString>& names1,
const nsTArray<TrieNode>& traces1,
const nsTArray<AllocEvent>& events1)
{
NodeIndexMap<u_string> names;
NodeIndexMap<TrieNode> traces;
u_vector<AllocEvent> events;
NodeIndexMap<nsCStringHashKey, nsCString> names;
NodeIndexMap<nsGenericHashKey<TrieNode>, TrieNode> traces;
nsTArray<AllocEvent> events;
u_vector<size_t> names1Tonames0;
u_vector<size_t> traces1Totraces0(1, 0);
nsTArray<size_t> names1Tonames0(names1.Length());
nsTArray<size_t> traces1Totraces0(traces1.Length());
// Merge names.
for (auto& i: names0) {
names.Insert(i);
}
for (auto& i: names1) {
names1Tonames0.push_back(names.Insert(i));
names1Tonames0.AppendElement(names.Insert(i));
}
// Merge traces. Note that traces1[i].parentIdx < i for all i > 0.
for (auto& i: traces0) {
traces.Insert(i);
}
for (size_t i = 1; i < traces1.size(); i++) {
traces1Totraces0.AppendElement(0);
for (size_t i = 1; i < traces1.Length(); i++) {
TrieNode node = traces1[i];
node.parentIdx = traces1Totraces0[node.parentIdx];
node.nameIdx = names1Tonames0[node.nameIdx];
traces1Totraces0.push_back(traces.Insert(node));
}
// Update events1
for (auto& i: events1) {
i.mTraceIdx = traces1Totraces0[i.mTraceIdx];
traces1Totraces0.AppendElement(traces.Insert(node));
}
// Merge the events according to timestamps.
@@ -225,18 +223,22 @@ MergeResults(u_vector<u_string> names0, u_vector<TrieNode> traces0, u_vector<All
while (p0 != events0.end() && p1 != events1.end()) {
if (p0->mTimestamp < p1->mTimestamp) {
events.push_back(*p0++);
events.AppendElement(*p0++);
} else {
events.push_back(*p1++);
events.AppendElement(*p1++);
events.LastElement().mTraceIdx =
traces1Totraces0[events.LastElement().mTraceIdx];
}
}
while (p0 != events0.end()) {
events.push_back(*p0++);
events.AppendElement(*p0++);
}
while (p1 != events1.end()) {
events.push_back(*p1++);
events.AppendElement(*p1++);
events.LastElement().mTraceIdx =
traces1Totraces0[events.LastElement().mTraceIdx];
}
return MergedTraces{names.Serialize(), traces.Serialize(), Move(events)};
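A worked remapping example (hypothetical inputs): with names0 = ["(unknown)", "main"] and names1 = ["(unknown)", "gc"], the merged table is ["(unknown)", "main", "gc"] and names1Tonames0 = [0, 2], so an event from the second profiler whose trace uses name 1 ("gc") ends up referring to name 2. traces1Totraces0 is filled in ascending index order, which is sound precisely because traces1[i].parentIdx < i: a node's parent is always remapped before any node that references it.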
@@ -246,8 +248,9 @@ NS_IMETHODIMP
MemoryProfiler::GetResults(JSContext* cx, JS::MutableHandle<JS::Value> aResult)
{
InitOnce();
JSRuntime* runtime = XPCJSRuntime::Get()->Runtime();
AutoUseUncensoredAllocator ua;
AutoMPLock lock(sLock);
JSRuntime* runtime = XPCJSRuntime::Get()->Runtime();
// Getting results when the profiler is running is not allowed.
if (sProfileRuntimeCount > 0) {
return NS_OK;
@@ -257,29 +260,31 @@ MemoryProfiler::GetResults(JSContext* cx, JS::MutableHandle<JS::Value> aResult)
return NS_OK;
}
// Return immediately when there's no result in the current runtime.
if (!(*sJSRuntimeProfilerMap)[runtime].mProfiler) {
ProfilerForJSRuntime profiler;
if (!sJSRuntimeProfilerMap->Get(runtime, &profiler) ||
!profiler.mProfiler) {
return NS_OK;
}
GCHeapProfilerImpl* gp = (*sJSRuntimeProfilerMap)[runtime].mProfiler;
GCHeapProfilerImpl* gp = profiler.mProfiler;
auto results = MergeResults(gp->GetNames(), gp->GetTraces(), gp->GetEvents(),
sNativeProfiler->GetNames(),
sNativeProfiler->GetTraces(),
sNativeProfiler->GetEvents());
u_vector<u_string> names = Move(results.mNames);
u_vector<TrieNode> traces = Move(results.mTraces);
u_vector<AllocEvent> events = Move(results.mEvents);
const nsTArray<nsCString>& names = results.mNames;
const nsTArray<TrieNode>& traces = results.mTraces;
const nsTArray<AllocEvent>& events = results.mEvents;
JS::RootedObject jsnames(cx, JS_NewArrayObject(cx, names.size()));
JS::RootedObject jstraces(cx, JS_NewArrayObject(cx, traces.size()));
JS::RootedObject jsevents(cx, JS_NewArrayObject(cx, events.size()));
JS::RootedObject jsnames(cx, JS_NewArrayObject(cx, names.Length()));
JS::RootedObject jstraces(cx, JS_NewArrayObject(cx, traces.Length()));
JS::RootedObject jsevents(cx, JS_NewArrayObject(cx, events.Length()));
for (size_t i = 0; i < names.size(); i++) {
JS::RootedString name(cx, JS_NewStringCopyZ(cx, names[i].c_str()));
for (size_t i = 0; i < names.Length(); i++) {
JS::RootedString name(cx, JS_NewStringCopyZ(cx, names[i].get()));
JS_SetElement(cx, jsnames, i, name);
}
for (size_t i = 0; i < traces.size(); i++) {
for (size_t i = 0; i < traces.Length(); i++) {
JS::RootedObject tn(cx, JS_NewPlainObject(cx));
JS::RootedValue nameIdx(cx, JS_NumberValue(traces[i].nameIdx));
JS::RootedValue parentIdx(cx, JS_NumberValue(traces[i].parentIdx));
@@ -294,7 +299,7 @@ MemoryProfiler::GetResults(JSContext* cx, JS::MutableHandle<JS::Value> aResult)
continue;
}
MOZ_ASSERT(!sStartTime.IsNull());
double time = (sStartTime - ent.mTimestamp).ToMilliseconds();
double time = (ent.mTimestamp - sStartTime).ToMilliseconds();
JS::RootedObject tn(cx, JS_NewPlainObject(cx));
JS::RootedValue size(cx, JS_NumberValue(ent.mSize));
JS::RootedValue traceIdx(cx, JS_NumberValue(ent.mTraceIdx));

MemoryProfiler.h

@@ -9,11 +9,11 @@
#include "nsIMemoryProfiler.h"
#include "mozilla/StaticPtr.h"
#include "mozilla/TimeStamp.h"
#include "CompactTraceTable.h"
#include "UncensoredAllocator.h"
#include "nsTArray.h"
#include "prlock.h"
#define MEMORY_PROFILER_CID \
@@ -23,6 +23,7 @@
#define MEMORY_PROFILER_CONTRACT_ID "@mozilla.org/tools/memory-profiler;1"
struct JSRuntime;
struct PRLock;
namespace mozilla {
@@ -38,7 +39,8 @@ struct ProfilerForJSRuntime
GCHeapProfilerImpl* mProfiler;
bool mEnabled;
};
using JSRuntimeProfilerMap = u_unordered_map<JSRuntime*, ProfilerForJSRuntime>;
using JSRuntimeProfilerMap =
nsDataHashtable<nsClearingPtrHashKey<JSRuntime>, ProfilerForJSRuntime>;
class MemoryProfiler final : public nsIMemoryProfiler
{
@@ -55,8 +57,8 @@ private:
static PRLock* sLock;
static uint32_t sProfileRuntimeCount;
static NativeProfilerImpl* sNativeProfiler;
static JSRuntimeProfilerMap* sJSRuntimeProfilerMap;
static StaticAutoPtr<NativeProfilerImpl> sNativeProfiler;
static StaticAutoPtr<JSRuntimeProfilerMap> sJSRuntimeProfilerMap;
static TimeStamp sStartTime;
};
@@ -80,24 +82,30 @@ struct AllocEntry {
uint32_t mEventIdx : 31;
bool mMarked : 1;
AllocEntry(int aEventIdx)
// Default constructor for the uninitialized stack value required by
// getter methods.
AllocEntry()
: mEventIdx(0)
, mMarked(false)
{}
explicit AllocEntry(int aEventIdx)
: mEventIdx(aEventIdx)
, mMarked(false)
{}
};
using AllocMap = u_unordered_map<void*, AllocEntry>;
using AllocMap = nsDataHashtable<nsClearingVoidPtrHashKey, AllocEntry>;
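Why AllocEntry grew a default constructor (sketch): nsDataHashtable::Get copies the stored value into a caller-supplied out-parameter, so the value type must be default-constructible, while the now-explicit one-argument constructor blocks accidental conversions from int. The 31/1 bitfield keeps each entry in a single 32-bit word.

AllocMap map;
void* addr = reinterpret_cast<void*>(0x1000);  // placeholder address
map.Put(addr, AllocEntry(42));                 // event index 42, unmarked
AllocEntry entry;                              // default-constructed out-param
if (map.Get(addr, &entry)) {
  // entry.mEventIdx == 42 and entry.mMarked == false
}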
class ProfilerImpl
{
public:
static u_vector<u_string> GetStacktrace();
static nsTArray<nsCString> GetStacktrace();
static double DRandom();
ProfilerImpl();
virtual u_vector<u_string> GetNames() const = 0;
virtual u_vector<TrieNode> GetTraces() const = 0;
virtual const u_vector<AllocEvent>& GetEvents() const = 0;
virtual nsTArray<nsCString> GetNames() const = 0;
virtual nsTArray<TrieNode> GetTraces() const = 0;
virtual const nsTArray<AllocEvent>& GetEvents() const = 0;
protected:
/**

NativeProfilerImpl.cpp

@@ -6,9 +6,7 @@
#include "NativeProfilerImpl.h"
#include "mozilla/TimeStamp.h"
#include "prlock.h"
#include "UncensoredAllocator.h"
namespace mozilla {
@@ -24,19 +22,19 @@ NativeProfilerImpl::~NativeProfilerImpl()
}
}
u_vector<u_string>
nsTArray<nsCString>
NativeProfilerImpl::GetNames() const
{
return mTraceTable.GetNames();
}
u_vector<TrieNode>
nsTArray<TrieNode>
NativeProfilerImpl::GetTraces() const
{
return mTraceTable.GetTraces();
}
const u_vector<AllocEvent>&
const nsTArray<AllocEvent>&
NativeProfilerImpl::GetEvents() const
{
return mAllocEvents;
@@ -46,37 +44,39 @@ void
NativeProfilerImpl::reset()
{
mTraceTable.Reset();
mAllocEvents.clear();
mNativeEntries.clear();
mAllocEvents.Clear();
mNativeEntries.Clear();
}
void
NativeProfilerImpl::sampleNative(void* addr, uint32_t size)
{
AutoUseUncensoredAllocator ua;
AutoMPLock lock(mLock);
size_t nSamples = AddBytesSampled(size);
if (nSamples > 0) {
u_vector<u_string> trace = GetStacktrace();
nsTArray<nsCString> trace = GetStacktrace();
AllocEvent ai(mTraceTable.Insert(trace), nSamples * mSampleSize, TimeStamp::Now());
mNativeEntries.insert(std::make_pair(addr, AllocEntry(mAllocEvents.size())));
mAllocEvents.push_back(ai);
mNativeEntries.Put(addr, AllocEntry(mAllocEvents.Length()));
mAllocEvents.AppendElement(ai);
}
}
void
NativeProfilerImpl::removeNative(void* addr)
{
AutoUseUncensoredAllocator ua;
AutoMPLock lock(mLock);
auto res = mNativeEntries.find(addr);
if (res == mNativeEntries.end()) {
AllocEntry entry;
if (!mNativeEntries.Get(addr, &entry)) {
return;
}
AllocEvent& oldEvent = mAllocEvents[res->second.mEventIdx];
AllocEvent& oldEvent = mAllocEvents[entry.mEventIdx];
AllocEvent newEvent(oldEvent.mTraceIdx, -oldEvent.mSize, TimeStamp::Now());
mAllocEvents.push_back(newEvent);
mNativeEntries.erase(res);
mAllocEvents.AppendElement(newEvent);
mNativeEntries.Remove(addr);
}
} // namespace mozilla

NativeProfilerImpl.h

@@ -23,9 +23,9 @@ public:
NativeProfilerImpl();
~NativeProfilerImpl() override;
u_vector<u_string> GetNames() const override;
u_vector<TrieNode> GetTraces() const override;
const u_vector<AllocEvent>& GetEvents() const override;
nsTArray<nsCString> GetNames() const override;
nsTArray<TrieNode> GetTraces() const override;
const nsTArray<AllocEvent>& GetEvents() const override;
void reset() override;
void sampleNative(void* addr, uint32_t size) override;
@@ -34,7 +34,7 @@ public:
private:
PRLock* mLock;
AllocMap mNativeEntries;
u_vector<AllocEvent> mAllocEvents;
nsTArray<AllocEvent> mAllocEvents;
CompactTraceTable mTraceTable;
};

UncensoredAllocator.cpp

@@ -6,102 +6,119 @@
#include "UncensoredAllocator.h"
#include "mozilla/Assertions.h"
#include "mozilla/unused.h"
#include "MainThreadUtils.h"
#include "jsfriendapi.h"
#include "nsDebug.h"
#include "prlock.h"
#ifdef MOZ_REPLACE_MALLOC
#include "replace_malloc_bridge.h"
#endif
namespace mozilla {
static void* (*uncensored_malloc)(size_t size);
static void (*uncensored_free)(void* ptr);
#ifdef MOZ_REPLACE_MALLOC
static bool sMemoryHookEnabled = false;
static NativeProfiler* sNativeProfiler;
static malloc_hook_table_t sMallocHook;
static void*
SampleNative(void* addr, size_t size)
{
if (sMemoryHookEnabled) {
sNativeProfiler->sampleNative(addr, size);
}
return addr;
}
static void
RemoveNative(void* addr)
{
if (sMemoryHookEnabled) {
sNativeProfiler->removeNative(addr);
}
}
ThreadLocal<bool> MallocHook::mEnabledTLS;
NativeProfiler* MallocHook::mNativeProfiler;
malloc_hook_table_t MallocHook::mMallocHook;
#endif
void*
u_malloc(size_t size)
AutoUseUncensoredAllocator::AutoUseUncensoredAllocator()
{
if (uncensored_malloc) {
return uncensored_malloc(size);
} else {
return malloc(size);
#ifdef MOZ_REPLACE_MALLOC
MallocHook::mEnabledTLS.set(false);
#endif
}
AutoUseUncensoredAllocator::~AutoUseUncensoredAllocator()
{
#ifdef MOZ_REPLACE_MALLOC
MallocHook::mEnabledTLS.set(true);
#endif
}
bool
MallocHook::Enabled()
{
#ifdef MOZ_REPLACE_MALLOC
return mEnabledTLS.get() && mNativeProfiler;
#else
return false;
#endif
}
void*
MallocHook::SampleNative(void* aAddr, size_t aSize)
{
#ifdef MOZ_REPLACE_MALLOC
if (MallocHook::Enabled()) {
mNativeProfiler->sampleNative(aAddr, aSize);
}
#endif
return aAddr;
}
void
u_free(void* ptr)
{
if (uncensored_free) {
uncensored_free(ptr);
} else {
free(ptr);
}
}
void InitializeMallocHook()
MallocHook::RemoveNative(void* aAddr)
{
#ifdef MOZ_REPLACE_MALLOC
sMallocHook.free_hook = RemoveNative;
sMallocHook.malloc_hook = SampleNative;
if (MallocHook::Enabled()) {
mNativeProfiler->removeNative(aAddr);
}
#endif
}
void
MallocHook::Initialize()
{
#ifdef MOZ_REPLACE_MALLOC
MOZ_ASSERT(NS_IsMainThread());
mMallocHook.free_hook = RemoveNative;
mMallocHook.malloc_hook = SampleNative;
ReplaceMallocBridge* bridge = ReplaceMallocBridge::Get(3);
if (bridge) {
mozilla::unused << bridge->RegisterHook("memory-profiler", nullptr, nullptr);
}
#endif
if (!uncensored_malloc && !uncensored_free) {
uncensored_malloc = malloc;
uncensored_free = free;
if (!mEnabledTLS.initialized()) {
bool success = mEnabledTLS.init();
if (NS_WARN_IF(!success)) {
return;
}
mEnabledTLS.set(false);
}
#endif
}
void EnableMallocHook(NativeProfiler* aNativeProfiler)
void
MallocHook::Enable(NativeProfiler* aNativeProfiler)
{
#ifdef MOZ_REPLACE_MALLOC
MOZ_ASSERT(NS_IsMainThread());
if (NS_WARN_IF(!mEnabledTLS.initialized())) {
return;
}
ReplaceMallocBridge* bridge = ReplaceMallocBridge::Get(3);
if (bridge) {
const malloc_table_t* alloc_funcs =
bridge->RegisterHook("memory-profiler", nullptr, &sMallocHook);
bridge->RegisterHook("memory-profiler", nullptr, &mMallocHook);
if (alloc_funcs) {
uncensored_malloc = alloc_funcs->malloc;
uncensored_free = alloc_funcs->free;
sNativeProfiler = aNativeProfiler;
sMemoryHookEnabled = true;
mNativeProfiler = aNativeProfiler;
}
}
#endif
}
void DisableMallocHook()
void
MallocHook::Disable()
{
#ifdef MOZ_REPLACE_MALLOC
MOZ_ASSERT(NS_IsMainThread());
ReplaceMallocBridge* bridge = ReplaceMallocBridge::Get(3);
if (bridge) {
bridge->RegisterHook("memory-profiler", nullptr, nullptr);
sMemoryHookEnabled = false;
mNativeProfiler = nullptr;
}
#endif
}
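The key moving part above: sampling is gated on a thread-local flag, and AutoUseUncensoredAllocator clears it for the current thread so allocations made by the profiler's own bookkeeping do not recurse into the hook. A sketch of the intended use (illustrative):

void
ProfilerInternalWork()
{
  AutoUseUncensoredAllocator ua;  // sets mEnabledTLS = false on this thread
  // Heap allocations here still pass through the replace-malloc hook, but
  // MallocHook::Enabled() now returns false, so nothing is recorded.
  nsTArray<int> scratch;
  scratch.AppendElement(1);
}  // the destructor sets mEnabledTLS = true again for this thread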

UncensoredAllocator.h

@@ -7,100 +7,42 @@
#ifndef memory_profiler_UncensoredAllocator_h
#define memory_profiler_UncensoredAllocator_h
#include "mozilla/Compiler.h"
#include "mozilla/Attributes.h"
#include "mozilla/ThreadLocal.h"
#include <string>
#include <unordered_map>
#include <vector>
#ifdef MOZ_REPLACE_MALLOC
#include "replace_malloc_bridge.h"
#endif
class NativeProfiler;
#if MOZ_USING_STLPORT
namespace std {
using tr1::unordered_map;
} // namespace std
#endif
namespace mozilla {
void InitializeMallocHook();
void EnableMallocHook(NativeProfiler* aNativeProfiler);
void DisableMallocHook();
void* u_malloc(size_t size);
void u_free(void* ptr);
#ifdef MOZ_REPLACE_MALLOC
template<class Tp>
struct UncensoredAllocator
class MallocHook final
{
typedef size_t size_type;
typedef ptrdiff_t difference_type;
typedef Tp* pointer;
typedef const Tp* const_pointer;
typedef Tp& reference;
typedef const Tp& const_reference;
typedef Tp value_type;
UncensoredAllocator() {}
template<class T>
UncensoredAllocator(const UncensoredAllocator<T>&) {}
template<class Other>
struct rebind
{
typedef UncensoredAllocator<Other> other;
};
Tp* allocate(size_t n)
{
return reinterpret_cast<Tp*>(u_malloc(n * sizeof(Tp)));
}
void deallocate(Tp* p, size_t n)
{
u_free(reinterpret_cast<void*>(p));
}
void construct(Tp* p, const Tp& val)
{
new ((void*)p) Tp(val);
}
void destroy(Tp* p)
{
p->Tp::~Tp();
}
bool operator==(const UncensoredAllocator& rhs) const
{
return true;
}
bool operator!=(const UncensoredAllocator& rhs) const
{
return false;
}
size_type max_size() const
{
return static_cast<size_type>(-1) / sizeof(Tp);
}
public:
static void Initialize();
static void Enable(NativeProfiler* aNativeProfiler);
static void Disable();
static bool Enabled();
private:
static void* SampleNative(void* aAddr, size_t aSize);
static void RemoveNative(void* aAddr);
#ifdef MOZ_REPLACE_MALLOC
static ThreadLocal<bool> mEnabledTLS;
static NativeProfiler* mNativeProfiler;
static malloc_hook_table_t mMallocHook;
#endif
friend class AutoUseUncensoredAllocator;
};
using u_string =
std::basic_string<char, std::char_traits<char>, UncensoredAllocator<char>>;
class MOZ_RAII AutoUseUncensoredAllocator final
{
public:
AutoUseUncensoredAllocator();
~AutoUseUncensoredAllocator();
};
template<typename T>
using u_vector = std::vector<T, UncensoredAllocator<T>>;
template<typename K, typename V, typename H = std::hash<K>>
using u_unordered_map =
std::unordered_map<K, V, H, std::equal_to<K>, UncensoredAllocator<std::pair<K, V>>>;
#else
using u_string = std::string;
template<typename T>
using u_vector = std::vector<T>;
template<typename K, typename V, typename H = std::hash<K>>
using u_unordered_map =
std::unordered_map<K, V, H>;
#endif
} // namespace mozilla
#endif // memory_profiler_UncensoredAllocator_h