Bug 981693 - Improve JIT code memory reporters. r=njn

Jan de Mooij 2014-03-17 10:11:21 +01:00
parent 94093687bf
commit 2fad4ed10f
11 changed files with 103 additions and 55 deletions

==== File 1 of 11 ====

@@ -906,6 +906,11 @@ namespace JSC {
return m_buffer.uncheckedSize();
}
+size_t allocSize() const
+{
+return m_buffer.allocSize();
+}
void ensureSpace(int insnSpace, int constSpace)
{
m_buffer.ensureSpace(insnSpace, constSpace);

==== File 2 of 11 ====

@@ -42,6 +42,7 @@
#include <stdarg.h>
#include "jsfriendapi.h"
#include "jsopcode.h"
#include "jsutil.h"
#include "jit/IonSpewer.h"
#include "js/RootingAPI.h"
@@ -63,6 +64,7 @@ namespace JSC {
: m_buffer(m_inlineBuffer)
, m_capacity(inlineCapacity)
, m_size(0)
+, m_allocSize(0)
, m_oom(false)
{
}
@@ -143,6 +145,11 @@ namespace JSC {
return m_size;
}
+size_t allocSize() const
+{
+return m_allocSize;
+}
bool oom() const
{
return m_oom;
@@ -159,7 +166,9 @@ namespace JSC {
return 0;
}
-void* result = allocator->alloc(m_size, poolp, kind);
+m_allocSize = js::AlignBytes(m_size, sizeof(void *));
+void* result = allocator->alloc(m_allocSize, poolp, kind);
if (!result) {
*poolp = NULL;
return 0;
@@ -255,6 +264,7 @@ namespace JSC {
char* m_buffer;
size_t m_capacity;
size_t m_size;
+size_t m_allocSize;
bool m_oom;
};
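
The heart of this file's change is the rounding step: js::AlignBytes (declared in jsutil.h, hence the include added above) rounds the requested byte count up to a multiple of the word size before the allocator is called, and that rounded figure is what m_allocSize records and what the reporters later see. A minimal standalone sketch of the rounding, using a local helper assumed to have the same semantics as js::AlignBytes (the helper name and sample sizes are illustrative, not from the tree):

    #include <cassert>
    #include <cstddef>
    #include <cstdio>

    // Round |bytes| up to the next multiple of |alignment|, which must be
    // a power of two; this mirrors the semantics of js::AlignBytes.
    static size_t alignBytes(size_t bytes, size_t alignment)
    {
        assert((alignment & (alignment - 1)) == 0); // power of two
        return (bytes + alignment - 1) & ~(alignment - 1);
    }

    int main()
    {
        // On a 64-bit target sizeof(void *) == 8, so a 13-byte instruction
        // stream is accounted for as a 16-byte allocation.
        size_t size = 13;
        size_t allocSize = alignBytes(size, sizeof(void *));
        printf("size=%zu allocSize=%zu\n", size, allocSize);
        return 0;
    }

Because the same rounded value is used at alloc and release time, the per-kind byte counters introduced below always balance.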

==== File 3 of 11 ====

@@ -66,38 +66,19 @@ public:
LinkBuffer(MacroAssembler* masm, ExecutableAllocator* executableAllocator,
ExecutablePool** poolp, bool* ok, CodeKind codeKind)
{
+// LinkBuffer is only used by Yarr. MacroAssemblerCodeRef::release relies on this.
+MOZ_ASSERT(codeKind == REGEXP_CODE);
m_codeKind = codeKind;
m_code = executableAllocAndCopy(*masm, executableAllocator, poolp);
m_executablePool = *poolp;
m_size = masm->m_assembler.size(); // must come after call to executableAllocAndCopy()!
+m_allocSize = masm->m_assembler.allocSize();
#ifndef NDEBUG
m_completed = false;
#endif
*ok = !!m_code;
}
-LinkBuffer(CodeKind kind)
-: m_executablePool(NULL)
-, m_code(NULL)
-, m_size(0)
-, m_codeKind(kind)
-#ifndef NDEBUG
-, m_completed(false)
-#endif
-{
-}
-LinkBuffer(uint8_t* ncode, size_t size, CodeKind kind)
-: m_executablePool(NULL)
-, m_code(ncode)
-, m_size(size)
-, m_codeKind(kind)
-#ifndef NDEBUG
-, m_completed(false)
-#endif
-{
-}
~LinkBuffer()
{
ASSERT(!m_executablePool || m_completed);
@@ -183,7 +164,8 @@ public:
{
performFinalization();
-return CodeRef(m_code, m_executablePool, m_size);
+MOZ_ASSERT(m_allocSize >= m_size);
+return CodeRef(m_code, m_executablePool, m_allocSize);
}
CodeLocationLabel finalizeCodeAddendum()
{
@@ -225,6 +207,7 @@ protected:
ExecutablePool* m_executablePool;
void* m_code;
size_t m_size;
+size_t m_allocSize;
CodeKind m_codeKind;
#ifndef NDEBUG
bool m_completed;

==== File 4 of 11 ====

@@ -182,14 +182,14 @@ class MacroAssemblerCodeRef {
public:
MacroAssemblerCodeRef()
: m_executablePool(NULL),
-m_size(0)
+m_allocSize(0)
{
}
-MacroAssemblerCodeRef(void* code, ExecutablePool* executablePool, size_t size)
+MacroAssemblerCodeRef(void* code, ExecutablePool* executablePool, size_t allocSize)
: m_code(code)
, m_executablePool(executablePool)
-, m_size(size)
+, m_allocSize(allocSize)
{
}
@@ -201,22 +201,23 @@ public:
#if defined DEBUG && (defined WTF_CPU_X86 || defined WTF_CPU_X86_64)
void *addr = m_code.executableAddress();
-memset(addr, 0xcc, m_size);
+memset(addr, 0xcc, m_allocSize);
#endif
-m_executablePool->release();
+// MacroAssemblerCodeRef is only used by Yarr.
+m_executablePool->release(m_allocSize, REGEXP_CODE);
m_executablePool = NULL;
}
MacroAssemblerCodePtr code() const {
return m_code;
}
-size_t size() const {
-return m_size;
+size_t allocSize() const {
+return m_allocSize;
}
MacroAssemblerCodePtr m_code;
ExecutablePool* m_executablePool;
-size_t m_size;
+size_t m_allocSize;
};
} // namespace JSC
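
Note that release() poisons the whole allocation, not just the instruction stream: on x86/x64 debug builds every one of the m_allocSize bytes is overwritten with 0xcc, the int3 breakpoint opcode, so a stale pointer that jumps into freed JIT code traps immediately instead of executing leftover bytes. A tiny sketch of that debug-poisoning idea, with plain heap memory standing in for executable pages:

    #include <cstdlib>
    #include <cstring>

    int main()
    {
        size_t allocSize = 64;
        void *code = malloc(allocSize); // stands in for JIT pages
        if (!code)
            return 1;
        // 0xcc encodes the x86 'int3' breakpoint instruction: executing
        // poisoned bytes traps rather than running stale code.
        memset(code, 0xcc, allocSize);
        free(code);
        return 0;
    }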

==== File 5 of 11 ====

@@ -440,6 +440,7 @@ public:
};
size_t size() const { return m_formatter.size(); }
+size_t allocSize() const { return m_formatter.allocSize(); }
unsigned char *buffer() const { return m_formatter.buffer(); }
bool oom() const { return m_formatter.oom(); }
@@ -3867,6 +3868,7 @@ private:
// Administrative methods:
size_t size() const { return m_buffer.size(); }
+size_t allocSize() const { return m_buffer.allocSize(); }
unsigned char *buffer() const { return m_buffer.buffer(); }
bool oom() const { return m_buffer.oom(); }
bool isAligned(int alignment) const { return m_buffer.isAligned(alignment); }

==== File 6 of 11 ====

@@ -38,6 +38,11 @@ size_t ExecutableAllocator::largeAllocSize = 0;
ExecutablePool::~ExecutablePool()
{
+MOZ_ASSERT(m_ionCodeBytes == 0);
+MOZ_ASSERT(m_baselineCodeBytes == 0);
+MOZ_ASSERT(m_regexpCodeBytes == 0);
+MOZ_ASSERT(m_otherCodeBytes == 0);
m_allocator->releasePoolPages(this);
}

==== File 7 of 11 ====

@@ -85,7 +85,7 @@ namespace JSC {
class ExecutableAllocator;
-enum CodeKind { ION_CODE, BASELINE_CODE, REGEXP_CODE, OTHER_CODE };
+enum CodeKind { ION_CODE = 0, BASELINE_CODE, REGEXP_CODE, OTHER_CODE };
// These are reference-counted. A new one starts with a count of 1.
class ExecutablePool {
@@ -130,6 +130,31 @@ public:
if (--m_refCount == 0)
js_delete(this);
}
+void release(size_t n, CodeKind kind)
+{
+switch (kind) {
+case ION_CODE:
+m_ionCodeBytes -= n;
+MOZ_ASSERT(m_ionCodeBytes < m_allocation.size); // Shouldn't underflow.
+break;
+case BASELINE_CODE:
+m_baselineCodeBytes -= n;
+MOZ_ASSERT(m_baselineCodeBytes < m_allocation.size);
+break;
+case REGEXP_CODE:
+m_regexpCodeBytes -= n;
+MOZ_ASSERT(m_regexpCodeBytes < m_allocation.size);
+break;
+case OTHER_CODE:
+m_otherCodeBytes -= n;
+MOZ_ASSERT(m_otherCodeBytes < m_allocation.size);
+break;
+default:
+MOZ_ASSUME_UNREACHABLE("bad code kind");
+}
+release();
+}
ExecutablePool(ExecutableAllocator* allocator, Allocation a)
: m_allocator(allocator), m_freePtr(a.pages), m_end(m_freePtr + a.size), m_allocation(a),
@@ -223,10 +248,11 @@ public:
// pool; i.e. alloc() increments the count before returning the object.
void* alloc(size_t n, ExecutablePool** poolp, CodeKind type)
{
-// Round 'n' up to a multiple of word size; if all allocations are of
-// word sized quantities, then all subsequent allocations will be
+// Caller must ensure 'n' is word-size aligned. If all allocations are
+// of word sized quantities, then all subsequent allocations will be
// aligned.
-n = roundUpAllocationSize(n, sizeof(void*));
+JS_ASSERT(roundUpAllocationSize(n, sizeof(void*)) == n);
if (n == OVERSIZE_ALLOCATION) {
*poolp = NULL;
return NULL;
@@ -347,7 +373,7 @@ public:
ExecutablePool* pool = createPool(largeAllocSize);
if (!pool)
return NULL;
-// At this point, local |pool| is the owner.
+// At this point, local |pool| is the owner.
if (m_smallPools.length() < maxSmallPools) {
// We haven't hit the maximum number of live pools; add the new pool.
@@ -373,7 +399,7 @@ public:
}
}
-// Pass ownership to the caller.
+// Pass ownership to the caller.
return pool;
}
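
The new release(size_t, CodeKind) overload completes a simple invariant: alloc() charges the word-aligned byte count to one of four per-kind counters (m_ionCodeBytes, m_baselineCodeBytes, m_regexpCodeBytes, m_otherCodeBytes), and every consumer must hand the same count and kind back on release, which is what makes the destructor's zero asserts and the per-kind memory reports exact. A self-contained sketch of that bookkeeping pattern (the Pool class is illustrative; the real ExecutablePool is also reference counted, which is elided here):

    #include <cassert>
    #include <cstddef>

    enum CodeKind { ION_CODE = 0, BASELINE_CODE, REGEXP_CODE, OTHER_CODE };

    // Toy pool: tracks bytes handed out per code kind so a reporter can
    // attribute executable memory to each JIT.
    class Pool
    {
        size_t bytes_[4] = {0, 0, 0, 0};

      public:
        void charge(size_t n, CodeKind kind) { bytes_[kind] += n; }
        void release(size_t n, CodeKind kind)
        {
            assert(bytes_[kind] >= n); // shouldn't underflow
            bytes_[kind] -= n;
        }
        size_t bytes(CodeKind kind) const { return bytes_[kind]; }
        ~Pool()
        {
            // Every charge must be released with the same size and kind,
            // or the reported numbers would drift over time.
            for (size_t b : bytes_)
                assert(b == 0);
        }
    };

    int main()
    {
        Pool pool;
        pool.charge(4096, BASELINE_CODE);
        pool.charge(256, REGEXP_CODE);
        pool.release(256, REGEXP_CODE);
        pool.release(4096, BASELINE_CODE);
        return 0;
    }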

==== File 8 of 11 ====

@@ -615,25 +615,28 @@ JitRuntime::getVMWrapper(const VMFunction &f) const
template <AllowGC allowGC>
JitCode *
-JitCode::New(JSContext *cx, uint8_t *code, uint32_t bufferSize, JSC::ExecutablePool *pool)
+JitCode::New(JSContext *cx, uint8_t *code, uint32_t bufferSize, uint32_t headerSize,
+JSC::ExecutablePool *pool, JSC::CodeKind kind)
{
JitCode *codeObj = js::NewJitCode<allowGC>(cx);
if (!codeObj) {
-pool->release();
+pool->release(headerSize + bufferSize, kind);
return nullptr;
}
-new (codeObj) JitCode(code, bufferSize, pool);
+new (codeObj) JitCode(code, bufferSize, headerSize, pool, kind);
return codeObj;
}
template
JitCode *
-JitCode::New<CanGC>(JSContext *cx, uint8_t *code, uint32_t bufferSize, JSC::ExecutablePool *pool);
+JitCode::New<CanGC>(JSContext *cx, uint8_t *code, uint32_t bufferSize, uint32_t headerSize,
+JSC::ExecutablePool *pool, JSC::CodeKind kind);
template
JitCode *
-JitCode::New<NoGC>(JSContext *cx, uint8_t *code, uint32_t bufferSize, JSC::ExecutablePool *pool);
+JitCode::New<NoGC>(JSContext *cx, uint8_t *code, uint32_t bufferSize, uint32_t headerSize,
+JSC::ExecutablePool *pool, JSC::CodeKind kind);
void
JitCode::copyFrom(MacroAssembler &masm)
@@ -696,7 +699,7 @@ JitCode::finalize(FreeOp *fop)
// Horrible hack: if we are using perf integration, we don't
// want to reuse code addresses, so we just leak the memory instead.
if (!PerfEnabled())
-pool_->release();
+pool_->release(headerSize_ + bufferSize_, JSC::CodeKind(kind_));
pool_ = nullptr;
}
}

==== File 9 of 11 ====

@@ -14,6 +14,7 @@
#include "jsinfer.h"
#include "jstypes.h"
#include "assembler/jit/ExecutableAllocator.h"
#include "gc/Heap.h"
#include "jit/IonOptimizationLevels.h"
#include "jit/IonTypes.h"
@@ -37,13 +38,15 @@ class JitCode : public gc::BarrieredCell<JitCode>
protected:
uint8_t *code_;
JSC::ExecutablePool *pool_;
-uint32_t bufferSize_; // Total buffer size.
+uint32_t bufferSize_; // Total buffer size. Does not include headerSize_.
uint32_t insnSize_; // Instruction stream size.
uint32_t dataSize_; // Size of the read-only data area.
uint32_t jumpRelocTableBytes_; // Size of the jump relocation table.
uint32_t dataRelocTableBytes_; // Size of the data relocation table.
uint32_t preBarrierTableBytes_; // Size of the prebarrier table.
-bool invalidated_; // Whether the code object has been invalidated.
+uint8_t headerSize_ : 5; // Number of bytes allocated before codeStart.
+uint8_t kind_ : 3; // JSC::CodeKind, for the memory reporters.
+bool invalidated_ : 1; // Whether the code object has been invalidated.
// This is necessary to prevent GC tracing.
#if JS_BITS_PER_WORD == 32
@@ -55,7 +58,8 @@ class JitCode : public gc::BarrieredCell<JitCode>
: code_(nullptr),
pool_(nullptr)
{ }
-JitCode(uint8_t *code, uint32_t bufferSize, JSC::ExecutablePool *pool)
+JitCode(uint8_t *code, uint32_t bufferSize, uint32_t headerSize, JSC::ExecutablePool *pool,
+JSC::CodeKind kind)
: code_(code),
pool_(pool),
bufferSize_(bufferSize),
@@ -64,8 +68,13 @@ class JitCode : public gc::BarrieredCell<JitCode>
jumpRelocTableBytes_(0),
dataRelocTableBytes_(0),
preBarrierTableBytes_(0),
+headerSize_(headerSize),
+kind_(kind),
invalidated_(false)
-{ }
+{
+MOZ_ASSERT(JSC::CodeKind(kind_) == kind);
+MOZ_ASSERT(headerSize_ == headerSize);
+}
uint32_t dataOffset() const {
return insnSize_;
@@ -126,7 +135,8 @@ class JitCode : public gc::BarrieredCell<JitCode>
// object can be allocated, nullptr is returned. On failure, |pool| is
// automatically released, so the code may be freed.
template <AllowGC allowGC>
-static JitCode *New(JSContext *cx, uint8_t *code, uint32_t bufferSize, JSC::ExecutablePool *pool);
+static JitCode *New(JSContext *cx, uint8_t *code, uint32_t bufferSize, uint32_t headerSize,
+JSC::ExecutablePool *pool, JSC::CodeKind kind);
public:
static inline ThingRootKind rootKind() { return THING_ROOT_JIT_CODE; }
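
Packing headerSize_ into 5 bits and kind_ into 3 keeps JitCode compact, but a bitfield silently truncates any value too wide for it; the constructor's two MOZ_ASSERTs guard against that by writing each field and reading it back. A minimal standalone illustration of the same round-trip check, using plain assert and illustrative values:

    #include <cassert>
    #include <cstdint>

    struct Packed
    {
        uint8_t headerSize_ : 5; // holds 0..31
        uint8_t kind_ : 3;       // holds 0..7
    };

    int main()
    {
        uint32_t headerSize = 24; // must fit in 5 bits
        unsigned kind = 2;        // must fit in 3 bits

        Packed p;
        p.headerSize_ = headerSize;
        p.kind_ = kind;

        // Read the fields back: if either value had been truncated on
        // assignment, these comparisons would fail.
        assert(p.headerSize_ == headerSize);
        assert(p.kind_ == kind);
        return 0;
    }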

==== File 10 of 11 ====

@@ -44,6 +44,9 @@ class Linker
if (bytesNeeded >= MAX_BUFFER_SIZE)
return fail(cx);
+// ExecutableAllocator requires bytesNeeded to be word-size aligned.
+bytesNeeded = AlignBytes(bytesNeeded, sizeof(void *));
uint8_t *result = (uint8_t *)execAlloc->alloc(bytesNeeded, &pool, kind);
if (!result)
return fail(cx);
@@ -54,8 +57,8 @@
// Bump the code up to a nice alignment.
codeStart = (uint8_t *)AlignBytes((uintptr_t)codeStart, CodeAlignment);
uint32_t headerSize = codeStart - result;
-JitCode *code = JitCode::New<allowGC>(cx, codeStart,
-bytesNeeded - headerSize, pool);
+JitCode *code = JitCode::New<allowGC>(cx, codeStart, bytesNeeded - headerSize,
+headerSize, pool, kind);
if (!code)
return nullptr;
if (masm.oom())

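Two alignments are at work in this file: bytesNeeded is rounded up to word size because the allocator now asserts alignment instead of rounding internally, and the returned pointer is then bumped up to CodeAlignment, with the skipped bytes recorded as headerSize so that headerSize + bufferSize can be handed back to the pool when the JitCode is finalized. A small worked sketch of that pointer arithmetic (the CodeAlignment value and the starting address are illustrative):

    #include <cassert>
    #include <cstdint>
    #include <cstdio>

    static uintptr_t alignBytes(uintptr_t v, uintptr_t alignment)
    {
        return (v + alignment - 1) & ~(alignment - 1);
    }

    int main()
    {
        const uintptr_t CodeAlignment = 16; // illustrative value
        uintptr_t result = 0x7f32a008;      // as if returned by the pool
        uintptr_t codeStart = alignBytes(result, CodeAlignment);
        uint32_t headerSize = uint32_t(codeStart - result);

        // The header bytes sit before codeStart; headerSize + bufferSize
        // must be returned to the pool together on release.
        assert(headerSize < CodeAlignment);
        printf("headerSize=%u codeStart=%#lx\n",
               headerSize, (unsigned long)codeStart);
        return 0;
    }
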
==== File 11 of 11 ====

@@ -82,16 +82,16 @@ public:
bool isFallBack() { return m_needFallBack; }
#ifdef YARR_8BIT_CHAR_SUPPORT
-bool has8BitCode() const { return m_ref8.size(); }
+bool has8BitCode() const { return m_ref8.allocSize(); }
void set8BitCode(MacroAssemblerCodeRef ref) { m_ref8 = ref; }
-bool has8BitCodeMatchOnly() const { return m_matchOnly8.size(); }
+bool has8BitCodeMatchOnly() const { return m_matchOnly8.allocSize(); }
void set8BitCodeMatchOnly(MacroAssemblerCodeRef matchOnly) { m_matchOnly8 = matchOnly; }
#endif
-bool has16BitCode() const { return m_ref16.size(); }
+bool has16BitCode() const { return m_ref16.allocSize(); }
void set16BitCode(MacroAssemblerCodeRef ref) { m_ref16 = ref; }
-bool has16BitCodeMatchOnly() const { return m_matchOnly16.size(); }
+bool has16BitCodeMatchOnly() const { return m_matchOnly16.allocSize(); }
void set16BitCodeMatchOnly(MacroAssemblerCodeRef matchOnly) { m_matchOnly16 = matchOnly; }
#if YARR_8BIT_CHAR_SUPPORT