David Anderson 2010-03-11 12:19:51 -08:00
commit f5c8a2d251
10 changed files with 75 additions and 101 deletions

View File

@ -200,7 +200,7 @@ struct JSArenaPool {
if ((pool)->current == (a)) (pool)->current = &(pool)->first; \
*(pnext) = (a)->next; \
JS_CLEAR_ARENA(a); \
js_free(a); \
free(a); \
(a) = NULL; \
JS_END_MACRO
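Aside (not part of the commit): js_free is a thin inline wrapper over the C library free (see the jsutil.h hunk at the end of this diff), so substituting plain free here is behavior-preserving when no custom allocator is configured. The JS_BEGIN_MACRO / JS_END_MACRO pair is SpiderMonkey's do { ... } while (0) wrapper, which lets a multi-statement macro act as a single statement. A minimal sketch of that idiom, with a hypothetical DESTROY_AND_NULL macro standing in for the arena macro above:

#include <stdlib.h>

/* Sketch only: the do/while(0) idiom keeps the macro safe after an
 * unbraced if/else, and nulling the pointer makes accidental reuse
 * fail loudly instead of silently touching freed memory. */
#define DESTROY_AND_NULL(p)     \
    do {                        \
        free(p);                \
        (p) = NULL;             \
    } while (0)

/* Usage: if (done) DESTROY_AND_NULL(buf); */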

View File

@ -99,7 +99,6 @@ js_PurgeGSNCache(JSGSNCache *cache);
#define JS_PURGE_GSN_CACHE(cx) js_PurgeGSNCache(&JS_GSN_CACHE(cx))
#define JS_METER_GSN_CACHE(cx,cnt) GSN_CACHE_METER(&JS_GSN_CACHE(cx), cnt)
#ifdef JS_TRACER
/* Forward declarations of nanojit types. */
namespace nanojit {
@ -111,11 +110,9 @@ template<typename K, typename V, typename H> class HashMap;
template<typename T> class Seq;
} /* namespace nanojit */
#endif
namespace js {
#ifdef JS_TRACER
/* Tracer constants. */
static const size_t MONITOR_N_GLOBAL_STATES = 4;
static const size_t FRAGMENT_TABLE_SIZE = 512;
@ -220,7 +217,6 @@ struct TraceNativeStorage
double *global() { return stack_global_buf + MAX_NATIVE_STACK_SLOTS; }
FrameInfo **callstack() { return callstack_buf; }
};
#endif
/* Holds data to track a single global. */
struct GlobalState {
@ -330,15 +326,12 @@ class CallStack
}
};
#ifdef JS_TRACER
/* Holds the number of recording attempts for an address. */
typedef HashMap<jsbytecode*,
size_t,
DefaultHasher<jsbytecode*>,
SystemAllocPolicy> RecordAttemptMap;
class Oracle;
/*
* Trace monitor. Every JSThread (if JS_THREADSAFE) or JSRuntime (if not
* JS_THREADSAFE) has an associated trace monitor that keeps track of loop
@ -400,7 +393,6 @@ struct TraceMonitor {
nanojit::Assembler* assembler;
FrameInfoCache* frameCache;
Oracle* oracle;
TraceRecorder* recorder;
GlobalState globalStates[MONITOR_N_GLOBAL_STATES];
@ -459,7 +451,6 @@ struct TraceMonitor {
};
} /* namespace js */
#endif
/*
* N.B. JS_ON_TRACE(cx) is true if JIT code is on the stack in the current

View File

@ -3057,6 +3057,10 @@ js_GC(JSContext *cx, JSGCInvocationKind gckind)
}
#endif
#ifdef JS_TRACER
PurgeJITOracle();
#endif
/*
* Reset the property cache's type id generator so we can compress ids.
* Same for the protoHazardShape proxy-shape standing in for all object

View File

@ -67,7 +67,7 @@
static void *
DefaultAllocTable(void *pool, size_t size)
{
return js_malloc(size);
return malloc(size);
}
static void
@ -79,7 +79,7 @@ DefaultFreeTable(void *pool, void *item, size_t size)
static JSHashEntry *
DefaultAllocEntry(void *pool, const void *key)
{
return (JSHashEntry*) js_malloc(sizeof(JSHashEntry));
return (JSHashEntry*) malloc(sizeof(JSHashEntry));
}
static void

View File

@ -839,7 +839,7 @@ js_SetSlotThreadSafe(JSContext *cx, JSObject *obj, uint32 slot, jsval v)
static JSFatLock *
NewFatlock()
{
JSFatLock *fl = (JSFatLock *)js_malloc(sizeof(JSFatLock)); /* for now */
JSFatLock *fl = (JSFatLock *)malloc(sizeof(JSFatLock)); /* for now */
if (!fl) return NULL;
fl->susp = 0;
fl->next = NULL;

View File

@ -121,8 +121,8 @@ class UpRecursiveSlotMap : public RecursiveSlotMap
};
#if defined DEBUG
JS_REQUIRES_STACK void
TraceRecorder::AssertDownFrameIsConsistent(JSContext* cx, VMSideExit* anchor, FrameInfo* fi)
static JS_REQUIRES_STACK void
AssertDownFrameIsConsistent(JSContext* cx, VMSideExit* anchor, FrameInfo* fi)
{
JS_ASSERT(anchor->recursive_down);
JS_ASSERT(anchor->recursive_down->callerHeight == fi->callerHeight);

View File

@ -242,9 +242,9 @@ PointerRangeSize(T *begin, T *end)
class SystemAllocPolicy
{
public:
void *malloc(size_t bytes) { return js_malloc(bytes); }
void *realloc(void *p, size_t bytes) { return js_realloc(p, bytes); }
void free(void *p) { js_free(p); }
void *malloc(size_t bytes) { return ::malloc(bytes); }
void *realloc(void *p, size_t bytes) { return ::realloc(p, bytes); }
void free(void *p) { ::free(p); }
void reportAllocOverflow() const {}
};
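Aside (not part of the commit): the scope qualifier is what makes this hunk work. Inside a class whose own members are named malloc, realloc, and free, an unqualified call would resolve to the member and recurse; ::malloc forces the global C allocator. A hedged sketch of the same shape, under a hypothetical name (SketchAllocPolicy is not a SpiderMonkey type):

#include <stdlib.h>

// Sketch only: an allocation-policy class in the style of SystemAllocPolicy.
// Unqualified malloc(bytes) inside these members would find the member
// function first and call itself; the :: prefix selects the C library.
class SketchAllocPolicy {
  public:
    void *malloc(size_t bytes) { return ::malloc(bytes); }
    void *realloc(void *p, size_t bytes) { return ::realloc(p, bytes); }
    void free(void *p) { ::free(p); }
};

js::HashMap and js::Vector take a policy class like this as a template parameter and funnel every allocation through it, which is how SystemAllocPolicy above ends up at these calls.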

View File

@ -107,7 +107,7 @@ nanojit::Allocator::allocChunk(size_t nbytes)
{
VMAllocator *vma = (VMAllocator*)this;
JS_ASSERT(!vma->outOfMemory());
void *p = js_calloc(nbytes);
void *p = calloc(1, nbytes);
if (!p) {
JS_ASSERT(nbytes < sizeof(vma->mReserve));
vma->mOutOfMemory = true;
@ -121,7 +121,7 @@ void
nanojit::Allocator::freeChunk(void *p) {
VMAllocator *vma = (VMAllocator*)this;
if (p != &vma->mReserve[0])
js_free(p);
free(p);
}
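Aside (not part of the commit): this hunk keeps the existing out-of-memory reserve and only swaps js_calloc/js_free for the plain C calls. The pattern: if the real allocation fails, hand back a small static reserve buffer and raise a flag so the compilation pipeline can bail out cleanly instead of dereferencing NULL mid-compile. A standalone sketch under hypothetical names (ReserveAllocator is not the real VMAllocator):

#include <stdlib.h>
#include <string.h>

// Sketch only: allocate normally, but on failure hand out a static reserve
// and set a flag the caller is expected to check before continuing.
class ReserveAllocator {
    char mReserve[4096];
    bool mOutOfMemory;
  public:
    ReserveAllocator() : mOutOfMemory(false) {}

    void *allocChunk(size_t nbytes) {
        void *p = calloc(1, nbytes);
        if (!p && nbytes <= sizeof(mReserve)) {
            mOutOfMemory = true;            // caller must check and abort the trace
            memset(mReserve, 0, nbytes);    // match calloc's zeroed contract
            p = mReserve;
        }
        return p;
    }

    void freeChunk(void *p) {
        if (p != mReserve)                  // never free the static reserve
            free(p);
    }

    bool outOfMemory() const { return mOutOfMemory; }
};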
void
@ -394,12 +394,6 @@ static void
DumpPeerStability(TraceMonitor* tm, const void* ip, JSObject* globalObj, uint32 globalShape, uint32 argc);
#endif
void
SetBuiltinError(JSContext *cx)
{
cx->interpState->builtinStatus |= BUILTIN_ERROR;
}
/*
* We really need a better way to configure the JIT. Shaver, where is
* my fancy JIT object?
@ -915,6 +909,12 @@ TraceRecorder::tprint(const char *format, LIns *ins1, LIns *ins2, LIns *ins3, LI
}
#endif
/*
* The entire VM shares one oracle. Collisions and concurrent updates are
* tolerated and, at worst, cause performance regressions.
*/
static Oracle oracle;
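Aside (not part of the commit): this is the heart of the change. The per-TraceMonitor Oracle* (removed below from both the TraceMonitor and TraceRecorder declarations) becomes a single file-static instance shared by every thread in the VM. That is safe only because oracle entries are pure hints: a lost or clobbered update can cost a re-record or a missed demotion, never an incorrect result. A standalone sketch of that racy-but-harmless hint table, under hypothetical names (HintTable is not the real Oracle):

#include <stddef.h>

// Sketch only: a process-wide hint table in the spirit of the shared oracle.
// Updates are deliberately unsynchronized; the worst a race can do is make
// the JIT speculate badly once more, so no locking is needed.
class HintTable {
    static const size_t kBits = 1 << 14;
    bool hints[kBits];
  public:
    HintTable() { clear(); }
    void mark(const void* pc) { hints[hash(pc)] = true; }
    bool marked(const void* pc) const { return hints[hash(pc)]; }
    void clear() { for (size_t i = 0; i < kBits; ++i) hints[i] = false; }
  private:
    static size_t hash(const void* pc) {
        return (reinterpret_cast<size_t>(pc) >> 2) & (kBits - 1);
    }
};

static HintTable hintTable;   // one instance for the whole VM, like `oracle` above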
Tracker::Tracker()
{
pagelist = NULL;
@ -954,7 +954,7 @@ struct Tracker::TrackerPage*
Tracker::addTrackerPage(const void* v)
{
jsuword base = getTrackerPageBase(v);
struct TrackerPage* p = (struct TrackerPage*) js_calloc(sizeof(*p));
struct TrackerPage* p = (struct TrackerPage*) calloc(1, sizeof(*p));
p->base = base;
p->next = pagelist;
pagelist = p;
@ -967,7 +967,7 @@ Tracker::clear()
while (pagelist) {
TrackerPage* p = pagelist;
pagelist = pagelist->next;
js_free(p);
free(p);
}
}
@ -1214,44 +1214,44 @@ Oracle::clearDemotability()
_pcDontDemote.reset();
}
JS_REQUIRES_STACK void
TraceRecorder::MarkSlotUndemotable(JSContext* cx, LinkableFragment* f, unsigned slot)
JS_REQUIRES_STACK static JS_INLINE void
MarkSlotUndemotable(JSContext* cx, LinkableFragment* f, unsigned slot)
{
if (slot < f->nStackTypes) {
oracle->markStackSlotUndemotable(cx, slot);
oracle.markStackSlotUndemotable(cx, slot);
return;
}
uint16* gslots = f->globalSlots->data();
oracle->markGlobalSlotUndemotable(cx, gslots[slot - f->nStackTypes]);
oracle.markGlobalSlotUndemotable(cx, gslots[slot - f->nStackTypes]);
}
JS_REQUIRES_STACK void
TraceRecorder::MarkSlotUndemotable(JSContext* cx, LinkableFragment* f, unsigned slot, const void* pc)
JS_REQUIRES_STACK static JS_INLINE void
MarkSlotUndemotable(JSContext* cx, LinkableFragment* f, unsigned slot, const void* pc)
{
if (slot < f->nStackTypes) {
oracle->markStackSlotUndemotable(cx, slot, pc);
oracle.markStackSlotUndemotable(cx, slot, pc);
return;
}
uint16* gslots = f->globalSlots->data();
oracle->markGlobalSlotUndemotable(cx, gslots[slot - f->nStackTypes]);
oracle.markGlobalSlotUndemotable(cx, gslots[slot - f->nStackTypes]);
}
static JS_REQUIRES_STACK bool
IsSlotUndemotable(Oracle* oracle, JSContext* cx, LinkableFragment* f, unsigned slot, const void* ip)
static JS_REQUIRES_STACK inline bool
IsSlotUndemotable(JSContext* cx, LinkableFragment* f, unsigned slot, const void* ip)
{
if (slot < f->nStackTypes)
return oracle->isStackSlotUndemotable(cx, slot, ip);
return oracle.isStackSlotUndemotable(cx, slot, ip);
uint16* gslots = f->globalSlots->data();
return oracle->isGlobalSlotUndemotable(cx, gslots[slot - f->nStackTypes]);
return oracle.isGlobalSlotUndemotable(cx, gslots[slot - f->nStackTypes]);
}
static JS_REQUIRES_STACK bool
IsSlotUndemotable(Oracle* oracle, JSContext* cx, LinkableFragment* f, unsigned slot)
static JS_REQUIRES_STACK inline bool
IsSlotUndemotable(JSContext* cx, LinkableFragment* f, unsigned slot)
{
return IsSlotUndemotable(oracle, cx, f, slot, cx->fp->regs->pc);
return IsSlotUndemotable(cx, f, slot, cx->fp->regs->pc);
}
class FrameInfoCache
@ -1996,7 +1996,7 @@ public:
visitGlobalSlot(jsval *vp, unsigned n, unsigned slot) {
TraceType type = getCoercedType(*vp);
if (type == TT_INT32 &&
JS_TRACE_MONITOR(mCx).oracle->isGlobalSlotUndemotable(mCx, slot))
oracle.isGlobalSlotUndemotable(mCx, slot))
type = TT_DOUBLE;
JS_ASSERT(type != TT_JSVAL);
debug_only_printf(LC_TMTracer,
@ -2010,7 +2010,7 @@ public:
for (int i = 0; i < count; ++i) {
TraceType type = getCoercedType(vp[i]);
if (type == TT_INT32 &&
JS_TRACE_MONITOR(mCx).oracle->isStackSlotUndemotable(mCx, length()))
oracle.isStackSlotUndemotable(mCx, length()))
type = TT_DOUBLE;
JS_ASSERT(type != TT_JSVAL);
debug_only_printf(LC_TMTracer,
@ -2166,7 +2166,6 @@ TraceRecorder::TraceRecorder(JSContext* cx, VMSideExit* anchor, VMFragment* frag
RecordReason recordReason)
: cx(cx),
traceMonitor(&JS_TRACE_MONITOR(cx)),
oracle(JS_TRACE_MONITOR(cx).oracle),
fragment(fragment),
tree(fragment->root),
recordReason(recordReason),
@ -2689,7 +2688,6 @@ TraceMonitor::flush()
codeAlloc->reset();
tempAlloc->reset();
reTempAlloc->reset();
oracle->clear();
Allocator& alloc = *dataAlloc;
@ -3533,7 +3531,7 @@ TraceRecorder::importGlobalSlot(unsigned slot)
int index = tree->globalSlots->offsetOf(uint16(slot));
if (index == -1) {
type = getCoercedType(*vp);
if (type == TT_INT32 && oracle->isGlobalSlotUndemotable(cx, slot))
if (type == TT_INT32 && oracle.isGlobalSlotUndemotable(cx, slot))
type = TT_DOUBLE;
index = (int)tree->globalSlots->length();
tree->globalSlots->add(uint16(slot));
@ -3764,7 +3762,7 @@ public:
* Aggressively undo speculation so the inner tree will compile
* if this fails.
*/
mRecorder.oracle->markGlobalSlotUndemotable(mCx, slot);
oracle.markGlobalSlotUndemotable(mCx, slot);
}
JS_ASSERT(!(!isPromote && *mTypeMap == TT_INT32));
++mTypeMap;
@ -3808,7 +3806,7 @@ public:
* Aggressively undo speculation so the inner tree will compile
* if this fails.
*/
mRecorder.oracle->markStackSlotUndemotable(mCx, mSlotnum);
oracle.markStackSlotUndemotable(mCx, mSlotnum);
}
JS_ASSERT(!(!isPromote && *mTypeMap == TT_INT32));
++vp;
@ -4429,7 +4427,7 @@ class SlotMap : public SlotVisitorBase
{
for (unsigned i = 0; i < length(); i++) {
if (get(i).lastCheck == TypeCheck_Undemote)
mRecorder.MarkSlotUndemotable(mRecorder.cx, mRecorder.tree, i);
MarkSlotUndemotable(mRecorder.cx, mRecorder.tree, i);
}
}
@ -4754,7 +4752,7 @@ TypeMapLinkability(JSContext* cx, const TypeMap& typeMap, TreeFragment* peer)
if (typeMap[i] == peerMap[i])
continue;
if (typeMap[i] == TT_INT32 && peerMap[i] == TT_DOUBLE &&
IsSlotUndemotable(JS_TRACE_MONITOR(cx).oracle, cx, peer, i, peer->ip)) {
IsSlotUndemotable(cx, peer, i, peer->ip)) {
consensus = TypeConsensus_Undemotes;
} else {
return TypeConsensus_Bad;
@ -4763,8 +4761,8 @@ TypeMapLinkability(JSContext* cx, const TypeMap& typeMap, TreeFragment* peer)
return consensus;
}
JS_REQUIRES_STACK unsigned
TraceRecorder::FindUndemotesInTypemaps(JSContext* cx, const TypeMap& typeMap, LinkableFragment* f,
static JS_REQUIRES_STACK unsigned
FindUndemotesInTypemaps(JSContext* cx, const TypeMap& typeMap, LinkableFragment* f,
Queue<unsigned>& undemotes)
{
undemotes.setLength(0);
@ -5649,7 +5647,6 @@ FindLoopEdgeTarget(JSContext* cx, VMSideExit* exit, TreeFragment** peerp)
TreeFragment* from = exit->root();
JS_ASSERT(from->code());
Oracle* oracle = JS_TRACE_MONITOR(cx).oracle;
TypeMap typeMap(NULL);
FullMapFromExit(typeMap, exit);
@ -5661,14 +5658,14 @@ FindLoopEdgeTarget(JSContext* cx, VMSideExit* exit, TreeFragment** peerp)
if (typeMap[i] == TT_DOUBLE) {
if (exit->exitType == RECURSIVE_UNLINKED_EXIT) {
if (i < exit->numStackSlots)
oracle->markStackSlotUndemotable(cx, i, exit->recursive_pc);
oracle.markStackSlotUndemotable(cx, i, exit->recursive_pc);
else
oracle->markGlobalSlotUndemotable(cx, gslots[i - exit->numStackSlots]);
oracle.markGlobalSlotUndemotable(cx, gslots[i - exit->numStackSlots]);
}
if (i < from->nStackTypes)
oracle->markStackSlotUndemotable(cx, i, from->ip);
oracle.markStackSlotUndemotable(cx, i, from->ip);
else if (i >= exit->numStackSlots)
oracle->markGlobalSlotUndemotable(cx, gslots[i - exit->numStackSlots]);
oracle.markGlobalSlotUndemotable(cx, gslots[i - exit->numStackSlots]);
}
}
@ -6036,7 +6033,7 @@ TraceRecorder::attemptTreeCall(TreeFragment* f, uintN& inlineCallCount)
}
case OVERFLOW_EXIT:
oracle->markInstructionUndemotable(cx->fp->regs->pc);
oracle.markInstructionUndemotable(cx->fp->regs->pc);
/* FALL THROUGH */
case RECURSIVE_SLURP_FAIL_EXIT:
case RECURSIVE_SLURP_MISMATCH_EXIT:
@ -6138,10 +6135,10 @@ public:
if (!IsEntryTypeCompatible(vp, mTypeMap)) {
mOk = false;
} else if (!isPromoteInt(mRecorder.get(vp)) && *mTypeMap == TT_INT32) {
mRecorder.oracle->markGlobalSlotUndemotable(mCx, slot);
oracle.markGlobalSlotUndemotable(mCx, slot);
mOk = false;
} else if (JSVAL_IS_INT(*vp) && *mTypeMap == TT_DOUBLE) {
mRecorder.oracle->markGlobalSlotUndemotable(mCx, slot);
oracle.markGlobalSlotUndemotable(mCx, slot);
}
mTypeMap++;
}
@ -6153,10 +6150,10 @@ public:
if (!IsEntryTypeCompatible(vp, mTypeMap)) {
mOk = false;
} else if (!isPromoteInt(mRecorder.get(vp)) && *mTypeMap == TT_INT32) {
mRecorder.oracle->markStackSlotUndemotable(mCx, mStackSlotNum);
oracle.markStackSlotUndemotable(mCx, mStackSlotNum);
mOk = false;
} else if (JSVAL_IS_INT(*vp) && *mTypeMap == TT_DOUBLE) {
mRecorder.oracle->markStackSlotUndemotable(mCx, mStackSlotNum);
oracle.markStackSlotUndemotable(mCx, mStackSlotNum);
}
vp++;
mTypeMap++;
@ -6986,7 +6983,7 @@ MonitorLoopEdge(JSContext* cx, uintN& inlineCallCount, RecordReason reason)
return rv;
case OVERFLOW_EXIT:
tm->oracle->markInstructionUndemotable(cx->fp->regs->pc);
oracle.markInstructionUndemotable(cx->fp->regs->pc);
/* FALL THROUGH */
case RECURSIVE_SLURP_FAIL_EXIT:
case RECURSIVE_SLURP_MISMATCH_EXIT:
@ -7417,8 +7414,6 @@ InitJIT(TraceMonitor *tm)
/* Set the default size for the code cache to 16MB. */
tm->maxCodeCacheBytes = 16 M;
tm->oracle = new Oracle();
tm->recordAttempts = new RecordAttemptMap;
if (!tm->recordAttempts->init(PC_HASH_COUNT))
abort();
@ -7501,7 +7496,6 @@ FinishJIT(TraceMonitor *tm)
#endif
delete tm->recordAttempts;
delete tm->oracle;
#ifdef DEBUG
// Recover profiling data from expiring Fragments, and display
@ -7573,6 +7567,12 @@ FinishJIT(TraceMonitor *tm)
tm->cachedTempTypeMap = NULL;
}
void
PurgeJITOracle()
{
oracle.clear();
}
JS_REQUIRES_STACK void
PurgeScriptFragments(JSContext* cx, JSScript* script)
{
@ -8030,7 +8030,7 @@ TraceRecorder::alu(LOpcode v, jsdouble v0, jsdouble v1, LIns* s0, LIns* s1)
* integers and the oracle must not give us a negative hint for the
* instruction.
*/
if (oracle->isInstructionUndemotable(cx->fp->regs->pc) || !isPromoteInt(s0) || !isPromoteInt(s1)) {
if (oracle.isInstructionUndemotable(cx->fp->regs->pc) || !isPromoteInt(s0) || !isPromoteInt(s1)) {
out:
if (v == LIR_fmod) {
LIns* args[] = { s1, s0 };
@ -10243,7 +10243,7 @@ TraceRecorder::record_JSOP_NEG()
* a double. Only follow this path if we're not an integer that's 0 and
* we're not a double that's zero.
*/
if (!oracle->isInstructionUndemotable(cx->fp->regs->pc) &&
if (!oracle.isInstructionUndemotable(cx->fp->regs->pc) &&
isPromoteInt(a) &&
(!JSVAL_IS_INT(v) || JSVAL_TO_INT(v) != 0) &&
(!JSVAL_IS_DOUBLE(v) || !JSDOUBLE_IS_NEGZERO(*JSVAL_TO_DOUBLE(v))) &&
@ -15283,7 +15283,7 @@ StopTraceVisNative(JSContext *cx, JSObject *obj, uintN argc, jsval *argv, jsval
#endif /* MOZ_TRACEVIS */
JS_REQUIRES_STACK void
TraceRecorder::CaptureStackTypes(JSContext* cx, unsigned callDepth, TraceType* typeMap)
CaptureStackTypes(JSContext* cx, unsigned callDepth, TraceType* typeMap)
{
CaptureTypesVisitor capVisitor(cx, typeMap);
VisitStackSlots(capVisitor, cx, callDepth);

View File

@ -78,7 +78,7 @@ public:
memcpy(tmp, _data, _len * sizeof(T));
_data = tmp;
} else {
_data = (T*)js_realloc(_data, _max * sizeof(T));
_data = (T*)realloc(_data, _max * sizeof(T));
}
#if defined(DEBUG)
memset(&_data[_len], 0xcd, _max - _len);
@ -95,7 +95,7 @@ public:
~Queue() {
if (!alloc)
js_free(_data);
free(_data);
}
bool contains(T a) {
@ -215,8 +215,6 @@ public:
void clear();
};
struct TreeFragment;
class VMFragment : public nanojit::Fragment {
public:
VMFragment(const void* _ip verbose_only(, uint32_t profFragID))
@ -772,8 +770,11 @@ struct ArgsPrivateNative {
}
};
extern void
SetBuiltinError(JSContext *cx);
static JS_INLINE void
SetBuiltinError(JSContext *cx)
{
cx->interpState->builtinStatus |= BUILTIN_ERROR;
}
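Aside (not part of the commit): the setter used to be a plain function in jstracer.cpp (removed earlier in this diff); making it JS_INLINE in the header lets traced natives flag a failure without a cross-file call. A hedged usage sketch with a hypothetical builtin (SketchRequireNonNegative is not in the tree):

// Sketch only: a traced native reports failure through SetBuiltinError; the
// trace checks builtinStatus after the call and side-exits so the interpreter
// can raise the real error.
static JSBool
SketchRequireNonNegative(JSContext *cx, int32 v)
{
    if (v < 0) {
        SetBuiltinError(cx);
        return JS_FALSE;
    }
    return JS_TRUE;
}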
#ifdef DEBUG_RECORDING_STATUS_NOT_BOOL
/* #define DEBUG_RECORDING_STATUS_NOT_BOOL to detect misuses of RecordingStatus */
@ -918,9 +919,6 @@ class TraceRecorder
{
/*************************************************************** Recording session constants */
/* Cached oracle keeps track of hit counts for program counter locations */
Oracle* oracle;
/* The context in which recording started. */
JSContext* const cx;
@ -1066,17 +1064,6 @@ class TraceRecorder
*/
JS_REQUIRES_STACK nanojit::GuardRecord* createGuardRecord(VMSideExit* exit);
JS_REQUIRES_STACK JS_INLINE void MarkSlotUndemotable(JSContext* cx, LinkableFragment* f, unsigned slot);
JS_REQUIRES_STACK JS_INLINE void MarkSlotUndemotable(JSContext* cx, LinkableFragment* f, unsigned slot, const void* pc);
JS_REQUIRES_STACK unsigned FindUndemotesInTypemaps(JSContext* cx, const TypeMap& typeMap, LinkableFragment* f,
Queue<unsigned>& undemotes);
JS_REQUIRES_STACK void AssertDownFrameIsConsistent(JSContext* cx, VMSideExit* anchor, FrameInfo* fi);
JS_REQUIRES_STACK void CaptureStackTypes(JSContext* cx, unsigned callDepth, TraceType* typeMap);
bool isGlobal(jsval* p) const;
ptrdiff_t nativeGlobalSlot(jsval *p) const;
ptrdiff_t nativeGlobalOffset(jsval* p) const;
@ -1398,8 +1385,8 @@ class TraceRecorder
# include "jsopcode.tbl"
#undef OPDEF
inline void* operator new(size_t size) { return js_calloc(size); }
inline void operator delete(void *p) { js_free(p); }
inline void* operator new(size_t size) { return calloc(1, size); }
inline void operator delete(void *p) { free(p); }
JS_REQUIRES_STACK
TraceRecorder(JSContext* cx, VMSideExit*, VMFragment*,

View File

@ -44,7 +44,6 @@
#ifndef jsutil_h___
#define jsutil_h___
#include "jstypes.h"
#include <stdlib.h>
JS_BEGIN_EXTERN_C
@ -180,12 +179,6 @@ extern JS_FRIEND_API(void)
JS_DumpBacktrace(JSCallsite *trace);
#endif
#if defined JS_USE_CUSTOM_ALLOCATOR
#include "jscustomallocator.h"
#else
static JS_INLINE void* js_malloc(size_t bytes) {
if (bytes < sizeof(void*)) /* for asyncFree */
bytes = sizeof(void*);
@ -207,7 +200,6 @@ static JS_INLINE void* js_realloc(void* p, size_t bytes) {
static JS_INLINE void js_free(void* p) {
free(p);
}
#endif/* JS_USE_CUSTOM_ALLOCATOR */
JS_END_EXTERN_C
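Aside (not part of the commit): with the JS_USE_CUSTOM_ALLOCATOR branch gone, js_malloc is always the inline wrapper shown above. The "for asyncFree" padding exists so that even the smallest allocation can later hold one pointer when a deferred-free list threads its next link through the dead block itself. A standalone sketch of that idea, under hypothetical names (defer_free and flush_deferred are not SpiderMonkey APIs):

#include <stdlib.h>

/* Sketch only: a deferred-free list that reuses the first word of each dead
 * block as its "next" link. This is why js_malloc rounds tiny requests up to
 * sizeof(void*): every block handed out must be able to hold one pointer
 * once it is queued for later freeing. */
struct DeadBlock { struct DeadBlock *next; };

static struct DeadBlock *deferred = NULL;

static void
defer_free(void *p)
{
    struct DeadBlock *b = (struct DeadBlock *) p;
    b->next = deferred;
    deferred = b;
}

static void
flush_deferred(void)
{
    while (deferred) {
        struct DeadBlock *next = deferred->next;
        free(deferred);
        deferred = next;
    }
}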