mirror of https://gitlab.winehq.org/wine/wine-gecko.git (synced 2024-09-13 09:24:08 -07:00)

Backed out changeset c9faf5a2bc75

commit ec12d7736d
parent 7cb366214a
@@ -200,7 +200,7 @@ struct JSArenaPool {
 if ((pool)->current == (a)) (pool)->current = &(pool)->first; \
 *(pnext) = (a)->next; \
 JS_CLEAR_ARENA(a); \
-js_free(a); \
+free(a); \
 (a) = NULL; \
 JS_END_MACRO
 
@@ -334,8 +334,6 @@ typedef HashMap<jsbytecode*,
 DefaultHasher<jsbytecode*>,
 SystemAllocPolicy> RecordAttemptMap;
 
-class Oracle;
-
 /*
 * Trace monitor. Every JSThread (if JS_THREADSAFE) or JSRuntime (if not
 * JS_THREADSAFE) has an associated trace monitor that keeps track of loop
@@ -397,7 +395,6 @@ struct TraceMonitor {
 nanojit::Assembler* assembler;
 FrameInfoCache* frameCache;
 
-Oracle* oracle;
 TraceRecorder* recorder;
 
 GlobalState globalStates[MONITOR_N_GLOBAL_STATES];
@@ -2907,6 +2907,9 @@ PreGCCleanup(JSContext *cx, JSGCInvocationKind gckind)
 }
 #endif
 
+#ifdef JS_TRACER
+PurgeJITOracle();
+#endif
 
 /*
 * Reset the property cache's type id generator so we can compress ids.
@@ -67,7 +67,7 @@
 static void *
 DefaultAllocTable(void *pool, size_t size)
 {
-return js_malloc(size);
+return malloc(size);
 }
 
 static void
@@ -79,7 +79,7 @@ DefaultFreeTable(void *pool, void *item, size_t size)
 static JSHashEntry *
 DefaultAllocEntry(void *pool, const void *key)
 {
-return (JSHashEntry*) js_malloc(sizeof(JSHashEntry));
+return (JSHashEntry*) malloc(sizeof(JSHashEntry));
 }
 
 static void
@@ -836,7 +836,7 @@ js_SetSlotThreadSafe(JSContext *cx, JSObject *obj, uint32 slot, jsval v)
 static JSFatLock *
 NewFatlock()
 {
-JSFatLock *fl = (JSFatLock *)js_malloc(sizeof(JSFatLock)); /* for now */
+JSFatLock *fl = (JSFatLock *)malloc(sizeof(JSFatLock)); /* for now */
 if (!fl) return NULL;
 fl->susp = 0;
 fl->next = NULL;
@@ -121,8 +121,8 @@ class UpRecursiveSlotMap : public RecursiveSlotMap
 };
 
 #if defined DEBUG
-JS_REQUIRES_STACK void
-TraceRecorder::assertDownFrameIsConsistent(VMSideExit* anchor, FrameInfo* fi)
+static JS_REQUIRES_STACK void
+AssertDownFrameIsConsistent(JSContext* cx, VMSideExit* anchor, FrameInfo* fi)
 {
 JS_ASSERT(anchor->recursive_down);
 JS_ASSERT(anchor->recursive_down->callerHeight == fi->callerHeight);
@@ -130,7 +130,7 @@ TraceRecorder::assertDownFrameIsConsistent(VMSideExit* anchor, FrameInfo* fi)
 unsigned downPostSlots = fi->callerHeight;
 TraceType* typeMap = fi->get_typemap();
 
-captureStackTypes(1, typeMap);
+CaptureStackTypes(cx, 1, typeMap);
 const TraceType* m1 = anchor->recursive_down->get_typemap();
 for (unsigned i = 0; i < downPostSlots; i++) {
 if (m1[i] == typeMap[i])
@@ -258,7 +258,7 @@ TraceRecorder::upRecursion()
 * recursive functions.
 */
 #if defined DEBUG
-assertDownFrameIsConsistent(anchor, fi);
+AssertDownFrameIsConsistent(cx, anchor, fi);
 #endif
 fi = anchor->recursive_down;
 } else if (recursive_pc != fragment->root->ip) {
@@ -266,7 +266,7 @@ TraceRecorder::upRecursion()
 * Case 1: Guess that down-recursion has to started back out, infer types
 * from the down frame.
 */
-captureStackTypes(1, fi->get_typemap());
+CaptureStackTypes(cx, 1, fi->get_typemap());
 } else {
 /* Case 2: Guess that up-recursion is backing out, infer types from our Tree. */
 JS_ASSERT(tree->nStackTypes == downPostSlots + 1);
@@ -491,7 +491,7 @@ TraceRecorder::slurpDownFrames(jsbytecode* return_pc)
 TraceType* typeMap = exit->stackTypeMap();
 jsbytecode* oldpc = cx->fp->regs->pc;
 cx->fp->regs->pc = exit->pc;
-captureStackTypes(frameDepth, typeMap);
+CaptureStackTypes(cx, frameDepth, typeMap);
 cx->fp->regs->pc = oldpc;
 if (!anchor || anchor->exitType != RECURSIVE_SLURP_FAIL_EXIT) {
 JS_ASSERT_IF(*cx->fp->regs->pc != JSOP_RETURN, *cx->fp->regs->pc == JSOP_STOP);
@@ -241,9 +241,9 @@ PointerRangeSize(T *begin, T *end)
 class SystemAllocPolicy
 {
 public:
-void *malloc(size_t bytes) { return js_malloc(bytes); }
-void *realloc(void *p, size_t bytes) { return js_realloc(p, bytes); }
-void free(void *p) { js_free(p); }
+void *malloc(size_t bytes) { return ::malloc(bytes); }
+void *realloc(void *p, size_t bytes) { return ::realloc(p, bytes); }
+void free(void *p) { ::free(p); }
 void reportAllocOverflow() const {}
 };
 
@@ -107,7 +107,7 @@ nanojit::Allocator::allocChunk(size_t nbytes)
 {
 VMAllocator *vma = (VMAllocator*)this;
 JS_ASSERT(!vma->outOfMemory());
-void *p = js_calloc(nbytes);
+void *p = calloc(1, nbytes);
 if (!p) {
 JS_ASSERT(nbytes < sizeof(vma->mReserve));
 vma->mOutOfMemory = true;
@@ -121,7 +121,7 @@ void
 nanojit::Allocator::freeChunk(void *p) {
 VMAllocator *vma = (VMAllocator*)this;
 if (p != &vma->mReserve[0])
-js_free(p);
+free(p);
 }
 
 void
@@ -906,6 +906,12 @@ TraceRecorder::tprint(const char *format, LIns *ins1, LIns *ins2, LIns *ins3, LI
 }
 #endif
 
+/*
+* The entire VM shares one oracle. Collisions and concurrent updates are
+* tolerated and worst case cause performance regressions.
+*/
+static Oracle oracle;
+
 Tracker::Tracker()
 {
 pagelist = NULL;
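
Note on the hunk above: it restores a single file-static Oracle shared by the whole VM, replacing the per-monitor oracle pointer removed elsewhere in this backout (see the TraceMonitor, InitJIT and FinishJIT hunks). A minimal C++ sketch of the two ownership styles, using simplified placeholder members rather than the real declarations:

    // Sketch only: contrasts the per-monitor ownership that is backed out
    // with the file-static oracle this changeset restores.
    class Oracle {
      public:
        void markInstructionUndemotable(const void* pc) { /* record a hint for pc */ }
        bool isInstructionUndemotable(const void* pc) const { return false; }
        void clear() { /* drop all recorded hints */ }
    };

    // Backed-out style: each TraceMonitor owns an Oracle created in InitJIT
    // and deleted in FinishJIT; callers go through tm->oracle->...
    struct PerMonitorStyle {
        Oracle* oracle;
    };

    // Restored style: one process-wide oracle, used directly as
    // oracle.markInstructionUndemotable(pc) and reset via PurgeJITOracle().
    static Oracle oracle;
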
@@ -945,7 +951,7 @@ struct Tracker::TrackerPage*
 Tracker::addTrackerPage(const void* v)
 {
 jsuword base = getTrackerPageBase(v);
-struct TrackerPage* p = (struct TrackerPage*) js_calloc(sizeof(*p));
+struct TrackerPage* p = (struct TrackerPage*) calloc(1, sizeof(*p));
 p->base = base;
 p->next = pagelist;
 pagelist = p;
@@ -958,7 +964,7 @@ Tracker::clear()
 while (pagelist) {
 TrackerPage* p = pagelist;
 pagelist = pagelist->next;
-js_free(p);
+free(p);
 }
 }
 
@@ -1211,38 +1217,44 @@ Oracle::clearDemotability()
 _pcDontDemote.reset();
 }
 
-JS_REQUIRES_STACK void
-TraceRecorder::markSlotUndemotable(LinkableFragment* f, unsigned slot)
+JS_REQUIRES_STACK static JS_INLINE void
+MarkSlotUndemotable(JSContext* cx, LinkableFragment* f, unsigned slot)
 {
 if (slot < f->nStackTypes) {
-oracle->markStackSlotUndemotable(cx, slot);
+oracle.markStackSlotUndemotable(cx, slot);
 return;
 }
 
 uint16* gslots = f->globalSlots->data();
-oracle->markGlobalSlotUndemotable(cx, gslots[slot - f->nStackTypes]);
+oracle.markGlobalSlotUndemotable(cx, gslots[slot - f->nStackTypes]);
 }
 
-JS_REQUIRES_STACK void
-TraceRecorder::markSlotUndemotable(LinkableFragment* f, unsigned slot, const void* pc)
+JS_REQUIRES_STACK static JS_INLINE void
+MarkSlotUndemotable(JSContext* cx, LinkableFragment* f, unsigned slot, const void* pc)
 {
 if (slot < f->nStackTypes) {
-oracle->markStackSlotUndemotable(cx, slot, pc);
+oracle.markStackSlotUndemotable(cx, slot, pc);
 return;
 }
 
 uint16* gslots = f->globalSlots->data();
-oracle->markGlobalSlotUndemotable(cx, gslots[slot - f->nStackTypes]);
+oracle.markGlobalSlotUndemotable(cx, gslots[slot - f->nStackTypes]);
 }
 
-static JS_REQUIRES_STACK bool
-IsSlotUndemotable(Oracle* oracle, JSContext* cx, LinkableFragment* f, unsigned slot, const void* ip)
+static JS_REQUIRES_STACK inline bool
+IsSlotUndemotable(JSContext* cx, LinkableFragment* f, unsigned slot, const void* ip)
 {
 if (slot < f->nStackTypes)
-return oracle->isStackSlotUndemotable(cx, slot, ip);
+return oracle.isStackSlotUndemotable(cx, slot, ip);
 
 uint16* gslots = f->globalSlots->data();
-return oracle->isGlobalSlotUndemotable(cx, gslots[slot - f->nStackTypes]);
+return oracle.isGlobalSlotUndemotable(cx, gslots[slot - f->nStackTypes]);
+}
+
+static JS_REQUIRES_STACK inline bool
+IsSlotUndemotable(JSContext* cx, LinkableFragment* f, unsigned slot)
+{
+return IsSlotUndemotable(cx, f, slot, cx->fp->regs->pc);
 }
 
 class FrameInfoCache
@@ -1955,7 +1967,7 @@ public:
 visitGlobalSlot(jsval *vp, unsigned n, unsigned slot) {
 TraceType type = getCoercedType(*vp);
 if (type == TT_INT32 &&
-JS_TRACE_MONITOR(mCx).oracle->isGlobalSlotUndemotable(mCx, slot))
+oracle.isGlobalSlotUndemotable(mCx, slot))
 type = TT_DOUBLE;
 JS_ASSERT(type != TT_JSVAL);
 debug_only_printf(LC_TMTracer,
@@ -1969,7 +1981,7 @@ public:
 for (int i = 0; i < count; ++i) {
 TraceType type = getCoercedType(vp[i]);
 if (type == TT_INT32 &&
-JS_TRACE_MONITOR(mCx).oracle->isStackSlotUndemotable(mCx, length()))
+oracle.isStackSlotUndemotable(mCx, length()))
 type = TT_DOUBLE;
 JS_ASSERT(type != TT_JSVAL);
 debug_only_printf(LC_TMTracer,
@@ -2125,7 +2137,6 @@ TraceRecorder::TraceRecorder(JSContext* cx, VMSideExit* anchor, VMFragment* frag
 RecordReason recordReason)
 : cx(cx),
 traceMonitor(&JS_TRACE_MONITOR(cx)),
-oracle(JS_TRACE_MONITOR(cx).oracle),
 fragment(fragment),
 tree(fragment->root),
 recordReason(recordReason),
@@ -2673,7 +2684,6 @@ TraceMonitor::flush()
 codeAlloc->reset();
 tempAlloc->reset();
 reTempAlloc->reset();
-oracle->clear();
 
 Allocator& alloc = *dataAlloc;
 
@@ -3522,7 +3532,7 @@ TraceRecorder::importGlobalSlot(unsigned slot)
 int index = tree->globalSlots->offsetOf(uint16(slot));
 if (index == -1) {
 type = getCoercedType(*vp);
-if (type == TT_INT32 && oracle->isGlobalSlotUndemotable(cx, slot))
+if (type == TT_INT32 && oracle.isGlobalSlotUndemotable(cx, slot))
 type = TT_DOUBLE;
 index = (int)tree->globalSlots->length();
 tree->globalSlots->add(uint16(slot));
@@ -3753,7 +3763,7 @@ public:
 * Aggressively undo speculation so the inner tree will compile
 * if this fails.
 */
-mRecorder.oracle->markGlobalSlotUndemotable(mCx, slot);
+oracle.markGlobalSlotUndemotable(mCx, slot);
 }
 JS_ASSERT(!(!isPromote && *mTypeMap == TT_INT32));
 ++mTypeMap;
@@ -3797,7 +3807,7 @@ public:
 * Aggressively undo speculation so the inner tree will compile
 * if this fails.
 */
-mRecorder.oracle->markStackSlotUndemotable(mCx, mSlotnum);
+oracle.markStackSlotUndemotable(mCx, mSlotnum);
 }
 JS_ASSERT(!(!isPromote && *mTypeMap == TT_INT32));
 ++vp;
@@ -4429,7 +4439,7 @@ class SlotMap : public SlotVisitorBase
 {
 for (unsigned i = 0; i < length(); i++) {
 if (get(i).lastCheck == TypeCheck_Undemote)
-mRecorder.markSlotUndemotable(mRecorder.tree, i);
+MarkSlotUndemotable(mRecorder.cx, mRecorder.tree, i);
 }
 }
 
@@ -4754,7 +4764,7 @@ TypeMapLinkability(JSContext* cx, const TypeMap& typeMap, TreeFragment* peer)
 if (typeMap[i] == peerMap[i])
 continue;
 if (typeMap[i] == TT_INT32 && peerMap[i] == TT_DOUBLE &&
-IsSlotUndemotable(JS_TRACE_MONITOR(cx).oracle, cx, peer, i, peer->ip)) {
+IsSlotUndemotable(cx, peer, i, peer->ip)) {
 consensus = TypeConsensus_Undemotes;
 } else {
 return TypeConsensus_Bad;
@@ -4763,8 +4773,8 @@ TypeMapLinkability(JSContext* cx, const TypeMap& typeMap, TreeFragment* peer)
 return consensus;
 }
 
-JS_REQUIRES_STACK unsigned
-TraceRecorder::findUndemotesInTypemaps(const TypeMap& typeMap, LinkableFragment* f,
+static JS_REQUIRES_STACK unsigned
+FindUndemotesInTypemaps(JSContext* cx, const TypeMap& typeMap, LinkableFragment* f,
 Queue<unsigned>& undemotes)
 {
 undemotes.setLength(0);
@@ -4777,7 +4787,7 @@ TraceRecorder::findUndemotesInTypemaps(const TypeMap& typeMap, LinkableFragment*
 }
 }
 for (unsigned i = 0; i < undemotes.length(); i++)
-markSlotUndemotable(f, undemotes[i]);
+MarkSlotUndemotable(cx, f, undemotes[i]);
 return undemotes.length();
 }
 
@@ -4826,7 +4836,7 @@ TraceRecorder::joinEdgesToEntry(TreeFragment* peer_root)
 uexit = peer->removeUnstableExit(uexit->exit);
 } else {
 /* Check for int32->double slots that suggest trashing. */
-if (findUndemotesInTypemaps(typeMap, tree, undemotes)) {
+if (FindUndemotesInTypemaps(cx, typeMap, tree, undemotes)) {
 JS_ASSERT(peer == uexit->fragment->root);
 if (fragment == peer)
 trashSelf = true;
@@ -5673,7 +5683,6 @@ FindLoopEdgeTarget(JSContext* cx, VMSideExit* exit, TreeFragment** peerp)
 TreeFragment* from = exit->root();
 
 JS_ASSERT(from->code());
-Oracle* oracle = JS_TRACE_MONITOR(cx).oracle;
 
 TypeMap typeMap(NULL);
 FullMapFromExit(typeMap, exit);
@@ -5685,14 +5694,14 @@ FindLoopEdgeTarget(JSContext* cx, VMSideExit* exit, TreeFragment** peerp)
 if (typeMap[i] == TT_DOUBLE) {
 if (exit->exitType == RECURSIVE_UNLINKED_EXIT) {
 if (i < exit->numStackSlots)
-oracle->markStackSlotUndemotable(cx, i, exit->recursive_pc);
+oracle.markStackSlotUndemotable(cx, i, exit->recursive_pc);
 else
-oracle->markGlobalSlotUndemotable(cx, gslots[i - exit->numStackSlots]);
+oracle.markGlobalSlotUndemotable(cx, gslots[i - exit->numStackSlots]);
 }
 if (i < from->nStackTypes)
-oracle->markStackSlotUndemotable(cx, i, from->ip);
+oracle.markStackSlotUndemotable(cx, i, from->ip);
 else if (i >= exit->numStackSlots)
-oracle->markGlobalSlotUndemotable(cx, gslots[i - exit->numStackSlots]);
+oracle.markGlobalSlotUndemotable(cx, gslots[i - exit->numStackSlots]);
 }
 }
 
@@ -6060,7 +6069,7 @@ TraceRecorder::attemptTreeCall(TreeFragment* f, uintN& inlineCallCount)
 }
 
 case OVERFLOW_EXIT:
-oracle->markInstructionUndemotable(cx->fp->regs->pc);
+oracle.markInstructionUndemotable(cx->fp->regs->pc);
 /* FALL THROUGH */
 case RECURSIVE_SLURP_FAIL_EXIT:
 case RECURSIVE_SLURP_MISMATCH_EXIT:
@@ -6168,10 +6177,10 @@ public:
 if (!IsEntryTypeCompatible(vp, mTypeMap)) {
 mOk = false;
 } else if (!isPromoteInt(mRecorder.get(vp)) && *mTypeMap == TT_INT32) {
-mRecorder.oracle->markGlobalSlotUndemotable(mCx, slot);
+oracle.markGlobalSlotUndemotable(mCx, slot);
 mOk = false;
 } else if (JSVAL_IS_INT(*vp) && *mTypeMap == TT_DOUBLE) {
-mRecorder.oracle->markGlobalSlotUndemotable(mCx, slot);
+oracle.markGlobalSlotUndemotable(mCx, slot);
 }
 mTypeMap++;
 }
@@ -6183,10 +6192,10 @@ public:
 if (!IsEntryTypeCompatible(vp, mTypeMap)) {
 mOk = false;
 } else if (!isPromoteInt(mRecorder.get(vp)) && *mTypeMap == TT_INT32) {
-mRecorder.oracle->markStackSlotUndemotable(mCx, mStackSlotNum);
+oracle.markStackSlotUndemotable(mCx, mStackSlotNum);
 mOk = false;
 } else if (JSVAL_IS_INT(*vp) && *mTypeMap == TT_DOUBLE) {
-mRecorder.oracle->markStackSlotUndemotable(mCx, mStackSlotNum);
+oracle.markStackSlotUndemotable(mCx, mStackSlotNum);
 }
 vp++;
 mTypeMap++;
@@ -7016,7 +7025,7 @@ MonitorLoopEdge(JSContext* cx, uintN& inlineCallCount, RecordReason reason)
 return rv;
 
 case OVERFLOW_EXIT:
-tm->oracle->markInstructionUndemotable(cx->fp->regs->pc);
+oracle.markInstructionUndemotable(cx->fp->regs->pc);
 /* FALL THROUGH */
 case RECURSIVE_SLURP_FAIL_EXIT:
 case RECURSIVE_SLURP_MISMATCH_EXIT:
@@ -7457,8 +7466,6 @@ InitJIT(TraceMonitor *tm)
 /* Set the default size for the code cache to 16MB. */
 tm->maxCodeCacheBytes = 16 M;
 
-tm->oracle = new Oracle();
-
 tm->recordAttempts = new RecordAttemptMap;
 if (!tm->recordAttempts->init(PC_HASH_COUNT))
 abort();
@@ -7541,7 +7548,6 @@ FinishJIT(TraceMonitor *tm)
 #endif
 
 delete tm->recordAttempts;
-delete tm->oracle;
 
 #ifdef DEBUG
 // Recover profiling data from expiring Fragments, and display
@@ -7613,6 +7619,12 @@ FinishJIT(TraceMonitor *tm)
 tm->cachedTempTypeMap = NULL;
 }
 
+void
+PurgeJITOracle()
+{
+oracle.clear();
+}
+
 JS_REQUIRES_STACK void
 PurgeScriptFragments(JSContext* cx, JSScript* script)
 {
@@ -8070,7 +8082,7 @@ TraceRecorder::alu(LOpcode v, jsdouble v0, jsdouble v1, LIns* s0, LIns* s1)
 * integers and the oracle must not give us a negative hint for the
 * instruction.
 */
-if (oracle->isInstructionUndemotable(cx->fp->regs->pc) || !isPromoteInt(s0) || !isPromoteInt(s1)) {
+if (oracle.isInstructionUndemotable(cx->fp->regs->pc) || !isPromoteInt(s0) || !isPromoteInt(s1)) {
 out:
 if (v == LIR_fmod) {
 LIns* args[] = { s1, s0 };
@@ -10230,7 +10242,7 @@ TraceRecorder::record_JSOP_NEG()
 * a double. Only follow this path if we're not an integer that's 0 and
 * we're not a double that's zero.
 */
-if (!oracle->isInstructionUndemotable(cx->fp->regs->pc) &&
+if (!oracle.isInstructionUndemotable(cx->fp->regs->pc) &&
 isPromoteInt(a) &&
 (!JSVAL_IS_INT(v) || JSVAL_TO_INT(v) != 0) &&
 (!JSVAL_IS_DOUBLE(v) || !JSDOUBLE_IS_NEGZERO(*JSVAL_TO_DOUBLE(v))) &&
@@ -15300,7 +15312,7 @@ StopTraceVisNative(JSContext *cx, JSObject *obj, uintN argc, jsval *argv, jsval
 #endif /* MOZ_TRACEVIS */
 
 JS_REQUIRES_STACK void
-TraceRecorder::captureStackTypes(unsigned callDepth, TraceType* typeMap)
+CaptureStackTypes(JSContext* cx, unsigned callDepth, TraceType* typeMap)
 {
 CaptureTypesVisitor capVisitor(cx, typeMap);
 VisitStackSlots(capVisitor, cx, callDepth);
@@ -78,7 +78,7 @@ public:
 memcpy(tmp, _data, _len * sizeof(T));
 _data = tmp;
 } else {
-_data = (T*)js_realloc(_data, _max * sizeof(T));
+_data = (T*)realloc(_data, _max * sizeof(T));
 }
 #if defined(DEBUG)
 memset(&_data[_len], 0xcd, _max - _len);
@@ -95,7 +95,7 @@ public:
 
 ~Queue() {
 if (!alloc)
-js_free(_data);
+free(_data);
 }
 
 bool contains(T a) {
@@ -926,9 +926,6 @@ class TraceRecorder
 /* Cached value of JS_TRACE_MONITOR(cx). */
 TraceMonitor* const traceMonitor;
 
-/* Cached oracle keeps track of hit counts for program counter locations */
-Oracle* oracle;
-
 /* The Fragment being recorded by this recording session. */
 VMFragment* const fragment;
 
@@ -1068,17 +1065,6 @@ class TraceRecorder
 */
 JS_REQUIRES_STACK nanojit::GuardRecord* createGuardRecord(VMSideExit* exit);
 
-JS_REQUIRES_STACK JS_INLINE void markSlotUndemotable(LinkableFragment* f, unsigned slot);
-
-JS_REQUIRES_STACK JS_INLINE void markSlotUndemotable(LinkableFragment* f, unsigned slot, const void* pc);
-
-JS_REQUIRES_STACK unsigned findUndemotesInTypemaps(const TypeMap& typeMap, LinkableFragment* f,
-Queue<unsigned>& undemotes);
-
-JS_REQUIRES_STACK void assertDownFrameIsConsistent(VMSideExit* anchor, FrameInfo* fi);
-
-JS_REQUIRES_STACK void captureStackTypes(unsigned callDepth, TraceType* typeMap);
-
 bool isGlobal(jsval* p) const;
 ptrdiff_t nativeGlobalSlot(jsval *p) const;
 ptrdiff_t nativeGlobalOffset(jsval* p) const;
@@ -1394,8 +1380,8 @@ class TraceRecorder
 # include "jsopcode.tbl"
 #undef OPDEF
 
-inline void* operator new(size_t size) { return js_calloc(size); }
-inline void operator delete(void *p) { js_free(p); }
+inline void* operator new(size_t size) { return calloc(1, size); }
+inline void operator delete(void *p) { free(p); }
 
 JS_REQUIRES_STACK
 TraceRecorder(JSContext* cx, VMSideExit*, VMFragment*,
@@ -44,7 +44,6 @@
 #ifndef jsutil_h___
 #define jsutil_h___
 
-#include "jstypes.h"
 #include <stdlib.h>
 
 JS_BEGIN_EXTERN_C
@@ -183,12 +182,6 @@ extern JS_FRIEND_API(void)
 JS_DumpBacktrace(JSCallsite *trace);
 #endif
 
-#if defined JS_USE_CUSTOM_ALLOCATOR
-
-#include "jscustomallocator.h"
-
-#else
-
 static JS_INLINE void* js_malloc(size_t bytes) {
 if (bytes < sizeof(void*)) /* for asyncFree */
 bytes = sizeof(void*);
@@ -210,7 +203,6 @@ static JS_INLINE void* js_realloc(void* p, size_t bytes) {
 static JS_INLINE void js_free(void* p) {
 free(p);
 }
-#endif/* JS_USE_CUSTOM_ALLOCATOR */
 
 JS_END_EXTERN_C
 
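
The two jsutil.h hunks above drop the JS_USE_CUSTOM_ALLOCATOR branch and the jstypes.h include, leaving plain js_malloc/js_realloc/js_free wrappers around the libc allocator. A self-contained C++ sketch of that wrapper shape, with JS_INLINE written as plain inline so it compiles on its own (simplified, not verbatim):

    /* Thin inline wrappers over the libc allocator, including the
     * minimum-size adjustment for asyncFree shown in the hunk above. */
    #include <stdlib.h>

    static inline void* js_malloc(size_t bytes) {
        if (bytes < sizeof(void*))  /* for asyncFree */
            bytes = sizeof(void*);
        return malloc(bytes);
    }

    static inline void* js_realloc(void* p, size_t bytes) {
        return realloc(p, bytes);
    }

    static inline void js_free(void* p) {
        free(p);
    }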