Merge tracemonkey to mozilla-central.

Robert Sayre, 2009-01-27 13:40:39 -08:00
commit 103d02661d
20 changed files with 935 additions and 496 deletions

View File

@ -2595,6 +2595,31 @@ JS_SetGCParameter(JSRuntime *rt, JSGCParamKey key, uint32 value)
case JSGC_STACKPOOL_LIFESPAN:
rt->gcEmptyArenaPoolLifespan = value;
break;
default:
JS_ASSERT(key == JSGC_TRIGGER_FACTOR);
JS_ASSERT(value >= 100);
rt->gcTriggerFactor = value;
return;
}
}
JS_PUBLIC_API(uint32)
JS_GetGCParameter(JSRuntime *rt, JSGCParamKey key)
{
switch (key) {
case JSGC_MAX_BYTES:
return rt->gcMaxBytes;
case JSGC_MAX_MALLOC_BYTES:
return rt->gcMaxMallocBytes;
case JSGC_STACKPOOL_LIFESPAN:
return rt->gcEmptyArenaPoolLifespan;
case JSGC_TRIGGER_FACTOR:
return rt->gcTriggerFactor;
case JSGC_BYTES:
return rt->gcBytes;
default:
JS_ASSERT(key == JSGC_NUMBER);
return rt->gcNumber;
}
}
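
Together these give embedders symmetric read/write access to the GC tuning knobs introduced here. A minimal sketch of how an embedding might use the new API, assuming an already-initialized JSRuntime *rt (everything except the JS_* entry points and keys is illustrative):

    #include <stdio.h>
    #include "jsapi.h"

    void tuneAndReportGC(JSRuntime *rt)
    {
        /* Request a GC once the heap grows to 3x its size after the last GC. */
        JS_SetGCParameter(rt, JSGC_TRIGGER_FACTOR, 300);

        /* JSGC_BYTES and JSGC_NUMBER are read-only counters. */
        uint32 bytes = JS_GetGCParameter(rt, JSGC_BYTES);
        uint32 runs  = JS_GetGCParameter(rt, JSGC_NUMBER);
        printf("GC heap: %u bytes after %u collections\n",
               (unsigned) bytes, (unsigned) runs);
    }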

View File

@ -1137,12 +1137,31 @@ typedef enum JSGCParamKey {
JSGC_MAX_MALLOC_BYTES = 1,
/* Hoard stackPools for this long, in ms, default is 30 seconds. */
JSGC_STACKPOOL_LIFESPAN = 2
JSGC_STACKPOOL_LIFESPAN = 2,
/*
* The factor that determines when the GC is invoked, expressed as a
* percentage of the memory the GC held after its last run. When the
* memory currently allocated by the GC exceeds that percentage, the GC
* is invoked. The factor cannot be less than 100, since the memory
* currently allocated by the GC can never be less than what remained
* after its last run.
*/
JSGC_TRIGGER_FACTOR = 3,
/* Number of bytes currently allocated by the GC. */
JSGC_BYTES = 4,
/* Number of times the GC has been invoked. */
JSGC_NUMBER = 5
} JSGCParamKey;
extern JS_PUBLIC_API(void)
JS_SetGCParameter(JSRuntime *rt, JSGCParamKey key, uint32 value);
extern JS_PUBLIC_API(uint32)
JS_GetGCParameter(JSRuntime *rt, JSGCParamKey key);
/*
* Add a finalizer for external strings created by JS_NewExternalString (see
* below) using a type-code returned from this function, and that understands

View File

@ -580,7 +580,9 @@ array_length_setter(JSContext *cx, JSObject *obj, jsval id, jsval *vp)
}
if (OBJ_IS_DENSE_ARRAY(cx, obj)) {
if (ARRAY_DENSE_LENGTH(obj) && !ResizeSlots(cx, obj, oldlen, newlen))
/* Don't reallocate if we're not actually shrinking our slots. */
jsuint oldsize = ARRAY_DENSE_LENGTH(obj);
if (oldsize >= newlen && !ResizeSlots(cx, obj, oldsize, newlen))
return JS_FALSE;
} else if (oldlen - newlen < (1 << 24)) {
do {

View File

@ -255,27 +255,27 @@ struct JSTraceableNative {
#define _JS_TN_INIT_HELPER_2(linkage, rt, op, at0, at1, cse, fold) \
&_JS_CALLINFO(op), \
_JS_CTYPE_PCH(at1) _JS_CTYPE_PCH(at0), \
_JS_CTYPE_ACH(at0) _JS_CTYPE_ACH(at1), \
_JS_CTYPE_ACH(at1) _JS_CTYPE_ACH(at0), \
_JS_CTYPE_FLAGS(rt)
#define _JS_TN_INIT_HELPER_3(linkage, rt, op, at0, at1, at2, cse, fold) \
&_JS_CALLINFO(op), \
_JS_CTYPE_PCH(at2) _JS_CTYPE_PCH(at1) _JS_CTYPE_PCH(at0), \
_JS_CTYPE_ACH(at0) _JS_CTYPE_ACH(at1) _JS_CTYPE_ACH(at2), \
_JS_CTYPE_ACH(at2) _JS_CTYPE_ACH(at1) _JS_CTYPE_ACH(at0), \
_JS_CTYPE_FLAGS(rt)
#define _JS_TN_INIT_HELPER_4(linkage, rt, op, at0, at1, at2, at3, cse, fold) \
&_JS_CALLINFO(op), \
_JS_CTYPE_PCH(at3) _JS_CTYPE_PCH(at2) _JS_CTYPE_PCH(at1) _JS_CTYPE_PCH(at0), \
_JS_CTYPE_ACH(at0) _JS_CTYPE_ACH(at1) _JS_CTYPE_ACH(at2) _JS_CTYPE_ACH(at3), \
_JS_CTYPE_ACH(at3) _JS_CTYPE_ACH(at2) _JS_CTYPE_ACH(at1) _JS_CTYPE_ACH(at0), \
_JS_CTYPE_FLAGS(rt)
#define _JS_TN_INIT_HELPER_5(linkage, rt, op, at0, at1, at2, at3, at4, cse, fold) \
&_JS_CALLINFO(op), \
_JS_CTYPE_PCH(at4) _JS_CTYPE_PCH(at3) _JS_CTYPE_PCH(at2) _JS_CTYPE_PCH(at1) \
_JS_CTYPE_PCH(at0), \
_JS_CTYPE_ACH(at0) _JS_CTYPE_ACH(at1) _JS_CTYPE_ACH(at2) _JS_CTYPE_ACH(at3) \
_JS_CTYPE_ACH(at4), \
_JS_CTYPE_ACH(at4) _JS_CTYPE_ACH(at3) _JS_CTYPE_ACH(at2) _JS_CTYPE_ACH(at1) \
_JS_CTYPE_ACH(at0), \
_JS_CTYPE_FLAGS(rt)
#define JS_DEFINE_TRCINFO_1(name, tn0) \

View File

@ -102,13 +102,15 @@ namespace nanojit {
class TraceRecorder;
extern "C++" { template<typename T> class Queue; }
typedef Queue<uint16> SlotList;
class TypeMap;
# define CLS(T) T*
#else
# define CLS(T) void*
#endif
#define FRAGMENT_TABLE_SIZE 512
struct VMFragment;
/*
* Trace monitor. Every JSThread (if JS_THREADSAFE) or JSRuntime (if not
* JS_THREADSAFE) has an associated trace monitor that keeps track of loop
@ -127,10 +129,11 @@ typedef struct JSTraceMonitor {
CLS(TraceRecorder) recorder;
uint32 globalShape;
CLS(SlotList) globalSlots;
CLS(TypeMap) globalTypeMap;
jsval *reservedDoublePool;
jsval *reservedDoublePoolPtr;
struct VMFragment* vmfragments[FRAGMENT_TABLE_SIZE];
/*
* reservedObjects is a linked list (via fslots[0]) of preallocated JSObjects.
* The JIT uses this to ensure that leaving a trace tree can't fail.
@ -267,6 +270,7 @@ struct JSRuntime {
uint32 gcLevel;
uint32 gcNumber;
JSTracer *gcMarkingTracer;
uint32 gcTriggerFactor;
/*
* NB: do not pack another flag here by claiming gcPadding unless the new
@ -924,6 +928,15 @@ struct JSContext {
#endif
#ifdef __cplusplus
static inline JSAtom **
FrameAtomBase(JSContext *cx, JSStackFrame *fp)
{
return fp->imacpc
? COMMON_ATOMS_START(&cx->runtime->atomState)
: fp->script->atomMap.vector;
}
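/*
 * Why two sources: when fp->imacpc is non-null the frame is executing an
 * interpreter macro (imacro), whose bytecode is compiled against the
 * runtime-wide common atoms rather than any script's own atom map.
 */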
/* FIXME(bug 332648): Move this into a public header. */
class JSAutoTempValueRooter
{
@ -971,7 +984,8 @@ class JSAutoResolveFlags
JSContext *mContext;
uintN mSaved;
};
#endif
#endif /* __cplusplus */
/*
* Slightly more readable macros for testing per-context option settings (also

View File

@ -1253,6 +1253,18 @@ js_InitGC(JSRuntime *rt, uint32 maxbytes)
rt->gcMaxBytes = rt->gcMaxMallocBytes = maxbytes;
rt->gcEmptyArenaPoolLifespan = 30000;
/*
* By default the trigger factor is set to the maximum possible value,
* so the GC will not be triggered by growth of GC memory (gcBytes).
*/
rt->gcTriggerFactor = (uint32) -1;
/*
* The assigned value prevents the GC from running while very little GC
* memory is allocated (e.g. during JS engine startup).
*/
rt->gcLastBytes = 8192;
METER(memset(&rt->gcStats, 0, sizeof rt->gcStats));
return JS_TRUE;
}
@ -1757,6 +1769,17 @@ EnsureLocalFreeList(JSContext *cx)
#endif
static JS_INLINE JSBool
IsGCThresholdReached(JSRuntime *rt)
{
/*
* Since the initial value of gcLastBytes is non-zero (see js_InitGC),
* this returns false while gcBytes is still close to zero at JS
* engine startup.
*/
return rt->gcBytes / rt->gcTriggerFactor >= rt->gcLastBytes / 100;
}
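/*
 * A note on the shape of the check above: writing it as two divisions
 * (rather than gcBytes * 100 >= gcLastBytes * gcTriggerFactor) keeps the
 * uint32 arithmetic from overflowing, at the cost of some rounding.
 * Worked example with the js_InitGC default of gcLastBytes = 8192: a
 * trigger factor of 300 requests a GC once gcBytes / 300 >= 81, i.e.
 * once roughly 24 KB are held, while the default factor of (uint32)-1
 * makes the left-hand side 0 and thus never triggers.
 */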
void *
js_NewGCThing(JSContext *cx, uintN flags, size_t nbytes)
{
@ -1823,7 +1846,8 @@ js_NewGCThing(JSContext *cx, uintN flags, size_t nbytes)
return NULL;
}
doGC = (rt->gcMallocBytes >= rt->gcMaxMallocBytes && rt->gcPoke);
doGC = (rt->gcMallocBytes >= rt->gcMaxMallocBytes && rt->gcPoke) ||
IsGCThresholdReached(rt);
#ifdef JS_GC_ZEAL
doGC = doGC || rt->gcZeal >= 2 || (rt->gcZeal >= 1 && rt->gcPoke);
# ifdef JS_TRACER
@ -2056,9 +2080,10 @@ RefillDoubleFreeList(JSContext *cx)
return NULL;
}
if (rt->gcMallocBytes >= rt->gcMaxMallocBytes && rt->gcPoke
if ((rt->gcMallocBytes >= rt->gcMaxMallocBytes && rt->gcPoke) ||
IsGCThresholdReached(rt)
#ifdef JS_GC_ZEAL
&& (rt->gcZeal >= 2 || (rt->gcZeal >= 1 && rt->gcPoke))
|| rt->gcZeal >= 2 || (rt->gcZeal >= 1 && rt->gcPoke)
#endif
) {
goto do_gc;
@ -2257,7 +2282,8 @@ js_AddAsGCBytes(JSContext *cx, size_t sz)
rt = cx->runtime;
if (rt->gcBytes >= rt->gcMaxBytes ||
sz > (size_t) (rt->gcMaxBytes - rt->gcBytes)
sz > (size_t) (rt->gcMaxBytes - rt->gcBytes) ||
IsGCThresholdReached(rt)
#ifdef JS_GC_ZEAL
|| rt->gcZeal >= 2 || (rt->gcZeal >= 1 && rt->gcPoke)
#endif

View File

@ -241,9 +241,7 @@ js_FillPropertyCache(JSContext *cx, JSObject *obj, jsuword kshape,
}
/* If getting a value via a stub getter, we can cache the slot. */
if (!(cs->format & JOF_SET) &&
!((cs->format & (JOF_INCDEC | JOF_FOR)) &&
(sprop->attrs & JSPROP_READONLY)) &&
if (!(cs->format & (JOF_SET | JOF_INCDEC | JOF_FOR)) &&
SPROP_HAS_STUB_GETTER(sprop) &&
SPROP_HAS_VALID_SLOT(sprop, scope)) {
/* Great, let's cache sprop's slot and use it on cache hit. */
@ -2647,9 +2645,7 @@ js_Interpret(JSContext *cx)
} \
fp = cx->fp; \
script = fp->script; \
atoms = fp->imacpc \
? COMMON_ATOMS_START(&rt->atomState) \
: script->atomMap.vector; \
atoms = FrameAtomBase(cx, fp); \
currentVersion = (JSVersion) script->version; \
JS_ASSERT(fp->regs == &regs); \
if (cx->throwing) \
@ -3056,9 +3052,7 @@ js_Interpret(JSContext *cx)
/* Restore the calling script's interpreter registers. */
script = fp->script;
atoms = fp->imacpc
? COMMON_ATOMS_START(&rt->atomState)
: script->atomMap.vector;
atoms = FrameAtomBase(cx, fp);
/* Resume execution in the calling frame. */
inlineCallCount--;
@ -6880,6 +6874,17 @@ js_Interpret(JSContext *cx)
}
JS_ASSERT((size_t)(regs.pc - script->code) < script->length);
#ifdef JS_TRACER
/*
* This abort could be weakened to permit tracing through exceptions that
* are thrown and caught within a loop, with the cooperation of the tracer.
* For now, just bail on any sign of trouble.
*/
if (TRACE_RECORDER(cx))
js_AbortRecording(cx, "error or exception while recording");
#endif
if (!cx->throwing) {
/* This is an error, not a catchable exception; quit the frame ASAP. */
ok = JS_FALSE;

View File

@ -5432,7 +5432,7 @@ js_GetWrappedObject(JSContext *cx, JSObject *obj)
return obj;
}
#if DEBUG
#ifdef DEBUG
/*
* Routines to print out values during debugging. These are FRIEND_API to help
@ -5602,8 +5602,12 @@ js_DumpObject(JSObject *obj)
sharesScope = (scope->object != obj);
if (sharesScope) {
fprintf(stderr, "no own properties - see proto (%s at %p)\n",
STOBJ_GET_CLASS(proto)->name, proto);
if (proto) {
fprintf(stderr, "no own properties - see proto (%s at %p)\n",
STOBJ_GET_CLASS(proto)->name, proto);
} else {
fprintf(stderr, "no own properties - null proto\n");
}
} else {
fprintf(stderr, "properties:\n");
for (JSScopeProperty *sprop = SCOPE_LAST_PROP(scope); sprop;

View File

@ -1447,8 +1447,11 @@ Statements(JSContext *cx, JSTokenStream *ts, JSTreeContext *tc)
tt = js_PeekToken(cx, ts);
ts->flags &= ~TSF_OPERAND;
if (tt <= TOK_EOF || tt == TOK_RC) {
if (tt == TOK_ERROR)
if (tt == TOK_ERROR) {
if (ts->flags & TSF_EOF)
ts->flags |= TSF_UNEXPECTED_EOF;
return NULL;
}
break;
}
pn2 = Statement(cx, ts, tc);

File diff suppressed because it is too large.

View File

@ -66,6 +66,9 @@ class Queue : public avmplus::GCObject {
while (_max < size)
_max <<= 1;
_data = (T*)realloc(_data, _max * sizeof(T));
#if defined(DEBUG)
memset(&_data[_len], 0xcd, (_max - _len) * sizeof(T));
#endif
}
public:
Queue(unsigned max = 16) {
@ -159,30 +162,46 @@ extern bool js_verboseDebug;
#endif
/*
* The oracle keeps track of slots that should not be demoted to int because we know them
* to overflow or they result in type-unstable traces. We are using a simple hash table.
* Collisions lead to loss of optimization (demotable slots are not demoted) but have no
* correctness implications.
* The oracle keeps track of hit counts for program counter locations, as
* well as slots that should not be demoted to int because we know them to
* overflow or they result in type-unstable traces. We are using simple
* hash tables. Collisions lead to loss of optimization (demotable slots
* are not demoted, etc.) but have no correctness implications.
*/
#define ORACLE_SIZE 4096
class Oracle {
uint32_t hits[ORACLE_SIZE];
uint32_t blacklistLevels[ORACLE_SIZE];
avmplus::BitSet _stackDontDemote;
avmplus::BitSet _globalDontDemote;
public:
Oracle();
int32_t hit(const void* ip);
int32_t getHits(const void* ip);
void resetHits(const void* ip);
void blacklist(const void* ip);
JS_REQUIRES_STACK void markGlobalSlotUndemotable(JSContext* cx, unsigned slot);
JS_REQUIRES_STACK bool isGlobalSlotUndemotable(JSContext* cx, unsigned slot) const;
JS_REQUIRES_STACK void markStackSlotUndemotable(JSContext* cx, unsigned slot);
JS_REQUIRES_STACK bool isStackSlotUndemotable(JSContext* cx, unsigned slot) const;
void clear();
void clearHitCounts();
void clearDemotability();
void clear() {
clearDemotability();
clearHitCounts();
}
};
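The collision policy called out in the comment above is what keeps this structure so small: a colliding entry can only make a slot look undemotable or a site look hotter, never produce wrong code, so no probing or chaining is needed. A minimal sketch of the idea (hashPtr and MiniOracle are hypothetical stand-ins, not the real implementation):

    #include <stdint.h>
    #include <stddef.h>

    #define MINI_ORACLE_SIZE 4096

    /* Hypothetical pointer hash; the real table uses its own function. */
    static inline size_t hashPtr(const void* ip)
    {
        return ((uintptr_t) ip >> 2) % MINI_ORACLE_SIZE;
    }

    struct MiniOracle {
        uint32_t hits[MINI_ORACLE_SIZE];

        /* On collision two ips share a counter: a loop may merely appear
           hotter (or a slot stay undemoted), costing optimization only. */
        int32_t hit(const void* ip)       { return (int32_t) ++hits[hashPtr(ip)]; }
        int32_t getHits(const void* ip)   { return (int32_t) hits[hashPtr(ip)]; }
        void    resetHits(const void* ip) { hits[hashPtr(ip)] = 0; }
    };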
typedef Queue<uint16> SlotList;
class TypeMap : public Queue<uint8> {
public:
JS_REQUIRES_STACK void captureGlobalTypes(JSContext* cx, SlotList& slots);
JS_REQUIRES_STACK void captureStackTypes(JSContext* cx, unsigned callDepth);
JS_REQUIRES_STACK void captureTypes(JSContext* cx, SlotList& slots, unsigned callDepth);
JS_REQUIRES_STACK void captureMissingGlobalTypes(JSContext* cx,
SlotList& slots,
unsigned stackSlots);
bool matches(TypeMap& other) const;
};
@ -210,11 +229,21 @@ struct VMSideExit : public nanojit::SideExit
ExitType exitType;
};
static inline uint8* getTypeMap(nanojit::SideExit* exit)
static inline uint8* getStackTypeMap(nanojit::SideExit* exit)
{
return (uint8*)(((VMSideExit*)exit) + 1);
}
static inline uint8* getGlobalTypeMap(nanojit::SideExit* exit)
{
return getStackTypeMap(exit) + ((VMSideExit*)exit)->numStackSlots;
}
static inline uint8* getFullTypeMap(nanojit::SideExit* exit)
{
return getStackTypeMap(exit);
}
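/*
 * Layout the three accessors above assume, in one contiguous allocation
 * (the global portion is whatever follows the stack slots):
 *
 *   | VMSideExit header | stack types (numStackSlots) | global types |
 *   ^ exit              ^ getStackTypeMap(exit)       ^ getGlobalTypeMap(exit)
 *                         == getFullTypeMap(exit)
 *
 * TreeInfo::typeMap below uses the same stack-then-global ordering, so a
 * full type map has the same shape in both places.
 */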
struct InterpState
{
void* sp; /* native stack pointer, stack[0] is spbase[0] */
@ -227,6 +256,7 @@ struct InterpState
VMSideExit* lastTreeCallGuard; /* guard we want to grow from if the tree
call exit guard mismatched */
void* rpAtLastTreeCall; /* value of rp at innermost tree call guard */
JSObject* globalObj; /* pointer to the global object */
};
struct UnstableExit
@ -243,7 +273,8 @@ public:
unsigned maxNativeStackSlots;
ptrdiff_t nativeStackBase;
unsigned maxCallDepth;
TypeMap stackTypeMap;
TypeMap typeMap;
unsigned stackSlots;
Queue<nanojit::Fragment*> dependentTrees;
unsigned branchCount;
Queue<VMSideExit*> sideExits;
@ -253,6 +284,16 @@ public:
fragment = _fragment;
}
~TreeInfo();
inline unsigned globalSlots() {
return typeMap.length() - stackSlots;
}
inline uint8* globalTypeMap() {
return typeMap.data() + stackSlots;
}
inline uint8* stackTypeMap() {
return typeMap.data();
}
};
struct FrameInfo {
@ -300,6 +341,7 @@ class TraceRecorder : public avmplus::GCObject {
nanojit::LIns* gp_ins;
nanojit::LIns* eos_ins;
nanojit::LIns* eor_ins;
nanojit::LIns* globalObj_ins;
nanojit::LIns* rval_ins;
nanojit::LIns* inner_sp_ins;
bool deepAborted;
@ -311,7 +353,6 @@ class TraceRecorder : public avmplus::GCObject {
bool terminate;
intptr_t terminate_ip_adj;
nanojit::Fragment* outerToBlacklist;
nanojit::Fragment* promotedPeer;
TraceRecorder* nextRecorderToAbort;
bool wasRootFragment;
@ -320,8 +361,8 @@ class TraceRecorder : public avmplus::GCObject {
JS_REQUIRES_STACK ptrdiff_t nativeStackOffset(jsval* p) const;
JS_REQUIRES_STACK void import(nanojit::LIns* base, ptrdiff_t offset, jsval* p, uint8& t,
const char *prefix, uintN index, JSStackFrame *fp);
JS_REQUIRES_STACK void import(TreeInfo* treeInfo, nanojit::LIns* sp, unsigned ngslots,
unsigned callDepth, uint8* globalTypeMap, uint8* stackTypeMap);
JS_REQUIRES_STACK void import(TreeInfo* treeInfo, nanojit::LIns* sp, unsigned stackSlots,
unsigned callDepth, unsigned ngslots, uint8* typeMap);
void trackNativeStackUse(unsigned slots);
JS_REQUIRES_STACK bool lazilyImportGlobalSlot(unsigned slot);
@ -339,7 +380,8 @@ class TraceRecorder : public avmplus::GCObject {
JS_REQUIRES_STACK bool checkType(jsval& v, uint8 t, jsval*& stage_val,
nanojit::LIns*& stage_ins, unsigned& stage_count);
JS_REQUIRES_STACK bool deduceTypeStability(nanojit::Fragment* root_peer,
nanojit::Fragment** stable_peer, unsigned* demotes);
nanojit::Fragment** stable_peer,
bool& demote);
JS_REQUIRES_STACK jsval& argval(unsigned n) const;
JS_REQUIRES_STACK jsval& varval(unsigned n) const;
@ -439,12 +481,12 @@ class TraceRecorder : public avmplus::GCObject {
JS_REQUIRES_STACK void fuseIf(jsbytecode* pc, bool cond, nanojit::LIns* x);
bool hasMethod(JSObject* obj, jsid id);
bool hasIteratorMethod(JSObject* obj);
JS_REQUIRES_STACK bool hasIteratorMethod(JSObject* obj);
public:
JS_REQUIRES_STACK
TraceRecorder(JSContext* cx, VMSideExit*, nanojit::Fragment*, TreeInfo*,
unsigned ngslots, uint8* globalTypeMap, uint8* stackTypeMap,
unsigned stackSlots, unsigned ngslots, uint8* typeMap,
VMSideExit* expectedInnerExit, nanojit::Fragment* outerToBlacklist);
~TraceRecorder();
@ -454,15 +496,13 @@ public:
JS_REQUIRES_STACK nanojit::LIns* snapshot(ExitType exitType);
nanojit::Fragment* getFragment() const { return fragment; }
JS_REQUIRES_STACK bool isLoopHeader(JSContext* cx) const;
JS_REQUIRES_STACK void compile(nanojit::Fragmento* fragmento);
JS_REQUIRES_STACK bool closeLoop(nanojit::Fragmento* fragmento, bool& demote,
unsigned *demotes);
JS_REQUIRES_STACK void endLoop(nanojit::Fragmento* fragmento);
JS_REQUIRES_STACK void compile(JSTraceMonitor* tm);
JS_REQUIRES_STACK bool closeLoop(JSTraceMonitor* tm, bool& demote);
JS_REQUIRES_STACK void endLoop(JSTraceMonitor* tm);
JS_REQUIRES_STACK void joinEdgesToEntry(nanojit::Fragmento* fragmento,
nanojit::Fragment* peer_root);
void blacklist() { fragment->blacklist(); }
JS_REQUIRES_STACK bool adjustCallerTypes(nanojit::Fragment* f, unsigned* demote_slots,
bool& trash);
JS_REQUIRES_STACK bool adjustCallerTypes(nanojit::Fragment* f);
JS_REQUIRES_STACK nanojit::Fragment* findNestedCompatiblePeer(nanojit::Fragment* f,
nanojit::Fragment** empty);
JS_REQUIRES_STACK void prepareTreeCall(nanojit::Fragment* inner);
@ -484,7 +524,6 @@ public:
void deepAbort() { deepAborted = true; }
bool wasDeepAborted() { return deepAborted; }
bool walkedOutOfLoop() { return terminate; }
void setPromotedPeer(nanojit::Fragment* peer) { promotedPeer = peer; }
TreeInfo* getTreeInfo() { return treeInfo; }
#define OPDEF(op,val,name,token,length,nuses,ndefs,prec,format) \

View File

@ -1030,7 +1030,7 @@ namespace nanojit
default:
NanoAssertMsgf(false, "unsupported LIR instruction: %d (~0x40: %d)", op, op&~LIR64);
break;
case LIR_live: {
countlir_live();
pending_lives.add(ins->oprnd1());
@ -1329,7 +1329,9 @@ namespace nanojit
verbose_only( if (_verbose) { outputAddr=true; asm_output("[%s]", _thisfrag->lirbuf->names->formatRef(ins)); } )
break;
}
case LIR_xbarrier: {
break;
}
case LIR_xt:
case LIR_xf:
{

View File

@ -1882,6 +1882,7 @@ namespace nanojit
case LIR_x:
case LIR_xt:
case LIR_xf:
case LIR_xbarrier:
formatGuard(i, s);
break;

View File

@ -140,7 +140,7 @@ namespace nanojit
};
inline bool isGuard(LOpcode op) {
return op==LIR_x || op==LIR_xf || op==LIR_xt || op==LIR_loop;
return op == LIR_x || op == LIR_xf || op == LIR_xt || op == LIR_loop || op == LIR_xbarrier;
}
inline bool isCall(LOpcode op) {

View File

@ -176,7 +176,7 @@ OPDEF(uge, 63, 2) // 0x3F 0011 1111
OPDEF64(2, 0, 2) // wraps a pair of refs
OPDEF64(file, 1, 2)
OPDEF64(line, 2, 2)
OPDEF64(unused3_64, 3, 2)
OPDEF64(xbarrier, 3, 1) // memory barrier (dummy guard)
OPDEF64(unused4_64, 4, 2)
OPDEF64(unused5_64, 5, 2)

View File

@ -66,6 +66,13 @@ const Register Assembler::argRegs[] = { R0, R1, R2, R3 };
const Register Assembler::retRegs[] = { R0, R1 };
const Register Assembler::savedRegs[] = { R4, R5, R6, R7, R8, R9, R10 };
const char *ccName(ConditionCode cc)
{
const char *ccNames[] = { "eq", "ne", "cs", "cc", "mi", "pl", "vs", "vc",
"hi", "ls", "ge", "lt", "gt", "le", "al", "nv" };
return ccNames[(int)cc];
}
void
Assembler::nInit(AvmCore*)
{
@ -1039,63 +1046,6 @@ Assembler::asm_fcmp(LInsp ins)
Register ra = findRegFor(lhs, FpRegs);
Register rb = findRegFor(rhs, FpRegs);
// We can't uniquely identify fge/fle via a single bit
// pattern (since equality and lt/gt are separate bits);
// so convert to the single-bit variant.
if (op == LIR_fge) {
Register temp = ra;
ra = rb;
rb = temp;
op = LIR_flt;
} else if (op == LIR_fle) {
Register temp = ra;
ra = rb;
rb = temp;
op = LIR_fgt;
}
// There is no way to test for an unordered result using
// the conditional form of an instruction; the encoding (C=1 V=1)
// ends up having overlaps with a few other tests. So, test for
// the explicit mask.
uint8_t mask = 0x0;
// NZCV
// for a valid ordered result, V is always 0 from VFP
if (op == LIR_feq)
// ZC // cond EQ (both equal and "not less than"
mask = 0x6;
else if (op == LIR_flt)
// N // cond MI
mask = 0x8;
else if (op == LIR_fgt)
// C // cond CS
mask = 0x2;
else
NanoAssert(0);
/*
// these were converted into gt and lt above.
if (op == LIR_fle)
// NZ // cond LE
mask = 0xC;
else if (op == LIR_fge)
// ZC // cond fail?
mask = 0x6;
*/
// TODO XXX could do this as fcmpd; fmstat; tstvs rX, #0 the tstvs
// would reset the status bits if V (NaN flag) is set, but that
// doesn't work for NE. For NE could teqvs rX, #1. rX needs to
// be any register that has lsb == 0, such as sp/fp/pc.
// Test explicitly with the full mask; if V is set, the test will fail.
// Assumption is that this will be followed up by a BEQ/BNE
CMPi(Scratch, mask);
// grab just the condition fields
SHRi(Scratch, 28);
MRS(Scratch);
// do the comparison and get results loaded in ARM status register
FMSTAT();
FCMPD(ra, rb);
}
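
With the explicit mask test gone, asm_fcmp is left emitting just the compare (FCMPD) and the flag transfer (FMSTAT); each use site below selects an ARM condition code directly. A summary of the mapping they rely on, assuming the standard VFP flag encoding (equal: Z=C=1; less-than: N=1; greater-than: C=1; unordered: C=V=1):

    // LIR compare   branch-on-true   branch-on-false (also catches unordered)
    // LIR_feq       EQ               NE
    // LIR_flt       MI               PL
    // LIR_fgt       GT               LE
    // LIR_fle       LS               HI
    // LIR_fge       GE               LT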
@ -1120,10 +1070,28 @@ Assembler::asm_branch(bool branchOnFalse, LInsp cond, NIns* targ, bool isfar)
if (condop >= LIR_feq && condop <= LIR_fge)
{
if (branchOnFalse)
JNE(targ);
else
JE(targ);
ConditionCode cc = NV;
if (branchOnFalse) {
switch (condop) {
case LIR_feq: cc = NE; break;
case LIR_flt: cc = PL; break;
case LIR_fgt: cc = LE; break;
case LIR_fle: cc = HI; break;
case LIR_fge: cc = LT; break;
}
} else {
switch (condop) {
case LIR_feq: cc = EQ; break;
case LIR_flt: cc = MI; break;
case LIR_fgt: cc = GT; break;
case LIR_fle: cc = LS; break;
case LIR_fge: cc = GE; break;
}
}
B_cond(cc, targ);
asm_output("b(%d) 0x%08x", cc, (unsigned int) targ);
NIns *at = _nIns;
asm_fcmp(cond);
@ -1240,7 +1208,14 @@ Assembler::asm_fcond(LInsp ins)
// only want certain regs
Register r = prepResultReg(ins, AllowableFlagRegs);
SETE(r);
switch (ins->opcode()) {
case LIR_feq: SET(r,EQ,NE); break;
case LIR_flt: SET(r,MI,PL); break;
case LIR_fgt: SET(r,GT,LE); break;
case LIR_fle: SET(r,LS,HI); break;
case LIR_fge: SET(r,GE,LT); break;
}
asm_fcmp(ins);
}

View File

@ -156,6 +156,7 @@ typedef enum {
NV = 0xF // NeVer
} ConditionCode;
const char *ccName(ConditionCode cc);
typedef int RegisterMask;
typedef struct _FragInfo {
@ -692,23 +693,26 @@ typedef enum {
// MOV(EQ) _r, #1
// EOR(NE) _r, _r
#define SET(_r,_cond,_opp) \
#define SET(_r,_cond,_opp) do { \
underrunProtect(8); \
*(--_nIns) = (NIns)( (_opp<<28) | (1<<21) | ((_r)<<16) | ((_r)<<12) | (_r) ); \
*(--_nIns) = (NIns)( (_cond<<28) | (0x3A<<20) | ((_r)<<12) | (1) );
*(--_nIns) = (NIns)( (_cond<<28) | (0x3A<<20) | ((_r)<<12) | (1) ); \
asm_output("mov%s %s, #1", ccName(_cond), gpn(r), gpn(r)); \
asm_output("eor%s %s, %s", ccName(_opp), gpn(r), gpn(r)); \
} while (0)
#define SETE(r) do {SET(r,EQ,NE); asm_output("sete %s",gpn(r)); } while(0)
#define SETL(r) do {SET(r,LT,GE); asm_output("setl %s",gpn(r)); } while(0)
#define SETLE(r) do {SET(r,LE,GT); asm_output("setle %s",gpn(r)); } while(0)
#define SETG(r) do {SET(r,GT,LE); asm_output("setg %s",gpn(r)); } while(0)
#define SETGE(r) do {SET(r,GE,LT); asm_output("setge %s",gpn(r)); } while(0)
#define SETB(r) do {SET(r,CC,CS); asm_output("setb %s",gpn(r)); } while(0)
#define SETBE(r) do {SET(r,LS,HI); asm_output("setb %s",gpn(r)); } while(0)
#define SETAE(r) do {SET(r,CS,CC); asm_output("setae %s",gpn(r)); } while(0)
#define SETA(r) do {SET(r,HI,LS); asm_output("seta %s",gpn(r)); } while(0)
#define SETO(r) do {SET(r,VS,LS); asm_output("seto %s",gpn(r)); } while(0)
#define SETC(r) do {SET(r,CS,LS); asm_output("setc %s",gpn(r)); } while(0)
#define SETE(r) SET(r,EQ,NE)
#define SETL(r) SET(r,LT,GE)
#define SETLE(r) SET(r,LE,GT)
#define SETG(r) SET(r,GT,LE)
#define SETGE(r) SET(r,GE,LT)
#define SETB(r) SET(r,CC,CS)
#define SETBE(r) SET(r,LS,HI)
#define SETAE(r) SET(r,CS,CC)
#define SETA(r) SET(r,HI,LS)
#define SETO(r) SET(r,VS,LS)
#define SETC(r) SET(r,CS,LS)
// This zero-extends a reg that has been set using one of the SET macros,
// but is a NOOP on ARM/Thumb
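
The new do { ... } while (0) wrapper on SET is the standard idiom for making a multi-statement macro behave as a single statement; without it, only the first emitted instruction would be guarded by an unbraced if, and a following else would not parse. A self-contained illustration with toy macros (not the real emitters):

    #include <stdio.h>

    #define SET_BAD(r)  printf("mov r%d, #1\n", (r)); printf("eor r%d, r%d\n", (r), (r))
    #define SET_GOOD(r) do { printf("mov r%d, #1\n", (r)); \
                             printf("eor r%d, r%d\n", (r), (r)); } while (0)

    int main(void)
    {
        int cond = 0;
        /* With SET_BAD(0) here, only the first printf would be guarded
         * by the if and the else below would fail to compile. SET_GOOD
         * expands to exactly one statement, like a function call. */
        if (cond)
            SET_GOOD(0);
        else
            SET_GOOD(1);
        return 0;
    }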

View File

@ -157,6 +157,14 @@ namespace avmplus {
{
return calloc(1, size);
}
inline void*
operator new(size_t size, char* c)
{
// We use placement-new in LIR buffers sometimes.
memset(c, 0, size);
return c;
}
static void operator delete (void *gcObject)
{
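
This zero-filling placement operator new is what lets LIR buffers construct objects into storage they have already reserved. A minimal sketch of the calling pattern, with hypothetical names (LirChunk, reserve) since the real buffer plumbing is not part of this hunk:

    #include <cstring>
    #include <cstddef>

    struct LirChunk {
        /* Member overload mirroring the one above: zero the reserved
           storage and construct in place; no heap allocation happens. */
        void* operator new(size_t size, char* c) {
            memset(c, 0, size);
            return c;
        }
        int op;
    };

    /* Hypothetical bump allocator standing in for the LIR buffer. */
    static char arena[4096];
    static size_t arenaUsed = 0;
    static char* reserve(size_t n) { char* p = arena + arenaUsed; arenaUsed += n; return p; }

    LirChunk* newChunk()
    {
        return new (reserve(sizeof(LirChunk))) LirChunk; /* picks (size_t, char*) */
    }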

View File

@ -229,9 +229,6 @@ struct JSShellContextData {
PRIntervalTime timeout;
volatile PRIntervalTime startTime; /* startTime + timeout is time when
script must be stopped */
PRIntervalTime maybeGCPeriod;
volatile PRIntervalTime lastMaybeGCTime;/* lastMaybeGCTime + maybeGCPeriod
is the time to call MaybeGC */
PRIntervalTime yieldPeriod;
volatile PRIntervalTime lastYieldTime; /* lastYieldTime + yieldPeriod is
the time to call
@ -239,7 +236,6 @@ struct JSShellContextData {
#else
int64 stopTime; /* time when script must be
stopped */
int64 nextMaybeGCTime;/* time to call JS_MaybeGC */
#endif
};
@ -249,7 +245,6 @@ SetTimeoutValue(JSContext *cx, jsdouble t);
#ifdef JS_THREADSAFE
# define DEFAULT_YIELD_PERIOD() (PR_TicksPerSecond() / 50)
# define DEFAULT_MAYBEGC_PERIOD() (PR_TicksPerSecond() / 10)
/*
* The function assumes that the GC lock is already held on entry. On a
@ -261,8 +256,6 @@ RescheduleWatchdog(JSContext *cx, JSShellContextData *data, PRIntervalTime now);
#else
# define DEFAULT_MAYBEGC_PERIOD() (MICROSECONDS_PER_SECOND / 10)
const int64 MICROSECONDS_PER_SECOND = 1000000LL;
const int64 MAX_TIME_VALUE = 0x7FFFFFFFFFFFFFFFLL;
@ -277,16 +270,13 @@ NewContextData()
return NULL;
#ifdef JS_THREADSAFE
data->timeout = PR_INTERVAL_NO_TIMEOUT;
data->maybeGCPeriod = PR_INTERVAL_NO_TIMEOUT;
data->yieldPeriod = PR_INTERVAL_NO_TIMEOUT;
# ifdef DEBUG
data->startTime = 0;
data->lastMaybeGCTime = 0;
data->lastYieldTime = 0;
# endif
#else /* !JS_THREADSAFE */
data->stopTime = MAX_TIME_VALUE;
data->nextMaybeGCTime = MAX_TIME_VALUE;
#endif
return data;
@ -306,7 +296,6 @@ ShellOperationCallback(JSContext *cx)
{
JSShellContextData *data = GetContextData(cx);
JSBool doStop;
JSBool doMaybeGC;
#ifdef JS_THREADSAFE
JSBool doYield;
PRIntervalTime now = PR_IntervalNow();
@ -314,11 +303,6 @@ ShellOperationCallback(JSContext *cx)
doStop = (data->timeout != PR_INTERVAL_NO_TIMEOUT &&
now - data->startTime >= data->timeout);
doMaybeGC = (data->maybeGCPeriod != PR_INTERVAL_NO_TIMEOUT &&
now - data->lastMaybeGCTime >= data->maybeGCPeriod);
if (doMaybeGC)
data->lastMaybeGCTime = now;
doYield = (data->yieldPeriod != PR_INTERVAL_NO_TIMEOUT &&
now - data->lastYieldTime >= data->yieldPeriod);
if (doYield)
@ -328,9 +312,6 @@ ShellOperationCallback(JSContext *cx)
int64 now = JS_Now();
doStop = (now >= data->stopTime);
doMaybeGC = (now >= data->nextMaybeGCTime);
if (doMaybeGC)
data->nextMaybeGCTime = now + DEFAULT_MAYBEGC_PERIOD();
#endif
if (doStop) {
@ -338,9 +319,6 @@ ShellOperationCallback(JSContext *cx)
return JS_FALSE;
}
if (doMaybeGC)
JS_MaybeGC(cx);
#ifdef JS_THREADSAFE
if (doYield)
JS_YieldRequest(cx);
@ -493,7 +471,7 @@ Process(JSContext *cx, JSObject *obj, char *filename, JSBool forceTTY)
lineno++;
} while (!JS_BufferIsCompilableUnit(cx, obj, buffer, len));
if (hitEOF)
if (hitEOF && !buffer)
break;
/* Clear any pending exception from previous failed compiles. */
@ -522,7 +500,7 @@ Process(JSContext *cx, JSObject *obj, char *filename, JSBool forceTTY)
JS_DestroyScript(cx, script);
}
*buffer = '\0';
} while (!gQuitting);
} while (!hitEOF && !gQuitting);
free(buffer);
fprintf(gOutFile, "\n");
@ -1099,19 +1077,43 @@ GCParameter(JSContext *cx, uintN argc, jsval *vp)
param = JSGC_MAX_BYTES;
} else if (strcmp(paramName, "maxMallocBytes") == 0) {
param = JSGC_MAX_MALLOC_BYTES;
} else if (strcmp(paramName, "gcStackpoolLifespan") == 0) {
param = JSGC_STACKPOOL_LIFESPAN;
} else if (strcmp(paramName, "gcBytes") == 0) {
param = JSGC_BYTES;
} else if (strcmp(paramName, "gcNumber") == 0) {
param = JSGC_NUMBER;
} else if (strcmp(paramName, "gcTriggerFactor") == 0) {
param = JSGC_TRIGGER_FACTOR;
} else {
JS_ReportError(cx,
"the first argument argument must be either maxBytes "
"or maxMallocBytes");
"the first argument argument must be maxBytes, "
"maxMallocBytes, gcStackpoolLifespan, gcBytes, "
"gcNumber or gcTriggerFactor");
return JS_FALSE;
}
if (!JS_ValueToECMAUint32(cx, argc < 2 ? JSVAL_VOID : vp[3], &value))
return JS_FALSE;
if (value == 0) {
JS_ReportError(cx,
"the second argument must be convertible to uint32 with "
"non-zero value");
return JS_FALSE;
}
if (argc == 1) {
value = JS_GetGCParameter(cx->runtime, param);
return JS_NewNumberValue(cx, value, &vp[0]);
}
if (param == JSGC_NUMBER ||
param == JSGC_BYTES) {
JS_ReportError(cx, "Attempt to change read-only parameter %s",
paramName);
return JS_FALSE;
}
if (!JS_ValueToECMAUint32(cx, vp[3], &value)) {
JS_ReportError(cx,
"the second argument must be convertible to uint32 "
"with non-zero value");
return JS_FALSE;
}
if (param == JSGC_TRIGGER_FACTOR && value < 100) {
JS_ReportError(cx,
"the gcTriggerFactor value must be >= 100");
return JS_FALSE;
}
JS_SetGCParameter(cx->runtime, param, value);
@ -2226,8 +2228,10 @@ GetPDA(JSContext *cx, uintN argc, jsval *vp)
if (!JS_ValueToObject(cx, argc == 0 ? JSVAL_VOID : vp[2], &vobj))
return JS_FALSE;
if (!vobj)
if (!vobj) {
*vp = JSVAL_VOID;
return JS_TRUE;
}
aobj = JS_NewArrayObject(cx, 0, NULL);
if (!aobj)
@ -2238,7 +2242,7 @@ GetPDA(JSContext *cx, uintN argc, jsval *vp)
if (!ok)
return JS_FALSE;
pd = pda.array;
for (i = 0; i < pda.length; i++) {
for (i = 0; i < pda.length; i++, pd++) {
pdobj = JS_NewObject(cx, NULL, NULL, NULL);
if (!pdobj) {
ok = JS_FALSE;
@ -3151,8 +3155,6 @@ CheckCallbackTime(JSContext *cx, JSShellContextData *data, PRIntervalTime now,
UpdateSleepDuration(now, data->startTime, data->timeout,
sleepDuration, expired);
UpdateSleepDuration(now, data->lastMaybeGCTime, data->maybeGCPeriod,
sleepDuration, expired);
UpdateSleepDuration(now, data->lastYieldTime, data->yieldPeriod,
sleepDuration, expired);
if (expired) {
@ -3256,24 +3258,15 @@ SetTimeoutValue(JSContext *cx, jsdouble t)
return JS_FALSE;
}
/*
* For compatibility periodic MaybeGC calls are enabled only when the
* execution time is bounded.
*/
JSShellContextData *data = GetContextData(cx);
#ifdef JS_THREADSAFE
JS_LOCK_GC(cx->runtime);
if (t < 0) {
data->timeout = PR_INTERVAL_NO_TIMEOUT;
data->maybeGCPeriod = PR_INTERVAL_NO_TIMEOUT;
} else {
PRIntervalTime now = PR_IntervalNow();
data->timeout = PRIntervalTime(t * PR_TicksPerSecond());
data->startTime = now;
if (data->maybeGCPeriod == PR_INTERVAL_NO_TIMEOUT) {
data->maybeGCPeriod = DEFAULT_MAYBEGC_PERIOD();
data->lastMaybeGCTime = now;
}
if (!RescheduleWatchdog(cx, data, now)) {
/* The GC lock is already released here. */
return JS_FALSE;
@ -3284,13 +3277,10 @@ SetTimeoutValue(JSContext *cx, jsdouble t)
#else /* !JS_THREADSAFE */
if (t < 0) {
data->stopTime = MAX_TIME_VALUE;
data->nextMaybeGCTime = MAX_TIME_VALUE;
JS_SetOperationLimit(cx, JS_MAX_OPERATION_LIMIT);
} else {
int64 now = JS_Now();
data->stopTime = now + int64(t * MICROSECONDS_PER_SECOND);
if (data->nextMaybeGCTime == MAX_TIME_VALUE)
data->nextMaybeGCTime = now + DEFAULT_MAYBEGC_PERIOD();
/*
* Call the callback infrequently enough to avoid the overhead of

View File

@ -2435,11 +2435,11 @@ function testThinLoopDemote() {
}
testThinLoopDemote.expected = 100;
testThinLoopDemote.jitstats = {
recorderStarted: 3,
recorderStarted: 2,
recorderAborted: 0,
traceCompleted: 1,
traceTriggered: 0,
unstableLoopVariable: 2
traceCompleted: 2,
traceTriggered: 1,
unstableLoopVariable: 1
};
test(testThinLoopDemote);
@ -2482,11 +2482,11 @@ function testWeirdDateParse() {
}
testWeirdDateParse.expected = "11,17,2008,11,17,2008,11,17,2008,11,17,2008,11,17,2008";
testWeirdDateParse.jitstats = {
recorderStarted: 10,
recorderStarted: 7,
recorderAborted: 1,
traceCompleted: 5,
traceTriggered: 13,
unstableLoopVariable: 6,
traceCompleted: 6,
traceTriggered: 14,
unstableLoopVariable: 3,
noCompatInnerTrees: 1
};
test(testWeirdDateParse);
@ -3847,7 +3847,7 @@ function testBitOrInconvertibleObjectAny()
{
threw = true;
if (i !== 94)
return "expected i === 4, got " + i;
return "expected i === 94, got " + i;
if (q !== 95)
return "expected q === 95, got " + q;
if (count !== 95)
@ -3997,6 +3997,50 @@ function testStringResolve() {
testStringResolve.expected = 3;
test(testStringResolve);
//test no multitrees assert
function testGlobalMultitrees1() {
(function() {
for (var j = 0; j < 4; ++j) {
for each (e in ['A', 1, 'A']) {
}
}
})();
return true;
}
testGlobalMultitrees1.expected = true;
test(testGlobalMultitrees1);
var q = [];
for each (b in [0x3FFFFFFF, 0x3FFFFFFF, 0x3FFFFFFF]) {
for each (let e in [{}, {}, {}, "", {}]) {
b = (b | 0x40000000) + 1;
q.push(b);
}
}
function testLetWithUnstableGlobal() {
return q.join(",");
}
testLetWithUnstableGlobal.expected = "2147483648,-1073741823,-1073741822,-1073741821,-1073741820,2147483648,-1073741823,-1073741822,-1073741821,-1073741820,2147483648,-1073741823,-1073741822,-1073741821,-1073741820";
test(testLetWithUnstableGlobal);
delete b;
delete q;
for each (testBug474769_b in [1, 1, 1, 1.5, 1, 1]) {
(function() { for each (let testBug474769_h in [0, 0, 1.4, ""]) {} })()
}
function testBug474769() {
return testBug474769_b;
}
testBug474769.expected = 1;
test(testBug474769);
function testReverseArgTypes() {
for (var j = 0; j < 4; ++j) ''.replace('', /x/);
return 1;
}
testReverseArgTypes.expected = 1;
test(testReverseArgTypes);
/*****************************************************************************
* *
* _____ _ _ _____ ______ _____ _______ *