mirror of https://gitlab.winehq.org/wine/wine-gecko.git (synced 2024-09-13 09:24:08 -07:00)

Backout all patches since last mozilla-central merge (Thu Jan 22 19:14:02 2009 -500 by sayrer).

This commit is contained in:
parent c1e3906c82
commit 1d277bc633
@@ -148,8 +148,10 @@ typedef struct JSTraceMonitor {

#ifdef JS_TRACER
# define JS_ON_TRACE(cx) (JS_TRACE_MONITOR(cx).onTrace)
# define JS_EXECUTING_TRACE(cx) (JS_ON_TRACE(cx) && !JS_TRACE_MONITOR(cx).recorder)
#else
# define JS_ON_TRACE(cx) JS_FALSE
# define JS_EXECUTING_TRACE(cx) JS_FALSE
#endif

#ifdef JS_THREADSAFE

@@ -920,15 +922,6 @@ struct JSContext {
#endif

#ifdef __cplusplus

static inline JSAtom **
FrameAtomBase(JSContext *cx, JSStackFrame *fp)
{
    return fp->imacpc
           ? COMMON_ATOMS_START(&cx->runtime->atomState)
           : fp->script->atomMap.vector;
}

/* FIXME(bug 332648): Move this into a public header. */
class JSAutoTempValueRooter
{

@@ -976,8 +969,7 @@ class JSAutoResolveFlags
    JSContext *mContext;
    uintN mSaved;
};

#endif /* __cpluscplus */
#endif

/*
 * Slightly more readable macros for testing per-context option settings (also
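The two predicates in the first hunk differ only in whether a live recorder counts as "on trace": a later hunk in this same diff notes that ON_TRACE "means either recording or coming from traced code". A minimal sketch of that distinction, assuming only the JSTraceMonitor fields the macros above reference (onTrace, recorder); the helper name is illustrative and not part of this commit:

/* Sketch only, not part of the commit. JS_ON_TRACE() is true both while
 * recording and while running compiled trace code; a non-null recorder
 * means we are still in the interpreter building a trace, so only
 * "onTrace && !recorder" indicates native trace code is actually running. */
static bool
IsExecutingTrace(JSContext *cx)
{
    return JS_ON_TRACE(cx) && !JS_TRACE_MONITOR(cx).recorder;
}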
@@ -735,13 +735,10 @@ JS_SetWatchPoint(JSContext *cx, JSObject *obj, jsval idval,
        return JS_FALSE;
    }

    if (JSVAL_IS_INT(idval)) {
    if (JSVAL_IS_INT(idval))
        propid = INT_JSVAL_TO_JSID(idval);
    } else {
        if (!js_ValueToStringId(cx, idval, &propid))
            return JS_FALSE;
        CHECK_FOR_STRING_INDEX(propid);
    }
    else if (!js_ValueToStringId(cx, idval, &propid))
        return JS_FALSE;

    if (!js_LookupProperty(cx, obj, propid, &pobj, &prop))
        return JS_FALSE;
@@ -241,7 +241,9 @@ js_FillPropertyCache(JSContext *cx, JSObject *obj, jsuword kshape,
    }

    /* If getting a value via a stub getter, we can cache the slot. */
    if (!(cs->format & (JOF_SET | JOF_INCDEC | JOF_FOR)) &&
    if (!(cs->format & JOF_SET) &&
        !((cs->format & (JOF_INCDEC | JOF_FOR)) &&
          (sprop->attrs & JSPROP_READONLY)) &&
        SPROP_HAS_STUB_GETTER(sprop) &&
        SPROP_HAS_VALID_SLOT(sprop, scope)) {
        /* Great, let's cache sprop's slot and use it on cache hit. */

@@ -2574,16 +2576,21 @@ js_Interpret(JSContext *cx)

#ifdef JS_TRACER
    /* We had better not be entering the interpreter from JIT-compiled code. */
    TraceRecorder *tr = TRACE_RECORDER(cx);

    /* If a recorder is pending and we try to re-enter the interpreter, flag
       the recorder to be destroyed when we return. */
    if (tr) {
    TraceRecorder *tr = NULL;
    if (JS_ON_TRACE(cx)) {
        tr = TRACE_RECORDER(cx);
        SET_TRACE_RECORDER(cx, NULL);
        if (tr->wasDeepAborted())
            tr->removeFragmentoReferences();
        else
            tr->pushAbortStack();
        JS_TRACE_MONITOR(cx).onTrace = JS_FALSE;
        /*
         * ON_TRACE means either recording or coming from traced code.
         * If there's no recorder (the latter case), don't care.
         */
        if (tr) {
            if (tr->wasDeepAborted())
                tr->removeFragmentoReferences();
            else
                tr->pushAbortStack();
        }
    }
#endif

@@ -2640,7 +2647,9 @@ js_Interpret(JSContext *cx)
        } \
        fp = cx->fp; \
        script = fp->script; \
        atoms = FrameAtomBase(cx, fp); \
        atoms = fp->imacpc \
                ? COMMON_ATOMS_START(&rt->atomState) \
                : script->atomMap.vector; \
        currentVersion = (JSVersion) script->version; \
        JS_ASSERT(fp->regs == &regs); \
        if (cx->throwing) \

@@ -3047,7 +3056,9 @@ js_Interpret(JSContext *cx)

            /* Restore the calling script's interpreter registers. */
            script = fp->script;
            atoms = FrameAtomBase(cx, fp);
            atoms = fp->imacpc
                    ? COMMON_ATOMS_START(&rt->atomState)
                    : script->atomMap.vector;

            /* Resume execution in the calling frame. */
            inlineCallCount--;

@@ -7084,6 +7095,7 @@ js_Interpret(JSContext *cx)

#ifdef JS_TRACER
    if (tr) {
        JS_TRACE_MONITOR(cx).onTrace = JS_TRUE;
        SET_TRACE_RECORDER(cx, tr);
        if (!tr->wasDeepAborted()) {
            tr->popAbortStack();
@@ -3614,7 +3614,7 @@ js_FindPropertyHelper(JSContext *cx, jsid id, JSObject **objp,
    JSProperty *prop;
    JSScopeProperty *sprop;

    JS_ASSERT_IF(entryp, !JS_ON_TRACE(cx));
    JS_ASSERT_IF(entryp, !JS_EXECUTING_TRACE(cx));
    obj = js_GetTopStackFrame(cx)->scopeChain;
    shape = OBJ_SHAPE(obj);
    for (scopeIndex = 0; ; scopeIndex++) {

@@ -3891,7 +3891,7 @@ js_GetPropertyHelper(JSContext *cx, JSObject *obj, jsid id, jsval *vp,
        return JS_FALSE;

    if (entryp) {
        JS_ASSERT_NOT_ON_TRACE(cx);
        JS_ASSERT_NOT_EXECUTING_TRACE(cx);
        js_FillPropertyCache(cx, obj, shape, 0, protoIndex, obj2, sprop, entryp);
    }
    JS_UNLOCK_OBJ(cx, obj2);

@@ -4097,7 +4097,7 @@ js_SetPropertyHelper(JSContext *cx, JSObject *obj, jsid id, jsval *vp,
        return JS_FALSE;

    if (entryp) {
        JS_ASSERT_NOT_ON_TRACE(cx);
        JS_ASSERT_NOT_EXECUTING_TRACE(cx);
        if (!(attrs & JSPROP_SHARED))
            js_FillPropertyCache(cx, obj, shape, 0, 0, obj, sprop, entryp);
        else

@@ -5432,7 +5432,7 @@ js_GetWrappedObject(JSContext *cx, JSObject *obj)
    return obj;
}

#ifdef DEBUG
#if DEBUG

/*
 * Routines to print out values during debugging. These are FRIEND_API to help

@@ -5602,12 +5602,8 @@ js_DumpObject(JSObject *obj)

    sharesScope = (scope->object != obj);
    if (sharesScope) {
        if (proto) {
            fprintf(stderr, "no own properties - see proto (%s at %p)\n",
                    STOBJ_GET_CLASS(proto)->name, proto);
        } else {
            fprintf(stderr, "no own properties - null proto\n");
        }
        fprintf(stderr, "no own properties - see proto (%s at %p)\n",
                STOBJ_GET_CLASS(proto)->name, proto);
    } else {
        fprintf(stderr, "properties:\n");
        for (JSScopeProperty *sprop = SCOPE_LAST_PROP(scope); sprop;
@@ -53,16 +53,16 @@ inline __attribute__ ((unused)) void MUST_FLOW_THROUGH(const char *label) {
inline JS_FORCES_STACK void VOUCH_DOES_NOT_REQUIRE_STACK() {}

inline JS_FORCES_STACK void
JS_ASSERT_NOT_ON_TRACE(JSContext *cx)
JS_ASSERT_NOT_EXECUTING_TRACE(JSContext *cx)
{
    JS_ASSERT(!JS_ON_TRACE(cx));
    JS_ASSERT(!JS_EXECUTING_TRACE(cx));
}

#else
#define MUST_FLOW_THROUGH(label) ((void) 0)
#define MUST_FLOW_LABEL(label)
#define VOUCH_DOES_NOT_REQUIRE_STACK() ((void) 0)
#define JS_ASSERT_NOT_ON_TRACE(cx) JS_ASSERT(!JS_ON_TRACE(cx))
#define JS_ASSERT_NOT_EXECUTING_TRACE(cx) JS_ASSERT(!JS_EXECUTING_TRACE(cx))
#endif

#endif /* jsstaticcheck_h___ */
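The hunks below drop the Oracle's per-pc hit counters and blacklist levels, moving loop-hotness bookkeeping back to the Fragment's own hits(). For reference, a minimal sketch of the effective-hit arithmetic performed by the Oracle::getHits() shown in the removed side of the next hunk; the function name here is illustrative and not part of the commit:

// Sketch of the deleted Oracle::getHits() arithmetic (illustrative only).
#include <cstdint>

int32_t effectiveHits(uint32_t rawHits, uint32_t blacklistLevel)
{
    if (blacklistLevel > 30)   // clamp so the 1 << level penalty stays in range
        blacklistLevel = 30;
    rawHits &= 0x7fffffff;     // keep the raw count non-negative as an int32_t
    return int32_t(rawHits) - (blacklistLevel ? (1 << blacklistLevel) : 0);
}
// With INITIAL_BLACKLIST_LEVEL == 5, one blacklisting costs a jump target
// 2^5 == 32 extra hits before it can look hot again.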
@@ -105,9 +105,6 @@ static const char tagChar[] = "OIDISIBI";
/* Max blacklist level of inner tree immediate recompiling */
#define MAX_INNER_RECORD_BLACKLIST -16

/* Blacklist level to obtain on first blacklisting. */
#define INITIAL_BLACKLIST_LEVEL 5

/* Max native stack size. */
#define MAX_NATIVE_STACK_SLOTS 1024

@@ -392,73 +389,12 @@ globalSlotHash(JSContext* cx, unsigned slot)
        fp = fp->down;

    hash_accum(h, uintptr_t(fp->script));
    hash_accum(h, uintptr_t(OBJ_SHAPE(JS_GetGlobalForObject(cx, fp->scopeChain))));
    hash_accum(h, uintptr_t(cx->globalObject));
    hash_accum(h, uintptr_t(OBJ_SHAPE(cx->globalObject)));
    hash_accum(h, uintptr_t(slot));
    return int(h);
}

static inline size_t
hitHash(const void* ip)
{
    uintptr_t h = 5381;
    hash_accum(h, uintptr_t(ip));
    return size_t(h);
}

Oracle::Oracle()
{
    clear();
}

/* Fetch the jump-target hit count for the current pc. */
int32_t
Oracle::getHits(const void* ip)
{
    size_t h = hitHash(ip);
    uint32_t hc = hits[h];
    uint32_t bl = blacklistLevels[h];

    /* Clamp ranges for subtraction. */
    if (bl > 30)
        bl = 30;
    hc &= 0x7fffffff;

    return hc - (bl ? (1<<bl) : 0);
}

/* Fetch and increment the jump-target hit count for the current pc. */
int32_t
Oracle::hit(const void* ip)
{
    size_t h = hitHash(ip);
    if (hits[h] < 0xffffffff)
        hits[h]++;

    return getHits(ip);
}

/* Reset the hit count for an jump-target and relax the blacklist count. */
void
Oracle::resetHits(const void* ip)
{
    size_t h = hitHash(ip);
    if (hits[h] > 0)
        hits[h]--;
    if (blacklistLevels[h] > 0)
        blacklistLevels[h]--;
}

/* Blacklist with saturation. */
void
Oracle::blacklist(const void* ip)
{
    size_t h = hitHash(ip);
    if (blacklistLevels[h] == 0)
        blacklistLevels[h] = INITIAL_BLACKLIST_LEVEL;
    else if (blacklistLevels[h] < 0xffffffff)
        blacklistLevels[h]++;
}


/* Tell the oracle that a certain global variable should not be demoted. */
JS_REQUIRES_STACK void
@@ -490,14 +426,7 @@ Oracle::isStackSlotUndemotable(JSContext* cx, unsigned slot) const

/* Clear the oracle. */
void
Oracle::clearHitCounts()
{
    memset(hits, 0, sizeof(hits));
    memset(blacklistLevels, 0, sizeof(blacklistLevels));
}

void
Oracle::clearDemotability()
Oracle::clear()
{
    _stackDontDemote.reset();
    _globalDontDemote.reset();

@@ -1113,7 +1042,9 @@ TraceRecorder::TraceRecorder(JSContext* cx, VMSideExit* _anchor, Fragment* _frag
    this->lirbuf = _fragment->lirbuf;
    this->treeInfo = ti;
    this->callDepth = _anchor ? _anchor->calldepth : 0;
    this->atoms = FrameAtomBase(cx, cx->fp);
    this->atoms = cx->fp->imacpc
                  ? COMMON_ATOMS_START(&cx->runtime->atomState)
                  : cx->fp->script->atomMap.vector;
    this->deepAborted = false;
    this->trashSelf = false;
    this->global_dslots = this->globalObj->dslots;
@@ -1146,7 +1077,6 @@ TraceRecorder::TraceRecorder(JSContext* cx, VMSideExit* _anchor, Fragment* _frag
    gp_ins = addName(lir->insLoad(LIR_ldp, lirbuf->state, offsetof(InterpState, gp)), "gp");
    eos_ins = addName(lir->insLoad(LIR_ldp, lirbuf->state, offsetof(InterpState, eos)), "eos");
    eor_ins = addName(lir->insLoad(LIR_ldp, lirbuf->state, offsetof(InterpState, eor)), "eor");
    globalObj_ins = addName(lir->insLoad(LIR_ldp, lirbuf->state, offsetof(InterpState, globalObj)), "globalObj");

    /* If we came from exit, we might not have enough global types. */
    if (JS_TRACE_MONITOR(cx).globalSlots->length() > ti->globalSlots()) {

@@ -2854,6 +2784,9 @@ js_DeleteRecorder(JSContext* cx)
    JSTraceMonitor* tm = &JS_TRACE_MONITOR(cx);

    /* Aborting and completing a trace end up here. */
    JS_ASSERT(tm->onTrace);
    tm->onTrace = false;

    delete tm->recorder;
    tm->recorder = NULL;
}

@@ -2881,6 +2814,15 @@ js_StartRecorder(JSContext* cx, VMSideExit* anchor, Fragment* f, TreeInfo* ti,
{
    JSTraceMonitor* tm = &JS_TRACE_MONITOR(cx);

    /*
     * Emulate on-trace semantics and avoid rooting headaches while recording,
     * by suppressing last-ditch GC attempts while recording a trace. This does
     * mean that trace recording must not nest or the following assertion will
     * botch.
     */
    JS_ASSERT(!tm->onTrace);
    tm->onTrace = true;

    /* start recording if no exception during construction */
    tm->recorder = new (&gc) TraceRecorder(cx, anchor, f, ti,
                                           stackSlots, ngslots, typeMap,
@@ -3189,16 +3131,6 @@ js_RecordTree(JSContext* cx, JSTraceMonitor* tm, Fragment* f, Fragment* outer)
    return true;
}

static inline bool isSlotUndemotable(JSContext* cx, TreeInfo* ti, unsigned slot)
{
    if (slot < ti->stackSlots)
        return oracle.isStackSlotUndemotable(cx, slot);

    JSTraceMonitor* tm = &JS_TRACE_MONITOR(cx);
    uint16* gslots = tm->globalSlots->data();
    return oracle.isGlobalSlotUndemotable(cx, gslots[slot - ti->stackSlots]);
}

JS_REQUIRES_STACK static bool
js_AttemptToStabilizeTree(JSContext* cx, VMSideExit* exit, Fragment* outer)
{

@@ -3222,11 +3154,8 @@ js_AttemptToStabilizeTree(JSContext* cx, VMSideExit* exit, Fragment* outer)
    /* If this exit does not have enough globals, there might exist a peer with more globals that we
     * can join to.
     */
    uint8* m2;
    Fragment* f;
    TreeInfo* ti;
    bool matched;
    bool undemote;
    Fragment* f;
    bool bound = false;
    unsigned int checkSlots;
    for (f = from->first; f != NULL; f = f->peer) {

@@ -3236,33 +3165,7 @@ js_AttemptToStabilizeTree(JSContext* cx, VMSideExit* exit, Fragment* outer)
        JS_ASSERT(exit->numStackSlots == ti->stackSlots);
        /* Check the minimum number of slots that need to be compared. */
        checkSlots = JS_MIN(exit->numStackSlots + exit->numGlobalSlots, ti->typeMap.length());
        m = getFullTypeMap(exit);
        m2 = ti->typeMap.data();
        /* Analyze the exit typemap against the peer typemap.
         * Two conditions are important:
         * 1) Typemaps are identical: these peers can be attached.
         * 2) Typemaps do not match, but only contain I->D mismatches.
         *    In this case, the original tree must be trashed because it
         *    will never connect to any peer.
         */
        matched = true;
        undemote = false;
        for (uint32 i = 0; i < checkSlots; i++) {
            /* If the types are equal we're okay. */
            if (m[i] == m2[i])
                continue;
            matched = false;
            /* If there's an I->D that cannot be resolved, flag it.
             * Otherwise, break and go to the next peer.
             */
            if (m[i] == JSVAL_INT && m2[i] == JSVAL_DOUBLE && isSlotUndemotable(cx, ti, i)) {
                undemote = true;
            } else {
                undemote = false;
                break;
            }
        }
        if (matched) {
        if (memcmp(getFullTypeMap(exit), ti->typeMap.data(), checkSlots) == 0) {
            /* Capture missing globals on both trees and link the fragments together. */
            if (from != f) {
                ti->dependentTrees.addUnique(from);

@@ -3277,20 +3180,12 @@ js_AttemptToStabilizeTree(JSContext* cx, VMSideExit* exit, Fragment* outer)
            for (UnstableExit* uexit = ti->unstableExits; uexit != NULL; uexit = uexit->next) {
                if (uexit->exit == exit) {
                    *tail = uexit->next;
                    delete uexit;
                    bound = true;
                    break;
                }
                tail = &uexit->next;
            }
            JS_ASSERT(bound);
            debug_only_v(js_DumpPeerStability(tm->fragmento, f->ip);)
            break;
        } else if (undemote) {
            /* The original tree is unconnectable, so trash it. */
            js_TrashTree(cx, f);
            /* We shouldn't attempt to record now, since we'll hit a duplicate. */
            return false;
        }
    }
    if (bound)
@@ -3319,9 +3214,9 @@ js_AttemptToExtendTree(JSContext* cx, VMSideExit* anchor, VMSideExit* exitedFrom
        c->root = f;
    }

    debug_only_v(printf("trying to attach another branch to the tree (hits = %d)\n", oracle.getHits(c->ip));)
    debug_only_v(printf("trying to attach another branch to the tree (hits = %d)\n", c->hits());)

    if (oracle.hit(c->ip) >= HOTEXIT) {
    if (++c->hits() >= HOTEXIT) {
        /* start tracing secondary trace from this point */
        c->lirbuf = f->lirbuf;
        unsigned stackSlots;

@@ -3453,10 +3348,10 @@ js_RecordLoopEdge(JSContext* cx, TraceRecorder* r, uintN& inlineCallCount)
            if (old == NULL)
                old = tm->recorder->getFragment();
            js_AbortRecording(cx, "No compatible inner tree");
            if (!f && oracle.hit(peer_root->ip) < MAX_INNER_RECORD_BLACKLIST)
            if (!f && ++peer_root->hits() < MAX_INNER_RECORD_BLACKLIST)
                return false;
            if (old->recordAttempts < MAX_MISMATCH)
                oracle.resetHits(old->ip);
                old->resetHits();
            f = empty ? empty : tm->fragmento->getAnchor(cx->fp->regs->pc);
            return js_RecordTree(cx, tm, f, old);
        }

@@ -3483,13 +3378,13 @@ js_RecordLoopEdge(JSContext* cx, TraceRecorder* r, uintN& inlineCallCount)
        /* abort recording so the inner loop can become type stable. */
        old = fragmento->getLoop(tm->recorder->getFragment()->root->ip);
        js_AbortRecording(cx, "Inner tree is trying to stabilize, abort outer recording");
        oracle.resetHits(old->ip);
        old->resetHits();
        return js_AttemptToStabilizeTree(cx, lr, old);
      case BRANCH_EXIT:
        /* abort recording the outer tree, extend the inner tree */
        old = fragmento->getLoop(tm->recorder->getFragment()->root->ip);
        js_AbortRecording(cx, "Inner tree is trying to grow, abort outer recording");
        oracle.resetHits(old->ip);
        old->resetHits();
        return js_AttemptToExtendTree(cx, lr, NULL, old);
      default:
        debug_only_v(printf("exit_type=%d\n", lr->exitType);)
@@ -3750,7 +3645,6 @@ js_ExecuteTree(JSContext* cx, Fragment* f, uintN& inlineCallCount,
    state.eor = callstack + MAX_CALL_STACK_ENTRIES;
    state.gp = global;
    state.cx = cx;
    state.globalObj = globalObj;
    state.lastTreeExitGuard = NULL;
    state.lastTreeCallGuard = NULL;
    state.rpAtLastTreeCall = NULL;

@@ -3763,12 +3657,15 @@ js_ExecuteTree(JSContext* cx, Fragment* f, uintN& inlineCallCount,
#endif
#endif

    /* Set a flag that indicates to the runtime system that we are running in native code
       now and we don't want automatic GC to happen. Instead we will get a silent failure,
       which will cause a trace exit at which point the interpreter re-tries the operation
       and eventually triggers the GC. */
    JS_ASSERT(!tm->onTrace);
    tm->onTrace = true;
    /*
     * We may be called from js_MonitorLoopEdge while not recording, or while
     * recording. Rather than over-generalize by using a counter instead of a
     * flag, we simply sample and update tm->onTrace if necessary.
     */
    bool onTrace = tm->onTrace;
    if (!onTrace)
        tm->onTrace = true;
    VMSideExit* lr;

    debug_only(fflush(NULL);)
    GuardRecord* rec;

@@ -3777,13 +3674,13 @@ js_ExecuteTree(JSContext* cx, Fragment* f, uintN& inlineCallCount,
#else
    rec = u.func(&state, NULL);
#endif
    VMSideExit* lr = (VMSideExit*)rec->exit;
    lr = (VMSideExit*)rec->exit;

    AUDIT(traceTriggered);

    JS_ASSERT(lr->exitType != LOOP_EXIT || !lr->calldepth);

    tm->onTrace = false;
    tm->onTrace = onTrace;

    /* Except if we find that this is a nested bailout, the guard the call returned is the
       one we have to use to adjust pc and sp. */
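One side of the js_ExecuteTree hunk above asserts the onTrace flag is clear and sets it unconditionally; the other samples it first, because (as its comment says) the function can be entered both while monitoring and while a recorder already holds the flag. A minimal sketch of that sample-and-restore pattern written as a scope guard; the helper type is hypothetical and only illustrates the idea, the code in the hunk keeps the handling inline with a local bool instead:

// Illustrative sketch only, not part of the commit.
struct OnTraceFlagGuard {
    bool &flag;
    bool  saved;
    explicit OnTraceFlagGuard(bool &f) : flag(f), saved(f) { flag = true; }
    ~OnTraceFlagGuard() { flag = saved; }  // restore whatever value we entered with
};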
@@ -3966,13 +3863,6 @@ js_MonitorLoopEdge(JSContext* cx, uintN& inlineCallCount)
        js_FlushJITCache(cx);

    jsbytecode* pc = cx->fp->regs->pc;

    if (oracle.getHits(pc) >= 0 &&
        oracle.getHits(pc)+1 < HOTLOOP) {
        oracle.hit(pc);
        return false;
    }

    Fragmento* fragmento = tm->fragmento;
    Fragment* f;
    f = fragmento->getLoop(pc);

@@ -3980,10 +3870,10 @@ js_MonitorLoopEdge(JSContext* cx, uintN& inlineCallCount)
        f = fragmento->getAnchor(pc);

    /* If we have no code in the anchor and no peers, we definitively won't be able to
       activate any trees so, start compiling. */
       activate any trees so increment the hit counter and start compiling if appropriate. */
    if (!f->code() && !f->peer) {
monitor_loop:
        if (oracle.hit(pc) >= HOTLOOP) {
        if (++f->hits() >= HOTLOOP) {
            /* We can give RecordTree the root peer. If that peer is already taken, it will
               walk the peer list and find us a free slot or allocate a new tree if needed. */
            return js_RecordTree(cx, tm, f->first, NULL);

@@ -3995,7 +3885,7 @@ monitor_loop:
    debug_only_v(printf("Looking for compat peer %d@%d, from %p (ip: %p, hits=%d)\n",
                        js_FramePCToLineNumber(cx, cx->fp),
                        FramePCOffset(cx->fp),
                        f, f->ip, oracle.getHits(f->ip));)
                        f, f->ip, f->hits());)
    Fragment* match = js_FindVMCompatiblePeer(cx, f);
    /* If we didn't find a tree that actually matched, keep monitoring the loop. */
    if (!match)

@@ -4134,7 +4024,7 @@ js_BlacklistPC(Fragmento* frago, Fragment* frag)
{
    if (frag->kind == LoopTrace)
        frag = frago->getLoop(frag->ip);
    oracle.blacklist(frag->ip);
    frag->blacklist();
}

JS_REQUIRES_STACK void
@@ -4342,13 +4232,12 @@ js_FlushJITCache(JSContext* cx)
        tm->globalShape = OBJ_SHAPE(JS_GetGlobalForObject(cx, cx->fp->scopeChain));
        tm->globalSlots->clear();
    }
    oracle.clearHitCounts();
}

JS_FORCES_STACK JSStackFrame *
js_GetTopStackFrame(JSContext *cx)
{
    if (JS_ON_TRACE(cx)) {
    if (JS_EXECUTING_TRACE(cx)) {
        /*
         * TODO: If executing a tree, synthesize stack frames and bail off
         * trace. See bug 462027.

@@ -5756,7 +5645,7 @@ TraceRecorder::record_LeaveFrame()

    // LeaveFrame gets called after the interpreter popped the frame and
    // stored rval, so cx->fp not cx->fp->down, and -1 not 0.
    atoms = FrameAtomBase(cx, cx->fp);
    atoms = cx->fp->script->atomMap.vector;
    set(&stackval(-1), rval_ins, true);
    return true;
}

@@ -6816,7 +6705,7 @@ TraceRecorder::record_JSOP_CALLNAME()
        if (!activeCallOrGlobalSlot(obj, vp))
            return false;
        stack(0, get(vp));
        stack(1, globalObj_ins);
        stack(1, INS_CONSTPTR(globalObj));
        return true;
    }

@@ -7108,15 +6997,12 @@ JS_REQUIRES_STACK bool
TraceRecorder::prop(JSObject* obj, LIns* obj_ins, uint32& slot, LIns*& v_ins)
{
    /*
     * If the shape of the object matches the global object's shape, we
     * have to guard against aliasing to avoid aliasing stale homes of stacked
     * global variables.
     * Can't specialize to assert obj != global, must guard to avoid aliasing
     * stale homes of stacked global variables.
     */
    if (OBJ_SHAPE(obj) == OBJ_SHAPE(globalObj)) {
        if (obj == globalObj)
            ABORT_TRACE("prop op aliases global");
        guard(false, lir->ins2(LIR_eq, obj_ins, globalObj_ins), MISMATCH_EXIT);
    }
    if (obj == globalObj)
        ABORT_TRACE("prop op aliases global");
    guard(false, lir->ins2(LIR_eq, obj_ins, INS_CONSTPTR(globalObj)), MISMATCH_EXIT);

    /*
     * Property cache ensures that we are dealing with an existing property,
@@ -162,36 +162,22 @@ extern bool js_verboseDebug;
#endif

/*
 * The oracle keeps track of hit counts for program counter locations, as
 * well as slots that should not be demoted to int because we know them to
 * overflow or they result in type-unstable traces. We are using simple
 * hash tables. Collisions lead to loss of optimization (demotable slots
 * are not demoted, etc.) but have no correctness implications.
 * The oracle keeps track of slots that should not be demoted to int because we know them
 * to overflow or they result in type-unstable traces. We are using a simple hash table.
 * Collisions lead to loss of optimization (demotable slots are not demoted) but have no
 * correctness implications.
 */
#define ORACLE_SIZE 4096

class Oracle {
    uint32_t hits[ORACLE_SIZE];
    uint32_t blacklistLevels[ORACLE_SIZE];
    avmplus::BitSet _stackDontDemote;
    avmplus::BitSet _globalDontDemote;
public:
    Oracle();
    int32_t hit(const void* ip);
    int32_t getHits(const void* ip);
    void resetHits(const void* ip);
    void blacklist(const void* ip);

    JS_REQUIRES_STACK void markGlobalSlotUndemotable(JSContext* cx, unsigned slot);
    JS_REQUIRES_STACK bool isGlobalSlotUndemotable(JSContext* cx, unsigned slot) const;
    JS_REQUIRES_STACK void markStackSlotUndemotable(JSContext* cx, unsigned slot);
    JS_REQUIRES_STACK bool isStackSlotUndemotable(JSContext* cx, unsigned slot) const;
    void clearHitCounts();
    void clearDemotability();
    void clear() {
        clearDemotability();
        clearHitCounts();
    }
    void clear();
};

typedef Queue<uint16> SlotList;

@@ -256,7 +242,6 @@ struct InterpState
    VMSideExit* lastTreeCallGuard; /* guard we want to grow from if the tree
                                      call exit guard mismatched */
    void* rpAtLastTreeCall; /* value of rp at innermost tree call guard */
    JSObject* globalObj; /* pointer to the global object */
};

struct UnstableExit

@@ -341,7 +326,6 @@ class TraceRecorder : public avmplus::GCObject {
    nanojit::LIns* gp_ins;
    nanojit::LIns* eos_ins;
    nanojit::LIns* eor_ins;
    nanojit::LIns* globalObj_ins;
    nanojit::LIns* rval_ins;
    nanojit::LIns* inner_sp_ins;
    bool deepAborted;
@@ -163,7 +163,7 @@ AutoPushJSContext::AutoPushJSContext(nsISupports* aSecuritySupports,
    // See if there are any scripts on the stack.
    // If not, we need to add a dummy frame with a principal.
    JSStackFrame* tempFP = JS_GetScriptedCaller(cx, NULL);
    JS_ASSERT_NOT_ON_TRACE(cx);
    JS_ASSERT_NOT_EXECUTING_TRACE(cx);

    if (!tempFP)
    {
@@ -1030,7 +1030,7 @@ namespace nanojit
                default:
                    NanoAssertMsgf(false, "unsupported LIR instruction: %d (~0x40: %d)", op, op&~LIR64);
                    break;

                case LIR_live: {
                    countlir_live();
                    pending_lives.add(ins->oprnd1());

@@ -1329,9 +1329,7 @@ namespace nanojit
                    verbose_only( if (_verbose) { outputAddr=true; asm_output("[%s]", _thisfrag->lirbuf->names->formatRef(ins)); } )
                    break;
                }
                case LIR_xbarrier: {
                    break;
                }

                case LIR_xt:
                case LIR_xf:
                {
@@ -1882,7 +1882,6 @@ namespace nanojit
            case LIR_x:
            case LIR_xt:
            case LIR_xf:
            case LIR_xbarrier:
                formatGuard(i, s);
                break;
@@ -140,7 +140,7 @@ namespace nanojit
    };

    inline bool isGuard(LOpcode op) {
        return op == LIR_x || op == LIR_xf || op == LIR_xt || op == LIR_loop || op == LIR_xbarrier;
        return op==LIR_x || op==LIR_xf || op==LIR_xt || op==LIR_loop;
    }

    inline bool isCall(LOpcode op) {
@@ -176,7 +176,7 @@ OPDEF(uge, 63, 2) // 0x3F 0011 1111
OPDEF64(2, 0, 2) // wraps a pair of refs
OPDEF64(file, 1, 2)
OPDEF64(line, 2, 2)
OPDEF64(xbarrier, 3, 1) // memory barrier (dummy guard)
OPDEF64(unused3_64, 3, 2)

OPDEF64(unused4_64, 4, 2)
OPDEF64(unused5_64, 5, 2)
@@ -66,13 +66,6 @@ const Register Assembler::argRegs[] = { R0, R1, R2, R3 };
const Register Assembler::retRegs[] = { R0, R1 };
const Register Assembler::savedRegs[] = { R4, R5, R6, R7, R8, R9, R10 };

const char *ccName(ConditionCode cc)
{
    const char *ccNames[] = { "eq", "ne", "cs", "cc", "mi", "pl", "vs", "vc",
                              "hi", "ls", "ge", "lt", "gt", "le", "al", "nv" };
    return ccNames[(int)cc];
}

void
Assembler::nInit(AvmCore*)
{

@@ -1046,6 +1039,63 @@ Assembler::asm_fcmp(LInsp ins)
    Register ra = findRegFor(lhs, FpRegs);
    Register rb = findRegFor(rhs, FpRegs);

    // We can't uniquely identify fge/fle via a single bit
    // pattern (since equality and lt/gt are separate bits);
    // so convert to the single-bit variant.
    if (op == LIR_fge) {
        Register temp = ra;
        ra = rb;
        rb = temp;
        op = LIR_flt;
    } else if (op == LIR_fle) {
        Register temp = ra;
        ra = rb;
        rb = temp;
        op = LIR_fgt;
    }

    // There is no way to test for an unordered result using
    // the conditional form of an instruction; the encoding (C=1 V=1)
    // ends up having overlaps with a few other tests. So, test for
    // the explicit mask.
    uint8_t mask = 0x0;

    // NZCV
    // for a valid ordered result, V is always 0 from VFP
    if (op == LIR_feq)
        // ZC // cond EQ (both equal and "not less than"
        mask = 0x6;
    else if (op == LIR_flt)
        // N // cond MI
        mask = 0x8;
    else if (op == LIR_fgt)
        // C // cond CS
        mask = 0x2;
    else
        NanoAssert(0);
    /*
    // these were converted into gt and lt above.
    if (op == LIR_fle)
        // NZ // cond LE
        mask = 0xC;
    else if (op == LIR_fge)
        // ZC // cond fail?
        mask = 0x6;
    */

    // TODO XXX could do this as fcmpd; fmstat; tstvs rX, #0 the tstvs
    // would reset the status bits if V (NaN flag) is set, but that
    // doesn't work for NE. For NE could teqvs rX, #1. rX needs to
    // be any register that has lsb == 0, such as sp/fp/pc.

    // Test explicily with the full mask; if V is set, test will fail.
    // Assumption is that this will be followed up by a BEQ/BNE
    CMPi(Scratch, mask);
    // grab just the condition fields
    SHRi(Scratch, 28);
    MRS(Scratch);

    // do the comparison and get results loaded in ARM status register
    FMSTAT();
    FCMPD(ra, rb);
}
@@ -1070,28 +1120,10 @@ Assembler::asm_branch(bool branchOnFalse, LInsp cond, NIns* targ, bool isfar)

    if (condop >= LIR_feq && condop <= LIR_fge)
    {
        ConditionCode cc = NV;

        if (branchOnFalse) {
            switch (condop) {
                case LIR_feq: cc = NE; break;
                case LIR_flt: cc = PL; break;
                case LIR_fgt: cc = LE; break;
                case LIR_fle: cc = HI; break;
                case LIR_fge: cc = LT; break;
            }
        } else {
            switch (condop) {
                case LIR_feq: cc = EQ; break;
                case LIR_flt: cc = MI; break;
                case LIR_fgt: cc = GT; break;
                case LIR_fle: cc = LS; break;
                case LIR_fge: cc = GE; break;
            }
        }

        B_cond(cc, targ);
        asm_output("b(%d) 0x%08x", cc, (unsigned int) targ);
        if (branchOnFalse)
            JNE(targ);
        else
            JE(targ);

        NIns *at = _nIns;
        asm_fcmp(cond);

@@ -1208,14 +1240,7 @@ Assembler::asm_fcond(LInsp ins)
    // only want certain regs
    Register r = prepResultReg(ins, AllowableFlagRegs);

    switch (ins->opcode()) {
        case LIR_feq: SET(r,EQ,NE); break;
        case LIR_flt: SET(r,MI,PL); break;
        case LIR_fgt: SET(r,GT,LE); break;
        case LIR_fle: SET(r,LS,HI); break;
        case LIR_fge: SET(r,GE,LT); break;
    }

    SETE(r);
    asm_fcmp(ins);
}
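For readers of the asm_fcmp hunk above: after FCMPD/FMSTAT, MRS plus the SHRi by 28 leaves the four NZCV flags as a nibble in the scratch register, and CMPi then requires an exact match, so an unordered result (V set) never satisfies any case. A minimal sketch of what those exact-match masks mean; the function and its parameters are illustrative only, not part of the commit:

// Illustrative sketch of the NZCV masks tested by the code in the hunk.
#include <cstdint>

bool fcmpResultMatches(uint8_t nzcv, char op)   // op: '=', '<', '>' ; nzcv: N=8, Z=4, C=2, V=1
{
    switch (op) {
      case '=': return nzcv == 0x6;   // Z and C set: operands equal
      case '<': return nzcv == 0x8;   // N set: lhs less than rhs
      case '>': return nzcv == 0x2;   // C set, Z clear: lhs greater than rhs
      default:  return false;
    }
}
// LIR_fge / LIR_fle are handled by swapping the operands and reusing the
// flt / fgt masks, exactly as the code above does.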
@@ -156,7 +156,6 @@ typedef enum {
    NV = 0xF // NeVer
} ConditionCode;

const char *ccName(ConditionCode cc);

typedef int RegisterMask;
typedef struct _FragInfo {

@@ -693,26 +692,23 @@ typedef enum {

// MOV(EQ) _r, #1
// EOR(NE) _r, _r
#define SET(_r,_cond,_opp) do { \
#define SET(_r,_cond,_opp) \
    underrunProtect(8); \
    *(--_nIns) = (NIns)( (_opp<<28) | (1<<21) | ((_r)<<16) | ((_r)<<12) | (_r) ); \
    *(--_nIns) = (NIns)( (_cond<<28) | (0x3A<<20) | ((_r)<<12) | (1) ); \
    asm_output("mov%s %s, #1", ccName(_cond), gpn(r), gpn(r)); \
    asm_output("eor%s %s, %s", ccName(_opp), gpn(r), gpn(r)); \
    } while (0)
    *(--_nIns) = (NIns)( (_cond<<28) | (0x3A<<20) | ((_r)<<12) | (1) );


#define SETE(r) SET(r,EQ,NE)
#define SETL(r) SET(r,LT,GE)
#define SETLE(r) SET(r,LE,GT)
#define SETG(r) SET(r,GT,LE)
#define SETGE(r) SET(r,GE,LT)
#define SETB(r) SET(r,CC,CS)
#define SETBE(r) SET(r,LS,HI)
#define SETAE(r) SET(r,CS,CC)
#define SETA(r) SET(r,HI,LS)
#define SETO(r) SET(r,VS,LS)
#define SETC(r) SET(r,CS,LS)
#define SETE(r) do {SET(r,EQ,NE); asm_output("sete %s",gpn(r)); } while(0)
#define SETL(r) do {SET(r,LT,GE); asm_output("setl %s",gpn(r)); } while(0)
#define SETLE(r) do {SET(r,LE,GT); asm_output("setle %s",gpn(r)); } while(0)
#define SETG(r) do {SET(r,GT,LE); asm_output("setg %s",gpn(r)); } while(0)
#define SETGE(r) do {SET(r,GE,LT); asm_output("setge %s",gpn(r)); } while(0)
#define SETB(r) do {SET(r,CC,CS); asm_output("setb %s",gpn(r)); } while(0)
#define SETBE(r) do {SET(r,LS,HI); asm_output("setb %s",gpn(r)); } while(0)
#define SETAE(r) do {SET(r,CS,CC); asm_output("setae %s",gpn(r)); } while(0)
#define SETA(r) do {SET(r,HI,LS); asm_output("seta %s",gpn(r)); } while(0)
#define SETO(r) do {SET(r,VS,LS); asm_output("seto %s",gpn(r)); } while(0)
#define SETC(r) do {SET(r,CS,LS); asm_output("setc %s",gpn(r)); } while(0)

// This zero-extends a reg that has been set using one of the SET macros,
// but is a NOOP on ARM/Thumb
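For readers of the SET macro above, a worked expansion of one of the wrappers, assuming the condition flags were already set by a preceding compare; the register choice is just an example and not part of the commit:

// Illustrative expansion only: SET(_r,EQ,NE) stores two instruction words
// via *(--_nIns), and since nanojit assembles backwards the word written
// second ends up first in program order. For SETE(R0) the pair is:
//
//     moveq r0, #1          // _cond (EQ) holds: result = 1
//     eorne r0, r0, r0      // _opp  (NE) holds: result = 0
//
// Exactly one of the two predicated instructions executes, leaving a 0/1
// boolean in the result register without a branch.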
@@ -4025,58 +4025,6 @@ test(testLetWithUnstableGlobal);
delete b;
delete q;

for each (testBug474769_b in [1, 1, 1, 1.5, 1, 1]) {
    (function() { for each (let testBug474769_h in [0, 0, 1.4, ""]) {} })()
}
function testBug474769() {
    return testBug474769_b;
}
testBug474769.expected = 1;
test(testBug474769);

undeclaredGlobal = -1;
function testGlobalAliasCheck() {
    var q;
    for (var i = 0; i < 10; ++i) {
        undeclaredGlobal = i;
        q = this.undeclaredGlobal;
    }
    return q;
}
testGlobalAliasCheck.expected = 9;
test(testGlobalAliasCheck);
delete undeclaredGlobal;

function testInterpreterReentry() {
    this.__defineSetter__('x', function(){})
    for (var j = 0; j < 5; ++j) { x = 3; }
    return 1;
}
testInterpreterReentry.expected = 1;
test(testInterpreterReentry);

function testInterpreterReentry2() {
    var a = false;
    var b = {};
    var c = false;
    var d = {};
    this.__defineGetter__('e', function(){});
    for (let f in this) print(f);
    [1 for each (g in this) for each (h in [])]
    return 1;
}
testInterpreterReentry2.expected = 1;
test(testInterpreterReentry2);

function testInterpreterReentry3() {
    for (let i=0;i<5;++i) this["y" + i] = function(){};
    this.__defineGetter__('e', function (x2) { yield; });
    [1 for each (a in this) for (b in {})];
    return 1;
}
testInterpreterReentry3.expected = 1;
test(testInterpreterReentry3);

/*****************************************************************************
* *
* _____ _ _ _____ ______ _____ _______ *