Mirror of https://gitlab.winehq.org/wine-gecko.git (synced 2024-09-13 09:24:08 -07:00)
Bug 846111 - Part 3: Dispatch style ICs in Ion and ParallelGetPropertyIC. (r=nbp)
This commit is contained in:
parent 1ac435419e
commit fcb34b03b7
@@ -42,8 +42,8 @@ class OutOfLineUpdateCache :
{
private:
LInstruction *lir_;
RepatchLabel repatchEntry_;
size_t cacheIndex_;
AddCacheState state_;

public:
OutOfLineUpdateCache(LInstruction *lir, size_t cacheIndex)
@@ -52,7 +52,8 @@ class OutOfLineUpdateCache :
{ }

void bind(MacroAssembler *masm) {
masm->bind(&repatchEntry_);
// The binding of the initial jump is done in
// CodeGenerator::visitOutOfLineCache.
}

size_t getCacheIndex() const {
@@ -61,8 +62,8 @@ class OutOfLineUpdateCache :
LInstruction *lir() const {
return lir_;
}
RepatchLabel *repatchEntry() {
return &repatchEntry_;
AddCacheState &state() {
return state_;
}

bool accept(CodeGenerator *codegen) {
@@ -98,11 +99,12 @@ CodeGeneratorShared::addCache(LInstruction *lir, size_t cacheIndex)
if (!addOutOfLineCode(ool))
return false;

CodeOffsetJump jump = masm.jumpWithPatch(ool->repatchEntry());
CodeOffsetLabel label = masm.labelForPatch();
// OOL-specific state depends on the type of cache.
cache->initializeAddCacheState(lir, &ool->state());

cache->emitInitialJump(masm, ool->state());
masm.bind(ool->rejoin());

cache->setInlineJump(jump, label);
return true;
}

@@ -114,6 +116,7 @@ CodeGenerator::visitOutOfLineCache(OutOfLineUpdateCache *ool)

// Register the location of the OOL path in the IC.
cache->setFallbackLabel(masm.labelForPatch());
cache->bindInitialJump(masm, ool->state());

// Dispatch to ICs' accept functions.
return cache->accept(this, ool);
@@ -776,6 +779,7 @@ CodeGenerator::visitCallee(LCallee *lir)
Address ptr(StackPointer, frameSize() + IonJSFrameLayout::offsetOfCalleeToken());

masm.loadPtr(ptr, callee);
masm.clearCalleeTag(callee, gen->info().executionMode());
return true;
}

@@ -1293,7 +1297,10 @@ CodeGenerator::visitCallGeneric(LCallGeneric *call)
// Construct the IonFramePrefix.
uint32_t descriptor = MakeFrameDescriptor(masm.framePushed(), IonFrame_OptimizedJS);
masm.Push(Imm32(call->numActualArgs()));
masm.tagCallee(calleereg, executionMode);
masm.Push(calleereg);
// Clear the tag after pushing it, as we load nargs below.
masm.clearCalleeTag(calleereg, executionMode);
masm.Push(Imm32(descriptor));

// Check whether the provided arguments satisfy target argc.
@@ -1431,8 +1438,11 @@ CodeGenerator::visitCallKnown(LCallKnown *call)

// Construct the IonFramePrefix.
uint32_t descriptor = MakeFrameDescriptor(masm.framePushed(), IonFrame_OptimizedJS);
masm.tagCallee(calleereg, executionMode);
masm.Push(Imm32(call->numActualArgs()));
masm.Push(calleereg);
// Clear the tag after pushing it.
masm.clearCalleeTag(calleereg, executionMode);
masm.Push(Imm32(descriptor));

// Finally call the function in objreg.
@@ -4528,7 +4538,7 @@ CodeGenerator::link()
safepointIndices_.length(), osiIndices_.length(),
cacheList_.length(), runtimeData_.length(),
safepoints_.size(), graph.mir().numScripts(),
executionMode == ParallelExecution ? ForkJoinSlices(cx) : 0);
executionMode == ParallelExecution ? ForkJoinSlices(cx) : 0);

ionScript->setMethod(code);

@@ -4809,6 +4819,25 @@ CodeGenerator::visitNameIC(OutOfLineUpdateCache *ool, NameIC *ic)
return true;
}

bool
CodeGenerator::addGetPropertyCache(LInstruction *ins, RegisterSet liveRegs, Register objReg,
PropertyName *name, TypedOrValueRegister output,
bool allowGetters)
{
switch (gen->info().executionMode()) {
case SequentialExecution: {
GetPropertyIC cache(liveRegs, objReg, name, output, allowGetters);
return addCache(ins, allocateCache(cache));
}
case ParallelExecution: {
ParallelGetPropertyIC cache(objReg, name, output);
return addCache(ins, allocateCache(cache));
}
default:
JS_NOT_REACHED("Bad execution mode");
}
}

bool
CodeGenerator::visitGetPropertyCacheV(LGetPropertyCacheV *ins)
{
@@ -4818,8 +4847,7 @@ CodeGenerator::visitGetPropertyCacheV(LGetPropertyCacheV *ins)
bool allowGetters = ins->mir()->allowGetters();
TypedOrValueRegister output = TypedOrValueRegister(GetValueOutput(ins));

GetPropertyIC cache(liveRegs, objReg, name, output, allowGetters);
return addCache(ins, allocateCache(cache));
return addGetPropertyCache(ins, liveRegs, objReg, name, output, allowGetters);
}

bool
@@ -4831,8 +4859,7 @@ CodeGenerator::visitGetPropertyCacheT(LGetPropertyCacheT *ins)
bool allowGetters = ins->mir()->allowGetters();
TypedOrValueRegister output(ins->mir()->type(), ToAnyRegister(ins->getDef(0)));

GetPropertyIC cache(liveRegs, objReg, name, output, allowGetters);
return addCache(ins, allocateCache(cache));
return addGetPropertyCache(ins, liveRegs, objReg, name, output, allowGetters);
}

typedef bool (*GetPropertyICFn)(JSContext *, size_t, HandleObject, MutableHandleValue);
@@ -4856,6 +4883,28 @@ CodeGenerator::visitGetPropertyIC(OutOfLineUpdateCache *ool, GetPropertyIC *ic)
return true;
}

typedef ParallelResult (*ParallelGetPropertyICFn)(ForkJoinSlice *, size_t, HandleObject,
MutableHandleValue);
const VMFunction ParallelGetPropertyIC::UpdateInfo =
FunctionInfo<ParallelGetPropertyICFn>(ParallelGetPropertyIC::update);

bool
CodeGenerator::visitParallelGetPropertyIC(OutOfLineUpdateCache *ool, ParallelGetPropertyIC *ic)
{
LInstruction *lir = ool->lir();
saveLive(lir);

pushArg(ic->object());
pushArg(Imm32(ool->getCacheIndex()));
if (!callVM(ParallelGetPropertyIC::UpdateInfo, lir))
return false;
StoreValueTo(ic->output()).generate(this);
restoreLiveIgnore(lir, StoreValueTo(ic->output()).clobbered());

masm.jump(ool->rejoin());
return true;
}

bool
CodeGenerator::visitGetElementCacheV(LGetElementCacheV *ins)
{
@@ -5994,4 +6043,3 @@ CodeGenerator::visitAsmJSCheckOverRecursed(LAsmJSCheckOverRecursed *lir)

} // namespace ion
} // namespace js

@@ -251,6 +251,7 @@ class CodeGenerator : public CodeGeneratorSpecific
bool visitCallsiteCloneCache(LCallsiteCloneCache *ins);

bool visitGetPropertyIC(OutOfLineUpdateCache *ool, GetPropertyIC *ic);
bool visitParallelGetPropertyIC(OutOfLineUpdateCache *ool, ParallelGetPropertyIC *ic);
bool visitSetPropertyIC(OutOfLineUpdateCache *ool, SetPropertyIC *ic);
bool visitGetElementIC(OutOfLineUpdateCache *ool, GetElementIC *ic);
bool visitBindNameIC(OutOfLineUpdateCache *ool, BindNameIC *ic);
@@ -258,6 +259,10 @@ class CodeGenerator : public CodeGeneratorSpecific
bool visitCallsiteCloneIC(OutOfLineUpdateCache *ool, CallsiteCloneIC *ic);

private:
bool addGetPropertyCache(LInstruction *ins, RegisterSet liveRegs, Register objReg,
PropertyName *name, TypedOrValueRegister output,
bool allowGetters);

bool checkForParallelBailout();
bool generateBranchV(const ValueOperand &value, Label *ifTrue, Label *ifFalse, FloatRegister fr);

@@ -558,6 +558,7 @@ IonScript::IonScript()
scriptList_(0),
scriptEntries_(0),
parallelInvalidatedScriptList_(0),
parallelInvalidatedScriptEntries_(0),
refcount_(0),
recompileInfo_(),
slowCallCount(0)
@@ -651,7 +652,7 @@ IonScript::New(JSContext *cx, uint32_t frameSlots, uint32_t frameSize, size_t sn

script->parallelInvalidatedScriptList_ = offsetCursor;
script->parallelInvalidatedScriptEntries_ = parallelInvalidatedScriptEntries;
offsetCursor += parallelInvalidatedScriptEntries;
offsetCursor += paddedParallelInvalidatedScriptSize;

script->frameSlots_ = frameSlots;
script->frameSize_ = frameSize;
@@ -839,6 +840,7 @@ IonScript::Trace(JSTracer *trc, IonScript *script)
void
IonScript::Destroy(FreeOp *fop, IonScript *script)
{
script->destroyCaches();
fop->free_(script);
}

@@ -865,6 +867,13 @@ IonScript::purgeCaches(Zone *zone)
getCache(i).reset();
}

void
IonScript::destroyCaches()
{
for (size_t i = 0; i < numCaches(); i++)
getCache(i).destroy();
}

void
ion::ToggleBarriers(JS::Zone *zone, bool needs)
{

@@ -246,44 +246,130 @@ class IonCache::StubAttacher

const ImmWord IonCache::StubAttacher::STUB_ADDR = ImmWord(uintptr_t(0xdeadc0de));

// Repatch-style stubs are daisy chained in such a fashion that when
// generating a new stub, the previous stub's nextStub jump is patched to the
// entry of our new stub.
class RepatchStubAppender : public IonCache::StubAttacher
class RepatchIonCache::RepatchStubAppender : public IonCache::StubAttacher
{
CodeLocationLabel nextStubLabel_;
CodeLocationJump *lastJump_;
RepatchIonCache &cache_;

public:
RepatchStubAppender(CodeLocationLabel rejoinLabel, CodeLocationLabel nextStubLabel,
CodeLocationJump *lastJump)
: StubAttacher(rejoinLabel),
nextStubLabel_(nextStubLabel),
lastJump_(lastJump)
RepatchStubAppender(RepatchIonCache &cache)
: StubAttacher(cache.rejoinLabel()),
cache_(cache)
{
JS_ASSERT(lastJump);
}

void patchNextStubJump(MacroAssembler &masm, IonCode *code) {
// Patch the previous nextStubJump of the last stub, or the jump from the
// codeGen, to jump into the newly allocated code.
PatchJump(*lastJump_, CodeLocationLabel(code));
PatchJump(cache_.lastJump_, CodeLocationLabel(code));

// If this path is not taken, we are producing an entry which can no
// longer go back into the update function.
if (hasNextStubOffset_) {
nextStubOffset_.fixup(&masm);
CodeLocationJump nextStubJump(code, nextStubOffset_);
PatchJump(nextStubJump, nextStubLabel_);
PatchJump(nextStubJump, cache_.fallbackLabel_);

// When the last stub fails, it falls back to the ool call, which can
// produce a stub. Next time we generate a stub, we will patch the
// nextStub jump to try the new stub.
*lastJump_ = nextStubJump;
cache_.lastJump_ = nextStubJump;
}
}
};

void
RepatchIonCache::reset()
{
IonCache::reset();
PatchJump(initialJump_, fallbackLabel_);
lastJump_ = initialJump_;
}

void
RepatchIonCache::emitInitialJump(MacroAssembler &masm, AddCacheState &addState)
{
initialJump_ = masm.jumpWithPatch(&addState.repatchEntry);
lastJump_ = initialJump_;
}

void
RepatchIonCache::bindInitialJump(MacroAssembler &masm, AddCacheState &addState)
{
masm.bind(&addState.repatchEntry);
}

void
RepatchIonCache::updateBaseAddress(IonCode *code, MacroAssembler &masm)
{
IonCache::updateBaseAddress(code, masm);
initialJump_.repoint(code, &masm);
lastJump_.repoint(code, &masm);
}

class DispatchIonCache::DispatchStubPrepender : public IonCache::StubAttacher
{
DispatchIonCache &cache_;

public:
DispatchStubPrepender(DispatchIonCache &cache)
: StubAttacher(cache.rejoinLabel_),
cache_(cache)
{
}

void patchNextStubJump(MacroAssembler &masm, IonCode *code) {
JS_ASSERT(hasNextStubOffset_);

// Jump to the previous entry in the stub dispatch table. We
// have not yet executed the code we're patching the jump in.
nextStubOffset_.fixup(&masm);
CodeLocationJump nextStubJump(code, nextStubOffset_);
PatchJump(nextStubJump, CodeLocationLabel(cache_.firstStub_));

// Update the dispatch table. Modification to jumps after the dispatch
// table is updated is disallowed, lest we race on entry into an
// unfinalized stub.
cache_.firstStub_ = code->raw();
}
};

void
DispatchIonCache::reset()
{
IonCache::reset();
firstStub_ = fallbackLabel_.raw();
}

void
DispatchIonCache::emitInitialJump(MacroAssembler &masm, AddCacheState &addState)
{
Register scratch = addState.dispatchScratch;
dispatchLabel_ = masm.movWithPatch(ImmWord(uintptr_t(-1)), scratch);
masm.loadPtr(Address(scratch, 0), scratch);
masm.jump(scratch);
rejoinLabel_ = masm.labelForPatch();
}

void
DispatchIonCache::bindInitialJump(MacroAssembler &masm, AddCacheState &addState)
{
// Do nothing.
}

void
DispatchIonCache::updateBaseAddress(IonCode *code, MacroAssembler &masm)
{
// The address of firstStub_ should be pointer aligned.
JS_ASSERT(uintptr_t(&firstStub_) % sizeof(uintptr_t) == 0);

IonCache::updateBaseAddress(code, masm);
dispatchLabel_.fixup(&masm);
Assembler::patchDataWithValueCheck(CodeLocationLabel(code, dispatchLabel_),
ImmWord(uintptr_t(&firstStub_)),
ImmWord(uintptr_t(-1)));
firstStub_ = fallbackLabel_.raw();
rejoinLabel_.repoint(code, &masm);
}
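
emitInitialJump above emits a placeholder constant (ImmWord(uintptr_t(-1))) and records its offset; updateBaseAddress then rewrites that constant with the real address of firstStub_ once the code buffer is finalized. A minimal standalone sketch of this emit-then-patch idiom, with a byte buffer standing in for the instruction stream (illustrative only, not SpiderMonkey API):

#include <cstdint>
#include <cstring>
#include <cstdio>

static uint8_t *firstStub = nullptr;  // stand-in for DispatchIonCache::firstStub_

int main() {
    uint8_t code[32] = {0};
    size_t dispatchOffset = 4;              // the recorded "CodeOffsetLabel"
    uintptr_t placeholder = uintptr_t(-1);  // sentinel, as in ImmWord(uintptr_t(-1))

    // Emit: write the placeholder into the "instruction stream".
    std::memcpy(code + dispatchOffset, &placeholder, sizeof placeholder);

    // Finalize: patch the placeholder with the real address, checking the
    // old value first, as Assembler::patchDataWithValueCheck does.
    uintptr_t old;
    std::memcpy(&old, code + dispatchOffset, sizeof old);
    if (old == placeholder) {
        uintptr_t real = uintptr_t(&firstStub);
        std::memcpy(code + dispatchOffset, &real, sizeof real);
    }
    printf("patched: %s\n", old == placeholder ? "yes" : "no");
    return 0;
}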

void
IonCache::attachStub(MacroAssembler &masm, StubAttacher &attacher, IonCode *code)
{
@@ -318,6 +404,17 @@ IonCache::linkAndAttachStub(JSContext *cx, MacroAssembler &masm, StubAttacher &a
return true;
}

void
IonCache::updateBaseAddress(IonCode *code, MacroAssembler &masm)
{
fallbackLabel_.repoint(code, &masm);
}

void
IonCache::initializeAddCacheState(LInstruction *ins, AddCacheState *addState)
{
}

static bool
IsCacheableListBase(JSObject *obj)
{
@@ -857,11 +954,9 @@ bool
GetPropertyIC::attachReadSlot(JSContext *cx, IonScript *ion, JSObject *obj, JSObject *holder,
HandleShape shape)
{
RepatchStubAppender attacher(*this);
MacroAssembler masm(cx);

RepatchStubAppender attacher(rejoinLabel(), fallbackLabel_, &lastJump_);
GenerateReadSlot(cx, masm, attacher, obj, name(), holder, shape, object(), output());

const char *attachKind = "non idempotent reading";
if (idempotent())
attachKind = "idempotent reading";
@@ -882,7 +977,7 @@ GetPropertyIC::attachCallGetter(JSContext *cx, IonScript *ion, JSObject *obj,
// properly constructed.
masm.setFramePushed(ion->frameSize());

RepatchStubAppender attacher(rejoinLabel(), fallbackLabel_, &lastJump_);
RepatchStubAppender attacher(*this);
if (!GenerateCallGetter(cx, masm, attacher, obj, name(), holder, shape, liveRegs_,
object(), output(), returnAddr, pc))
{
@@ -903,7 +998,7 @@ GetPropertyIC::attachArrayLength(JSContext *cx, IonScript *ion, JSObject *obj)

Label failures;
MacroAssembler masm(cx);
RepatchStubAppender attacher(rejoinLabel(), fallbackLabel_, &lastJump_);
RepatchStubAppender attacher(*this);

// Guard object is a dense array.
RootedObject globalObj(cx, &script->global());
@@ -951,7 +1046,7 @@ GetPropertyIC::attachTypedArrayLength(JSContext *cx, IonScript *ion, JSObject *o

Label failures;
MacroAssembler masm(cx);
RepatchStubAppender attacher(rejoinLabel(), fallbackLabel_, &lastJump_);
RepatchStubAppender attacher(*this);

Register tmpReg;
if (output().hasValue()) {
@@ -982,6 +1077,68 @@ GetPropertyIC::attachTypedArrayLength(JSContext *cx, IonScript *ion, JSObject *o
return linkAndAttachStub(cx, masm, attacher, ion, "typed array length");
}

static bool
IsIdempotentAndMaybeHasHooks(IonCache &cache, JSObject *obj)
{
// If the cache is idempotent, watch out for resolve hooks or non-native
// objects on the proto chain. We check this before calling lookupProperty,
// to make sure no effectful lookup hooks or resolve hooks are called.
return cache.idempotent() && !obj->hasIdempotentProtoChain();
}

static bool
DetermineGetPropKind(JSContext *cx, IonCache &cache,
JSObject *checkObj, JSObject *holder, HandleShape shape,
TypedOrValueRegister output, bool allowGetters,
bool *readSlot, bool *callGetter)
{
// Check what kind of cache stub we can emit: either a slot read,
// or a getter call.
*readSlot = false;
*callGetter = false;

RootedScript script(cx);
jsbytecode *pc;
cache.getScriptedLocation(&script, &pc);

if (IsCacheableGetPropReadSlot(checkObj, holder, shape) ||
IsCacheableNoProperty(checkObj, holder, shape, pc, output))
{
// With Proxies, we cannot guarantee any property access as the proxy can
// mask any property from the prototype chain.
JS_ASSERT(!checkObj->isProxy());
*readSlot = true;
} else if (IsCacheableGetPropCallNative(checkObj, holder, shape) ||
IsCacheableGetPropCallPropertyOp(checkObj, holder, shape))
{
// Don't enable getter call if cache is idempotent, since
// they can be effectful.
if (!cache.idempotent() && allowGetters)
*callGetter = true;
}

// readSlot and callGetter are mutually exclusive
JS_ASSERT_IF(*readSlot, !*callGetter);
JS_ASSERT_IF(*callGetter, !*readSlot);

// Return true only if one strategy is viable.
return *readSlot || *callGetter;
}

static bool
IsIdempotentAndHasSingletonHolder(IonCache &cache, HandleObject holder, HandleShape shape)
{
// TI infers the possible types of native object properties. There's one
// edge case though: for singleton objects it does not add the initial
// "undefined" type, see the propertySet comment in jsinfer.h. We can't
// monitor the return type inside an idempotent cache though, so we don't
// handle this case.
return (cache.idempotent() &&
holder &&
holder->hasSingletonType() &&
holder->getSlot(shape->slot()).isUndefined());
}

static bool
TryAttachNativeGetPropStub(JSContext *cx, IonScript *ion,
GetPropertyIC &cache, HandleObject obj,
@@ -1009,10 +1166,7 @@ TryAttachNativeGetPropStub(JSContext *cx, IonScript *ion,
if (!checkObj || !checkObj->isNative())
return true;

// If the cache is idempotent, watch out for resolve hooks or non-native
// objects on the proto chain. We check this before calling lookupProperty,
// to make sure no effectful lookup hooks or resolve hooks are called.
if (cache.idempotent() && !checkObj->hasIdempotentProtoChain())
if (IsIdempotentAndMaybeHasHooks(cache, checkObj))
return true;

RootedShape shape(cx);
@@ -1020,54 +1174,19 @@ TryAttachNativeGetPropStub(JSContext *cx, IonScript *ion,
if (!JSObject::lookupProperty(cx, checkObj, name, &holder, &shape))
return false;

// Check what kind of cache stub we can emit: either a slot read,
// or a getter call.
bool readSlot = false;
bool callGetter = false;

RootedScript script(cx);
jsbytecode *pc;
cache.getScriptedLocation(&script, &pc);

if (IsCacheableGetPropReadSlot(checkObj, holder, shape) ||
IsCacheableNoProperty(checkObj, holder, shape, pc, cache.output()))
{
// With Proxies, we cannot guarantee any property access as the proxy can
// mask any property from the prototype chain.
JS_ASSERT(!checkObj->isProxy());
readSlot = true;
} else if (IsCacheableGetPropCallNative(checkObj, holder, shape) ||
IsCacheableGetPropCallPropertyOp(checkObj, holder, shape))
{
// Don't enable getter call if cache is idempotent, since
// they can be effectful.
if (!cache.idempotent() && cache.allowGetters())
callGetter = true;
}

// Only continue if one of the cache methods is viable.
if (!readSlot && !callGetter)
return true;

// TI infers the possible types of native object properties. There's one
// edge case though: for singleton objects it does not add the initial
// "undefined" type, see the propertySet comment in jsinfer.h. We can't
// monitor the return type inside an idempotent cache though, so we don't
// handle this case.
if (cache.idempotent() &&
holder &&
holder->hasSingletonType() &&
holder->getSlot(shape->slot()).isUndefined())
bool readSlot;
bool callGetter;
if (!DetermineGetPropKind(cx, cache, checkObj, holder, shape, cache.output(),
cache.allowGetters(), &readSlot, &callGetter))
{
return true;
}

if (IsIdempotentAndHasSingletonHolder(cache, holder, shape))
return true;

*isCacheable = true;

// readSlot and callGetter are mutually exclusive
JS_ASSERT_IF(readSlot, !callGetter);
JS_ASSERT_IF(callGetter, !readSlot);

// Fallback to the interpreter function.
if (!cache.canAttachStub())
return true;
@@ -1178,17 +1297,170 @@ GetPropertyIC::update(JSContext *cx, size_t cacheIndex,
void
GetPropertyIC::reset()
{
IonCache::reset();
RepatchIonCache::reset();
hasArrayLengthStub_ = false;
hasTypedArrayLengthStub_ = false;
}

void
IonCache::updateBaseAddress(IonCode *code, MacroAssembler &masm)
ParallelGetPropertyIC::reset()
{
initialJump_.repoint(code, &masm);
lastJump_.repoint(code, &masm);
fallbackLabel_.repoint(code, &masm);
DispatchIonCache::reset();
if (stubbedObjects_)
stubbedObjects_->clear();
}

void
ParallelGetPropertyIC::destroy()
{
if (stubbedObjects_)
js_delete(stubbedObjects_);
}

bool
ParallelGetPropertyIC::initStubbedObjects(JSContext *cx)
{
JS_ASSERT(isAllocated());
if (!stubbedObjects_) {
stubbedObjects_ = cx->new_<ObjectSet>(cx);
return stubbedObjects_ && stubbedObjects_->init();
}
return true;
}

bool
ParallelGetPropertyIC::canAttachReadSlot(LockedJSContext &cx, JSObject *obj,
MutableHandleObject holder, MutableHandleShape shape)
{
// Parallel execution should only cache native objects.
if (!obj->isNative())
return false;

if (IsIdempotentAndMaybeHasHooks(*this, obj))
return false;

// Bail if we have hooks.
if (obj->getOps()->lookupProperty || obj->getOps()->lookupGeneric)
return false;

if (!js::LookupPropertyPure(obj, NameToId(name()), holder.address(), shape.address()))
return false;

// In parallel execution we can't cache getters due to possible
// side-effects, so only check if we can cache slot reads.
bool readSlot;
bool callGetter;
if (!DetermineGetPropKind(cx, *this, obj, holder, shape, output(), false,
&readSlot, &callGetter) || !readSlot)
{
return false;
}

if (IsIdempotentAndHasSingletonHolder(*this, holder, shape))
return false;

return true;
}

bool
ParallelGetPropertyIC::attachReadSlot(LockedJSContext &cx, IonScript *ion,
JSObject *obj, bool *attachedStub)
{
*attachedStub = false;

RootedShape shape(cx);
RootedObject holder(cx);
if (!canAttachReadSlot(cx, obj, &holder, &shape))
return true;

// Ready to generate the read slot stub.
DispatchStubPrepender attacher(*this);
MacroAssembler masm(cx);
GenerateReadSlot(cx, masm, attacher, obj, name(), holder, shape, object(), output());

const char *attachKind = "parallel non-idempotent reading";
if (idempotent())
attachKind = "parallel idempotent reading";

if (!linkAndAttachStub(cx, masm, attacher, ion, attachKind))
return false;

*attachedStub = true;
return true;
}

ParallelResult
ParallelGetPropertyIC::update(ForkJoinSlice *slice, size_t cacheIndex,
HandleObject obj, MutableHandleValue vp)
{
AutoFlushCache afc("ParallelGetPropertyCache");
PerThreadData *pt = slice->perThreadData;

const SafepointIndex *safepointIndex;
void *returnAddr;
RootedScript topScript(pt, GetTopIonJSScript(pt, &safepointIndex, &returnAddr));
IonScript *ion = topScript->parallelIonScript();

ParallelGetPropertyIC &cache = ion->getCache(cacheIndex).toParallelGetProperty();

RootedScript script(pt);
jsbytecode *pc;
cache.getScriptedLocation(&script, &pc);

// Grab the property early, as the pure path is fast anyway and doesn't
// need a lock. If we can't do it purely, bail out of parallel execution.
if (!GetPropertyPure(obj, NameToId(cache.name()), vp.address()))
return TP_RETRY_SEQUENTIALLY;

// Avoid unnecessary locking if we cannot attach stubs and the cache is idempotent.
if (cache.idempotent() && !cache.canAttachStub())
return TP_SUCCESS;

{
// Lock the context before mutating the cache. Ideally we'd like to do
// finer-grained locking, with one lock per cache. However, generating
// new jitcode uses a global ExecutableAllocator tied to the runtime.
LockedJSContext cx(slice);

if (cache.canAttachStub()) {
// Check if we have already stubbed the current object to avoid
// attaching a duplicate stub.
if (!cache.initStubbedObjects(cx))
return TP_FATAL;
ObjectSet::AddPtr p = cache.stubbedObjects()->lookupForAdd(obj);
if (p)
return TP_SUCCESS;
if (!cache.stubbedObjects()->add(p, obj))
return TP_FATAL;

// See note about the stub limit in GetPropertyCache.
bool attachedStub;
if (!cache.attachReadSlot(cx, ion, obj, &attachedStub))
return TP_FATAL;

if (!attachedStub) {
if (cache.idempotent())
topScript->invalidatedIdempotentCache = true;

// ParallelDo will take care of invalidating all bailed out
// scripts, so just bail out now.
return TP_RETRY_SEQUENTIALLY;
}
}

if (!cache.idempotent()) {
#if JS_HAS_NO_SUCH_METHOD
// Straight up bail if there's this __noSuchMethod__ hook.
if (JSOp(*pc) == JSOP_CALLPROP && JS_UNLIKELY(vp.isPrimitive()))
return TP_RETRY_SEQUENTIALLY;
#endif

// Monitor changes to cache entry.
types::TypeScript::Monitor(cx, script, pc, vp);
}
}

return TP_SUCCESS;
}
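
The shape of this update path — a lock-free pure read first, then a coarse runtime-wide lock taken only to mutate shared cache state, with a set used to avoid attaching duplicate stubs — can be sketched in standalone C++ (the names below are illustrative, not SpiderMonkey API):

#include <mutex>
#include <unordered_set>
#include <cstdio>

static std::mutex runtimeLock;                 // one lock per "runtime"
static std::unordered_set<int> stubbedObjects; // objects already stubbed

static bool updateCache(int objId, int *valueOut) {
    *valueOut = objId * 2;   // pure, lock-free fast read (cf. GetPropertyPure)
    std::lock_guard<std::mutex> guard(runtimeLock);
    if (stubbedObjects.count(objId))
        return true;         // duplicate stub avoided
    stubbedObjects.insert(objId);
    // ... generate and attach a stub here, under the lock ...
    return true;
}

int main() {
    int v = 0;
    updateCache(7, &v);
    updateCache(7, &v);      // second call hits the dedup set
    printf("%d\n", v);
    return 0;
}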

void
@@ -1201,12 +1473,12 @@ IonCache::disable()
void
IonCache::reset()
{
// Skip all generated stubs by patching the original stub to go directly to
// the update function.
PatchJump(initialJump_, fallbackLabel_);

this->stubCount_ = 0;
this->lastJump_ = initialJump_;
}

void
IonCache::destroy()
{
}

bool
@@ -1214,7 +1486,7 @@ SetPropertyIC::attachNativeExisting(JSContext *cx, IonScript *ion,
HandleObject obj, HandleShape shape)
{
MacroAssembler masm(cx);
RepatchStubAppender attacher(rejoinLabel(), fallbackLabel_, &lastJump_);
RepatchStubAppender attacher(*this);

attacher.branchNextStub(masm, Assembler::NotEqual,
Address(object(), JSObject::offsetOfShape()),
@@ -1250,7 +1522,7 @@ SetPropertyIC::attachSetterCall(JSContext *cx, IonScript *ion,
void *returnAddr)
{
MacroAssembler masm(cx);
RepatchStubAppender attacher(rejoinLabel(), fallbackLabel_, &lastJump_);
RepatchStubAppender attacher(*this);

// Need to set correct framePushed on the masm so that exit frame descriptors are
// properly constructed.
@@ -1400,7 +1672,7 @@ SetPropertyIC::attachNativeAdding(JSContext *cx, IonScript *ion, JSObject *obj,
HandleShape propShape)
{
MacroAssembler masm(cx);
RepatchStubAppender attacher(rejoinLabel(), fallbackLabel_, &lastJump_);
RepatchStubAppender attacher(*this);

Label failures;

@@ -1662,7 +1934,7 @@ GetElementIC::attachGetProp(JSContext *cx, IonScript *ion, HandleObject obj,
ValueOperand val = index().reg().valueReg();
masm.branchTestValue(Assembler::NotEqual, val, idval, &failures);

RepatchStubAppender attacher(rejoinLabel(), fallbackLabel_, &lastJump_);
RepatchStubAppender attacher(*this);
GenerateReadSlot(cx, masm, attacher, obj, name, holder, shape, object(), output(),
&failures);

@@ -1677,7 +1949,7 @@ GetElementIC::attachDenseElement(JSContext *cx, IonScript *ion, JSObject *obj, c

Label failures;
MacroAssembler masm(cx);
RepatchStubAppender attacher(rejoinLabel(), fallbackLabel_, &lastJump_);
RepatchStubAppender attacher(*this);

// Guard object's shape.
RootedObject globalObj(cx, &script->global());
@@ -1739,7 +2011,7 @@ GetElementIC::attachTypedArrayElement(JSContext *cx, IonScript *ion, JSObject *o

Label failures;
MacroAssembler masm(cx);
RepatchStubAppender attacher(rejoinLabel(), fallbackLabel_, &lastJump_);
RepatchStubAppender attacher(*this);

// The array type is the object within the table of typed array classes.
int arrayType = TypedArray::type(obj);
@@ -1921,7 +2193,7 @@ GetElementIC::update(JSContext *cx, size_t cacheIndex, HandleObject obj,
void
GetElementIC::reset()
{
IonCache::reset();
RepatchIonCache::reset();
hasDenseStub_ = false;
}

@@ -1931,7 +2203,7 @@ BindNameIC::attachGlobal(JSContext *cx, IonScript *ion, JSObject *scopeChain)
JS_ASSERT(scopeChain->isGlobal());

MacroAssembler masm(cx);
RepatchStubAppender attacher(rejoinLabel(), fallbackLabel_, &lastJump_);
RepatchStubAppender attacher(*this);

// Guard on the scope chain.
attacher.branchNextStub(masm, Assembler::NotEqual, scopeChainReg(),
@@ -1997,7 +2269,7 @@ BindNameIC::attachNonGlobal(JSContext *cx, IonScript *ion, JSObject *scopeChain,
JS_ASSERT(IsCacheableNonGlobalScope(scopeChain));

MacroAssembler masm(cx);
RepatchStubAppender attacher(rejoinLabel(), fallbackLabel_, &lastJump_);
RepatchStubAppender attacher(*this);

// Guard on the shape of the scope chain.
Label failures;
@@ -2091,7 +2363,7 @@ NameIC::attachReadSlot(JSContext *cx, IonScript *ion, HandleObject scopeChain, H
{
MacroAssembler masm(cx);
Label failures;
RepatchStubAppender attacher(rejoinLabel(), fallbackLabel_, &lastJump_);
RepatchStubAppender attacher(*this);

Register scratchReg = outputReg().valueReg().scratchReg();

@@ -2169,7 +2441,7 @@ NameIC::attachCallGetter(JSContext *cx, IonScript *ion, JSObject *obj, JSObject
// properly constructed.
masm.setFramePushed(ion->frameSize());

RepatchStubAppender attacher(rejoinLabel(), fallbackLabel_, &lastJump_);
RepatchStubAppender attacher(*this);
if (!GenerateCallGetter(cx, masm, attacher, obj, name(), holder, shape, liveRegs_,
scopeChainReg(), outputReg(), returnAddr, pc))
{
@@ -2245,7 +2517,7 @@ CallsiteCloneIC::attach(JSContext *cx, IonScript *ion, HandleFunction original,
HandleFunction clone)
{
MacroAssembler masm(cx);
RepatchStubAppender attacher(rejoinLabel(), fallbackLabel_, &lastJump_);
RepatchStubAppender attacher(*this);

// Guard against object identity on the original.
attacher.branchNextStub(masm, Assembler::NotEqual, calleeReg(),

@@ -12,6 +12,8 @@
#include "TypeOracle.h"
#include "Registers.h"

#include "vm/ForkJoin.h"

class JSFunction;
class JSScript;

@@ -24,7 +26,8 @@ namespace ion {
_(GetElement) \
_(BindName) \
_(Name) \
_(CallsiteClone)
_(CallsiteClone) \
_(ParallelGetProperty)

// Forward declarations of Cache kinds.
#define FORWARD_DECLARE(kind) class kind##IC;
@@ -44,18 +47,27 @@ class IonCacheVisitor
#undef VISIT_INS
};

// Common shared temporary state needed during codegen between the different
// kinds of caches. Used by OutOfLineUpdateCache.
struct AddCacheState
{
RepatchLabel repatchEntry;
Register dispatchScratch;
};

// Common structure encoding the state of a polymorphic inline cache contained
// in the code for an IonScript. IonCaches are used for polymorphic operations
// where multiple implementations may be required.
//
// The cache is initially compiled as a patchable jump to an out of line
// fragment which invokes a cache function to perform the operation. The cache
// function may generate a stub to perform the operation in certain cases
// (e.g. a particular shape for an input object), patch the cache's jump to
// that stub and patch any failure conditions in the stub to jump back to the
// cache fragment. When those failure conditions are hit, the cache function
// may attach new stubs, forming a daisy chain of tests for how to perform the
// operation in different circumstances.
// Roughly speaking, the cache initially jumps to an out of line fragment
// which invokes a cache function to perform the operation. The cache function
// may generate a stub to perform the operation in certain cases (e.g. a
// particular shape for an input object) and attach the stub to existing
// stubs, forming a daisy chain of tests for how to perform the operation in
// different circumstances. The details of how stubs are linked up are
// described in comments below for the classes RepatchIonCache and
// DispatchIonCache.
//
// Eventually, if too many stubs are generated the cache function may disable
// the cache, by generating a stub to make a call and perform the operation
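
To make the daisy-chain idea in that comment concrete, here is a miniature, self-contained C++ model of an inline cache: a call site whose entry point starts at the generic cache function and is specialized after the first lookup. Everything here (Shape, Object, updateCache, cachedGetProp) is an illustrative stand-in, not SpiderMonkey API:

#include <cstdio>

struct Shape { int slotOffset; };
struct Object { const Shape *shape; int slots[4]; };

typedef int (*GetPropStub)(const Object &obj);

static const Shape *cachedShape = nullptr;
static GetPropStub entry;   // models the cache's patchable entry point

static int cachedGetProp(const Object &obj);

// The out-of-line "cache function": does the slow, generic lookup and, on
// the side, specializes the entry point for the shape it just saw.
static int updateCache(const Object &obj) {
    int v = obj.slots[obj.shape->slotOffset]; // slow path
    cachedShape = obj.shape;                  // "attach a stub"
    entry = cachedGetProp;
    return v;
}

// The attached stub: guard on the shape, else fall through to the update
// function (the analogue of a stub's failure jump).
static int cachedGetProp(const Object &obj) {
    if (obj.shape == cachedShape)
        return obj.slots[obj.shape->slotOffset]; // fast path
    return updateCache(obj);
}

int main() {
    Shape s = {2};
    Object o = {&s, {0, 0, 42, 0}};
    entry = updateCache;       // initial state: jump to the cache function
    printf("%d\n", entry(o));  // slow path; attaches the stub
    printf("%d\n", entry(o));  // fast path through the stub
    return 0;
}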

@@ -145,34 +157,12 @@ class IonCache
bool disabled_ : 1;
size_t stubCount_ : 5;

CodeLocationJump initialJump_;
CodeLocationJump lastJump_;
CodeLocationLabel fallbackLabel_;

// Offset from the initial jump to the rejoin label.
#ifdef JS_CPU_ARM
static const size_t REJOIN_LABEL_OFFSET = 4;
#else
static const size_t REJOIN_LABEL_OFFSET = 0;
#endif

// Location of this operation, NULL for idempotent caches.
JSScript *script;
jsbytecode *pc;

CodeLocationLabel fallbackLabel() const {
return fallbackLabel_;
}
CodeLocationLabel rejoinLabel() const {
uint8_t *ptr = initialJump_.raw();
#ifdef JS_CPU_ARM
uint32_t i = 0;
while (i < REJOIN_LABEL_OFFSET)
ptr = Assembler::nextInstruction(ptr, &i);
#endif
return CodeLocationLabel(ptr);
}

private:
static const size_t MAX_STUBS;
void incrementStubCount() {
@@ -188,30 +178,17 @@ class IonCache
idempotent_(false),
disabled_(false),
stubCount_(0),
initialJump_(),
lastJump_(),
fallbackLabel_(),
script(NULL),
pc(NULL)
{
}

void disable();
virtual void disable();
inline bool isDisabled() const {
return disabled_;
}

// Set the initial jump state of the cache. The initialJump is the inline
// jump that will point to out-of-line code (such as the slow path, or
// stubs), and the rejoinLabel is the position that all out-of-line paths
// will rejoin to.
void setInlineJump(CodeOffsetJump initialJump, CodeOffsetLabel rejoinLabel) {
initialJump_ = initialJump;
lastJump_ = initialJump;

JS_ASSERT(rejoinLabel.offset() == initialJump.offset() + REJOIN_LABEL_OFFSET);
}

// Set the initial 'out-of-line' jump state of the cache. The fallbackLabel is
// the location of the out-of-line update (slow) path. This location will
// be set to the exitJump of the last generated stub.
@@ -219,15 +196,26 @@ class IonCache
fallbackLabel_ = fallbackLabel;
}

// Update labels once the code is copied and finalized.
void updateBaseAddress(IonCode *code, MacroAssembler &masm);
virtual void emitInitialJump(MacroAssembler &masm, AddCacheState &addState) = 0;
virtual void bindInitialJump(MacroAssembler &masm, AddCacheState &addState) = 0;
virtual void updateBaseAddress(IonCode *code, MacroAssembler &masm);

// Initialize the AddCacheState depending on the kind of cache, like
// setting a scratch register. Defaults to doing nothing.
virtual void initializeAddCacheState(LInstruction *ins, AddCacheState *addState);

// Reset the cache around garbage collection.
virtual void reset();

// Destroy any extra resources the cache uses upon IonScript finalization.
virtual void destroy();

bool canAttachStub() const {
return stubCount_ < MAX_STUBS;
}
bool empty() const {
return stubCount_ == 0;
}

enum LinkStatus {
LINK_ERROR,
@@ -242,13 +230,16 @@ class IonCache
LinkStatus linkCode(JSContext *cx, MacroAssembler &masm, IonScript *ion, IonCode **code);
// Fixup variables and update jumps in the list of stubs. Increment the
// number of attached stubs accordingly.
void attachStub(MacroAssembler &masm, StubAttacher &patcher, IonCode *code);
void attachStub(MacroAssembler &masm, StubAttacher &attacher, IonCode *code);

// Combine both linkStub and attachStub into one function. In addition, it
// produces a spew augmented with the attachKind string.
bool linkAndAttachStub(JSContext *cx, MacroAssembler &masm, StubAttacher &patcher,
bool linkAndAttachStub(JSContext *cx, MacroAssembler &masm, StubAttacher &attacher,
IonScript *ion, const char *attachKind);

bool isAllocated() {
return fallbackLabel_.isSet();
}
bool pure() {
return pure_;
}
@@ -274,6 +265,210 @@ class IonCache
}
};

//
// Repatch caches initially generate a patchable jump to an out of line call
// to the cache function. Stubs are attached by appending: when attaching a
// new stub, we patch any failure conditions in the last generated stub to
// jump to the new stub. Failure conditions in the new stub jump to the cache
// function which may generate new stubs.
//
// Control flow Pointers
// =======# ----. .---->
// # | |
// #======> \-----/
//
// Initial state:
//
// JIT Code
// +--------+ .---------------.
// | | | |
// |========| v +----------+ |
// |== IC ==|====>| Cache Fn | |
// |========| +----------+ |
// | |<=# # |
// | | #=======# |
// +--------+ Rejoin path |
// |________ |
// | |
// Repatch | |
// IC | |
// Entry | |
// +------------+ |
// | lastJump_ |---------------/
// +------------+
// | ... |
// +------------+
//
// Attaching stubs:
//
// Patch the jump pointed to by lastJump_ to jump to the new stub. Update
// lastJump_ to be the new stub's failure jump. The failure jump of the new
// stub goes to the fallback label, which is the cache function. In this
// fashion, new stubs are _appended_ to the chain of stubs, as lastJump_
// points to the _tail_ of the stub chain.
//
// JIT Code
// +--------+ #=======================#
// | | # v
// |========| # +----------+ +------+
// |== IC ==|=# | Cache Fn |<====| Stub |
// |========| +----------+ ^ +------+
// | |<=# # | #
// | | #======#=========|=====#
// +--------+ Rejoin path |
// |________ |
// | |
// Repatch | |
// IC | |
// Entry | |
// +------------+ |
// | lastJump_ |---------------/
// +------------+
// | ... |
// +------------+
//
class RepatchIonCache : public IonCache
{
protected:
class RepatchStubAppender;

CodeLocationJump initialJump_;
CodeLocationJump lastJump_;

// Offset from the initial jump to the rejoin label.
#ifdef JS_CPU_ARM
static const size_t REJOIN_LABEL_OFFSET = 4;
#else
static const size_t REJOIN_LABEL_OFFSET = 0;
#endif

CodeLocationLabel rejoinLabel() const {
uint8_t *ptr = initialJump_.raw();
#ifdef JS_CPU_ARM
uint32_t i = 0;
while (i < REJOIN_LABEL_OFFSET)
ptr = Assembler::nextInstruction(ptr, &i);
#endif
return CodeLocationLabel(ptr);
}

public:
RepatchIonCache()
: initialJump_(),
lastJump_()
{
}

virtual void reset();

// Set the initial jump state of the cache. The initialJump is the inline
// jump that will point to out-of-line code (such as the slow path, or
// stubs), and the rejoinLabel is the position that all out-of-line paths
// will rejoin to.
void emitInitialJump(MacroAssembler &masm, AddCacheState &addState);
void bindInitialJump(MacroAssembler &masm, AddCacheState &addState);

// Update the labels once the code is finalized.
void updateBaseAddress(IonCode *code, MacroAssembler &masm);
};
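
The append discipline pictured above can be modeled with ordinary pointers. In the following sketch, chainHead stands in for the inline jump's target and tailLink for lastJump_; nullptr stands for "fall back to the cache function" (illustrative only, not SpiderMonkey code):

#include <cstdio>

struct Stub {
    int guardKey;        // stand-in for a shape guard
    int value;
    Stub *next;          // the stub's "nextStub" failure jump
};

static Stub *chainHead = nullptr;    // target of the IC's inline jump
static Stub **tailLink = &chainHead; // lastJump_: the newest failure jump

static void attachStubAppend(Stub *s) {
    s->next = nullptr;   // new stub's failure path: the cache function
    *tailLink = s;       // patch the previous failure jump to enter us
    tailLink = &s->next; // lastJump_ now points at our failure jump
}

static bool lookup(int key, int *out) {
    for (Stub *s = chainHead; s; s = s->next) {
        if (s->guardKey == key) {    // guard succeeds: fast path
            *out = s->value;
            return true;
        }
    }
    return false;        // every guard failed: enter the update function
}

int main() {
    Stub a = {1, 10, nullptr};
    Stub b = {2, 20, nullptr};
    attachStubAppend(&a);            // chain: a -> cache fn
    attachStubAppend(&b);            // chain: a -> b -> cache fn
    int v = 0;
    bool hit = lookup(2, &v);
    printf("hit=%d v=%d\n", hit, v); // hit=1 v=20
    return 0;
}

In these terms, RepatchIonCache::reset corresponds to pointing chainHead back at the cache function and resetting tailLink, which is what the PatchJump(initialJump_, fallbackLabel_) and lastJump_ = initialJump_ lines earlier in this diff do.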

//
// Dispatch caches avoid patching already-running code. Instead, the jump to
// the stub chain is indirect by way of the firstStub_ pointer
// below. Initially the pointer points to the cache function which may attach
// new stubs. Stubs are attached by prepending: when attaching a new stub, we
// jump to the previous stub on failure conditions, then overwrite the
// firstStub_ pointer with the newly generated stub.
//
// This style does not patch the already executing instruction stream, does
// not need to worry about cache coherence of cached jump addresses, and does
// not have to worry about aligning the exit jumps to ensure atomic patching,
// at the expense of an extra memory read to load the very first stub.
//
// ICs that need to work in parallel execution need to be dispatch style.
//
// Control flow Pointers Memory load
// =======# ----. .----> ******
// # | | *
// #======> \-----/ *******
//
// Initial state:
//
// The first stub points to the cache function.
//
// JIT Code
// +--------+ .-------.
// | | v |
// |========| +---------------+ +----------+ |
// |== IC ==|====>| Load and jump |====>| Cache Fn | |
// |========| +---------------+ +----------+ |
// | |<=# * # |
// | | #===========*================# |
// +--------+ Rejoin * path |
// |________ * |
// | * |
// Dispatch | * |
// IC **|************ |
// Entry * | |
// +------------+ |
// | firstStub_ |-------------------------------------/
// +------------+
// | ... |
// +------------+
//
// Attaching stubs:
//
// Assign the address of the new stub to firstStub_. The new stub jumps to
// the old address held in firstStub_ on failure. Note that there is no
// concept of a fallback label here, new stubs are _prepended_, as
// firstStub_ always points to the _head_ of the stub chain.
//
// JIT Code
// +--------+ #=====================# .-----.
// | | # v v |
// |========| +---------------+ # +----------+ +------+ |
// |== IC ==|====>| Load and jump |==# | Cache Fn |<====| Stub | |
// |========| +---------------+ +----------+ +------+ |
// | |<=# * # # |
// | | #===========*================#================# |
// +--------+ Rejoin * path |
// |________ * |
// | * |
// Dispatch | * |
// IC **|************ |
// Entry * | |
// +------------+ |
// | firstStub_ |----------------------------------------------------/
// +------------+
// | ... |
// +------------+
//
class DispatchIonCache : public IonCache
{
protected:
class DispatchStubPrepender;

uint8_t *firstStub_;
CodeLocationLabel rejoinLabel_;
CodeOffsetLabel dispatchLabel_;

public:
DispatchIonCache()
: firstStub_(NULL),
rejoinLabel_(),
dispatchLabel_()
{
}

virtual void reset();

void emitInitialJump(MacroAssembler &masm, AddCacheState &addState);
void bindInitialJump(MacroAssembler &masm, AddCacheState &addState);

// Fix up the first stub pointer once the code is finalized.
void updateBaseAddress(IonCode *code, MacroAssembler &masm);
};
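
And a sketch of the prepend discipline with its publication constraint, modeling firstStub_ as an atomic pointer so that a new stub is fully written before the head pointer is overwritten — the property the "lest we race on entry into an unfinalized stub" comment in DispatchStubPrepender relies on. Again illustrative only, not SpiderMonkey code:

#include <atomic>
#include <cstdio>

struct Stub {
    int guardKey;
    int value;
    Stub *next;       // on guard failure, jump to the previous head
};

static std::atomic<Stub *> firstStub{nullptr}; // nullptr == cache function

static void attachStubPrepend(Stub *s) {
    s->next = firstStub.load(std::memory_order_relaxed); // failure -> old head
    // Release store: all writes to *s happen-before the pointer is visible,
    // so a racing reader never enters an unfinished stub.
    firstStub.store(s, std::memory_order_release);
}

static bool lookup(int key, int *out) {
    for (Stub *s = firstStub.load(std::memory_order_acquire); s; s = s->next) {
        if (s->guardKey == key) {
            *out = s->value;
            return true;
        }
    }
    return false;     // reached the "cache function"
}

int main() {
    Stub a = {1, 10, nullptr};
    Stub b = {2, 20, nullptr};
    attachStubPrepend(&a);           // chain: a -> cache fn
    attachStubPrepend(&b);           // chain: b -> a -> cache fn
    int v = 0;
    bool hit = lookup(1, &v);
    printf("hit=%d v=%d\n", hit, v); // hit=1 v=10
    return 0;
}

The acquire/release pair here is the sketch's stand-in for what the real implementation achieves by only updating the dispatch table after the stub's code is finalized.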

// Define the cache kind and pre-declare data structures used for calling inline
// caches.
#define CACHE_HEADER(ickind) \
@@ -290,7 +485,7 @@ class IonCache
// Subclasses of IonCache for the various kinds of caches. These do not define
// new data members; all caches must be of the same size.

class GetPropertyIC : public IonCache
class GetPropertyIC : public RepatchIonCache
{
protected:
// Registers live after the cache, excluding output registers. The initial
@@ -353,7 +548,7 @@ class GetPropertyIC : public IonCache
static bool update(JSContext *cx, size_t cacheIndex, HandleObject obj, MutableHandleValue vp);
};

class SetPropertyIC : public IonCache
class SetPropertyIC : public RepatchIonCache
{
protected:
// Registers live after the cache, excluding output registers. The initial
@@ -406,7 +601,7 @@ class SetPropertyIC : public IonCache
update(JSContext *cx, size_t cacheIndex, HandleObject obj, HandleValue value);
};

class GetElementIC : public IonCache
class GetElementIC : public RepatchIonCache
{
protected:
Register object_;
@@ -476,7 +671,7 @@ class GetElementIC : public IonCache
}
};

class BindNameIC : public IonCache
class BindNameIC : public RepatchIonCache
{
protected:
Register scopeChain_;
@@ -510,7 +705,7 @@ class BindNameIC : public IonCache
update(JSContext *cx, size_t cacheIndex, HandleObject scopeChain);
};

class NameIC : public IonCache
class NameIC : public RepatchIonCache
{
protected:
// Registers live after the cache, excluding output registers. The initial
@@ -559,7 +754,7 @@ class NameIC : public IonCache
update(JSContext *cx, size_t cacheIndex, HandleObject scopeChain, MutableHandleValue vp);
};

class CallsiteCloneIC : public IonCache
class CallsiteCloneIC : public RepatchIonCache
{
protected:
Register callee_;
@@ -596,6 +791,56 @@ class CallsiteCloneIC : public IonCache
static JSObject *update(JSContext *cx, size_t cacheIndex, HandleObject callee);
};

class ParallelGetPropertyIC : public DispatchIonCache
{
protected:
Register object_;
PropertyName *name_;
TypedOrValueRegister output_;

// A set of all objects that are stubbed. Used to detect duplicates in
// parallel execution.
ObjectSet *stubbedObjects_;

public:
ParallelGetPropertyIC(Register object, PropertyName *name, TypedOrValueRegister output)
: object_(object),
name_(name),
output_(output),
stubbedObjects_(NULL)
{
}

CACHE_HEADER(ParallelGetProperty)

void reset();
void destroy();
void initializeAddCacheState(LInstruction *ins, AddCacheState *addState);

Register object() const {
return object_;
}
PropertyName *name() const {
return name_;
}
TypedOrValueRegister output() const {
return output_;
}

bool initStubbedObjects(JSContext *cx);
ObjectSet *stubbedObjects() const {
JS_ASSERT_IF(stubbedObjects_, stubbedObjects_->initialized());
return stubbedObjects_;
}

bool canAttachReadSlot(LockedJSContext &cx, JSObject *obj, MutableHandleObject holder,
MutableHandleShape shape);
bool attachReadSlot(LockedJSContext &cx, IonScript *ion, JSObject *obj, bool *attachedStub);

static ParallelResult update(ForkJoinSlice *slice, size_t cacheIndex, HandleObject obj,
MutableHandleValue vp);
};

#undef CACHE_HEADER

// Implement cache casts now that the compiler can see the inheritance.

@@ -431,6 +431,7 @@ struct IonScript
}
void toggleBarriers(bool enabled);
void purgeCaches(JS::Zone *zone);
void destroyCaches();
void copySnapshots(const SnapshotWriter *writer);
void copyBailoutTable(const SnapshotOffset *table);
void copyConstants(const HeapValue *vp);

@@ -924,4 +924,3 @@ class ABIArgIter
} // namespace js

#endif // jsion_macro_assembler_h__

@@ -3329,13 +3329,17 @@ class LGetPropertyCacheV : public LInstructionHelper<BOX_PIECES, 1, 0>

// Patchable jump to stubs generated for a GetProperty cache, which loads a
// value of a known type, possibly into an FP register.
class LGetPropertyCacheT : public LInstructionHelper<1, 1, 0>
class LGetPropertyCacheT : public LInstructionHelper<1, 1, 1>
{
public:
LIR_HEADER(GetPropertyCacheT)

LGetPropertyCacheT(const LAllocation &object) {
LGetPropertyCacheT(const LAllocation &object, const LDefinition &temp) {
setOperand(0, object);
setTemp(0, temp);
}
const LDefinition *temp() {
return getTemp(0);
}
const MGetPropertyCache *mir() const {
return mir_->toGetPropertyCache();

@@ -2145,7 +2145,7 @@ LIRGenerator::visitGetPropertyCache(MGetPropertyCache *ins)
return assignSafepoint(lir, ins);
}

LGetPropertyCacheT *lir = new LGetPropertyCacheT(useRegister(ins->object()));
LGetPropertyCacheT *lir = newLGetPropertyCacheT(ins);
if (!define(lir, ins))
return false;
return assignSafepoint(lir, ins);
@@ -2769,4 +2769,3 @@ LIRGenerator::generate()
JS_ASSERT(prepareCallStack_.empty());
return true;
}

@@ -193,7 +193,7 @@ class ParallelArrayVisitor : public MInstructionVisitor
SAFE_OP(FunctionEnvironment) // just a load of func env ptr
SAFE_OP(TypeBarrier) // causes a bailout if the type is not found: a-ok with us
SAFE_OP(MonitorTypes) // causes a bailout if the type is not found: a-ok with us
UNSAFE_OP(GetPropertyCache)
SAFE_OP(GetPropertyCache)
UNSAFE_OP(GetElementCache)
UNSAFE_OP(BindNameCache)
SAFE_OP(GuardShape)

@@ -1016,7 +1016,7 @@ CodeGeneratorARM::visitMathD(LMathD *math)
const LAllocation *src1 = math->getOperand(0);
const LAllocation *src2 = math->getOperand(1);
const LDefinition *output = math->getDef(0);

switch (math->jsop()) {
case JSOP_ADD:
masm.ma_vadd(ToFloatRegister(src1), ToFloatRegister(src2), ToFloatRegister(output));
@@ -1630,6 +1630,14 @@ CodeGeneratorARM::generateInvalidateEpilogue()
return true;
}

void
ParallelGetPropertyIC::initializeAddCacheState(LInstruction *ins, AddCacheState *addState)
{
// Can always use the scratch register on ARM.
JS_ASSERT(ins->isGetPropertyCacheV() || ins->isGetPropertyCacheT());
addState->dispatchScratch = ScratchRegister;
}

template <class U>
Register
getBase(U *mir)

@@ -296,6 +296,12 @@ LIRGeneratorARM::newLTableSwitchV(MTableSwitch *tableswitch)
return new LTableSwitchV(temp(), tempFloat(), tableswitch);
}

LGetPropertyCacheT *
LIRGeneratorARM::newLGetPropertyCacheT(MGetPropertyCache *ins)
{
return new LGetPropertyCacheT(useRegister(ins->object()), LDefinition::BogusTemp());
}

bool
LIRGeneratorARM::visitGuardShape(MGuardShape *ins)
{

@@ -29,7 +29,7 @@ class LIRGeneratorARM : public LIRGeneratorShared

void lowerUntypedPhiInput(MPhi *phi, uint32_t inputPosition, LBlock *block, size_t lirIndex);
bool defineUntypedPhi(MPhi *phi, size_t lirIndex);
bool lowerForShift(LInstructionHelper<1, 2, 0> *ins, MDefinition *mir, MDefinition *lhs,
bool lowerForShift(LInstructionHelper<1, 2, 0> *ins, MDefinition *mir, MDefinition *lhs,
MDefinition *rhs);
bool lowerUrshD(MUrsh *mir);

@@ -55,6 +55,7 @@ class LIRGeneratorARM : public LIRGeneratorShared
LTableSwitch *newLTableSwitch(const LAllocation &in, const LDefinition &inputCopy,
MTableSwitch *ins);
LTableSwitchV *newLTableSwitchV(MTableSwitch *ins);
LGetPropertyCacheT *newLGetPropertyCacheT(MGetPropertyCache *ins);

public:
bool visitConstant(MConstant *ins);
@@ -77,4 +78,3 @@ typedef LIRGeneratorARM LIRGeneratorSpecific;
} // namespace js

#endif // jsion_ion_lowering_arm_h__

@@ -575,8 +575,7 @@ class MacroAssemblerARMCompat : public MacroAssemblerARM
}

CodeOffsetLabel pushWithPatch(ImmWord imm) {
CodeOffsetLabel label = currentOffset();
ma_movPatchable(Imm32(imm.value), ScratchRegister, Always, hasMOVWT() ? L_MOVWT : L_LDR);
CodeOffsetLabel label = movWithPatch(imm, ScratchRegister);
ma_push(ScratchRegister);
return label;
}


@ -350,8 +350,7 @@ class Assembler : public AssemblerX86Shared
        movsd(src, Operand(StackPointer, 0));
    }
    CodeOffsetLabel pushWithPatch(const ImmWord &word) {
        movq(word, ScratchReg);
        CodeOffsetLabel label = masm.currentOffset();
        CodeOffsetLabel label = movWithPatch(word, ScratchReg);
        push(ScratchReg);
        return label;
    }
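
Both pushWithPatch bodies, the ARM one above and this x64 one, now reduce to movWithPatch followed by a push, so the offset of the patchable move is recorded in exactly one place. Notably, the old x64 code captured currentOffset() only after emitting movq, so the recorded label appears to have pointed past the patchable instruction; routing through movWithPatch removes that kind of bookkeeping skew. A self-contained toy sketch of the pattern (all names illustrative):

#include <cstdint>
#include <cstdio>
#include <vector>

// Toy assembler buffer: movWithPatch emits a placeholder immediate and
// returns its offset; pushWithPatch is just movWithPatch + push, so the
// offset bookkeeping lives in one function.
struct ToyMasm {
    std::vector<uintptr_t> code;

    size_t movWithPatch(uintptr_t placeholder) {
        size_t label = code.size();   // offset recorded *before* emitting
        code.push_back(placeholder);
        return label;
    }

    void push() { /* would push the scratch register; elided */ }

    size_t pushWithPatch(uintptr_t placeholder) {
        size_t label = movWithPatch(placeholder);
        push();
        return label;
    }

    void patch(size_t label, uintptr_t value) { code[label] = value; }
};

int main() {
    ToyMasm masm;
    size_t label = masm.pushWithPatch(uintptr_t(-1));  // placeholder for now
    masm.patch(label, 0x1234);                         // the real value, later
    printf("0x%x\n", unsigned(masm.code[label]));      // 0x1234
    return 0;
}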

@ -141,7 +141,7 @@ CodeGeneratorX64::visitUnbox(LUnbox *unbox)
        JS_NOT_REACHED("Given MIRType cannot be unboxed.");
        break;
    }

    return true;
}

@ -516,3 +516,10 @@ CodeGeneratorX64::visitAsmJSLoadFFIFunc(LAsmJSLoadFFIFunc *ins)
    return gen->noteGlobalAccess(label.offset(), mir->globalDataOffset());
}

void
ParallelGetPropertyIC::initializeAddCacheState(LInstruction *ins, AddCacheState *addState)
{
    // Can always use the scratch register on x64.
    JS_ASSERT(ins->isGetPropertyCacheV() || ins->isGetPropertyCacheT());
    addState->dispatchScratch = ScratchReg;
}

@ -200,3 +200,9 @@ LIRGeneratorX64::visitAsmJSLoadFuncPtr(MAsmJSLoadFuncPtr *ins)
{
    return define(new LAsmJSLoadFuncPtr(useRegister(ins->index()), temp()), ins);
}

LGetPropertyCacheT *
LIRGeneratorX64::newLGetPropertyCacheT(MGetPropertyCache *ins)
{
    return new LGetPropertyCacheT(useRegister(ins->object()), LDefinition::BogusTemp());
}

@ -37,6 +37,8 @@ class LIRGeneratorX64 : public LIRGeneratorX86Shared
                     MDefinition *rhs);
    bool lowerForFPU(LMathD *ins, MDefinition *mir, MDefinition *lhs, MDefinition *rhs);

    LGetPropertyCacheT *newLGetPropertyCacheT(MGetPropertyCache *ins);

  public:
    bool visitBox(MBox *box);
    bool visitUnbox(MUnbox *unbox);

@ -54,4 +56,3 @@ typedef LIRGeneratorX64 LIRGeneratorSpecific;
} // namespace js

#endif // jsion_ion_lowering_x64_h__

@ -546,4 +546,3 @@ GetTempRegForIntArg(uint32_t usedIntArgs, uint32_t usedFloatArgs, Register *out)
} // namespace js

#endif // jsion_cpu_x86_assembler_h__

@ -564,3 +564,15 @@ CodeGeneratorX86::postAsmJSCall(LAsmJSCall *lir)
        masm.movsd(Operand(esp, 0), ReturnFloatReg);
        masm.freeStack(sizeof(double));
}

void
ParallelGetPropertyIC::initializeAddCacheState(LInstruction *ins, AddCacheState *addState)
{
    // We don't have a scratch register; only use the temp if we needed one,
    // otherwise it's BogusTemp.
    JS_ASSERT(ins->isGetPropertyCacheV() || ins->isGetPropertyCacheT());
    if (ins->isGetPropertyCacheV() || ins->toGetPropertyCacheT()->temp()->isBogusTemp())
        addState->dispatchScratch = output_.scratchReg().gpr();
    else
        addState->dispatchScratch = ToRegister(ins->toGetPropertyCacheT()->temp());
}

@ -286,3 +286,17 @@ LIRGeneratorX86::visitAsmJSLoadFuncPtr(MAsmJSLoadFuncPtr *ins)
{
    return define(new LAsmJSLoadFuncPtr(useRegisterAtStart(ins->index())), ins);
}

LGetPropertyCacheT *
LIRGeneratorX86::newLGetPropertyCacheT(MGetPropertyCache *ins)
{
    // x86 has no designated scratch register, and dispatch-style ICs need a
    // GPR for their indirect jump, so allocate a temporary when the output
    // type is double: a floating point output can't supply a scratch GPR.
    LDefinition scratch;
    if (ins->type() == MIRType_Double)
        scratch = temp();
    else
        scratch = LDefinition::BogusTemp();
    return new LGetPropertyCacheT(useRegister(ins->object()), scratch);
}

@ -30,7 +30,7 @@ class LIRGeneratorX86 : public LIRGeneratorX86Shared
    void lowerUntypedPhiInput(MPhi *phi, uint32_t inputPosition, LBlock *block, size_t lirIndex);
    bool defineUntypedPhi(MPhi *phi, size_t lirIndex);

    bool lowerForShift(LInstructionHelper<1, 2, 0> *ins, MDefinition *mir, MDefinition *lhs,
    bool lowerForShift(LInstructionHelper<1, 2, 0> *ins, MDefinition *mir, MDefinition *lhs,
                       MDefinition *rhs);

    bool lowerForALU(LInstructionHelper<1, 1, 0> *ins, MDefinition *mir, MDefinition *input);

@ -39,6 +39,8 @@ class LIRGeneratorX86 : public LIRGeneratorX86Shared
    bool lowerForFPU(LInstructionHelper<1, 2, 0> *ins, MDefinition *mir, MDefinition *lhs,
                     MDefinition *rhs);

    LGetPropertyCacheT *newLGetPropertyCacheT(MGetPropertyCache *ins);

  public:
    bool visitBox(MBox *box);
    bool visitUnbox(MUnbox *unbox);

@ -61,4 +63,3 @@ typedef LIRGeneratorX86 LIRGeneratorSpecific;
} // namespace ion

#endif // jsion_ion_lowering_x86_h__

@ -0,0 +1,15 @@
load(libdir + "parallelarray-helpers.js");

function testIC() {
    function C() {}
    C.prototype.foo = "foo";
    var c = new C;
    assertParallelArrayModesCommute(["seq", "par"], function (m) {
        return new ParallelArray(minItemsTestingThreshold, function (i) {
            return c.foo;
        }, m);
    });
}

if (getBuildConfiguration().parallelJS)
    testIC();

@ -35,6 +35,7 @@ class js::ForkJoinShared : public TaskExecutor, public Monitor
    ForkJoinOp &op_;            // User-defined operations to be performed in parallel.
    const uint32_t numSlices_;  // Total number of threads.
    PRCondVar *rendezvousEnd_;  // Cond. var used to signal end of rendezvous.
    PRLock *cxLock_;            // Locks cx_ for parallel VM calls.

    /////////////////////////////////////////////////////////////////////////
    // Per-thread arenas

@ -128,6 +129,9 @@ class js::ForkJoinShared : public TaskExecutor, public Monitor
    void setAbortFlag(bool fatal);

    JSRuntime *runtime() { return cx_->runtime; }

    JSContext *acquireContext() { PR_Lock(cxLock_); return cx_; }
    void releaseContext() { PR_Unlock(cxLock_); }
};

class js::AutoRendezvous

@ -173,6 +177,7 @@ ForkJoinShared::ForkJoinShared(JSContext *cx,
    op_(op),
    numSlices_(numSlices),
    rendezvousEnd_(NULL),
    cxLock_(NULL),
    allocators_(cx),
    uncompleted_(uncompleted),
    blocked_(0),

@ -205,6 +210,10 @@ ForkJoinShared::init()
    if (!rendezvousEnd_)
        return false;

    cxLock_ = PR_NewLock();
    if (!cxLock_)
        return false;

    for (unsigned i = 0; i < numSlices_; i++) {
        Allocator *allocator = cx_->runtime->new_<Allocator>(cx_->zone());
        if (!allocator)

@ -224,6 +233,8 @@ ForkJoinShared::~ForkJoinShared()
    if (rendezvousEnd_)
        PR_DestroyCondVar(rendezvousEnd_);

    PR_DestroyLock(cxLock_);

    while (allocators_.length() > 0)
        js_delete(allocators_.popCopy());
}

@ -504,6 +515,18 @@ ForkJoinSlice::runtime()
    return shared->runtime();
}

JSContext *
ForkJoinSlice::acquireContext()
{
    return shared->acquireContext();
}

void
ForkJoinSlice::releaseContext()
{
    return shared->releaseContext();
}

bool
ForkJoinSlice::check()
{

@ -666,4 +689,3 @@ js::ExecuteForkJoinOp(JSContext *cx, ForkJoinOp &op)
}

#endif // defined(JS_THREADSAFE) && defined(JS_ION)

@ -197,6 +197,10 @@ struct ForkJoinSlice
    // Be wary, the runtime is shared between all threads!
    JSRuntime *runtime();

    // Acquire and release the JSContext from the runtime.
    JSContext *acquireContext();
    void releaseContext();

    // Check the current state of parallel execution.
    static inline ForkJoinSlice *Current();

@ -235,6 +239,32 @@ struct ForkJoinOp
    virtual bool parallel(ForkJoinSlice &slice) = 0;
};

// Locks a JSContext for its scope.
class LockedJSContext
{
    ForkJoinSlice *slice_;
    JSContext *cx_;

  public:
    LockedJSContext(ForkJoinSlice *slice)
      : slice_(slice),
#if defined(JS_THREADSAFE) && defined(JS_ION)
        cx_(slice->acquireContext())
#else
        cx_(NULL)
#endif
    { }

    ~LockedJSContext() {
#if defined(JS_THREADSAFE) && defined(JS_ION)
        slice_->releaseContext();
#endif
    }

    operator JSContext *() { return cx_; }
    JSContext *operator->() { return cx_; }
};
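
LockedJSContext is a standard RAII guard over acquireContext()/releaseContext(): every path out of the scope releases cxLock_, including early returns. A self-contained analogy using std::mutex in place of the NSPR lock (all names illustrative):

#include <cstdio>
#include <mutex>

// Stand-ins: the mutex plays cxLock_, the int plays the shared JSContext.
struct SharedState {
    std::mutex cxLock;
    int counter;
    SharedState() : counter(0) {}
};

// RAII guard: the constructor acquires, the destructor releases, so a
// worker cannot leak the lock on an early return.
class LockedState {
    SharedState &s_;

  public:
    explicit LockedState(SharedState &s) : s_(s) { s_.cxLock.lock(); }  // acquireContext()
    ~LockedState() { s_.cxLock.unlock(); }                              // releaseContext()

    SharedState *operator->() { return &s_; }  // mirrors LockedJSContext's operator->
};

int main() {
    SharedState state;
    {
        LockedState cx(state);   // like: LockedJSContext cx(slice);
        cx->counter += 1;        // safe to touch shared state while held
    }                            // lock released here
    printf("%d\n", state.counter);
    return 0;
}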

static inline bool
InParallelSection()
{

@ -403,7 +403,7 @@ class ParallelIonInvoke
        IonCode *code = ion->method();
        jitcode_ = code->raw();
        enter_ = cx->compartment->ionCompartment()->enterJIT();
        calleeToken_ = CalleeToToken(callee);
        calleeToken_ = CalleeToParallelToken(callee);
    }
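
CalleeToParallelToken presumably differs from CalleeToToken only in the execution-mode tag it folds into the token. A self-contained sketch of the low-bit-tagging idea behind such tokens; the bit value and the names are illustrative, not SpiderMonkey's actual encoding:

#include <cassert>
#include <cstdint>

// Because callees are word-aligned, the low bits of their address are free
// to carry metadata such as the execution mode.
static const uintptr_t ParallelBit = 0x1;

void *ToParallelToken(void *callee) {
    return (void *)((uintptr_t)callee | ParallelBit);
}

bool TokenIsParallel(void *token) {
    return ((uintptr_t)token & ParallelBit) != 0;
}

void *TokenToCallee(void *token) {
    // The tag must be cleared before the pointer is dereferenced.
    return (void *)((uintptr_t)token & ~ParallelBit);
}

int main() {
    static int callee;  // any sufficiently aligned object will do
    void *token = ToParallelToken(&callee);
    assert(TokenIsParallel(token));
    assert(TokenToCallee(token) == (void *)&callee);
    return 0;
}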

    bool invoke(JSContext *cx) {