Bug 1030446 - Build a list of code ranges and use this instead of CallSite for describing functions (r=dougc)

--HG--
extra : rebase_source : 842f64b989e6be4a8e530c219ffe6eec6701c49f
Luke Wagner 2014-06-25 17:34:23 -05:00
parent f836c90c3e
commit 94add28e32
21 changed files with 202 additions and 194 deletions

View File

@@ -16,7 +16,6 @@
#include "jsprf.h"
#include "prmjtime.h"
#include "assembler/assembler/MacroAssembler.h"
#include "frontend/Parser.h"
#include "jit/AsmJSLink.h"
#include "jit/AsmJSModule.h"
@@ -1409,9 +1408,6 @@ class MOZ_STACK_CLASS ModuleCompiler
return false;
return exits_.add(p, Move(exitDescriptor), *exitIndex);
}
bool addFunctionName(PropertyName *name, uint32_t *index) {
return module_->addFunctionName(name, index);
}
// Note a constraint on the minimum size of the heap. The heap size is
// constrained when linking to be at least the maximum of all such constraints.
@@ -1443,6 +1439,11 @@ class MOZ_STACK_CLASS ModuleCompiler
bool finishGeneratingFunction(Func &func, MIRGenerator &mir, CodeGenerator &codegen) {
JS_ASSERT(func.defined() && func.code()->bound());
uint32_t beginOffset = func.code()->offset();
uint32_t endOffset = masm_.currentOffset();
if (!module_->addFunctionCodeRange(func.name(), beginOffset, endOffset))
return false;
jit::IonScriptCounts *counts = codegen.extractScriptCounts();
if (counts && !module_->addFunctionCounts(counts)) {
js_delete(counts);
@@ -1480,15 +1481,19 @@ class MOZ_STACK_CLASS ModuleCompiler
module_->finishFunctionBodies(masm_.currentOffset());
}
void startGeneratingEntry(unsigned exportIndex) {
module_->exportedFunction(exportIndex).initCodeOffset(masm_.currentOffset());
}
bool finishGeneratingEntry(unsigned exportIndex) {
return module_->addEntryCodeRange(exportIndex, masm_.currentOffset());
}
void setInterpExitOffset(unsigned exitIndex) {
module_->exit(exitIndex).initInterpOffset(masm_.currentOffset());
}
void setIonExitOffset(unsigned exitIndex) {
module_->exit(exitIndex).initIonOffset(masm_.currentOffset());
}
void setEntryOffset(unsigned exportIndex) {
module_->exportedFunction(exportIndex).initCodeOffset(masm_.currentOffset());
}
void buildCompilationTimeReport(bool storedInCache, ScopedJSFreePtr<char> *out) {
ScopedJSFreePtr<char> slowFuns;
@@ -1808,7 +1813,6 @@ class FunctionCompiler
ModuleCompiler & m_;
LifoAlloc & lifo_;
ParseNode * fn_;
uint32_t functionNameIndex_;
LocalMap locals_;
VarInitializerVector varInitializers_;
@@ -1829,15 +1833,11 @@ class FunctionCompiler
LabeledBlockMap labeledBreaks_;
LabeledBlockMap labeledContinues_;
static const uint32_t NO_FUNCTION_NAME_INDEX = UINT32_MAX;
JS_STATIC_ASSERT(NO_FUNCTION_NAME_INDEX > CallSiteDesc::FUNCTION_NAME_INDEX_MAX);
public:
FunctionCompiler(ModuleCompiler &m, ParseNode *fn, LifoAlloc &lifo)
: m_(m),
lifo_(lifo),
fn_(fn),
functionNameIndex_(NO_FUNCTION_NAME_INDEX),
locals_(m.cx()),
varInitializers_(m.cx()),
alloc_(nullptr),
@@ -2279,12 +2279,7 @@ class FunctionCompiler
uint32_t line, column;
m_.tokenStream().srcCoords.lineNumAndColumnIndex(call.node_->pn_pos.begin, &line, &column);
if (functionNameIndex_ == NO_FUNCTION_NAME_INDEX) {
if (!m_.addFunctionName(FunctionName(fn_), &functionNameIndex_))
return false;
}
CallSiteDesc desc(line, column, functionNameIndex_);
CallSiteDesc desc(line, column);
MAsmJSCall *ins = MAsmJSCall::New(alloc(), desc, callee, call.regArgs_, returnType,
call.spIncrement_);
if (!ins)
@@ -5955,7 +5950,7 @@ GenerateEntry(ModuleCompiler &m, const AsmJSModule::ExportedFunction &exportedFu
// PushRegsInMask(NonVolatileRegs).
masm.setFramePushed(0);
// See AsmJSFrameSize comment in Assembler-*.h.
// See AsmJSFrameSize comment in Assembler-shared.h.
#if defined(JS_CODEGEN_ARM)
masm.push(lr);
#endif // JS_CODEGEN_ARM
@@ -6030,7 +6025,7 @@ GenerateEntry(ModuleCompiler &m, const AsmJSModule::ExportedFunction &exportedFu
// Call into the real function.
AssertStackAlignment(masm);
masm.call(CallSiteDesc::Entry(), func.code());
masm.call(func.code());
// Pop the stack and recover the original 'argv' argument passed to the
// trampoline (which was pushed on the stack).
@@ -6214,7 +6209,7 @@ GenerateFFIInterpreterExit(ModuleCompiler &m, const ModuleCompiler::ExitDescript
m.setInterpExitOffset(exitIndex);
masm.setFramePushed(0);
// See AsmJSFrameSize comment in Assembler-*.h.
// See AsmJSFrameSize comment in Assembler-shared.h.
#if defined(JS_CODEGEN_ARM)
masm.push(lr);
#elif defined(JS_CODEGEN_MIPS)
@@ -6389,7 +6384,7 @@ GenerateFFIIonExit(ModuleCompiler &m, const ModuleCompiler::ExitDescriptor &exit
m.setIonExitOffset(exitIndex);
masm.setFramePushed(0);
// See AsmJSFrameSize comment in Assembler-*.h.
// See AsmJSFrameSize comment in Assembler-shared.h.
#if defined(JS_CODEGEN_ARM)
masm.push(lr);
#elif defined(JS_CODEGEN_MIPS)
@@ -6871,10 +6866,10 @@ static bool
GenerateStubs(ModuleCompiler &m)
{
for (unsigned i = 0; i < m.module().numExportedFunctions(); i++) {
m.setEntryOffset(i);
m.startGeneratingEntry(i);
if (!GenerateEntry(m, m.module().exportedFunction(i)))
return false;
if (m.masm().oom())
if (m.masm().oom() || !m.finishGeneratingEntry(i))
return false;
}
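
The compiler-side changes above are plain bracketing: finishGeneratingFunction and startGeneratingEntry/finishGeneratingEntry capture masm.currentOffset() on either side of the emitted code and hand the pair to the module. A minimal sketch of that bookkeeping, using hypothetical Module/CodeRange stand-ins rather than the real AsmJSModule API:

#include <cassert>
#include <cstdint>
#include <string>
#include <vector>

// Hypothetical stand-ins for AsmJSModule::CodeRange and the module's
// code-range list; names and layout are illustrative only.
struct CodeRange {
    enum Kind { Entry, Function } kind;
    uint32_t begin, end;   // [begin, end) offsets into the module's code
    std::string name;      // only meaningful for Function ranges
};

struct Module {
    std::vector<CodeRange> ranges;

    // Mirrors addFunctionCodeRange: ranges are appended in ascending,
    // non-overlapping order so that lookup can binary-search later.
    bool addFunctionCodeRange(std::string name, uint32_t begin, uint32_t end) {
        assert(begin <= end);
        assert(ranges.empty() || ranges.back().end <= begin);
        ranges.push_back(CodeRange{CodeRange::Function, begin, end, std::move(name)});
        return true;
    }
};

int main() {
    Module m;
    uint32_t beginOffset = 0;    // masm.currentOffset() before codegen
    // ... emit the function body ...
    uint32_t endOffset = 128;    // masm.currentOffset() after codegen
    m.addFunctionCodeRange("f", beginOffset, endOffset);
    assert(m.ranges.size() == 1);
    return 0;
}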

View File

@@ -12,7 +12,7 @@
using namespace js;
using namespace js::jit;
static uint8_t *
static void *
ReturnAddressFromFP(uint8_t *fp)
{
// In asm.js code, the "frame" consists of a single word: the saved
@@ -39,25 +39,29 @@ AsmJSFrameIterator::operator++()
}
void
AsmJSFrameIterator::settle(uint8_t *returnAddress)
AsmJSFrameIterator::settle(void *returnAddress)
{
callsite_ = module_->lookupCallSite(returnAddress);
JS_ASSERT(callsite_);
const AsmJSModule::CodeRange *codeRange = module_->lookupCodeRange(ReturnAddressFromFP(fp_));
JS_ASSERT(codeRange);
codeRange_ = codeRange;
if (callsite_->isEntry()) {
switch (codeRange->kind()) {
case AsmJSModule::CodeRange::Entry:
fp_ = nullptr;
JS_ASSERT(done());
return;
case AsmJSModule::CodeRange::Function:
callsite_ = module_->lookupCallSite(returnAddress);
JS_ASSERT(callsite_);
break;
}
JS_ASSERT(callsite_->isNormal());
}
JSAtom *
AsmJSFrameIterator::functionDisplayAtom() const
{
JS_ASSERT(!done());
return module_->functionName(callsite_->functionNameIndex());
return reinterpret_cast<const AsmJSModule::CodeRange*>(codeRange_)->functionName(*module_);
}
unsigned
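
The new settle() logic is a two-step classification: find the code range containing the return address, then either stop (entry trampoline) or consult the callsite table (function code). A simplified model of that control flow, with illustrative stand-in types rather than the real iterator and module classes:

#include <cassert>
#include <cstdio>

enum class RangeKind { Entry, Function };

struct FakeRange {
    RangeKind kind;
    const char *functionName;  // stands in for CodeRange::functionName()
};

void settle(const FakeRange *range, bool *done) {
    switch (range->kind) {
      case RangeKind::Entry:
        // The entry trampoline is the oldest asm.js frame: unwinding past
        // it leaves asm.js code entirely, so iteration is finished.
        *done = true;
        return;
      case RangeKind::Function:
        // Inside function code the callsite table still supplies the
        // line/column; the function name now comes from the code range.
        std::printf("in function %s\n", range->functionName);
        *done = false;
        return;
    }
}

int main() {
    FakeRange fn{RangeKind::Function, "f"};
    FakeRange entry{RangeKind::Entry, nullptr};
    bool done;
    settle(&fn, &done);
    assert(!done);
    settle(&entry, &done);
    assert(done);
    return 0;
}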

View File

@@ -24,7 +24,11 @@ class AsmJSFrameIterator
const jit::CallSite *callsite_;
uint8_t *fp_;
void settle(uint8_t *returnAddress);
// Really a const AsmJSModule::CodeRange*, but nested classes cannot be
// forward-declared, so use void* to avoid pulling in all of AsmJSModule.h.
const void *codeRange_;
void settle(void *returnAddress);
public:
explicit AsmJSFrameIterator() : module_(nullptr) {}

View File

@@ -167,6 +167,7 @@ AsmJSModule::addSizeOfMisc(mozilla::MallocSizeOf mallocSizeOf, size_t *asmJSModu
exits_.sizeOfExcludingThis(mallocSizeOf) +
exports_.sizeOfExcludingThis(mallocSizeOf) +
callSites_.sizeOfExcludingThis(mallocSizeOf) +
codeRanges_.sizeOfExcludingThis(mallocSizeOf) +
functionNames_.sizeOfExcludingThis(mallocSizeOf) +
heapAccesses_.sizeOfExcludingThis(mallocSizeOf) +
functionCounts_.sizeOfExcludingThis(mallocSizeOf) +
@@ -189,11 +190,11 @@ struct CallSiteRetAddrOffset
};
const CallSite *
AsmJSModule::lookupCallSite(uint8_t *returnAddress) const
AsmJSModule::lookupCallSite(void *returnAddress) const
{
JS_ASSERT(isFinished());
uint32_t target = returnAddress - code_;
uint32_t target = ((uint8_t*)returnAddress) - code_;
size_t lowerBound = 0;
size_t upperBound = callSites_.length();
@@ -204,6 +205,45 @@ AsmJSModule::lookupCallSite(uint8_t *returnAddress) const
return &callSites_[match];
}
namespace js {
// Create an ordering on CodeRange and pc offsets suitable for BinarySearch.
// Stick these in the same namespace as AsmJSModule so that argument-dependent
// lookup will find them.
bool
operator==(size_t pcOffset, const AsmJSModule::CodeRange &rhs)
{
return pcOffset >= rhs.beginOffset() && pcOffset < rhs.endOffset();
}
bool
operator<=(const AsmJSModule::CodeRange &lhs, const AsmJSModule::CodeRange &rhs)
{
return lhs.beginOffset() <= rhs.beginOffset();
}
bool
operator<(size_t pcOffset, const AsmJSModule::CodeRange &rhs)
{
return pcOffset < rhs.beginOffset();
}
} // namespace js
const AsmJSModule::CodeRange *
AsmJSModule::lookupCodeRange(void *pc) const
{
JS_ASSERT(isFinished());
uint32_t target = ((uint8_t*)pc) - code_;
size_t lowerBound = 0;
size_t upperBound = codeRanges_.length();
size_t match;
if (!BinarySearch(codeRanges_, lowerBound, upperBound, target, &match))
return nullptr;
return &codeRanges_[match];
}
struct HeapAccessOffset
{
const AsmJSHeapAccessVector &accesses;
@@ -214,12 +254,12 @@ struct HeapAccessOffset
};
const AsmJSHeapAccess *
AsmJSModule::lookupHeapAccess(uint8_t *pc) const
AsmJSModule::lookupHeapAccess(void *pc) const
{
JS_ASSERT(isFinished());
JS_ASSERT(containsPC(pc));
uint32_t target = pc - code_;
uint32_t target = ((uint8_t*)pc) - code_;
size_t lowerBound = 0;
size_t upperBound = heapAccesses_.length();
@@ -293,6 +333,11 @@ AsmJSModule::finish(ExclusiveContext *cx, TokenStream &tokenStream, MacroAssembl
CallSite &c = callSites_[i];
c.setReturnAddressOffset(masm.actualOffset(c.returnAddressOffset()));
}
for (size_t i = 0; i < codeRanges_.length(); i++) {
CodeRange &c = codeRanges_[i];
c.beginOffset_ = masm.actualOffset(c.beginOffset_);
c.endOffset_ = masm.actualOffset(c.endOffset_);
}
#endif
JS_ASSERT(pod.functionBytes_ % AsmJSPageSize == 0);
@@ -1084,6 +1129,7 @@ AsmJSModule::serializedSize() const
SerializedVectorSize(exits_) +
SerializedVectorSize(exports_) +
SerializedPodVectorSize(callSites_) +
SerializedPodVectorSize(codeRanges_) +
SerializedVectorSize(functionNames_) +
SerializedPodVectorSize(heapAccesses_) +
#if defined(MOZ_VTUNE) || defined(JS_ION_PERF)
@@ -1104,6 +1150,7 @@ AsmJSModule::serialize(uint8_t *cursor) const
cursor = SerializeVector(cursor, exits_);
cursor = SerializeVector(cursor, exports_);
cursor = SerializePodVector(cursor, callSites_);
cursor = SerializePodVector(cursor, codeRanges_);
cursor = SerializeVector(cursor, functionNames_);
cursor = SerializePodVector(cursor, heapAccesses_);
#if defined(MOZ_VTUNE) || defined(JS_ION_PERF)
@@ -1130,6 +1177,7 @@ AsmJSModule::deserialize(ExclusiveContext *cx, const uint8_t *cursor)
(cursor = DeserializeVector(cx, cursor, &exits_)) &&
(cursor = DeserializeVector(cx, cursor, &exports_)) &&
(cursor = DeserializePodVector(cx, cursor, &callSites_)) &&
(cursor = DeserializePodVector(cx, cursor, &codeRanges_)) &&
(cursor = DeserializeVector(cx, cursor, &functionNames_)) &&
(cursor = DeserializePodVector(cx, cursor, &heapAccesses_)) &&
#if defined(MOZ_VTUNE) || defined(JS_ION_PERF)
@@ -1200,6 +1248,7 @@ AsmJSModule::clone(JSContext *cx, ScopedJSDeletePtr<AsmJSModule> *moduleOut) con
!CloneVector(cx, exits_, &out.exits_) ||
!CloneVector(cx, exports_, &out.exports_) ||
!ClonePodVector(cx, callSites_, &out.callSites_) ||
!ClonePodVector(cx, codeRanges_, &out.codeRanges_) ||
!CloneVector(cx, functionNames_, &out.functionNames_) ||
!ClonePodVector(cx, heapAccesses_, &out.heapAccesses_) ||
!staticLinkData_.clone(cx, &out.staticLinkData_))
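
lookupCodeRange above depends on the code ranges being sorted and disjoint, and on BinarySearch treating "target == range" as "target falls inside the half-open interval [beginOffset, endOffset)". The same containment search can be sketched standalone with std::upper_bound (the Range type here is a hypothetical simplification):

#include <algorithm>
#include <cassert>
#include <cstdint>
#include <vector>

struct Range { uint32_t begin, end; };  // half-open [begin, end)

// Same containment search as lookupCodeRange, but standalone: find the
// range containing pcOffset, or nullptr if pcOffset falls in a gap.
const Range *Lookup(const std::vector<Range> &ranges, uint32_t pcOffset) {
    // First range whose begin is greater than pcOffset; the candidate
    // containing range, if any, is the one just before it.
    auto it = std::upper_bound(ranges.begin(), ranges.end(), pcOffset,
                               [](uint32_t pc, const Range &r) { return pc < r.begin; });
    if (it == ranges.begin())
        return nullptr;
    --it;
    return pcOffset < it->end ? &*it : nullptr;
}

int main() {
    std::vector<Range> ranges = {{0, 16}, {16, 40}, {64, 80}};
    assert(Lookup(ranges, 39)->begin == 16);
    assert(Lookup(ranges, 40) == nullptr);  // gap between 40 and 64
    assert(Lookup(ranges, 79)->begin == 64);
    return 0;
}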

View File

@@ -313,6 +313,33 @@ class AsmJSModule
bool clone(ExclusiveContext *cx, ExportedFunction *out) const;
};
class CodeRange
{
public:
enum Kind { Entry, Function };
private:
Kind kind_;
uint32_t beginOffset_;
uint32_t endOffset_;
uint32_t functionNameIndex_;
friend class AsmJSModule;
CodeRange(Kind kind, uint32_t beginOffset, uint32_t endOffset)
: kind_(kind), beginOffset_(beginOffset), endOffset_(endOffset)
{}
public:
CodeRange() {}
Kind kind() const { return kind_; }
uint32_t beginOffset() const { return beginOffset_; }
uint32_t endOffset() const { return endOffset_; }
PropertyName *functionName(const AsmJSModule &module) const {
JS_ASSERT(kind_ == Function);
return module.functionNames_[functionNameIndex_].name();
}
};
class Name
{
PropertyName *name_;
@@ -479,6 +506,7 @@ class AsmJSModule
Vector<Exit, 0, SystemAllocPolicy> exits_;
Vector<ExportedFunction, 0, SystemAllocPolicy> exports_;
Vector<jit::CallSite, 0, SystemAllocPolicy> callSites_;
Vector<CodeRange, 0, SystemAllocPolicy> codeRanges_;
Vector<Name, 0, SystemAllocPolicy> functionNames_;
Vector<jit::AsmJSHeapAccess, 0, SystemAllocPolicy> heapAccesses_;
Vector<jit::IonScriptCounts*, 0, SystemAllocPolicy> functionCounts_;
@@ -666,17 +694,21 @@ class AsmJSModule
if (len > pod.minHeapLength_)
pod.minHeapLength_ = len;
}
bool addFunctionName(PropertyName *name, uint32_t *nameIndex) {
bool addFunctionCodeRange(PropertyName *name, uint32_t beginOffset, uint32_t endOffset) {
JS_ASSERT(isFinishedWithModulePrologue() && !isFinishedWithFunctionBodies());
JS_ASSERT(name->isTenured());
if (functionNames_.length() > jit::CallSiteDesc::FUNCTION_NAME_INDEX_MAX)
JS_ASSERT(beginOffset <= endOffset);
JS_ASSERT_IF(!codeRanges_.empty(), codeRanges_.back().endOffset() <= beginOffset);
if (functionNames_.length() >= UINT32_MAX)
return false;
*nameIndex = functionNames_.length();
return functionNames_.append(name);
CodeRange codeRange(CodeRange::Function, beginOffset, endOffset);
codeRange.functionNameIndex_ = functionNames_.length();
return functionNames_.append(name) && codeRanges_.append(codeRange);
}
PropertyName *functionName(uint32_t i) const {
JS_ASSERT(isFinished());
return functionNames_[i].name();
bool addEntryCodeRange(unsigned exportIndex, uint32_t endOffset) {
uint32_t beginOffset = exports_[exportIndex].pod.codeOffset_;
CodeRange codeRange(CodeRange::Entry, beginOffset, endOffset);
return codeRanges_.append(codeRange);
}
bool addExit(unsigned ffiIndex, unsigned *exitIndex) {
JS_ASSERT(isFinishedWithModulePrologue() && !isFinishedWithFunctionBodies());
@@ -852,11 +884,15 @@ class AsmJSModule
// Lookup a callsite by the return pc (from the callee to the caller).
// Return null if no callsite was found.
const jit::CallSite *lookupCallSite(uint8_t *returnAddress) const;
const jit::CallSite *lookupCallSite(void *returnAddress) const;
// Lookup the code range containing the given pc. Return null if no
// code range was found.
const CodeRange *lookupCodeRange(void *pc) const;
// Lookup a heap access site by the pc which performs the access. Return
// null if no heap access was found.
const jit::AsmJSHeapAccess *lookupHeapAccess(uint8_t *pc) const;
const jit::AsmJSHeapAccess *lookupHeapAccess(void *pc) const;
// The global data section is placed after the executable code (i.e., at
// offset codeBytes_) in the module's linear allocation. The global data
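
One design point worth noting: CodeRange stores functionNameIndex_ rather than a PropertyName*, so it stays plain-old-data, which is what lets codeRanges_ round-trip through SerializePodVector/DeserializePodVector and ClonePodVector above. A hedged sketch of the index-instead-of-pointer pattern, with illustrative types only:

#include <cassert>
#include <cstdint>
#include <cstring>
#include <string>
#include <vector>

// POD range: safe to copy as raw bytes for serialization because it holds
// an index into the module's name table instead of a GC-thing pointer.
struct PodCodeRange {
    uint32_t begin, end, nameIndex;
};

struct Module {
    std::vector<std::string> names;    // serialized separately, like functionNames_
    std::vector<PodCodeRange> ranges;  // can be written/read as raw bytes

    const std::string &functionName(const PodCodeRange &r) const {
        return names[r.nameIndex];
    }
};

int main() {
    Module m;
    m.names.push_back("f");
    m.ranges.push_back(PodCodeRange{0, 16, 0});

    // Simulate a pod-vector round trip: raw byte copy, then names reattach
    // through the index.
    std::vector<PodCodeRange> copy(m.ranges.size());
    std::memcpy(copy.data(), m.ranges.data(), m.ranges.size() * sizeof(PodCodeRange));
    assert(m.functionName(copy[0]) == "f");
    return 0;
}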

View File

@@ -8573,7 +8573,7 @@ CodeGenerator::visitAsmJSCall(LAsmJSCall *ins)
masm.call(mir->desc(), ToRegister(ins->getOperand(mir->dynamicCalleeOperandIndex())));
break;
case MAsmJSCall::Callee::Builtin:
masm.call(mir->desc(), AsmJSImmPtr(callee.builtin()));
masm.call(AsmJSImmPtr(callee.builtin()));
break;
}

View File

@@ -141,12 +141,6 @@ static const uint32_t StackAlignment = 8;
static const uint32_t CodeAlignment = 8;
static const bool StackKeptAligned = true;
// As an invariant across architectures, within asm.js code:
// $sp % StackAlignment = (AsmJSFrameSize + masm.framePushed) % StackAlignment
// To achieve this on ARM, the first instruction of the asm.js prologue pushes
// lr without incrementing masm.framePushed.
static const uint32_t AsmJSFrameSize = sizeof(void*);
static const Scale ScalePointer = TimesFour;
class Instruction;

View File

@@ -53,7 +53,7 @@ CodeGeneratorARM::generateAsmJSPrologue(Label *stackOverflowLabel)
{
JS_ASSERT(gen->compilingAsmJS());
// See comment in Assembler-arm.h about AsmJSFrameSize.
// See comment in Assembler-shared.h about AsmJSFrameSize.
masm.push(lr);
// The asm.js over-recursed handler wants to be able to assume that SP

View File

@@ -1789,6 +1789,17 @@ MacroAssemblerARMCompat::callIon(Register callee)
}
}
void
MacroAssemblerARMCompat::callIonFromAsmJS(Register callee)
{
ma_callIonNoPush(callee);
// The Ion ABI has the callee pop the return address off the stack.
// The asm.js caller assumes that the call leaves sp unchanged, so bump
// the stack.
subPtr(Imm32(sizeof(void*)), sp);
}
void
MacroAssemblerARMCompat::reserveStack(uint32_t amount)
{

View File

@@ -563,33 +563,13 @@ class MacroAssemblerARMCompat : public MacroAssemblerARM
ma_movPatchable(ImmPtr(c->raw()), ScratchRegister, Always, rs);
ma_callIonHalfPush(ScratchRegister);
}
void appendCallSite(const CallSiteDesc &desc) {
// Add an extra sizeof(void*) to include the return address that was
// pushed by the call instruction (see CallSite::stackDepth).
enoughMemory_ &= append(CallSite(desc, currentOffset(), framePushed_ + AsmJSFrameSize));
}
void call(const CallSiteDesc &desc, const Register reg) {
call(reg);
appendCallSite(desc);
enoughMemory_ &= append(desc, currentOffset(), framePushed_);
}
void call(const CallSiteDesc &desc, Label *label) {
call(label);
appendCallSite(desc);
}
void call(const CallSiteDesc &desc, AsmJSImmPtr imm) {
call(imm);
appendCallSite(desc);
}
void callIonFromAsmJS(const Register reg) {
ma_callIonNoPush(reg);
appendCallSite(CallSiteDesc::Exit());
// The Ion ABI has the callee pop the return address off the stack.
// The asm.js caller assumes that the call leaves sp unchanged, so bump
// the stack.
subPtr(Imm32(sizeof(void*)), sp);
enoughMemory_ &= append(desc, currentOffset(), framePushed_);
}
void branch(JitCode *c) {
@@ -1279,6 +1259,7 @@ class MacroAssemblerARMCompat : public MacroAssemblerARM
// The only two methods that it is sane for independent code to use to
// make an Ion call.
void callIon(Register callee);
void callIonFromAsmJS(Register callee);
void reserveStack(uint32_t amount);
void freeStack(uint32_t amount);

View File

@@ -152,12 +152,6 @@ static const uint32_t StackAlignment = 8;
static const uint32_t CodeAlignment = 4;
static const bool StackKeptAligned = true;
// As an invariant across architectures, within asm.js code:
// $sp % StackAlignment = (AsmJSFrameSize + masm.framePushed) % StackAlignment
// To achieve this on MIPS, the first instruction of the asm.js prologue pushes
// ra without incrementing masm.framePushed.
static const uint32_t AsmJSFrameSize = sizeof(void*);
static const Scale ScalePointer = TimesFour;
// MIPS instruction types

View File

@@ -52,7 +52,7 @@ CodeGeneratorMIPS::generateAsmJSPrologue(Label *stackOverflowLabel)
{
JS_ASSERT(gen->compilingAsmJS());
// See comment in Assembler-mips.h about AsmJSFrameSize.
// See comment in Assembler-shared.h about AsmJSFrameSize.
masm.push(ra);
// The asm.js over-recursed handler wants to be able to assume that SP

View File

@@ -1525,6 +1525,16 @@ MacroAssemblerMIPSCompat::callIon(Register callee)
ma_callIon(callee);
}
}
void
MacroAssemblerMIPSCompat::callIonFromAsmJS(Register callee)
{
ma_callIonNoPush(callee);
// The Ion ABI has the callee pop the return address off the stack.
// The asm.js caller assumes that the call leaves sp unchanged, so bump
// the stack.
subPtr(Imm32(sizeof(void*)), StackPointer);
}
void
MacroAssemblerMIPSCompat::reserveStack(uint32_t amount)

View File

@@ -412,33 +412,13 @@ class MacroAssemblerMIPSCompat : public MacroAssemblerMIPS
ma_liPatchable(ScratchRegister, Imm32((uint32_t)c->raw()));
ma_callIonHalfPush(ScratchRegister);
}
void appendCallSite(const CallSiteDesc &desc) {
// Add an extra sizeof(void*) to include the return address that was
// pushed by the call instruction (see CallSite::stackDepth).
enoughMemory_ &= append(CallSite(desc, currentOffset(), framePushed_ + AsmJSFrameSize));
}
void call(const CallSiteDesc &desc, const Register reg) {
call(reg);
appendCallSite(desc);
enoughMemory_ &= append(desc, currentOffset(), framePushed_);
}
void call(const CallSiteDesc &desc, Label *label) {
call(label);
appendCallSite(desc);
}
void call(const CallSiteDesc &desc, AsmJSImmPtr imm) {
call(imm);
appendCallSite(desc);
}
void callIonFromAsmJS(const Register reg) {
ma_callIonNoPush(reg);
appendCallSite(CallSiteDesc::Exit());
// The Ion ABI has the callee pop the return address off the stack.
// The asm.js caller assumes that the call leaves sp unchanged, so bump
// the stack.
subPtr(Imm32(sizeof(void*)), StackPointer);
enoughMemory_ &= append(desc, currentOffset(), framePushed_);
}
void branch(JitCode *c) {
@@ -985,6 +965,7 @@ public:
// The only two methods that it is sane for independent code to use to
// make an Ion call.
void callIon(Register callee);
void callIonFromAsmJS(Register callee);
void reserveStack(uint32_t amount);
void freeStack(uint32_t amount);

View File

@@ -577,48 +577,19 @@ class CodeLocationLabel
}
};
// Describes the user-visible properties of a callsite.
//
// A few general notes about the stack-walking supported by CallSite(Desc):
// - This information facilitates stack-walking performed by FrameIter which
// is used by Error.stack and other user-visible stack-walking functions.
// - Ion/asm.js calling conventions do not maintain a frame-pointer so
// stack-walking must lookup the stack depth based on the PC.
// - Stack-walking only occurs from C++ after a synchronous calls (JS-to-JS and
// JS-to-C++). Thus, we do not need to map arbitrary PCs to stack-depths,
// just the return address at callsites.
// - An exception to the above rule is the interrupt callback which can happen
// at arbitrary PCs. In such cases, we drop frames from the stack-walk. In
// the future when a full PC->stack-depth map is maintained, we handle this
// case.
// While the frame-pointer chain allows the stack to be unwound without
// metadata, Error.stack still needs to know the line/column of every call in
// the chain. A CallSiteDesc describes the line/column of a single callsite.
// A CallSiteDesc is created by callers of MacroAssembler.
class CallSiteDesc
{
uint32_t line_;
uint32_t column_;
uint32_t functionNameIndex_;
static const uint32_t sEntryTrampoline = UINT32_MAX;
static const uint32_t sExit = UINT32_MAX - 1;
public:
static const uint32_t FUNCTION_NAME_INDEX_MAX = UINT32_MAX - 2;
CallSiteDesc() {}
CallSiteDesc(uint32_t line, uint32_t column, uint32_t functionNameIndex)
: line_(line), column_(column), functionNameIndex_(functionNameIndex)
{}
static CallSiteDesc Entry() { return CallSiteDesc(0, 0, sEntryTrampoline); }
static CallSiteDesc Exit() { return CallSiteDesc(0, 0, sExit); }
bool isEntry() const { return functionNameIndex_ == sEntryTrampoline; }
bool isExit() const { return functionNameIndex_ == sExit; }
bool isNormal() const { return !(isEntry() || isExit()); }
uint32_t line() const { JS_ASSERT(isNormal()); return line_; }
uint32_t column() const { JS_ASSERT(isNormal()); return column_; }
uint32_t functionNameIndex() const { JS_ASSERT(isNormal()); return functionNameIndex_; }
CallSiteDesc(uint32_t line, uint32_t column) : line_(line), column_(column) {}
uint32_t line() const { return line_; }
uint32_t column() const { return column_; }
};
// Adds to CallSiteDesc the metadata necessary to walk the stack given an
@@ -641,13 +612,21 @@ struct CallSite : public CallSiteDesc
uint32_t returnAddressOffset() const { return returnAddressOffset_; }
// The stackDepth measures the amount of stack space pushed since the
// function was called. In particular, this includes the word pushed by the
// call instruction on x86/x64.
uint32_t stackDepth() const { JS_ASSERT(!isEntry()); return stackDepth_; }
// function was called. In particular, this includes the pushed return
// address on all archs, whether it is pushed by the call instruction
// (x86/x64) or by the prologue (ARM/MIPS).
uint32_t stackDepth() const { return stackDepth_; }
};
typedef Vector<CallSite, 0, SystemAllocPolicy> CallSiteVector;
// As an invariant across architectures, within asm.js code:
// $sp % StackAlignment = (AsmJSFrameSize + masm.framePushed) % StackAlignment
// AsmJSFrameSize is 1 word, for the return address pushed by the call (or, in
// the case of ARM/MIPS, by the first instruction of the prologue). This means
// masm.framePushed never includes the pushed return address.
static const uint32_t AsmJSFrameSize = sizeof(void*);
// Summarizes a heap access made by asm.js code that needs to be patched later
// and/or looked up by the asm.js signal handlers. Different architectures need
// to know different things (x64: offset and length, ARM: where to patch in
@@ -821,7 +800,11 @@ class AssemblerShared
return !enoughMemory_;
}
bool append(CallSite callsite) { return callsites_.append(callsite); }
bool append(const CallSiteDesc &desc, size_t currentOffset, size_t framePushed) {
// framePushed does not include AsmJSFrameSize, so add it in here (see
// CallSite::stackDepth).
return callsites_.append(CallSite(desc, currentOffset, framePushed + AsmJSFrameSize));
}
CallSiteVector &&extractCallSites() { return Move(callsites_); }
bool append(AsmJSHeapAccess access) { return asmJSHeapAccesses_.append(access); }
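
The AsmJSFrameSize invariant above turns stack alignment into pure arithmetic: given masm.framePushed, the compiler can compute statically how much to grow the frame so that $sp is aligned at the next call. A small worked sketch of that computation (the helper name is hypothetical; asm.js codegen performs similar stack-decrement arithmetic):

#include <cassert>
#include <cstdint>

// A worked example of the invariant. Assume a 64-bit target, so the
// pushed return address is 8 bytes (AsmJSFrameSize == sizeof(void*) == 8).
const uint32_t ExampleFrameSize = 8;

// How much must framePushed grow so that, with 'bytes' of outgoing
// arguments on top, $sp is aligned at the instant of the nested call?
uint32_t StackDecrementForCall(uint32_t alignment, uint32_t framePushed, uint32_t bytes) {
    uint32_t total = ExampleFrameSize + framePushed + bytes;
    uint32_t aligned = (total + alignment - 1) & ~(alignment - 1);
    return aligned - (ExampleFrameSize + framePushed);
}

int main() {
    // x64-style: 16-byte alignment, nothing pushed yet, 20 bytes of args.
    // 8 + 24 == 32 is 16-byte aligned, so the frame must grow by 24.
    assert(StackDecrementForCall(16, 0, 20) == 24);
    return 0;
}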

View File

@@ -667,26 +667,23 @@ class MacroAssemblerX86Shared : public Assembler
bool buildFakeExitFrame(Register scratch, uint32_t *offset);
void callWithExitFrame(JitCode *target);
void callIon(Register callee) {
call(callee);
}
void appendCallSite(const CallSiteDesc &desc) {
// Add an extra sizeof(void*) to include the return address that was
// pushed by the call instruction (see CallSite::stackDepth).
enoughMemory_ &= append(CallSite(desc, currentOffset(), framePushed_ + AsmJSFrameSize));
}
void call(const CallSiteDesc &desc, Label *label) {
call(label);
appendCallSite(desc);
enoughMemory_ &= append(desc, currentOffset(), framePushed_);
}
void call(const CallSiteDesc &desc, Register reg) {
call(reg);
appendCallSite(desc);
enoughMemory_ &= append(desc, currentOffset(), framePushed_);
}
void callIonFromAsmJS(Register reg) {
call(CallSiteDesc::Exit(), reg);
void callIon(Register callee) {
call(callee);
}
void callIonFromAsmJS(Register callee) {
call(callee);
}
void call(AsmJSImmPtr target) {
mov(target, eax);
call(eax);
}
void checkStackAlignment() {

View File

@@ -185,13 +185,6 @@ static const uint32_t StackAlignment = 16;
static const bool StackKeptAligned = false;
static const uint32_t CodeAlignment = 8;
// As an invariant across architectures, within asm.js code:
// $sp % StackAlignment = (AsmJSFrameSize + masm.framePushed) % StackAlignment
// On x64, this naturally falls out of the fact that the 'call' instruction
// pushes the return address on the stack and masm.framePushed = 0 at the first
// instruction of the prologue.
static const uint32_t AsmJSFrameSize = sizeof(void*);
static const Scale ScalePointer = TimesEight;
} // namespace jit

View File

@@ -100,15 +100,6 @@ class MacroAssemblerX64 : public MacroAssemblerX86Shared
void call(ImmPtr target) {
call(ImmWord(uintptr_t(target.value)));
}
void call(AsmJSImmPtr target) {
mov(target, rax);
call(rax);
}
void call(const CallSiteDesc &desc, AsmJSImmPtr target) {
call(target);
appendCallSite(desc);
}
// Refers to the upper 32 bits of a 64-bit Value operand.
// On x86_64, the upper 32 bits do not necessarily only contain the type.

View File

@@ -113,13 +113,6 @@ static const uint32_t StackAlignment = 4;
static const bool StackKeptAligned = false;
static const uint32_t CodeAlignment = 8;
// As an invariant across architectures, within asm.js code:
// $sp % StackAlignment = (AsmJSFrameSize + masm.framePushed) % StackAlignment
// On x86, this naturally falls out of the fact that the 'call' instruction
// pushes the return address on the stack and masm.framePushed = 0 at the first
// instruction of the prologue.
static const uint32_t AsmJSFrameSize = sizeof(void*);
struct ImmTag : public Imm32
{
ImmTag(JSValueTag mask)
@@ -382,13 +375,6 @@ class Assembler : public AssemblerX86Shared
JmpSrc src = masm.call();
addPendingJump(src, target, Relocation::HARDCODED);
}
void call(AsmJSImmPtr target) {
// Moving to a register is suboptimal. To fix (use a single
// call-immediate instruction) we'll need to distinguish a new type of
// relative patch to an absolute address in AsmJSAbsoluteLink.
mov(target, eax);
call(eax);
}
// Emit a CALL or CMP (nop) instruction. ToggleCall can be used to patch
// this instruction.

View File

@@ -1114,10 +1114,6 @@ class MacroAssemblerX86 : public MacroAssemblerX86Shared
Push(dynStack);
call(target);
}
void call(const CallSiteDesc &desc, AsmJSImmPtr target) {
call(target);
appendCallSite(desc);
}
#ifdef JSGC_GENERATIONAL
void branchPtrInNurseryRange(Condition cond, Register ptr, Register temp, Label *label);

View File

@@ -41,7 +41,10 @@ BinarySearch(const Container& aContainer, size_t aBegin, size_t aEnd,
size_t high = aEnd;
while (low != high) {
size_t middle = low + (high - low) / 2;
const T& middleValue = aContainer[middle];
// Allow any intermediate type so long as it provides a suitable ordering
// relation.
const auto& middleValue = aContainer[middle];
MOZ_ASSERT(aContainer[low] <= aContainer[middle]);
MOZ_ASSERT(aContainer[middle] <= aContainer[high - 1]);
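
The switch from const T& to const auto& is what lets AsmJSModule binary-search a vector of CodeRanges with a size_t target: the element type and the target type only need the mixed-type ==/< operators defined earlier in the patch. A self-contained sketch of that pattern with a simplified search loop (mozilla::BinarySearch itself has a slightly different interface):

#include <cassert>
#include <cstddef>
#include <cstdint>
#include <vector>

struct Range { uint32_t begin, end; };  // half-open [begin, end)

// Mixed-type ordering, declared in the element's namespace so that
// argument-dependent lookup finds it from inside the template below.
bool operator==(size_t pc, const Range &r) { return pc >= r.begin && pc < r.end; }
bool operator<(size_t pc, const Range &r) { return pc < r.begin; }

// Simplified form of the BinarySearch loop after this patch: middleValue
// takes the container's element type while the target is another type
// entirely; only the mixed ==/< operators are required.
template <typename Container, typename Target>
bool BinarySearch(const Container &c, size_t begin, size_t end,
                  const Target &target, size_t *matchOrInsertionPoint) {
    size_t low = begin, high = end;
    while (low != high) {
        size_t middle = low + (high - low) / 2;
        const auto &middleValue = c[middle];
        if (target == middleValue) {
            *matchOrInsertionPoint = middle;
            return true;
        }
        if (target < middleValue)
            high = middle;
        else
            low = middle + 1;
    }
    *matchOrInsertionPoint = low;
    return false;
}

int main() {
    std::vector<Range> ranges = {{0, 16}, {16, 40}, {64, 80}};
    size_t match;
    assert(BinarySearch(ranges, 0, ranges.size(), size_t(17), &match) && match == 1);
    assert(!BinarySearch(ranges, 0, ranges.size(), size_t(50), &match));
    return 0;
}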