Bug 1229642 - Split wasm::Module out of AsmJSModule (r=bbouvier)

Luke Wagner 2015-12-28 17:39:21 -06:00
parent c99e07f6f3
commit 7963ee4e6d
60 changed files with 4535 additions and 4308 deletions


@ -19,12 +19,14 @@ class JSScript;
namespace js {
class Activation;
class AsmJSProfilingFrameIterator;
namespace jit {
class JitActivation;
class JitProfilingFrameIterator;
class JitcodeGlobalEntry;
} // namespace jit
namespace wasm {
class ProfilingFrameIterator;
} // namespace wasm
} // namespace js
namespace JS {
@ -49,15 +51,15 @@ class JS_PUBLIC_API(ProfilingFrameIterator)
static const unsigned StorageSpace = 8 * sizeof(void*);
mozilla::AlignedStorage<StorageSpace> storage_;
js::AsmJSProfilingFrameIterator& asmJSIter() {
js::wasm::ProfilingFrameIterator& asmJSIter() {
MOZ_ASSERT(!done());
MOZ_ASSERT(isAsmJS());
return *reinterpret_cast<js::AsmJSProfilingFrameIterator*>(storage_.addr());
return *reinterpret_cast<js::wasm::ProfilingFrameIterator*>(storage_.addr());
}
const js::AsmJSProfilingFrameIterator& asmJSIter() const {
const js::wasm::ProfilingFrameIterator& asmJSIter() const {
MOZ_ASSERT(!done());
MOZ_ASSERT(isAsmJS());
return *reinterpret_cast<const js::AsmJSProfilingFrameIterator*>(storage_.addr());
return *reinterpret_cast<const js::wasm::ProfilingFrameIterator*>(storage_.addr());
}
js::jit::JitProfilingFrameIterator& jitIter() {


@ -1,160 +0,0 @@
/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
* vim: set ts=8 sts=4 et sw=4 tw=99:
*
* Copyright 2014 Mozilla Foundation
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef asmjs_AsmJSFrameIterator_h
#define asmjs_AsmJSFrameIterator_h
#include <stdint.h>
#include "asmjs/Wasm.h"
#include "js/ProfilingFrameIterator.h"
class JSAtom;
namespace js {
class AsmJSActivation;
class AsmJSModule;
namespace jit { class MacroAssembler; class Label; }
namespace wasm { class CallSite; }
// Iterates over the frames of a single AsmJSActivation, called synchronously
// from C++ in the thread of the asm.js. The one exception is that this iterator
// may be called from the interrupt callback which may be called asynchronously
// from asm.js code; in this case, the backtrace may not be correct.
class AsmJSFrameIterator
{
const AsmJSModule* module_;
const wasm::CallSite* callsite_;
uint8_t* fp_;
// Really, a const AsmJSModule::CodeRange*, but no forward declarations of
// nested classes, so use void* to avoid pulling in all of AsmJSModule.h.
const void* codeRange_;
void settle();
public:
explicit AsmJSFrameIterator() : module_(nullptr) {}
explicit AsmJSFrameIterator(const AsmJSActivation& activation);
void operator++();
bool done() const { return !fp_; }
JSAtom* functionDisplayAtom() const;
unsigned computeLine(uint32_t* column) const;
};
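A minimal usage sketch of the iterator declared above (the same pattern carries over to the wasm::FrameIterator it becomes in this patch); the WalkAsmJSFrames wrapper and its body are illustrative only, not part of the commit:
static void
WalkAsmJSFrames(const js::AsmJSActivation& activation)
{
    // Walk the activation's frames from the innermost outward; the body only
    // records what the iterator exposes (display name, line, column).
    for (js::AsmJSFrameIterator iter(activation); !iter.done(); ++iter) {
        uint32_t column = 0;
        unsigned line = iter.computeLine(&column);
        JSAtom* name = iter.functionDisplayAtom();
        (void)name; (void)line;  // e.g. append to a backtrace buffer
    }
}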
// Iterates over the frames of a single AsmJSActivation, given an
// asynchronously-interrupted thread's state. If the activation's
// module is not in profiling mode, the activation is skipped.
class AsmJSProfilingFrameIterator
{
const AsmJSModule* module_;
uint8_t* callerFP_;
void* callerPC_;
void* stackAddress_;
wasm::ExitReason exitReason_;
// Really, a const AsmJSModule::CodeRange*, but no forward declarations of
// nested classes, so use void* to avoid pulling in all of AsmJSModule.h.
const void* codeRange_;
void initFromFP(const AsmJSActivation& activation);
public:
AsmJSProfilingFrameIterator() : codeRange_(nullptr) {}
explicit AsmJSProfilingFrameIterator(const AsmJSActivation& activation);
AsmJSProfilingFrameIterator(const AsmJSActivation& activation,
const JS::ProfilingFrameIterator::RegisterState& state);
void operator++();
bool done() const { return !codeRange_; }
void* stackAddress() const { MOZ_ASSERT(!done()); return stackAddress_; }
const char* label() const;
};
/******************************************************************************/
// Prologue/epilogue code generation.
struct AsmJSOffsets
{
MOZ_IMPLICIT AsmJSOffsets(uint32_t begin = 0,
uint32_t end = 0)
: begin(begin), end(end)
{}
// These define a [begin, end) contiguous range of instructions compiled
// into an AsmJSModule::CodeRange.
uint32_t begin;
uint32_t end;
};
struct AsmJSProfilingOffsets : AsmJSOffsets
{
MOZ_IMPLICIT AsmJSProfilingOffsets(uint32_t profilingReturn = 0)
: AsmJSOffsets(), profilingReturn(profilingReturn)
{}
// For CodeRanges with AsmJSProfilingOffsets, 'begin' is the offset of the
// profiling entry.
uint32_t profilingEntry() const { return begin; }
// The profiling return is the offset of the return instruction, which
// precedes the 'end' by a variable number of instructions due to
// out-of-line codegen.
uint32_t profilingReturn;
};
struct AsmJSFunctionOffsets : AsmJSProfilingOffsets
{
MOZ_IMPLICIT AsmJSFunctionOffsets(uint32_t nonProfilingEntry = 0,
uint32_t profilingJump = 0,
uint32_t profilingEpilogue = 0)
: AsmJSProfilingOffsets(),
nonProfilingEntry(nonProfilingEntry),
profilingJump(profilingJump),
profilingEpilogue(profilingEpilogue)
{}
// Function CodeRanges have an additional non-profiling entry that comes
// after the profiling entry and a non-profiling epilogue that comes before
// the profiling epilogue.
uint32_t nonProfilingEntry;
// When profiling is enabled, the 'nop' at offset 'profilingJump' is
// overwritten to be a jump to 'profilingEpilogue'.
uint32_t profilingJump;
uint32_t profilingEpilogue;
};
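The extra offsets above exist so profiling can be toggled after compilation. A hedged sketch of what that toggling amounts to (PatchJump and PatchNop are hypothetical helpers; the real patching code lives in the module implementation, which is not shown in this hunk):
static void
ToggleProfiling(uint8_t* codeBase, const AsmJSFunctionOffsets& offsets, bool enabled)
{
    // The function epilogue is compiled with a nop at 'profilingJump'. Turning
    // profiling on rewrites that nop into a jump to the profiling epilogue;
    // turning profiling off restores the nop.
    uint8_t* jumpSite = codeBase + offsets.profilingJump;
    uint8_t* target = codeBase + offsets.profilingEpilogue;
    if (enabled)
        PatchJump(jumpSite, target);   // hypothetical patching helper
    else
        PatchNop(jumpSite);            // hypothetical patching helper
}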
void
GenerateAsmJSExitPrologue(jit::MacroAssembler& masm, unsigned framePushed, wasm::ExitReason reason,
AsmJSProfilingOffsets* offsets, jit::Label* maybeEntry = nullptr);
void
GenerateAsmJSExitEpilogue(jit::MacroAssembler& masm, unsigned framePushed, wasm::ExitReason reason,
AsmJSProfilingOffsets* offsets);
void
GenerateAsmJSFunctionPrologue(jit::MacroAssembler& masm, unsigned framePushed,
AsmJSFunctionOffsets* offsets);
void
GenerateAsmJSFunctionEpilogue(jit::MacroAssembler& masm, unsigned framePushed,
AsmJSFunctionOffsets* offsets);
} // namespace js
#endif // asmjs_AsmJSFrameIterator_h


@ -20,10 +20,6 @@
#include "mozilla/PodOperations.h"
#ifdef MOZ_VTUNE
# include "vtune/VTuneWrapper.h"
#endif
#include "jscntxt.h"
#include "jsmath.h"
#include "jsprf.h"
@ -35,9 +31,6 @@
#include "frontend/BytecodeCompiler.h"
#include "jit/Ion.h"
#include "jit/JitCommon.h"
#ifdef JS_ION_PERF
# include "jit/PerfSpewer.h"
#endif
#include "vm/ArrayBufferObject.h"
#include "vm/SharedArrayObject.h"
#include "vm/StringBuffer.h"
@ -54,21 +47,6 @@ using namespace js::wasm;
using mozilla::IsNaN;
using mozilla::PodZero;
static bool
CloneModule(JSContext* cx, MutableHandle<AsmJSModuleObject*> moduleObj)
{
ScopedJSDeletePtr<AsmJSModule> module;
if (!moduleObj->module().clone(cx, &module))
return false;
AsmJSModuleObject* newModuleObj = AsmJSModuleObject::create(cx, &module);
if (!newModuleObj)
return false;
moduleObj.set(newModuleObj);
return true;
}
static bool
LinkFail(JSContext* cx, const char* str)
{
@ -127,10 +105,10 @@ HasPureCoercion(JSContext* cx, HandleValue v)
}
static bool
ValidateGlobalVariable(JSContext* cx, const AsmJSModule& module, AsmJSModule::Global& global,
ValidateGlobalVariable(JSContext* cx, const AsmJSModule::Global& global, uint8_t* globalData,
HandleValue importVal)
{
void* datum = module.globalData() + global.varGlobalDataOffset();
void* datum = globalData + global.varGlobalDataOffset();
switch (global.varInitKind()) {
case AsmJSModule::Global::InitConstant: {
@ -214,8 +192,8 @@ ValidateGlobalVariable(JSContext* cx, const AsmJSModule& module, AsmJSModule::Gl
}
static bool
ValidateFFI(JSContext* cx, AsmJSModule::Global& global, HandleValue importVal,
AutoObjectVector* ffis)
ValidateFFI(JSContext* cx, const AsmJSModule::Global& global, HandleValue importVal,
AutoVectorRooter<JSFunction*>* ffis)
{
RootedPropertyName field(cx, global.ffiField());
RootedValue v(cx);
@ -230,7 +208,7 @@ ValidateFFI(JSContext* cx, AsmJSModule::Global& global, HandleValue importVal,
}
static bool
ValidateArrayView(JSContext* cx, AsmJSModule::Global& global, HandleValue globalVal)
ValidateArrayView(JSContext* cx, const AsmJSModule::Global& global, HandleValue globalVal)
{
RootedPropertyName field(cx, global.maybeViewName());
if (!field)
@ -272,7 +250,7 @@ ValidateByteLength(JSContext* cx, HandleValue globalVal)
}
static bool
ValidateMathBuiltinFunction(JSContext* cx, AsmJSModule::Global& global, HandleValue globalVal)
ValidateMathBuiltinFunction(JSContext* cx, const AsmJSModule::Global& global, HandleValue globalVal)
{
RootedValue v(cx);
if (!GetDataProperty(cx, globalVal, cx->names().Math, &v))
@ -334,7 +312,7 @@ AsmJSSimdTypeToTypeDescrType(AsmJSSimdType type)
}
static bool
ValidateSimdType(JSContext* cx, AsmJSModule::Global& global, HandleValue globalVal,
ValidateSimdType(JSContext* cx, const AsmJSModule::Global& global, HandleValue globalVal,
MutableHandleValue out)
{
RootedValue v(cx);
@ -366,14 +344,14 @@ ValidateSimdType(JSContext* cx, AsmJSModule::Global& global, HandleValue globalV
}
static bool
ValidateSimdType(JSContext* cx, AsmJSModule::Global& global, HandleValue globalVal)
ValidateSimdType(JSContext* cx, const AsmJSModule::Global& global, HandleValue globalVal)
{
RootedValue _(cx);
return ValidateSimdType(cx, global, globalVal, &_);
}
static bool
ValidateSimdOperation(JSContext* cx, AsmJSModule::Global& global, HandleValue globalVal)
ValidateSimdOperation(JSContext* cx, const AsmJSModule::Global& global, HandleValue globalVal)
{
// SIMD operations are loaded from the SIMD type, so the type must have been
// validated before the operation.
@ -426,7 +404,7 @@ ValidateSimdOperation(JSContext* cx, AsmJSModule::Global& global, HandleValue gl
}
static bool
ValidateAtomicsBuiltinFunction(JSContext* cx, AsmJSModule::Global& global, HandleValue globalVal)
ValidateAtomicsBuiltinFunction(JSContext* cx, const AsmJSModule::Global& global, HandleValue globalVal)
{
RootedValue v(cx);
if (!GetDataProperty(cx, globalVal, cx->names().Atomics, &v))
@ -457,7 +435,7 @@ ValidateAtomicsBuiltinFunction(JSContext* cx, AsmJSModule::Global& global, Handl
}
static bool
ValidateConstant(JSContext* cx, AsmJSModule::Global& global, HandleValue globalVal)
ValidateConstant(JSContext* cx, const AsmJSModule::Global& global, HandleValue globalVal)
{
RootedPropertyName field(cx, global.constantName());
RootedValue v(cx, globalVal);
@ -486,12 +464,20 @@ ValidateConstant(JSContext* cx, AsmJSModule::Global& global, HandleValue globalV
}
static bool
LinkModuleToHeap(JSContext* cx, AsmJSModule& module, Handle<ArrayBufferObjectMaybeShared*> heap)
CheckBuffer(JSContext* cx, AsmJSModule& module, HandleValue bufferVal,
MutableHandle<ArrayBufferObjectMaybeShared*> buffer)
{
uint32_t heapLength = heap->byteLength();
if (module.isSharedView() && !IsSharedArrayBuffer(bufferVal))
return LinkFail(cx, "shared views can only be constructed onto SharedArrayBuffer");
if (!module.isSharedView() && !IsArrayBuffer(bufferVal))
return LinkFail(cx, "unshared views can only be constructed onto ArrayBuffer");
buffer.set(&AsAnyArrayBuffer(bufferVal));
uint32_t heapLength = buffer->byteLength();
if (!IsValidAsmJSHeapLength(heapLength)) {
ScopedJSFreePtr<char> msg(
UniqueChars msg(
JS_smprintf("ArrayBuffer byteLength 0x%x is not a valid heap length. The next "
"valid length is 0x%x",
heapLength,
@ -503,7 +489,7 @@ LinkModuleToHeap(JSContext* cx, AsmJSModule& module, Handle<ArrayBufferObjectMay
// loads and stores start on an aligned boundary and the heap byteLength has larger alignment.
MOZ_ASSERT((module.minHeapLength() - 1) <= INT32_MAX);
if (heapLength < module.minHeapLength()) {
ScopedJSFreePtr<char> msg(
UniqueChars msg(
JS_smprintf("ArrayBuffer byteLength of 0x%x is less than 0x%x (the size implied "
"by const heap accesses and/or change-heap minimum-length requirements).",
heapLength,
@ -512,60 +498,48 @@ LinkModuleToHeap(JSContext* cx, AsmJSModule& module, Handle<ArrayBufferObjectMay
}
if (heapLength > module.maxHeapLength()) {
ScopedJSFreePtr<char> msg(
UniqueChars msg(
JS_smprintf("ArrayBuffer byteLength 0x%x is greater than maximum length of 0x%x",
heapLength,
module.maxHeapLength()));
return LinkFail(cx, msg.get());
}
// If we've generated the code with signal handlers in mind (for bounds
// checks on x64 and for interrupt callback requesting on all platforms),
// we need to be able to use signals at runtime. In particular, a module
// can have been created using signals and cached, and executed without
// signals activated.
if (module.usesSignalHandlersForInterrupt() && !cx->canUseSignalHandlers())
return LinkFail(cx, "Code generated with signal handlers but signals are deactivated");
// Shell builtins may have disabled signal handlers since the module we're
// cloning was compiled. LookupAsmJSModuleInCache checks for signal handlers
// as well for the caching case.
if (module.wasm().compileArgs() != CompileArgs(cx))
return LinkFail(cx, "Signals have been toggled since compilation");
if (heap->is<ArrayBufferObject>()) {
Rooted<ArrayBufferObject*> abheap(cx, &heap->as<ArrayBufferObject>());
if (!ArrayBufferObject::prepareForAsmJS(cx, abheap, module.usesSignalHandlersForOOB()))
if (buffer->is<ArrayBufferObject>()) {
Rooted<ArrayBufferObject*> abheap(cx, &buffer->as<ArrayBufferObject>());
bool useSignalHandlers = module.wasm().compileArgs().useSignalHandlersForOOB;
if (!ArrayBufferObject::prepareForAsmJS(cx, abheap, useSignalHandlers))
return LinkFail(cx, "Unable to prepare ArrayBuffer for asm.js use");
}
module.initHeap(heap, cx);
return true;
}
static bool
DynamicallyLinkModule(JSContext* cx, const CallArgs& args, AsmJSModule& module)
{
module.setIsDynamicallyLinked(cx->runtime());
HandleValue globalVal = args.get(0);
HandleValue importVal = args.get(1);
HandleValue bufferVal = args.get(2);
Rooted<ArrayBufferObjectMaybeShared*> heap(cx);
if (module.hasArrayView()) {
if (module.isSharedView() && !IsSharedArrayBuffer(bufferVal))
return LinkFail(cx, "shared views can only be constructed onto SharedArrayBuffer");
if (!module.isSharedView() && !IsArrayBuffer(bufferVal))
return LinkFail(cx, "unshared views can only be constructed onto ArrayBuffer");
heap = &AsAnyArrayBuffer(bufferVal);
if (!LinkModuleToHeap(cx, module, heap))
return false;
}
Rooted<ArrayBufferObjectMaybeShared*> buffer(cx);
if (module.hasArrayView() && !CheckBuffer(cx, module, bufferVal, &buffer))
return false;
AutoObjectVector ffis(cx);
AutoVectorRooter<JSFunction*> ffis(cx);
if (!ffis.resize(module.numFFIs()))
return false;
for (unsigned i = 0; i < module.numGlobals(); i++) {
AsmJSModule::Global& global = module.global(i);
for (const AsmJSModule::Global& global : module.globals()) {
switch (global.which()) {
case AsmJSModule::Global::Variable:
if (!ValidateGlobalVariable(cx, module, global, importVal))
if (!ValidateGlobalVariable(cx, global, module.wasm().globalData(), importVal))
return false;
break;
case AsmJSModule::Global::FFI:
@ -604,15 +578,13 @@ DynamicallyLinkModule(JSContext* cx, const CallArgs& args, AsmJSModule& module)
}
}
for (unsigned i = 0; i < module.numExits(); i++) {
const AsmJSModule::Exit& exit = module.exit(i);
exit.datum(module).fun = &ffis[exit.ffiIndex()]->as<JSFunction>();
AutoVectorRooter<JSFunction*> imports(cx);
for (const AsmJSModule::Import& import : module.imports()) {
if (!imports.append(ffis[import.ffiIndex()]))
return false;
}
// See the comment in AllocateExecutableMemory.
ExecutableAllocator::makeExecutable(module.codeBase(), module.codeBytes());
return true;
return module.wasm().dynamicallyLink(cx, buffer, imports);
}
static bool
@ -641,10 +613,11 @@ ChangeHeap(JSContext* cx, AsmJSModule& module, const CallArgs& args)
MOZ_ASSERT(IsValidAsmJSHeapLength(heapLength));
if (!ArrayBufferObject::prepareForAsmJS(cx, newBuffer, module.usesSignalHandlersForOOB()))
bool useSignalHandlers = module.wasm().compileArgs().useSignalHandlersForOOB;
if (!ArrayBufferObject::prepareForAsmJS(cx, newBuffer, useSignalHandlers))
return false;
args.rval().set(BooleanValue(module.changeHeap(newBuffer, cx)));
args.rval().set(BooleanValue(module.wasm().changeHeap(newBuffer, cx)));
return true;
}
@ -655,20 +628,13 @@ static const unsigned ASM_MODULE_SLOT = 0;
static const unsigned ASM_EXPORT_INDEX_SLOT = 1;
static unsigned
FunctionToExportedFunctionIndex(HandleFunction fun)
FunctionToExportIndex(HandleFunction fun)
{
MOZ_ASSERT(IsAsmJSFunction(fun));
Value v = fun->getExtendedSlot(ASM_EXPORT_INDEX_SLOT);
return v.toInt32();
}
static const AsmJSModule::ExportedFunction&
FunctionToExportedFunction(HandleFunction fun, AsmJSModule& module)
{
unsigned funIndex = FunctionToExportedFunctionIndex(fun);
return module.exportedFunction(funIndex);
}
static AsmJSModule&
FunctionToEnclosingModule(HandleFunction fun)
{
@ -681,12 +647,15 @@ CallAsmJS(JSContext* cx, unsigned argc, Value* vp)
{
CallArgs callArgs = CallArgsFromVp(argc, vp);
RootedFunction callee(cx, &callArgs.callee().as<JSFunction>());
AsmJSModule& module = FunctionToEnclosingModule(callee);
const AsmJSModule::ExportedFunction& func = FunctionToExportedFunction(callee, module);
// The heap-changing function is a special-case and is implemented by C++.
if (func.isChangeHeap())
return ChangeHeap(cx, module, callArgs);
AsmJSModule& asmJSModule = FunctionToEnclosingModule(callee);
const AsmJSModule::Export& asmJSFunc = asmJSModule.exports()[FunctionToExportIndex(callee)];
if (asmJSFunc.isChangeHeap())
return ChangeHeap(cx, asmJSModule, callArgs);
Module& module = asmJSModule.wasm();
const Export& func = module.exports()[asmJSFunc.wasmIndex()];
// Enable/disable profiling in the asm.js module to match the current global
// profiling state. Don't do this if the module is already active on the
@ -703,7 +672,7 @@ CallAsmJS(JSContext* cx, unsigned argc, Value* vp)
// registers and stack memory and then calls into the internal entry point.
// The return value is stored in the first element of the array (which,
// therefore, must have length >= 1).
js::Vector<AsmJSModule::EntryArg, 8> coercedArgs(cx);
Vector<Module::EntryArg, 8> coercedArgs(cx);
if (!coercedArgs.resize(Max<size_t>(1, func.sig().args().length())))
return false;
@ -767,11 +736,11 @@ CallAsmJS(JSContext* cx, unsigned argc, Value* vp)
// that the optimized asm.js-to-Ion FFI call path (which we want to be
// very fast) can avoid doing so. The JitActivation is marked as
// inactive so stack iteration will skip over it.
AsmJSActivation activation(cx, module);
AsmJSActivation activation(cx, asmJSModule);
JitActivation jitActivation(cx, /* active */ false);
// Call the per-exported-function trampoline created by GenerateEntry.
AsmJSModule::CodePtr enter = module.entryTrampoline(func);
Module::EntryFuncPtr enter = module.entryTrampoline(func);
if (!CALL_GENERATED_2(enter, coercedArgs.begin(), module.globalData()))
return false;
}
@ -826,11 +795,14 @@ CallAsmJS(JSContext* cx, unsigned argc, Value* vp)
}
static JSFunction*
NewExportedFunction(JSContext* cx, const AsmJSModule::ExportedFunction& func,
NewExportedFunction(JSContext* cx, const AsmJSModule& module, const AsmJSModule::Export& func,
HandleObject moduleObj, unsigned exportIndex)
{
unsigned numArgs = func.isChangeHeap()
? 1
: module.wasm().exports()[func.wasmIndex()].sig().args().length();
RootedPropertyName name(cx, func.name());
unsigned numArgs = func.isChangeHeap() ? 1 : func.sig().args().length();
JSFunction* fun =
NewNativeConstructor(cx, CallAsmJS, numArgs, name,
gc::AllocKind::FUNCTION_EXTENDED, GenericObject,
@ -850,10 +822,12 @@ HandleDynamicLinkFailure(JSContext* cx, const CallArgs& args, AsmJSModule& modul
if (cx->isExceptionPending())
return false;
ScriptSource* source = module.scriptSource();
// Source discarding is allowed to affect JS semantics because it is never
// enabled for normal JS content.
bool haveSource = module.scriptSource()->hasSourceData();
if (!haveSource && !JSScript::loadSource(cx, module.scriptSource(), &haveSource))
bool haveSource = source->hasSourceData();
if (!haveSource && !JSScript::loadSource(cx, source, &haveSource))
return false;
if (!haveSource) {
JS_ReportError(cx, "asm.js link failure with source discarding enabled");
@ -862,7 +836,7 @@ HandleDynamicLinkFailure(JSContext* cx, const CallArgs& args, AsmJSModule& modul
uint32_t begin = module.srcBodyStart(); // starts right after 'use asm'
uint32_t end = module.srcEndBeforeCurly();
Rooted<JSFlatString*> src(cx, module.scriptSource()->substringDontDeflate(cx, begin, end));
Rooted<JSFlatString*> src(cx, source->substringDontDeflate(cx, begin, end));
if (!src)
return false;
@ -884,8 +858,8 @@ HandleDynamicLinkFailure(JSContext* cx, const CallArgs& args, AsmJSModule& modul
formals.infallibleAppend(module.bufferArgumentName());
CompileOptions options(cx);
options.setMutedErrors(module.scriptSource()->mutedErrors())
.setFile(module.scriptSource()->filename())
options.setMutedErrors(source->mutedErrors())
.setFile(source->filename())
.setNoScriptRval(false);
// The exported function inherits an implicit strict context if the module
@ -910,112 +884,27 @@ HandleDynamicLinkFailure(JSContext* cx, const CallArgs& args, AsmJSModule& modul
return Invoke(cx, args, args.isConstructing() ? CONSTRUCT : NO_CONSTRUCT);
}
#ifdef MOZ_VTUNE
static bool
SendFunctionsToVTune(JSContext* cx, AsmJSModule& module)
{
uint8_t* base = module.codeBase();
for (unsigned i = 0; i < module.numProfiledFunctions(); i++) {
const AsmJSModule::ProfiledFunction& func = module.profiledFunction(i);
uint8_t* start = base + func.pod.startCodeOffset;
uint8_t* end = base + func.pod.endCodeOffset;
MOZ_ASSERT(end >= start);
unsigned method_id = iJIT_GetNewMethodID();
if (method_id == 0)
return false;
JSAutoByteString bytes;
const char* method_name = AtomToPrintableString(cx, func.name, &bytes);
if (!method_name)
return false;
iJIT_Method_Load method;
method.method_id = method_id;
method.method_name = const_cast<char*>(method_name);
method.method_load_address = (void*)start;
method.method_size = unsigned(end - start);
method.line_number_size = 0;
method.line_number_table = nullptr;
method.class_id = 0;
method.class_file_name = nullptr;
method.source_file_name = nullptr;
iJIT_NotifyEvent(iJVM_EVENT_TYPE_METHOD_LOAD_FINISHED, (void*)&method);
}
return true;
}
#endif
#ifdef JS_ION_PERF
static bool
SendFunctionsToPerf(JSContext* cx, AsmJSModule& module)
{
if (!PerfFuncEnabled())
return true;
uintptr_t base = (uintptr_t) module.codeBase();
const char* filename = module.scriptSource()->filename();
for (unsigned i = 0; i < module.numProfiledFunctions(); i++) {
const AsmJSModule::ProfiledFunction& func = module.profiledFunction(i);
uintptr_t start = base + (unsigned long) func.pod.startCodeOffset;
uintptr_t end = base + (unsigned long) func.pod.endCodeOffset;
MOZ_ASSERT(end >= start);
size_t size = end - start;
JSAutoByteString bytes;
const char* name = AtomToPrintableString(cx, func.name, &bytes);
if (!name)
return false;
writePerfSpewerAsmJSFunctionMap(start, size, filename, func.pod.lineno,
func.pod.columnIndex, name);
}
return true;
}
#endif
static bool
SendModuleToAttachedProfiler(JSContext* cx, AsmJSModule& module)
{
#if defined(MOZ_VTUNE)
if (IsVTuneProfilingActive() && !SendFunctionsToVTune(cx, module))
return false;
#endif
#if defined(JS_ION_PERF)
if (!SendFunctionsToPerf(cx, module))
return false;
#endif
return true;
}
static JSObject*
CreateExportObject(JSContext* cx, Handle<AsmJSModuleObject*> moduleObj)
CreateExportObject(JSContext* cx, HandleAsmJSModule moduleObj)
{
AsmJSModule& module = moduleObj->module();
const AsmJSModule::ExportVector& exports = module.exports();
if (module.numExportedFunctions() == 1) {
const AsmJSModule::ExportedFunction& func = module.exportedFunction(0);
if (exports.length() == 1) {
const AsmJSModule::Export& func = exports[0];
if (!func.maybeFieldName())
return NewExportedFunction(cx, func, moduleObj, 0);
return NewExportedFunction(cx, module, func, moduleObj, 0);
}
gc::AllocKind allocKind = gc::GetGCObjectKind(module.numExportedFunctions());
gc::AllocKind allocKind = gc::GetGCObjectKind(exports.length());
RootedPlainObject obj(cx, NewBuiltinClassInstance<PlainObject>(cx, allocKind));
if (!obj)
return nullptr;
for (unsigned i = 0; i < module.numExportedFunctions(); i++) {
const AsmJSModule::ExportedFunction& func = module.exportedFunction(i);
for (unsigned i = 0; i < exports.length(); i++) {
const AsmJSModule::Export& func = exports[i];
RootedFunction fun(cx, NewExportedFunction(cx, func, moduleObj, i));
RootedFunction fun(cx, NewExportedFunction(cx, module, func, moduleObj, i));
if (!fun)
return nullptr;
@ -1048,19 +937,23 @@ LinkAsmJS(JSContext* cx, unsigned argc, JS::Value* vp)
RootedFunction fun(cx, &args.callee().as<JSFunction>());
Rooted<AsmJSModuleObject*> moduleObj(cx, &ModuleFunctionToModuleObject(fun));
// When a module is linked, it is dynamically specialized to the given
// arguments (buffer, ffis). Thus, if the module is linked again (it is just
// a function so it can be called multiple times), we need to clone a new
// module.
if (moduleObj->module().isDynamicallyLinked() && !CloneModule(cx, &moduleObj))
return false;
if (moduleObj->module().wasm().dynamicallyLinked()) {
Rooted<AsmJSModuleObject*> clone(cx, AsmJSModuleObject::create(cx));
if (!clone)
return false;
if (!moduleObj->module().clone(cx, clone))
return false;
moduleObj = clone;
}
AsmJSModule& module = moduleObj->module();
AutoFlushICache afc("LinkAsmJS");
module.setAutoFlushICacheRange();
// Link the module by performing the link-time validation checks in the
// asm.js spec and then patching the generated module to associate it with
// the given heap (ArrayBuffer) and a new global data segment (the closure
@ -1072,11 +965,6 @@ LinkAsmJS(JSContext* cx, unsigned argc, JS::Value* vp)
return HandleDynamicLinkFailure(cx, args, module, name);
}
// Notify profilers so that asm.js generated code shows up with JS function
// names and lines in native (i.e., not SPS) profilers.
if (!SendModuleToAttachedProfiler(cx, module))
return false;
// Link-time validation succeeded, so wrap all the exported functions with
// CallAsmJS builtins that trampoline into the generated code.
JSObject* obj = CreateExportObject(cx, moduleObj);
@ -1252,7 +1140,7 @@ js::IsAsmJSModuleLoadedFromCache(JSContext* cx, unsigned argc, Value* vp)
return false;
}
bool loadedFromCache = ModuleFunctionToModuleObject(fun).module().loadedFromCache();
bool loadedFromCache = ModuleFunctionToModuleObject(fun).module().wasm().loadedFromCache();
args.rval().set(BooleanValue(loadedFromCache));
return true;
@ -1277,7 +1165,7 @@ JSString*
js::AsmJSFunctionToString(JSContext* cx, HandleFunction fun)
{
AsmJSModule& module = FunctionToEnclosingModule(fun);
const AsmJSModule::ExportedFunction& f = FunctionToExportedFunction(fun, module);
const AsmJSModule::Export& f = module.exports()[FunctionToExportIndex(fun)];
uint32_t begin = module.srcStart() + f.startOffsetInModule();
uint32_t end = module.srcStart() + f.endOffsetInModule();
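Taken together, the LinkAsmJS changes above reduce to the control flow below. This is a condensed sketch using the names visible in the diff, with rooting and error handling elided, not a verbatim copy of the new function:
static JSObject*
LinkSketch(JSContext* cx, const CallArgs& args, Rooted<AsmJSModuleObject*>& moduleObj)
{
    // A module function may be called repeatedly, and each call specializes
    // the module to its (global, imports, buffer) arguments, so an already
    // dynamically linked module is cloned before being linked again.
    if (moduleObj->module().wasm().dynamicallyLinked()) {
        /* AsmJSModuleObject::create(cx) + module().clone(cx, clone) */
    }

    AsmJSModule& module = moduleObj->module();
    if (!DynamicallyLinkModule(cx, args, module))
        return nullptr;  // the real code falls back to HandleDynamicLinkFailure

    // On success every export is wrapped in a CallAsmJS builtin that
    // trampolines into the generated code.
    return CreateExportObject(cx, moduleObj);
}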

File diff suppressed because it is too large

File diff suppressed because it is too large


@ -19,7 +19,6 @@
#include "asmjs/AsmJSValidate.h"
#include "mozilla/Move.h"
#include "mozilla/UniquePtr.h"
#include "jsmath.h"
#include "jsprf.h"
@ -49,7 +48,6 @@ using mozilla::IsNaN;
using mozilla::IsNegativeZero;
using mozilla::Move;
using mozilla::PositiveInfinity;
using mozilla::UniquePtr;
using JS::AsmJSOption;
using JS::GenericNaN;
@ -1164,13 +1162,13 @@ class MOZ_STACK_CLASS ModuleValidator
Scalar::Type type;
};
class ExitDescriptor
class ImportDescriptor
{
PropertyName* name_;
const LifoSig* sig_;
public:
ExitDescriptor(PropertyName* name, const LifoSig& sig)
ImportDescriptor(PropertyName* name, const LifoSig& sig)
: name_(name), sig_(&sig)
{}
@ -1189,7 +1187,7 @@ class MOZ_STACK_CLASS ModuleValidator
static HashNumber hash(const Lookup& l) {
return HashGeneric(l.name_, l.sig_.hash());
}
static bool match(const ExitDescriptor& lhs, const Lookup& rhs) {
static bool match(const ImportDescriptor& lhs, const Lookup& rhs) {
return lhs.name_ == rhs.name_ && *lhs.sig_ == rhs.sig_;
}
};
@ -1202,36 +1200,37 @@ class MOZ_STACK_CLASS ModuleValidator
typedef Vector<ArrayView> ArrayViewVector;
public:
typedef HashMap<ExitDescriptor, unsigned, ExitDescriptor> ExitMap;
typedef HashMap<ImportDescriptor, unsigned, ImportDescriptor> ImportMap;
private:
ExclusiveContext* cx_;
AsmJSParser& parser_;
ExclusiveContext* cx_;
AsmJSParser& parser_;
ModuleGenerator mg_;
ModuleGenerator mg_;
AsmJSModule* module_;
LifoAlloc validationLifo_;
FuncVector functions_;
FuncPtrTableVector funcPtrTables_;
GlobalMap globals_;
ArrayViewVector arrayViews_;
ExitMap exits_;
LifoAlloc validationLifo_;
FuncVector functions_;
FuncPtrTableVector funcPtrTables_;
GlobalMap globals_;
ArrayViewVector arrayViews_;
ImportMap imports_;
MathNameMap standardLibraryMathNames_;
AtomicsNameMap standardLibraryAtomicsNames_;
SimdOperationNameMap standardLibrarySimdOpNames_;
MathNameMap standardLibraryMathNames_;
AtomicsNameMap standardLibraryAtomicsNames_;
SimdOperationNameMap standardLibrarySimdOpNames_;
ParseNode* moduleFunctionNode_;
PropertyName* moduleFunctionName_;
ParseNode* moduleFunctionNode_;
PropertyName* moduleFunctionName_;
UniquePtr<char[], JS::FreePolicy> errorString_;
uint32_t errorOffset_;
bool errorOverRecursed_;
UniqueChars errorString_;
uint32_t errorOffset_;
bool errorOverRecursed_;
bool canValidateChangeHeap_;
bool hasChangeHeap_;
bool supportsSimd_;
bool atomicsPresent_;
bool canValidateChangeHeap_;
bool hasChangeHeap_;
bool supportsSimd_;
bool atomicsPresent_;
public:
ModuleValidator(ExclusiveContext* cx, AsmJSParser& parser)
@ -1243,7 +1242,7 @@ class MOZ_STACK_CLASS ModuleValidator
funcPtrTables_(cx),
globals_(cx),
arrayViews_(cx),
exits_(cx),
imports_(cx),
standardLibraryMathNames_(cx),
standardLibraryAtomicsNames_(cx),
standardLibrarySimdOpNames_(cx),
@ -1303,8 +1302,8 @@ class MOZ_STACK_CLASS ModuleValidator
public:
bool init() {
if (!globals_.init() || !exits_.init())
bool init(HandleAsmJSModule moduleObj) {
if (!globals_.init() || !imports_.init())
return false;
if (!standardLibraryMathNames_.init() ||
@ -1371,11 +1370,34 @@ class MOZ_STACK_CLASS ModuleValidator
// js::FunctionToString.
bool strict = parser_.pc->sc->strict() && !parser_.pc->sc->hasExplicitUseStrict();
return mg_.init(parser_.ss, srcStart, srcBodyStart, strict);
module_ = cx_->new_<AsmJSModule>(parser_.ss, srcStart, srcBodyStart, strict);
if (!module_)
return false;
moduleObj->setModule(module_);
return mg_.init();
}
bool finish(ScopedJSDeletePtr<AsmJSModule>* module, SlowFunctionVector* slowFuncs) {
return mg_.finish(parser_.tokenStream, module, slowFuncs);
bool finish(SlowFunctionVector* slowFuncs) {
uint32_t endBeforeCurly = tokenStream().currentToken().pos.end;
TokenPos pos;
JS_ALWAYS_TRUE(tokenStream().peekTokenPos(&pos, TokenStream::Operand));
uint32_t endAfterCurly = pos.end;
auto usesHeap = Module::HeapBool(module_->hasArrayView());
auto sharedHeap = Module::SharedBool(module_->isSharedView());
UniqueChars filename = make_string_copy(parser_.ss->filename());
if (!filename)
return false;
UniqueStaticLinkData linkData;
Module* wasm = mg_.finish(usesHeap, sharedHeap, Move(filename), &linkData, slowFuncs);
if (!wasm)
return false;
module_->finish(wasm, Move(linkData), endBeforeCurly, endAfterCurly);
return true;
}
// Mutable interface.
@ -1384,140 +1406,127 @@ class MOZ_STACK_CLASS ModuleValidator
void initImportArgumentName(PropertyName* n) { module().initImportArgumentName(n); }
void initBufferArgumentName(PropertyName* n) { module().initBufferArgumentName(n); }
bool addGlobalVarInit(PropertyName* varName, const NumLit& lit, bool isConst) {
// The type of a const is the exact type of the literal (since its value
// cannot change) which is more precise than the corresponding vartype.
Type type = isConst ? Type::lit(lit) : Type::var(lit.type());
bool addGlobalVarInit(PropertyName* var, const NumLit& lit, bool isConst) {
uint32_t globalDataOffset;
if (!module().addGlobalVarInit(lit.value(), &globalDataOffset))
if (!mg_.allocateGlobalVar(lit.type(), &globalDataOffset))
return false;
Global::Which which = isConst ? Global::ConstantLiteral : Global::Variable;
Global* global = validationLifo_.new_<Global>(which);
if (!global)
return false;
global->u.varOrConst.globalDataOffset_ = globalDataOffset;
global->u.varOrConst.type_ = type.which();
global->u.varOrConst.type_ = (isConst ? Type::lit(lit) : Type::var(lit.type())).which();
if (isConst)
global->u.varOrConst.literalValue_ = lit;
return globals_.putNew(varName, global);
return globals_.putNew(var, global) &&
module().addGlobalVarInit(lit.value(), globalDataOffset);
}
bool addGlobalVarImport(PropertyName* varName, PropertyName* fieldName, ValType importType,
bool isConst)
{
bool addGlobalVarImport(PropertyName* var, PropertyName* field, ValType type, bool isConst) {
uint32_t globalDataOffset;
if (!module().addGlobalVarImport(fieldName, importType, &globalDataOffset))
if (!mg_.allocateGlobalVar(type, &globalDataOffset))
return false;
Global::Which which = isConst ? Global::ConstantImport : Global::Variable;
Global* global = validationLifo_.new_<Global>(which);
if (!global)
return false;
global->u.varOrConst.globalDataOffset_ = globalDataOffset;
global->u.varOrConst.type_ = Type::var(importType).which();
return globals_.putNew(varName, global);
global->u.varOrConst.type_ = Type::var(type).which();
return globals_.putNew(var, global) &&
module().addGlobalVarImport(field, type, globalDataOffset);
}
bool addArrayView(PropertyName* varName, Scalar::Type vt, PropertyName* maybeField)
{
if (!arrayViews_.append(ArrayView(varName, vt)))
bool addArrayView(PropertyName* var, Scalar::Type vt, PropertyName* maybeField) {
if (!arrayViews_.append(ArrayView(var, vt)))
return false;
Global* global = validationLifo_.new_<Global>(Global::ArrayView);
if (!global)
return false;
if (!module().addArrayView(vt, maybeField))
return false;
global->u.viewInfo.viewType_ = vt;
return globals_.putNew(varName, global);
return globals_.putNew(var, global) &&
module().addArrayView(vt, maybeField);
}
bool addMathBuiltinFunction(PropertyName* varName, AsmJSMathBuiltinFunction func,
PropertyName* fieldName)
bool addMathBuiltinFunction(PropertyName* var, AsmJSMathBuiltinFunction func,
PropertyName* field)
{
if (!module().addMathBuiltinFunction(func, fieldName))
return false;
Global* global = validationLifo_.new_<Global>(Global::MathBuiltinFunction);
if (!global)
return false;
global->u.mathBuiltinFunc_ = func;
return globals_.putNew(varName, global);
return globals_.putNew(var, global) &&
module().addMathBuiltinFunction(func, field);
}
private:
bool addGlobalDoubleConstant(PropertyName* varName, double constant) {
bool addGlobalDoubleConstant(PropertyName* var, double constant) {
Global* global = validationLifo_.new_<Global>(Global::ConstantLiteral);
if (!global)
return false;
global->u.varOrConst.type_ = Type::Double;
global->u.varOrConst.literalValue_ = NumLit(NumLit::Double, DoubleValue(constant));
return globals_.putNew(varName, global);
return globals_.putNew(var, global);
}
public:
bool addMathBuiltinConstant(PropertyName* varName, double constant, PropertyName* fieldName) {
if (!module().addMathBuiltinConstant(constant, fieldName))
return false;
return addGlobalDoubleConstant(varName, constant);
bool addMathBuiltinConstant(PropertyName* var, double constant, PropertyName* field) {
return addGlobalDoubleConstant(var, constant) &&
module().addMathBuiltinConstant(constant, field);
}
bool addGlobalConstant(PropertyName* varName, double constant, PropertyName* fieldName) {
if (!module().addGlobalConstant(constant, fieldName))
return false;
return addGlobalDoubleConstant(varName, constant);
bool addGlobalConstant(PropertyName* var, double constant, PropertyName* field) {
return addGlobalDoubleConstant(var, constant) &&
module().addGlobalConstant(constant, field);
}
bool addAtomicsBuiltinFunction(PropertyName* varName, AsmJSAtomicsBuiltinFunction func,
PropertyName* fieldName)
bool addAtomicsBuiltinFunction(PropertyName* var, AsmJSAtomicsBuiltinFunction func,
PropertyName* field)
{
if (!module().addAtomicsBuiltinFunction(func, fieldName))
return false;
Global* global = validationLifo_.new_<Global>(Global::AtomicsBuiltinFunction);
if (!global)
return false;
atomicsPresent_ = true;
global->u.atomicsBuiltinFunc_ = func;
return globals_.putNew(varName, global);
return globals_.putNew(var, global) &&
module().addAtomicsBuiltinFunction(func, field);
}
bool addSimdCtor(PropertyName* varName, AsmJSSimdType type, PropertyName* fieldName) {
if (!module().addSimdCtor(type, fieldName))
return false;
bool addSimdCtor(PropertyName* var, AsmJSSimdType type, PropertyName* field) {
Global* global = validationLifo_.new_<Global>(Global::SimdCtor);
if (!global)
return false;
global->u.simdCtorType_ = type;
return globals_.putNew(varName, global);
return globals_.putNew(var, global) &&
module().addSimdCtor(type, field);
}
bool addSimdOperation(PropertyName* varName, AsmJSSimdType type, AsmJSSimdOperation op,
PropertyName* typeVarName, PropertyName* opName)
bool addSimdOperation(PropertyName* var, AsmJSSimdType type, AsmJSSimdOperation op,
PropertyName* opName)
{
if (!module().addSimdOperation(type, op, opName))
return false;
Global* global = validationLifo_.new_<Global>(Global::SimdOperation);
if (!global)
return false;
global->u.simdOp.type_ = type;
global->u.simdOp.which_ = op;
return globals_.putNew(varName, global);
return globals_.putNew(var, global) &&
module().addSimdOperation(type, op, opName);
}
bool addByteLength(PropertyName* name) {
canValidateChangeHeap_ = true;
if (!module().addByteLength())
return false;
Global* global = validationLifo_.new_<Global>(Global::ByteLength);
return global && globals_.putNew(name, global);
return global && globals_.putNew(name, global) &&
module().addByteLength();
}
bool addChangeHeap(PropertyName* name, ParseNode* fn, uint32_t mask, uint32_t min, uint32_t max) {
hasChangeHeap_ = true;
module().addChangeHeap(mask, min, max);
Global* global = validationLifo_.new_<Global>(Global::ChangeHeap);
if (!global)
return false;
global->u.changeHeap.srcBegin_ = fn->pn_pos.begin;
global->u.changeHeap.srcEnd_ = fn->pn_pos.end;
return globals_.putNew(name, global);
return globals_.putNew(name, global) &&
module().addChangeHeap(mask, min, max);
}
bool addArrayViewCtor(PropertyName* varName, Scalar::Type vt, PropertyName* fieldName) {
bool addArrayViewCtor(PropertyName* var, Scalar::Type vt, PropertyName* field) {
Global* global = validationLifo_.new_<Global>(Global::ArrayViewCtor);
if (!global)
return false;
if (!module().addArrayViewCtor(vt, fieldName))
return false;
global->u.viewInfo.viewType_ = vt;
return globals_.putNew(varName, global);
return globals_.putNew(var, global) &&
module().addArrayViewCtor(vt, field);
}
bool addFFI(PropertyName* varName, PropertyName* field) {
bool addFFI(PropertyName* var, PropertyName* field) {
Global* global = validationLifo_.new_<Global>(Global::FFI);
if (!global)
return false;
@ -1525,19 +1534,22 @@ class MOZ_STACK_CLASS ModuleValidator
if (!module().addFFI(field, &index))
return false;
global->u.ffiIndex_ = index;
return globals_.putNew(varName, global);
return globals_.putNew(var, global);
}
bool addExportedFunction(const Func& func, PropertyName* maybeFieldName) {
bool addExport(const Func& func, PropertyName* maybeFieldName) {
MallocSig::ArgVector args;
if (!args.appendAll(func.sig().args()))
return false;
MallocSig sig(Move(args), func.sig().ret());
return module().addExportedFunction(func.name(), func.index(), func.srcBegin(),
func.srcEnd(), maybeFieldName, Move(sig));
uint32_t wasmIndex;
if (!mg_.declareExport(Move(sig), func.index(), &wasmIndex))
return false;
return module().addExport(func.name(), maybeFieldName, wasmIndex,
func.srcBegin(), func.srcEnd());
}
bool addExportedChangeHeap(PropertyName* name, const Global& g, PropertyName* maybeFieldName) {
return module().addExportedChangeHeap(name, g.changeHeapSrcBegin(), g.changeHeapSrcEnd(),
maybeFieldName);
bool addChangeHeapExport(PropertyName* name, const Global& g, PropertyName* maybeFieldName) {
return module().addExport(name, maybeFieldName, AsmJSModule::Export::ChangeHeap,
g.changeHeapSrcBegin(), g.changeHeapSrcEnd());
}
private:
const LifoSig* getLifoSig(const LifoSig& sig) {
@ -1582,29 +1594,31 @@ class MOZ_STACK_CLASS ModuleValidator
FuncPtrTable* t = validationLifo_.new_<FuncPtrTable>(cx_, name, firstUse, *lifoSig, mask);
return t && funcPtrTables_.append(t);
}
bool defineFuncPtrTable(uint32_t funcPtrTableIndex, ModuleGenerator::FuncIndexVector&& elems) {
bool defineFuncPtrTable(uint32_t funcPtrTableIndex, const Vector<uint32_t>& elems) {
FuncPtrTable& table = *funcPtrTables_[funcPtrTableIndex];
if (table.defined())
return false;
table.define();
return mg_.defineFuncPtrTable(funcPtrTableIndex, Move(elems));
mg_.defineFuncPtrTable(funcPtrTableIndex, elems);
return true;
}
bool addExit(PropertyName* name, MallocSig&& sig, unsigned ffiIndex, unsigned* exitIndex,
bool addImport(PropertyName* name, MallocSig&& sig, unsigned ffiIndex, unsigned* importIndex,
const LifoSig** lifoSig)
{
ExitDescriptor::Lookup lookup(name, sig);
ExitMap::AddPtr p = exits_.lookupForAdd(lookup);
ImportDescriptor::Lookup lookup(name, sig);
ImportMap::AddPtr p = imports_.lookupForAdd(lookup);
if (p) {
*lifoSig = &p->key().sig();
*exitIndex = p->value();
*importIndex = p->value();
return true;
}
*lifoSig = getLifoSig(sig);
if (!*lifoSig)
return false;
if (!module().addExit(Move(sig), ffiIndex, exitIndex))
if (!mg_.declareImport(Move(sig), importIndex))
return false;
return exits_.add(p, ExitDescriptor(name, **lifoSig), *exitIndex);
return imports_.add(p, ImportDescriptor(name, **lifoSig), *importIndex) &&
module().addImport(ffiIndex, *importIndex);
}
bool tryOnceToValidateChangeHeap() {
@ -1636,7 +1650,7 @@ class MOZ_STACK_CLASS ModuleValidator
MOZ_ASSERT(errorOffset_ == UINT32_MAX);
MOZ_ASSERT(str);
errorOffset_ = offset;
errorString_ = DuplicateString(cx_, str);
errorString_ = make_string_copy(str);
return false;
}
@ -1692,7 +1706,7 @@ class MOZ_STACK_CLASS ModuleValidator
ParseNode* moduleFunctionNode() const { return moduleFunctionNode_; }
PropertyName* moduleFunctionName() const { return moduleFunctionName_; }
ModuleGenerator& mg() { return mg_; }
AsmJSModule& module() const { return mg_.module(); }
AsmJSModule& module() const { return *module_; }
AsmJSParser& parser() const { return parser_; }
TokenStream& tokenStream() const { return parser_.tokenStream; }
bool supportsSimd() const { return supportsSimd_; }
@ -1753,9 +1767,19 @@ class MOZ_STACK_CLASS ModuleValidator
return false;
}
void startFunctionBodies() {
if (atomicsPresent_)
bool startFunctionBodies() {
if (atomicsPresent_) {
#if defined(ENABLE_SHARED_ARRAY_BUFFER)
module().setViewsAreShared();
#else
return failOffset(parser_.tokenStream.currentToken().pos.begin,
"shared memory and atomics not supported by this build");
#endif
}
return true;
}
bool finishFunctionBodies() {
return mg_.finishFuncs();
}
};
@ -2680,8 +2704,7 @@ CheckGlobalSimdImport(ModuleValidator& m, ParseNode* initNode, PropertyName* var
static bool
CheckGlobalSimdOperationImport(ModuleValidator& m, const ModuleValidator::Global* global,
ParseNode* initNode, PropertyName* varName, PropertyName* ctorVarName,
PropertyName* opName)
ParseNode* initNode, PropertyName* varName, PropertyName* opName)
{
AsmJSSimdType simdType = global->simdCtorType();
AsmJSSimdOperation simdOp;
@ -2689,7 +2712,7 @@ CheckGlobalSimdOperationImport(ModuleValidator& m, const ModuleValidator::Global
return m.failName(initNode, "'%s' is not a standard SIMD operation", opName);
if (!IsSimdValidOperationType(simdType, simdOp))
return m.failName(initNode, "'%s' is not an operation supported by the SIMD type", opName);
return m.addSimdOperation(varName, simdType, simdOp, ctorVarName, opName);
return m.addSimdOperation(varName, simdType, simdOp, opName);
}
static bool
@ -2751,7 +2774,7 @@ CheckGlobalDotImport(ModuleValidator& m, PropertyName* varName, ParseNode* initN
if (!global->isSimdCtor())
return m.failName(base, "expecting SIMD constructor name, got %s", field);
return CheckGlobalSimdOperationImport(m, global, initNode, varName, base->name(), field);
return CheckGlobalSimdOperationImport(m, global, initNode, varName, field);
}
static bool
@ -4054,8 +4077,7 @@ CheckFuncPtrCall(FunctionValidator& f, ParseNode* callNode, ExprType ret, Type*
if (!CheckFuncPtrTableAgainstExisting(f.m(), tableNode, name, sig, mask, &funcPtrTableIndex))
return false;
uint32_t globalDataOffset = f.m().module().funcPtrTable(funcPtrTableIndex).globalDataOffset();
f.patch32(globalDataOffsetAt, globalDataOffset);
f.patch32(globalDataOffsetAt, f.m().mg().funcPtrTableGlobalDataOffset(funcPtrTableIndex));
f.patchSig(sigAt, &f.m().funcPtrTable(funcPtrTableIndex).sig());
*type = Type::ret(ret);
@ -4099,7 +4121,7 @@ CheckFFICall(FunctionValidator& f, ParseNode* callNode, unsigned ffiIndex, ExprT
// Global data offset
size_t offsetAt = f.temp32();
// Pointer to the exit's signature in the module's lifo
// Pointer to the import's signature in the module's lifo
size_t sigAt = f.tempPtr();
// Call node position (asm.js specific)
WriteCallLineCol(f, callNode);
@ -4110,13 +4132,12 @@ CheckFFICall(FunctionValidator& f, ParseNode* callNode, unsigned ffiIndex, ExprT
MallocSig sig(Move(args), ret);
unsigned exitIndex = 0;
unsigned importIndex = 0;
const LifoSig* lifoSig = nullptr;
if (!f.m().addExit(calleeName, Move(sig), ffiIndex, &exitIndex, &lifoSig))
if (!f.m().addImport(calleeName, Move(sig), ffiIndex, &importIndex, &lifoSig))
return false;
JS_STATIC_ASSERT(offsetof(AsmJSModule::ExitDatum, exit) == 0);
f.patch32(offsetAt, f.module().exit(exitIndex).globalDataOffset());
f.patch32(offsetAt, f.m().mg().importExitGlobalDataOffset(importIndex));
f.patchSig(sigAt, lifoSig);
*type = Type::ret(ret);
return true;
@ -5856,7 +5877,7 @@ enum class InterruptCheckPosition {
static void
MaybeAddInterruptCheck(FunctionValidator& f, InterruptCheckPosition pos, ParseNode* pn)
{
if (f.m().module().usesSignalHandlersForInterrupt())
if (f.m().mg().args().useSignalHandlersForInterrupt)
return;
switch (pos) {
@ -6685,7 +6706,7 @@ CheckFuncPtrTable(ModuleValidator& m, ParseNode* var)
unsigned mask = length - 1;
ModuleGenerator::FuncIndexVector elems;
Vector<uint32_t> elemFuncIndices(m.cx());
const LifoSig* sig = nullptr;
for (ParseNode* elem = ListHead(arrayLiteral); elem; elem = NextNode(elem)) {
if (!elem->isKind(PNK_NAME))
@ -6703,7 +6724,7 @@ CheckFuncPtrTable(ModuleValidator& m, ParseNode* var)
sig = &func->sig();
}
if (!elems.append(func->index()))
if (!elemFuncIndices.append(func->index()))
return false;
}
@ -6711,7 +6732,7 @@ CheckFuncPtrTable(ModuleValidator& m, ParseNode* var)
if (!CheckFuncPtrTableAgainstExisting(m, var, var->name(), *sig, mask, &funcPtrTableIndex))
return false;
if (!m.defineFuncPtrTable(funcPtrTableIndex, Move(elems)))
if (!m.defineFuncPtrTable(funcPtrTableIndex, elemFuncIndices))
return m.fail(var, "duplicate function-pointer definition");
return true;
@ -6756,10 +6777,10 @@ CheckModuleExportFunction(ModuleValidator& m, ParseNode* pn, PropertyName* maybe
return m.failName(pn, "exported function name '%s' not found", funcName);
if (global->which() == ModuleValidator::Global::Function)
return m.addExportedFunction(m.function(global->funcIndex()), maybeFieldName);
return m.addExport(m.function(global->funcIndex()), maybeFieldName);
if (global->which() == ModuleValidator::Global::ChangeHeap)
return m.addExportedChangeHeap(funcName, *global, maybeFieldName);
return m.addChangeHeapExport(funcName, *global, maybeFieldName);
return m.failName(pn, "'%s' is not a function", funcName);
}
@ -6842,14 +6863,13 @@ CheckModuleEnd(ModuleValidator &m)
}
static bool
CheckModule(ExclusiveContext* cx, AsmJSParser& parser, ParseNode* stmtList,
ScopedJSDeletePtr<AsmJSModule>* module, unsigned* time,
SlowFunctionVector* slowFuncs)
CheckModule(ExclusiveContext* cx, AsmJSParser& parser, ParseNode* stmtList, HandleAsmJSModule obj,
unsigned* time, SlowFunctionVector* slowFuncs)
{
int64_t before = PRMJ_Now();
ModuleValidator m(cx, parser);
if (!m.init())
if (!m.init(obj))
return false;
if (PropertyName* moduleFunctionName = FunctionName(m.moduleFunctionNode())) {
@ -6873,17 +6893,15 @@ CheckModule(ExclusiveContext* cx, AsmJSParser& parser, ParseNode* stmtList,
if (!CheckModuleGlobals(m))
return false;
m.startFunctionBodies();
#if !defined(ENABLE_SHARED_ARRAY_BUFFER)
if (m.usesSharedMemory())
return m.failOffset(m.parser().tokenStream.currentToken().pos.begin,
"shared memory and atomics not supported by this build");
#endif
if (!m.startFunctionBodies())
return false;
if (!CheckFunctions(m))
return false;
if (!m.finishFunctionBodies())
return false;
if (!CheckFuncPtrTables(m))
return false;
@ -6893,36 +6911,35 @@ CheckModule(ExclusiveContext* cx, AsmJSParser& parser, ParseNode* stmtList,
if (!CheckModuleEnd(m))
return false;
if (!m.finish(module, slowFuncs))
if (!m.finish(slowFuncs))
return false;
*time = (PRMJ_Now() - before) / PRMJ_USEC_PER_MSEC;
return true;
}
static bool
BuildConsoleMessage(ExclusiveContext* cx, AsmJSModule& module,
unsigned time, const SlowFunctionVector& slowFuncs,
JS::AsmJSCacheResult cacheResult, ScopedJSFreePtr<char>* out)
static UniqueChars
BuildConsoleMessage(ExclusiveContext* cx, AsmJSModule& module, unsigned time,
const SlowFunctionVector& slowFuncs, JS::AsmJSCacheResult cacheResult)
{
#ifndef JS_MORE_DETERMINISTIC
ScopedJSFreePtr<char> slowText;
UniqueChars slowText;
if (!slowFuncs.empty()) {
slowText.reset(JS_smprintf("; %d functions compiled slowly: ", slowFuncs.length()));
if (!slowText)
return true;
return nullptr;
for (unsigned i = 0; i < slowFuncs.length(); i++) {
const SlowFunction& func = slowFuncs[i];
JSAutoByteString name;
if (!AtomToPrintableString(cx, func.name, &name))
return false;
return nullptr;
slowText.reset(JS_smprintf("%s%s:%u:%u (%ums)%s", slowText.get(),
name.ptr(), func.line, func.column, func.ms,
i+1 < slowFuncs.length() ? ", " : ""));
if (!slowText)
return true;
return nullptr;
}
}
@ -6961,11 +6978,11 @@ BuildConsoleMessage(ExclusiveContext* cx, AsmJSModule& module,
break;
}
out->reset(JS_smprintf("total compilation time %dms; %s%s",
time, cacheString, slowText ? slowText.get() : ""));
return UniqueChars(JS_smprintf("total compilation time %dms; %s%s",
time, cacheString, slowText ? slowText.get() : ""));
#else
return make_string_copy("");
#endif
return true;
}
static bool
@ -7029,42 +7046,40 @@ js::ValidateAsmJS(ExclusiveContext* cx, AsmJSParser& parser, ParseNode* stmtList
if (!EstablishPreconditions(cx, parser))
return NoExceptionPending(cx);
ScopedJSDeletePtr<AsmJSModule> module;
ScopedJSFreePtr<char> message;
Rooted<AsmJSModuleObject*> moduleObj(cx, AsmJSModuleObject::create(cx));
if (!moduleObj)
return false;
// Before spending any time parsing the module, try to look it up in the
// embedding's cache using the chars about to be parsed as the key.
if (!LookupAsmJSModuleInCache(cx, parser, &module, &message))
bool loadedFromCache;
UniqueChars message;
if (!LookupAsmJSModuleInCache(cx, parser, moduleObj, &loadedFromCache, &message))
return false;
// If not present in the cache, parse, validate and generate code in a
// single linear pass over the chars of the asm.js module.
if (!module) {
if (!loadedFromCache) {
// "Checking" parses, validates and compiles, producing a fully compiled
// AsmJSModule as result.
// AsmJSModuleObject as result.
unsigned time;
SlowFunctionVector slowFuncs(cx);
if (!CheckModule(cx, parser, stmtList, &module, &time, &slowFuncs))
if (!CheckModule(cx, parser, stmtList, moduleObj, &time, &slowFuncs))
return NoExceptionPending(cx);
// Try to store the AsmJSModule in the embedding's cache. The
// AsmJSModule must be stored before static linking since static linking
// specializes the AsmJSModule to the current process's address space
// and therefore must be executed after a cache hit.
JS::AsmJSCacheResult cacheResult = StoreAsmJSModuleInCache(parser, *module, cx);
module->staticallyLink(cx);
if (!BuildConsoleMessage(cx, *module, time, slowFuncs, cacheResult, &message))
AsmJSModule& module = moduleObj->module();
JS::AsmJSCacheResult cacheResult = StoreAsmJSModuleInCache(parser, module, cx);
if (!module.staticallyLink(cx))
return false;
}
// The AsmJSModuleObject isn't directly referenced by user code; it is only
// referenced (and kept alive by) an internal slot of the asm.js module
// function generated below and asm.js export functions generated when the
// asm.js module function is called.
RootedObject moduleObj(cx, AsmJSModuleObject::create(cx, &module));
if (!moduleObj)
return false;
message = BuildConsoleMessage(cx, module, time, slowFuncs, cacheResult);
if (!message)
return NoExceptionPending(cx);
}
// The module function dynamically links the AsmJSModule when called and
// generates a set of functions wrapping all the exports.
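For readability, the ValidateAsmJS changes in this hunk boil down to the cache-or-compile flow sketched below, using names from the diff, with failure paths and console-message details trimmed:
static bool
ValidateSketch(ExclusiveContext* cx, AsmJSParser& parser, ParseNode* stmtList)
{
    Rooted<AsmJSModuleObject*> moduleObj(cx, AsmJSModuleObject::create(cx));
    if (!moduleObj)
        return false;

    // Fast path: reuse a module previously compiled from the same chars.
    bool loadedFromCache;
    UniqueChars message;
    if (!LookupAsmJSModuleInCache(cx, parser, moduleObj, &loadedFromCache, &message))
        return false;

    if (!loadedFromCache) {
        // Slow path: parse, validate and compile in one pass, store the result
        // in the embedding's cache, then statically link it into this process.
        unsigned time;
        SlowFunctionVector slowFuncs(cx);
        if (!CheckModule(cx, parser, stmtList, moduleObj, &time, &slowFuncs))
            return false;
        AsmJSModule& module = moduleObj->module();
        JS::AsmJSCacheResult cacheResult = StoreAsmJSModuleInCache(parser, module, cx);
        if (!module.staticallyLink(cx))
            return false;
        message = BuildConsoleMessage(cx, module, time, slowFuncs, cacheResult);
        if (!message)
            return false;
    }

    // The module function created from moduleObj performs dynamic linking when
    // it is eventually called and keeps moduleObj alive via an internal slot.
    return true;
}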


@ -27,7 +27,6 @@
#include "jit/Registers.h"
#include "js/TypeDecls.h"
#include "vm/NativeObject.h"
namespace js {


@ -1,42 +0,0 @@
/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
* vim: set ts=8 sts=4 et sw=4 tw=99:
*
* Copyright 2015 Mozilla Foundation
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef asmjs_wasm_compile_args_h
#define asmjs_wasm_compile_args_h
struct JSRuntime;
namespace js {
namespace wasm {
struct CompileArgs
{
JSRuntime* runtime;
bool usesSignalHandlersForOOB;
CompileArgs(JSRuntime* runtime,
bool usesSignalHandlersForOOB)
: runtime(runtime),
usesSignalHandlersForOOB(usesSignalHandlersForOOB)
{}
};
} // namespace wasm
} // namespace js
#endif // asmjs_wasm_compile_args_h
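The link-time check shown earlier in AsmJS.cpp (module.wasm().compileArgs() != CompileArgs(cx)) compares values of this struct. A hedged sketch of what such a comparison amounts to over the two fields declared above; the actual operator, and any extra flags the replacement CompileArgs carries, are not shown in this excerpt:
namespace js {
namespace wasm {

// Sketch only: field-wise comparison of the CompileArgs declared above.
inline bool
operator==(const CompileArgs& lhs, const CompileArgs& rhs)
{
    return lhs.runtime == rhs.runtime &&
           lhs.usesSignalHandlersForOOB == rhs.usesSignalHandlersForOOB;
}

inline bool
operator!=(const CompileArgs& lhs, const CompileArgs& rhs)
{
    return !(lhs == rhs);
}

} // namespace wasm
} // namespace js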


@ -16,7 +16,9 @@
* limitations under the License.
*/
#include "asmjs/AsmJSFrameIterator.h"
#include "asmjs/WasmFrameIterator.h"
#include "jsatom.h"
#include "asmjs/AsmJSModule.h"
#include "jit/MacroAssembler-inl.h"
@ -26,9 +28,10 @@ using namespace js::jit;
using namespace js::wasm;
using mozilla::DebugOnly;
using mozilla::Swap;
/*****************************************************************************/
// AsmJSFrameIterator implementation
// FrameIterator implementation
static void*
ReturnAddressFromFP(void* fp)
@ -42,17 +45,29 @@ CallerFPFromFP(void* fp)
return reinterpret_cast<AsmJSFrame*>(fp)->callerFP;
}
AsmJSFrameIterator::AsmJSFrameIterator(const AsmJSActivation& activation)
: module_(&activation.module()),
FrameIterator::FrameIterator()
: cx_(nullptr),
module_(nullptr),
callsite_(nullptr),
codeRange_(nullptr),
fp_(nullptr)
{
MOZ_ASSERT(done());
}
FrameIterator::FrameIterator(const AsmJSActivation& activation)
: cx_(activation.cx()),
module_(&activation.module().wasm()),
callsite_(nullptr),
codeRange_(nullptr),
fp_(activation.fp())
{
if (!fp_)
return;
settle();
if (fp_)
settle();
}
void
AsmJSFrameIterator::operator++()
FrameIterator::operator++()
{
MOZ_ASSERT(!done());
DebugOnly<uint8_t*> oldfp = fp_;
@ -62,41 +77,57 @@ AsmJSFrameIterator::operator++()
}
void
AsmJSFrameIterator::settle()
FrameIterator::settle()
{
void* returnAddress = ReturnAddressFromFP(fp_);
const AsmJSModule::CodeRange* codeRange = module_->lookupCodeRange(returnAddress);
const CodeRange* codeRange = module_->lookupCodeRange(returnAddress);
MOZ_ASSERT(codeRange);
codeRange_ = codeRange;
switch (codeRange->kind()) {
case AsmJSModule::CodeRange::Function:
case CodeRange::Function:
callsite_ = module_->lookupCallSite(returnAddress);
MOZ_ASSERT(callsite_);
break;
case AsmJSModule::CodeRange::Entry:
case CodeRange::Entry:
fp_ = nullptr;
MOZ_ASSERT(done());
break;
case AsmJSModule::CodeRange::JitFFI:
case AsmJSModule::CodeRange::SlowFFI:
case AsmJSModule::CodeRange::Interrupt:
case AsmJSModule::CodeRange::Inline:
case AsmJSModule::CodeRange::Thunk:
case CodeRange::ImportJitExit:
case CodeRange::ImportInterpExit:
case CodeRange::Interrupt:
case CodeRange::Inline:
MOZ_CRASH("Should not encounter an exit during iteration");
}
}
JSAtom*
AsmJSFrameIterator::functionDisplayAtom() const
FrameIterator::functionDisplayAtom() const
{
MOZ_ASSERT(!done());
return reinterpret_cast<const AsmJSModule::CodeRange*>(codeRange_)->functionName(*module_);
const char* chars = module_->functionName(codeRange_->funcNameIndex());
UTF8Chars utf8(chars, strlen(chars));
size_t twoByteLength;
UniquePtr<char16_t> twoByte(JS::UTF8CharsToNewTwoByteCharsZ(cx_, utf8, &twoByteLength).get());
if (!twoByte) {
cx_->clearPendingException();
return cx_->names().empty;
}
JSAtom* atom = AtomizeChars(cx_, twoByte.get(), twoByteLength);
if (!atom) {
cx_->clearPendingException();
return cx_->names().empty;
}
return atom;
}
unsigned
AsmJSFrameIterator::computeLine(uint32_t* column) const
FrameIterator::computeLine(uint32_t* column) const
{
MOZ_ASSERT(!done());
if (column)
@ -163,11 +194,11 @@ PushRetAddr(MacroAssembler& masm)
}
// Generate a prologue that maintains AsmJSActivation::fp as the virtual frame
// pointer so that AsmJSProfilingFrameIterator can walk the stack at any pc in
// pointer so that ProfilingFrameIterator can walk the stack at any pc in
// generated code.
static void
GenerateProfilingPrologue(MacroAssembler& masm, unsigned framePushed, ExitReason reason,
AsmJSProfilingOffsets* offsets, Label* maybeEntry = nullptr)
ProfilingOffsets* offsets, Label* maybeEntry = nullptr)
{
#if !defined (JS_CODEGEN_ARM)
Register scratch = ABIArgGenerator::NonArg_VolatileReg;
@ -179,7 +210,7 @@ GenerateProfilingPrologue(MacroAssembler& masm, unsigned framePushed, ExitReason
masm.setSecondScratchReg(InvalidReg);
#endif
// AsmJSProfilingFrameIterator needs to know the offsets of several key
// ProfilingFrameIterator needs to know the offsets of several key
// instructions from entry. To save space, we make these offsets static
// constants and assert that they match the actual codegen below. On ARM,
// this requires AutoForbidPools to prevent a constant pool from being
@ -204,9 +235,9 @@ GenerateProfilingPrologue(MacroAssembler& masm, unsigned framePushed, ExitReason
MOZ_ASSERT_IF(!masm.oom(), StoredFP == masm.currentOffset() - offsets->begin);
}
if (reason.kind() != ExitReason::None) {
masm.store32_NoSecondScratch(Imm32(reason.pack()),
Address(scratch, AsmJSActivation::offsetOfPackedExitReason()));
if (reason != ExitReason::None) {
masm.store32_NoSecondScratch(Imm32(int32_t(reason)),
Address(scratch, AsmJSActivation::offsetOfExitReason()));
}
#if defined(JS_CODEGEN_ARM)
@ -220,7 +251,7 @@ GenerateProfilingPrologue(MacroAssembler& masm, unsigned framePushed, ExitReason
// Generate the inverse of GenerateProfilingPrologue.
static void
GenerateProfilingEpilogue(MacroAssembler& masm, unsigned framePushed, ExitReason reason,
AsmJSProfilingOffsets* offsets)
ProfilingOffsets* offsets)
{
Register scratch = ABIArgGenerator::NonReturn_VolatileReg0;
#if defined(JS_CODEGEN_ARM) || defined(JS_CODEGEN_ARM64) || \
@ -233,12 +264,12 @@ GenerateProfilingEpilogue(MacroAssembler& masm, unsigned framePushed, ExitReason
masm.loadAsmJSActivation(scratch);
if (reason.kind() != ExitReason::None) {
masm.store32(Imm32(ExitReason::None),
Address(scratch, AsmJSActivation::offsetOfPackedExitReason()));
if (reason != ExitReason::None) {
masm.store32(Imm32(int32_t(ExitReason::None)),
Address(scratch, AsmJSActivation::offsetOfExitReason()));
}
// AsmJSProfilingFrameIterator assumes fixed offsets of the last few
// ProfilingFrameIterator assumes fixed offsets of the last few
// instructions from profilingReturn, so we use AutoForbidPools to ensure that
// unintended instructions are not automatically inserted.
{
@ -272,11 +303,10 @@ GenerateProfilingEpilogue(MacroAssembler& masm, unsigned framePushed, ExitReason
// to call out to C++ so, as an optimization, we don't update fp. To avoid
// recompilation when the profiling mode is toggled, we generate both prologues
// a priori and switch between prologues when the profiling mode is toggled.
// Specifically, AsmJSModule::setProfilingEnabled patches all callsites to
// Specifically, Module::setProfilingEnabled patches all callsites to
// either call the profiling or non-profiling entry point.
void
js::GenerateAsmJSFunctionPrologue(MacroAssembler& masm, unsigned framePushed,
AsmJSFunctionOffsets* offsets)
wasm::GenerateFunctionPrologue(MacroAssembler& masm, unsigned framePushed, FuncOffsets* offsets)
{
#if defined(JS_CODEGEN_ARM)
// Flush pending pools so they do not get dumped between the 'begin' and
@ -301,14 +331,13 @@ js::GenerateAsmJSFunctionPrologue(MacroAssembler& masm, unsigned framePushed,
masm.setFramePushed(framePushed);
}
// Similar to GenerateAsmJSFunctionPrologue (see comment), we generate both a
// Similar to GenerateFunctionPrologue (see comment), we generate both a
// profiling and non-profiling epilogue a priori. When the profiling mode is
// toggled, AsmJSModule::setProfilingEnabled patches the 'profiling jump' to
// toggled, Module::setProfilingEnabled patches the 'profiling jump' to
// either be a nop (falling through to the normal prologue) or a jump (jumping
// to the profiling epilogue).
void
js::GenerateAsmJSFunctionEpilogue(MacroAssembler& masm, unsigned framePushed,
AsmJSFunctionOffsets* offsets)
wasm::GenerateFunctionEpilogue(MacroAssembler& masm, unsigned framePushed, FuncOffsets* offsets)
{
MOZ_ASSERT(masm.framePushed() == framePushed);
@ -329,7 +358,7 @@ js::GenerateAsmJSFunctionEpilogue(MacroAssembler& masm, unsigned framePushed,
#endif
// The exact form of this instruction must be kept consistent with the
// patching in AsmJSModule::setProfilingEnabled.
// patching in Module::setProfilingEnabled.
offsets->profilingJump = masm.currentOffset();
#if defined(JS_CODEGEN_X86) || defined(JS_CODEGEN_X64)
masm.twoByteNop();
@ -361,8 +390,8 @@ js::GenerateAsmJSFunctionEpilogue(MacroAssembler& masm, unsigned framePushed,
}
void
js::GenerateAsmJSExitPrologue(MacroAssembler& masm, unsigned framePushed, ExitReason reason,
AsmJSProfilingOffsets* offsets, Label* maybeEntry)
wasm::GenerateExitPrologue(MacroAssembler& masm, unsigned framePushed, ExitReason reason,
ProfilingOffsets* offsets, Label* maybeEntry)
{
masm.haltingAlign(CodeAlignment);
GenerateProfilingPrologue(masm, framePushed, reason, offsets, maybeEntry);
@ -370,25 +399,36 @@ js::GenerateAsmJSExitPrologue(MacroAssembler& masm, unsigned framePushed, ExitRe
}
void
js::GenerateAsmJSExitEpilogue(MacroAssembler& masm, unsigned framePushed, ExitReason reason,
AsmJSProfilingOffsets* offsets)
wasm::GenerateExitEpilogue(MacroAssembler& masm, unsigned framePushed, ExitReason reason,
ProfilingOffsets* offsets)
{
// Inverse of GenerateAsmJSExitPrologue:
// Inverse of GenerateExitPrologue:
MOZ_ASSERT(masm.framePushed() == framePushed);
GenerateProfilingEpilogue(masm, framePushed, reason, offsets);
masm.setFramePushed(0);
}
/*****************************************************************************/
// AsmJSProfilingFrameIterator
// ProfilingFrameIterator
AsmJSProfilingFrameIterator::AsmJSProfilingFrameIterator(const AsmJSActivation& activation)
: module_(&activation.module()),
ProfilingFrameIterator::ProfilingFrameIterator()
: module_(nullptr),
codeRange_(nullptr),
callerFP_(nullptr),
callerPC_(nullptr),
stackAddress_(nullptr),
exitReason_(ExitReason::None),
codeRange_(nullptr)
exitReason_(ExitReason::None)
{
MOZ_ASSERT(done());
}
ProfilingFrameIterator::ProfilingFrameIterator(const AsmJSActivation& activation)
: module_(&activation.module().wasm()),
codeRange_(nullptr),
callerFP_(nullptr),
callerPC_(nullptr),
stackAddress_(nullptr),
exitReason_(ExitReason::None)
{
// If profiling hasn't been enabled for this module, then CallerFPFromFP
// will be trash, so ignore the entire activation. In practice, this only
@ -404,30 +444,24 @@ AsmJSProfilingFrameIterator::AsmJSProfilingFrameIterator(const AsmJSActivation&
}
static inline void
AssertMatchesCallSite(const AsmJSModule& module, const AsmJSModule::CodeRange* calleeCodeRange,
void* callerPC, void* callerFP, void* fp)
AssertMatchesCallSite(const Module& module, void* callerPC, void* callerFP, void* fp)
{
#ifdef DEBUG
const AsmJSModule::CodeRange* callerCodeRange = module.lookupCodeRange(callerPC);
const CodeRange* callerCodeRange = module.lookupCodeRange(callerPC);
MOZ_ASSERT(callerCodeRange);
if (callerCodeRange->isEntry()) {
if (callerCodeRange->kind() == CodeRange::Entry) {
MOZ_ASSERT(callerFP == nullptr);
return;
}
const CallSite* callsite = module.lookupCallSite(callerPC);
if (calleeCodeRange->isThunk()) {
MOZ_ASSERT(!callsite);
MOZ_ASSERT(callerCodeRange->isFunction());
} else {
MOZ_ASSERT(callsite);
MOZ_ASSERT(callerFP == (uint8_t*)fp + callsite->stackDepth());
}
MOZ_ASSERT(callsite);
MOZ_ASSERT(callerFP == (uint8_t*)fp + callsite->stackDepth());
#endif
}
void
AsmJSProfilingFrameIterator::initFromFP(const AsmJSActivation& activation)
ProfilingFrameIterator::initFromFP(const AsmJSActivation& activation)
{
uint8_t* fp = activation.fp();
@ -441,59 +475,57 @@ AsmJSProfilingFrameIterator::initFromFP(const AsmJSActivation& activation)
// Since we don't have the pc for fp, start unwinding at the caller of fp
// (ReturnAddressFromFP(fp)). This means that the innermost frame is
// skipped. This is fine because:
// - for FFI calls, the innermost frame is a thunk, so the first frame that
// shows up is the function calling the FFI;
// - for Math and other builtin calls, when profiling is activated, we
// patch all call sites to instead call through a thunk; and
// - for interrupts, we just accept that we'll lose the innermost frame.
// - for import exit calls, the innermost frame is a thunk, so the first
// frame that shows up is the function calling the import;
// - for Math and other builtin calls as well as interrupts, we note the absence
// of an exit reason and inject a fake "builtin" frame; and
// - for async interrupts, we just accept that we'll lose the innermost frame.
void* pc = ReturnAddressFromFP(fp);
const AsmJSModule::CodeRange* codeRange = module_->lookupCodeRange(pc);
const CodeRange* codeRange = module_->lookupCodeRange(pc);
MOZ_ASSERT(codeRange);
codeRange_ = codeRange;
stackAddress_ = fp;
switch (codeRange->kind()) {
case AsmJSModule::CodeRange::Entry:
case CodeRange::Entry:
callerPC_ = nullptr;
callerFP_ = nullptr;
break;
case AsmJSModule::CodeRange::Function:
case CodeRange::Function:
fp = CallerFPFromFP(fp);
callerPC_ = ReturnAddressFromFP(fp);
callerFP_ = CallerFPFromFP(fp);
AssertMatchesCallSite(*module_, codeRange, callerPC_, callerFP_, fp);
AssertMatchesCallSite(*module_, callerPC_, callerFP_, fp);
break;
case AsmJSModule::CodeRange::JitFFI:
case AsmJSModule::CodeRange::SlowFFI:
case AsmJSModule::CodeRange::Interrupt:
case AsmJSModule::CodeRange::Inline:
case AsmJSModule::CodeRange::Thunk:
case CodeRange::ImportJitExit:
case CodeRange::ImportInterpExit:
case CodeRange::Interrupt:
case CodeRange::Inline:
MOZ_CRASH("Unexpected CodeRange kind");
}
// Despite the above reasoning for skipping a frame, we do actually want FFI
// trampolines and interrupts to show up in the profile (so they can
// accumulate self time and explain performance faults). To do this, an
// "exit reason" is stored on all the paths leaving asm.js and this iterator
// treats this exit reason as its own frame. If we have exited asm.js code
// without setting an exit reason, the reason will be None and this means
// the code was asynchronously interrupted.
// The iterator inserts a pretend innermost frame for non-None ExitReasons.
// This allows the variety of exit reasons to show up in the callstack.
exitReason_ = activation.exitReason();
if (exitReason_.kind() == ExitReason::None)
exitReason_ = ExitReason::Interrupt;
// In the case of calls to builtins or asynchronous interrupts, no exit path
// is taken so the exitReason is None. Coerce these to the Native exit
// reason so that self-time is accounted for.
if (exitReason_ == ExitReason::None)
exitReason_ = ExitReason::Native;
MOZ_ASSERT(!done());
}
typedef JS::ProfilingFrameIterator::RegisterState RegisterState;
AsmJSProfilingFrameIterator::AsmJSProfilingFrameIterator(const AsmJSActivation& activation,
const RegisterState& state)
: module_(&activation.module()),
ProfilingFrameIterator::ProfilingFrameIterator(const AsmJSActivation& activation,
const RegisterState& state)
: module_(&activation.module().wasm()),
codeRange_(nullptr),
callerFP_(nullptr),
callerPC_(nullptr),
exitReason_(ExitReason::None),
codeRange_(nullptr)
exitReason_(ExitReason::None)
{
// If profiling hasn't been enabled for this module, then CallerFPFromFP
// will be trash, so ignore the entire activation. In practice, this only
@ -515,13 +547,12 @@ AsmJSProfilingFrameIterator::AsmJSProfilingFrameIterator(const AsmJSActivation&
// Note: fp may be null while entering and leaving the activation.
uint8_t* fp = activation.fp();
const AsmJSModule::CodeRange* codeRange = module_->lookupCodeRange(state.pc);
const CodeRange* codeRange = module_->lookupCodeRange(state.pc);
switch (codeRange->kind()) {
case AsmJSModule::CodeRange::Function:
case AsmJSModule::CodeRange::JitFFI:
case AsmJSModule::CodeRange::SlowFFI:
case AsmJSModule::CodeRange::Interrupt:
case AsmJSModule::CodeRange::Thunk: {
case CodeRange::Function:
case CodeRange::ImportJitExit:
case CodeRange::ImportInterpExit:
case CodeRange::Interrupt: {
// When the pc is inside the prologue/epilogue, the innermost
// call's AsmJSFrame is not complete and thus fp points to the
// second-to-innermost call's AsmJSFrame. Since fp can only tell you
@ -529,8 +560,8 @@ AsmJSProfilingFrameIterator::AsmJSProfilingFrameIterator(const AsmJSActivation&
// while pc is in the prologue/epilogue would skip the second-to-
// innermost call. To avoid this problem, we use the static structure of
// the code in the prologue and epilogue to do the Right Thing.
uint32_t offsetInModule = (uint8_t*)state.pc - module_->codeBase();
MOZ_ASSERT(offsetInModule < module_->codeBytes());
MOZ_ASSERT(module_->containsCodePC(state.pc));
uint32_t offsetInModule = (uint8_t*)state.pc - module_->code();
MOZ_ASSERT(offsetInModule >= codeRange->begin());
MOZ_ASSERT(offsetInModule < codeRange->end());
uint32_t offsetInCodeRange = offsetInModule - codeRange->begin();
@ -541,13 +572,13 @@ AsmJSProfilingFrameIterator::AsmJSProfilingFrameIterator(const AsmJSActivation&
// still in lr and fp still holds the caller's fp.
callerPC_ = state.lr;
callerFP_ = fp;
AssertMatchesCallSite(*module_, codeRange, callerPC_, callerFP_, sp - 2);
AssertMatchesCallSite(*module_, callerPC_, callerFP_, sp - 2);
} else if (offsetInModule == codeRange->profilingReturn() - PostStorePrePopFP) {
// Second-to-last instruction of the ARM/MIPS function; fp points to
// the caller's fp; have not yet popped AsmJSFrame.
callerPC_ = ReturnAddressFromFP(sp);
callerFP_ = CallerFPFromFP(sp);
AssertMatchesCallSite(*module_, codeRange, callerPC_, callerFP_, sp);
AssertMatchesCallSite(*module_, callerPC_, callerFP_, sp);
} else
#endif
if (offsetInCodeRange < PushedFP || offsetInModule == codeRange->profilingReturn()) {
@ -555,32 +586,32 @@ AsmJSProfilingFrameIterator::AsmJSProfilingFrameIterator(const AsmJSActivation&
// still points to the caller's fp.
callerPC_ = *sp;
callerFP_ = fp;
AssertMatchesCallSite(*module_, codeRange, callerPC_, callerFP_, sp - 1);
AssertMatchesCallSite(*module_, callerPC_, callerFP_, sp - 1);
} else if (offsetInCodeRange < StoredFP) {
// The full AsmJSFrame has been pushed; fp still points to the
// caller's frame.
MOZ_ASSERT(fp == CallerFPFromFP(sp));
callerPC_ = ReturnAddressFromFP(sp);
callerFP_ = CallerFPFromFP(sp);
AssertMatchesCallSite(*module_, codeRange, callerPC_, callerFP_, sp);
AssertMatchesCallSite(*module_, callerPC_, callerFP_, sp);
} else {
// Not in the prologue/epilogue.
callerPC_ = ReturnAddressFromFP(fp);
callerFP_ = CallerFPFromFP(fp);
AssertMatchesCallSite(*module_, codeRange, callerPC_, callerFP_, fp);
AssertMatchesCallSite(*module_, callerPC_, callerFP_, fp);
}
break;
}
case AsmJSModule::CodeRange::Entry: {
case CodeRange::Entry: {
// The entry trampoline is the final frame in an AsmJSActivation. The entry
// trampoline also doesn't GenerateAsmJSPrologue/Epilogue so we can't use
// trampoline also doesn't GeneratePrologue/Epilogue so we can't use
// the general unwinding logic above.
MOZ_ASSERT(!fp);
callerPC_ = nullptr;
callerFP_ = nullptr;
break;
}
case AsmJSModule::CodeRange::Inline: {
case CodeRange::Inline: {
// The throw stub clears AsmJSActivation::fp on its way out.
if (!fp) {
MOZ_ASSERT(done());
@ -594,7 +625,7 @@ AsmJSProfilingFrameIterator::AsmJSProfilingFrameIterator(const AsmJSActivation&
// skipped frames. Thus, we simply unwind based on fp.
callerPC_ = ReturnAddressFromFP(fp);
callerFP_ = CallerFPFromFP(fp);
AssertMatchesCallSite(*module_, codeRange, callerPC_, callerFP_, fp);
AssertMatchesCallSite(*module_, callerPC_, callerFP_, fp);
break;
}
}
@ -605,9 +636,9 @@ AsmJSProfilingFrameIterator::AsmJSProfilingFrameIterator(const AsmJSActivation&
}
void
AsmJSProfilingFrameIterator::operator++()
ProfilingFrameIterator::operator++()
{
if (exitReason_.kind() != ExitReason::None) {
if (exitReason_ != ExitReason::None) {
MOZ_ASSERT(codeRange_);
exitReason_ = ExitReason::None;
MOZ_ASSERT(!done());
@ -621,25 +652,23 @@ AsmJSProfilingFrameIterator::operator++()
return;
}
MOZ_ASSERT(callerPC_);
const AsmJSModule::CodeRange* codeRange = module_->lookupCodeRange(callerPC_);
const CodeRange* codeRange = module_->lookupCodeRange(callerPC_);
MOZ_ASSERT(codeRange);
codeRange_ = codeRange;
switch (codeRange->kind()) {
case AsmJSModule::CodeRange::Entry:
case CodeRange::Entry:
MOZ_ASSERT(callerFP_ == nullptr);
callerPC_ = nullptr;
break;
case AsmJSModule::CodeRange::Function:
case AsmJSModule::CodeRange::JitFFI:
case AsmJSModule::CodeRange::SlowFFI:
case AsmJSModule::CodeRange::Interrupt:
case AsmJSModule::CodeRange::Inline:
case AsmJSModule::CodeRange::Thunk:
case CodeRange::Function:
case CodeRange::ImportJitExit:
case CodeRange::ImportInterpExit:
case CodeRange::Interrupt:
case CodeRange::Inline:
stackAddress_ = callerFP_;
callerPC_ = ReturnAddressFromFP(callerFP_);
AssertMatchesCallSite(*module_, codeRange, callerPC_, CallerFPFromFP(callerFP_), callerFP_);
AssertMatchesCallSite(*module_, callerPC_, CallerFPFromFP(callerFP_), callerFP_);
callerFP_ = CallerFPFromFP(callerFP_);
break;
}
@ -647,82 +676,175 @@ AsmJSProfilingFrameIterator::operator++()
MOZ_ASSERT(!done());
}
static const char*
BuiltinToName(Builtin builtin)
{
// Note: this label is regexp-matched by
// devtools/client/profiler/cleopatra/js/parserWorker.js.
switch (builtin) {
case Builtin::ToInt32: return "ToInt32 (in asm.js)";
#if defined(JS_CODEGEN_ARM)
case Builtin::aeabi_idivmod: return "software idivmod (in asm.js)";
case Builtin::aeabi_uidivmod: return "software uidivmod (in asm.js)";
case Builtin::AtomicCmpXchg: return "Atomics.compareExchange (in asm.js)";
case Builtin::AtomicXchg: return "Atomics.exchange (in asm.js)";
case Builtin::AtomicFetchAdd: return "Atomics.add (in asm.js)";
case Builtin::AtomicFetchSub: return "Atomics.sub (in asm.js)";
case Builtin::AtomicFetchAnd: return "Atomics.and (in asm.js)";
case Builtin::AtomicFetchOr: return "Atomics.or (in asm.js)";
case Builtin::AtomicFetchXor: return "Atomics.xor (in asm.js)";
#endif
case Builtin::ModD: return "fmod (in asm.js)";
case Builtin::SinD: return "Math.sin (in asm.js)";
case Builtin::CosD: return "Math.cos (in asm.js)";
case Builtin::TanD: return "Math.tan (in asm.js)";
case Builtin::ASinD: return "Math.asin (in asm.js)";
case Builtin::ACosD: return "Math.acos (in asm.js)";
case Builtin::ATanD: return "Math.atan (in asm.js)";
case Builtin::CeilD:
case Builtin::CeilF: return "Math.ceil (in asm.js)";
case Builtin::FloorD:
case Builtin::FloorF: return "Math.floor (in asm.js)";
case Builtin::ExpD: return "Math.exp (in asm.js)";
case Builtin::LogD: return "Math.log (in asm.js)";
case Builtin::PowD: return "Math.pow (in asm.js)";
case Builtin::ATan2D: return "Math.atan2 (in asm.js)";
case Builtin::Limit: break;
}
MOZ_CRASH("symbolic immediate not a builtin");
}
const char*
AsmJSProfilingFrameIterator::label() const
ProfilingFrameIterator::label() const
{
MOZ_ASSERT(!done());
// Use the same string for both time inside and under so that the two
// entries will be coalesced by the profiler.
//
// NB: these labels are regexp-matched by
// devtools/client/profiler/cleopatra/js/parserWorker.js.
const char* jitFFIDescription = "fast FFI trampoline (in asm.js)";
const char* slowFFIDescription = "slow FFI trampoline (in asm.js)";
const char* interruptDescription = "interrupt due to out-of-bounds or long execution (in asm.js)";
// NB: these labels are parsed for location by
// devtools/client/performance/modules/logic/frame-utils.js
const char* importJitDescription = "fast FFI trampoline (in asm.js)";
const char* importInterpDescription = "slow FFI trampoline (in asm.js)";
const char* nativeDescription = "native call (in asm.js)";
switch (exitReason_.kind()) {
switch (exitReason_) {
case ExitReason::None:
break;
case ExitReason::Jit:
return jitFFIDescription;
case ExitReason::Slow:
return slowFFIDescription;
case ExitReason::Interrupt:
return interruptDescription;
case ExitReason::Builtin:
return BuiltinToName(exitReason_.builtin());
case ExitReason::ImportJit:
return importJitDescription;
case ExitReason::ImportInterp:
return importInterpDescription;
case ExitReason::Native:
return nativeDescription;
}
auto codeRange = reinterpret_cast<const AsmJSModule::CodeRange*>(codeRange_);
switch (codeRange->kind()) {
case AsmJSModule::CodeRange::Function: return codeRange->functionProfilingLabel(*module_);
case AsmJSModule::CodeRange::Entry: return "entry trampoline (in asm.js)";
case AsmJSModule::CodeRange::JitFFI: return jitFFIDescription;
case AsmJSModule::CodeRange::SlowFFI: return slowFFIDescription;
case AsmJSModule::CodeRange::Interrupt: return interruptDescription;
case AsmJSModule::CodeRange::Inline: return "inline stub (in asm.js)";
case AsmJSModule::CodeRange::Thunk: return BuiltinToName(codeRange->thunkTarget());
switch (codeRange_->kind()) {
case CodeRange::Function: return module_->profilingLabel(codeRange_->funcNameIndex());
case CodeRange::Entry: return "entry trampoline (in asm.js)";
case CodeRange::ImportJitExit: return importJitDescription;
case CodeRange::ImportInterpExit: return importInterpDescription;
case CodeRange::Interrupt: return nativeDescription;
case CodeRange::Inline: return "inline stub (in asm.js)";
}
MOZ_CRASH("bad code range kind");
}
/*****************************************************************************/
// Runtime patching to enable/disable profiling
// Patch all internal (asm.js->asm.js) callsites to call the profiling
// prologues:
void
wasm::EnableProfilingPrologue(const Module& module, const CallSite& callSite, bool enabled)
{
if (callSite.kind() != CallSite::Relative)
return;
uint8_t* callerRetAddr = module.code() + callSite.returnAddressOffset();
#if defined(JS_CODEGEN_X86) || defined(JS_CODEGEN_X64)
void* callee = X86Encoding::GetRel32Target(callerRetAddr);
#elif defined(JS_CODEGEN_ARM)
uint8_t* caller = callerRetAddr - 4;
Instruction* callerInsn = reinterpret_cast<Instruction*>(caller);
BOffImm calleeOffset;
callerInsn->as<InstBLImm>()->extractImm(&calleeOffset);
void* callee = calleeOffset.getDest(callerInsn);
#elif defined(JS_CODEGEN_ARM64)
MOZ_CRASH();
void* callee = nullptr;
(void)callerRetAddr;
#elif defined(JS_CODEGEN_MIPS32)
Instruction* instr = (Instruction*)(callerRetAddr - 4 * sizeof(uint32_t));
void* callee = (void*)Assembler::ExtractLuiOriValue(instr, instr->next());
#elif defined(JS_CODEGEN_MIPS64)
Instruction* instr = (Instruction*)(callerRetAddr - 6 * sizeof(uint32_t));
void* callee = (void*)Assembler::ExtractLoad64Value(instr);
#elif defined(JS_CODEGEN_NONE)
MOZ_CRASH();
void* callee = nullptr;
#else
# error "Missing architecture"
#endif
const CodeRange* codeRange = module.lookupCodeRange(callee);
if (!codeRange->isFunction())
return;
uint8_t* from = module.code() + codeRange->funcNonProfilingEntry();
uint8_t* to = module.code() + codeRange->funcProfilingEntry();
if (!enabled)
Swap(from, to);
MOZ_ASSERT(callee == from);
#if defined(JS_CODEGEN_X86) || defined(JS_CODEGEN_X64)
X86Encoding::SetRel32(callerRetAddr, to);
#elif defined(JS_CODEGEN_ARM)
new (caller) InstBLImm(BOffImm(to - caller), Assembler::Always);
#elif defined(JS_CODEGEN_ARM64)
(void)to;
MOZ_CRASH();
#elif defined(JS_CODEGEN_MIPS32)
Assembler::WriteLuiOriInstructions(instr, instr->next(),
ScratchRegister, (uint32_t)to);
instr[2] = InstReg(op_special, ScratchRegister, zero, ra, ff_jalr);
#elif defined(JS_CODEGEN_MIPS64)
Assembler::WriteLoad64Instructions(instr, ScratchRegister, (uint64_t)to);
instr[4] = InstReg(op_special, ScratchRegister, zero, ra, ff_jalr);
#elif defined(JS_CODEGEN_NONE)
MOZ_CRASH();
#else
# error "Missing architecture"
#endif
}
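For illustration only (not part of this patch), the entry-swapping trick above can be reduced to a standalone sketch; FakeCodeRange and RetargetCall are hypothetical names standing in for CodeRange and the per-architecture patching:
#include <cassert>
#include <cstdint>
#include <utility>
// Hypothetical stand-in for CodeRange's two entry points.
struct FakeCodeRange { uint32_t nonProfilingEntry; uint32_t profilingEntry; };
// Mirrors the Swap(from, to) pattern above: when enabling, calls currently
// aimed at the non-profiling entry are retargeted to the profiling entry;
// when disabling, the roles of 'from' and 'to' are exchanged.
static uint32_t RetargetCall(uint32_t currentCallee, const FakeCodeRange& cr, bool enabled)
{
    uint32_t from = cr.nonProfilingEntry;
    uint32_t to = cr.profilingEntry;
    if (!enabled)
        std::swap(from, to);
    assert(currentCallee == from);  // the call must currently point at the old entry
    return to;                      // the new target to patch into the call instruction
}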
// Replace all the nops in all the epilogues of asm.js functions with jumps
// to the profiling epilogues.
void
wasm::EnableProfilingEpilogue(const Module& module, const CodeRange& codeRange, bool enabled)
{
if (!codeRange.isFunction())
return;
uint8_t* jump = module.code() + codeRange.functionProfilingJump();
uint8_t* profilingEpilogue = module.code() + codeRange.funcProfilingEpilogue();
#if defined(JS_CODEGEN_X86) || defined(JS_CODEGEN_X64)
// An unconditional jump with a 1 byte offset immediate has the opcode
// 0xeb. The offset is relative to the address of the instruction after
// the jump. 0x66 0x90 is the canonical two-byte nop.
ptrdiff_t jumpImmediate = profilingEpilogue - jump - 2;
MOZ_ASSERT(jumpImmediate > 0 && jumpImmediate <= 127);
if (enabled) {
MOZ_ASSERT(jump[0] == 0x66);
MOZ_ASSERT(jump[1] == 0x90);
jump[0] = 0xeb;
jump[1] = jumpImmediate;
} else {
MOZ_ASSERT(jump[0] == 0xeb);
MOZ_ASSERT(jump[1] == jumpImmediate);
jump[0] = 0x66;
jump[1] = 0x90;
}
#elif defined(JS_CODEGEN_ARM)
if (enabled) {
MOZ_ASSERT(reinterpret_cast<Instruction*>(jump)->is<InstNOP>());
new (jump) InstBImm(BOffImm(profilingEpilogue - jump), Assembler::Always);
} else {
MOZ_ASSERT(reinterpret_cast<Instruction*>(jump)->is<InstBImm>());
new (jump) InstNOP();
}
#elif defined(JS_CODEGEN_ARM64)
(void)jump;
(void)profilingEpilogue;
MOZ_CRASH();
#elif defined(JS_CODEGEN_MIPS32)
Instruction* instr = (Instruction*)jump;
if (enabled) {
Assembler::WriteLuiOriInstructions(instr, instr->next(),
ScratchRegister, (uint32_t)profilingEpilogue);
instr[2] = InstReg(op_special, ScratchRegister, zero, zero, ff_jr);
} else {
for (unsigned i = 0; i < 3; i++)
instr[i].makeNop();
}
#elif defined(JS_CODEGEN_MIPS64)
Instruction* instr = (Instruction*)jump;
if (enabled) {
Assembler::WriteLoad64Instructions(instr, ScratchRegister, (uint64_t)profilingEpilogue);
instr[4] = InstReg(op_special, ScratchRegister, zero, zero, ff_jr);
} else {
for (unsigned i = 0; i < 5; i++)
instr[i].makeNop();
}
#elif defined(JS_CODEGEN_NONE)
MOZ_CRASH();
#else
# error "Missing architecture"
#endif
}
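The x86/x64 arm of the toggle above rewrites exactly two bytes: the canonical two-byte nop 0x66 0x90 becomes a short jump 0xeb rel8, where rel8 is measured from the end of the two-byte instruction. A minimal sketch over a plain byte buffer (hypothetical names, no executable memory involved):
#include <cassert>
#include <cstddef>
#include <cstdint>
// Toggle the two-byte slot at 'jumpOffset' inside 'code' between a nop and a
// short jump to 'targetOffset' (both offsets into the same buffer).
static void ToggleProfilingJump(uint8_t* code, size_t jumpOffset, size_t targetOffset, bool enabled)
{
    uint8_t* jump = code + jumpOffset;
    // rel8 is relative to the instruction following the 2-byte jump.
    ptrdiff_t rel8 = ptrdiff_t(targetOffset) - ptrdiff_t(jumpOffset) - 2;
    assert(rel8 > 0 && rel8 <= 127);
    if (enabled) {
        assert(jump[0] == 0x66 && jump[1] == 0x90);  // two-byte nop
        jump[0] = 0xeb;                              // jmp rel8
        jump[1] = uint8_t(rel8);
    } else {
        assert(jump[0] == 0xeb && jump[1] == uint8_t(rel8));
        jump[0] = 0x66;                              // back to the canonical nop
        jump[1] = 0x90;
    }
}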


@ -0,0 +1,121 @@
/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
* vim: set ts=8 sts=4 et sw=4 tw=99:
*
* Copyright 2014 Mozilla Foundation
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef wasm_frame_iterator_h
#define wasm_frame_iterator_h
#include "js/ProfilingFrameIterator.h"
class JSAtom;
namespace js {
class AsmJSActivation;
namespace jit { class MacroAssembler; class Label; }
namespace wasm {
class CallSite;
class CodeRange;
class Module;
struct FuncOffsets;
struct ProfilingOffsets;
// Iterates over the frames of a single AsmJSActivation, called synchronously
// from C++ in the thread of the asm.js. The one exception is that this iterator
// may be called from the interrupt callback which may be called asynchronously
// from asm.js code; in this case, the backtrace may not be correct.
class FrameIterator
{
JSContext* cx_;
const Module* module_;
const CallSite* callsite_;
const CodeRange* codeRange_;
uint8_t* fp_;
void settle();
public:
explicit FrameIterator();
explicit FrameIterator(const AsmJSActivation& activation);
void operator++();
bool done() const { return !fp_; }
JSAtom* functionDisplayAtom() const;
unsigned computeLine(uint32_t* column) const;
};
// An ExitReason describes the possible reasons for leaving compiled wasm code
// or the state of not having left compiled wasm code (ExitReason::None).
enum class ExitReason : uint32_t
{
None, // default state, the pc is in wasm code
ImportJit, // fast-path call directly into JIT code
ImportInterp, // slow-path call into C++ Invoke()
Native // call to native C++ code (e.g., Math.sin, ToInt32(), interrupt)
};
// Iterates over the frames of a single AsmJSActivation, given an
// asynchronously-interrupted thread's state. If the activation's
// module is not in profiling mode, the activation is skipped.
class ProfilingFrameIterator
{
const Module* module_;
const CodeRange* codeRange_;
uint8_t* callerFP_;
void* callerPC_;
void* stackAddress_;
ExitReason exitReason_;
void initFromFP(const AsmJSActivation& activation);
public:
ProfilingFrameIterator();
explicit ProfilingFrameIterator(const AsmJSActivation& activation);
ProfilingFrameIterator(const AsmJSActivation& activation,
const JS::ProfilingFrameIterator::RegisterState& state);
void operator++();
bool done() const { return !codeRange_; }
void* stackAddress() const { MOZ_ASSERT(!done()); return stackAddress_; }
const char* label() const;
};
// Prologue/epilogue code generation
void
GenerateExitPrologue(jit::MacroAssembler& masm, unsigned framePushed, ExitReason reason,
ProfilingOffsets* offsets, jit::Label* maybeEntry = nullptr);
void
GenerateExitEpilogue(jit::MacroAssembler& masm, unsigned framePushed, ExitReason reason,
ProfilingOffsets* offsets);
void
GenerateFunctionPrologue(jit::MacroAssembler& masm, unsigned framePushed, FuncOffsets* offsets);
void
GenerateFunctionEpilogue(jit::MacroAssembler& masm, unsigned framePushed, FuncOffsets* offsets);
// Runtime patching to enable/disable profiling
void
EnableProfilingPrologue(const Module& module, const CallSite& callSite, bool enabled);
void
EnableProfilingEpilogue(const Module& module, const CodeRange& codeRange, bool enabled);
} // namespace wasm
} // namespace js
#endif // wasm_frame_iterator_h
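As a usage sketch (only meaningful inside the SpiderMonkey tree, with a live AsmJSActivation in hand), the iterator protocol declared above is driven like any forward iterator:
#include "asmjs/WasmFrameIterator.h"
// Count the wasm frames in an activation; illustrative only.
static unsigned CountWasmFrames(const js::AsmJSActivation& activation)
{
    unsigned depth = 0;
    for (js::wasm::FrameIterator iter(activation); !iter.done(); ++iter)
        depth++;
    return depth;
}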


@ -18,31 +18,15 @@
#include "asmjs/WasmGenerator.h"
#include "asmjs/AsmJSModule.h"
#include "asmjs/AsmJSValidate.h"
#include "asmjs/WasmStubs.h"
#ifdef MOZ_VTUNE
# include "vtune/VTuneWrapper.h"
#endif
#include "jit/MacroAssembler-inl.h"
using namespace js;
using namespace js::jit;
using namespace js::wasm;
static bool
ParallelCompilationEnabled(ExclusiveContext* cx)
{
// Since there are a fixed number of helper threads and one is already being
// consumed by this parsing task, ensure that there is another free thread to
// avoid deadlock. (Note: there is at most one thread used for parsing so we
// don't have to worry about general dining philosophers.)
if (HelperThreadState().threadCount <= 1 || !CanUseExtraThreads())
return false;
// If 'cx' isn't a JSContext, then we are already off the main thread so
// off-thread compilation must be enabled.
return !cx->isJSContext() || cx->asJSContext()->runtime()->canUseOffthreadIonCompilation();
}
// ****************************************************************************
// ModuleGenerator
@ -51,19 +35,25 @@ static const unsigned COMPILATION_LIFO_DEFAULT_CHUNK_SIZE = 64 * 1024;
ModuleGenerator::ModuleGenerator(ExclusiveContext* cx)
: cx_(cx),
args_(cx),
globalBytes_(InitialGlobalDataBytes),
slowFuncs_(cx),
lifo_(GENERATOR_LIFO_DEFAULT_CHUNK_SIZE),
jcx_(CompileRuntime::get(cx->compartment()->runtimeFromAnyThread())),
alloc_(&lifo_),
masm_(MacroAssembler::AsmJSToken(), &alloc_),
masm_(MacroAssembler::AsmJSToken(), alloc_),
sigs_(cx),
parallel_(false),
outstanding_(0),
tasks_(cx),
freeTasks_(cx),
funcBytes_(0),
funcEntryOffsets_(cx),
funcPtrTables_(cx),
slowFuncs_(cx),
active_(nullptr)
{}
activeFunc_(nullptr),
finishedFuncs_(false)
{
MOZ_ASSERT(IsCompilingAsmJS());
}
ModuleGenerator::~ModuleGenerator()
{
@ -72,12 +62,12 @@ ModuleGenerator::~ModuleGenerator()
if (outstanding_) {
AutoLockHelperThreadState lock;
while (true) {
CompileTaskVector& worklist = HelperThreadState().wasmWorklist();
IonCompileTaskVector& worklist = HelperThreadState().wasmWorklist();
MOZ_ASSERT(outstanding_ >= worklist.length());
outstanding_ -= worklist.length();
worklist.clear();
CompileTaskVector& finished = HelperThreadState().wasmFinishedList();
IonCompileTaskVector& finished = HelperThreadState().wasmFinishedList();
MOZ_ASSERT(outstanding_ >= finished.length());
outstanding_ -= finished.length();
finished.clear();
@ -100,14 +90,29 @@ ModuleGenerator::~ModuleGenerator()
}
}
bool
ModuleGenerator::init(ScriptSource* ss, uint32_t srcStart, uint32_t srcBodyStart, bool strict)
static bool
ParallelCompilationEnabled(ExclusiveContext* cx)
{
if (!sigs_.init())
// Since there are a fixed number of helper threads and one is already being
// consumed by this parsing task, ensure that there is another free thread to
// avoid deadlock. (Note: there is at most one thread used for parsing so we
// don't have to worry about general dining philosophers.)
if (HelperThreadState().threadCount <= 1 || !CanUseExtraThreads())
return false;
module_ = cx_->new_<AsmJSModule>(ss, srcStart, srcBodyStart, strict, cx_->canUseSignalHandlers());
if (!module_)
// If 'cx' isn't a JSContext, then we are already off the main thread so
// off-thread compilation must be enabled.
return !cx->isJSContext() || cx->asJSContext()->runtime()->canUseOffthreadIonCompilation();
}
bool
ModuleGenerator::init()
{
staticLinkData_ = cx_->make_unique<StaticLinkData>();
if (!staticLinkData_)
return false;
if (!sigs_.init())
return false;
uint32_t numTasks;
@ -131,8 +136,9 @@ ModuleGenerator::init(ScriptSource* ss, uint32_t srcStart, uint32_t srcBodyStart
if (!tasks_.initCapacity(numTasks))
return false;
JSRuntime* runtime = cx_->compartment()->runtimeFromAnyThread();
for (size_t i = 0; i < numTasks; i++)
tasks_.infallibleEmplaceBack(COMPILATION_LIFO_DEFAULT_CHUNK_SIZE, args());
tasks_.infallibleEmplaceBack(runtime, args_, COMPILATION_LIFO_DEFAULT_CHUNK_SIZE);
if (!freeTasks_.reserve(numTasks))
return false;
@ -143,101 +149,15 @@ ModuleGenerator::init(ScriptSource* ss, uint32_t srcStart, uint32_t srcBodyStart
}
bool
ModuleGenerator::startFunc(PropertyName* name, unsigned line, unsigned column,
FunctionGenerator* fg)
ModuleGenerator::allocateGlobalBytes(uint32_t bytes, uint32_t align, uint32_t* globalDataOffset)
{
MOZ_ASSERT(!active_);
if (freeTasks_.empty() && !finishOutstandingTask())
uint32_t pad = ComputeByteAlignment(globalBytes_, align);
if (UINT32_MAX - globalBytes_ < pad + bytes)
return false;
CompileTask* task = freeTasks_.popCopy();
FuncIR* func = task->lifo().new_<FuncIR>(task->lifo(), name, line, column);
if (!func)
return false;
task->init(*func);
fg->m_ = this;
fg->task_ = task;
fg->func_ = func;
active_ = fg;
return true;
}
bool
ModuleGenerator::finishFunc(uint32_t funcIndex, const LifoSig& sig, unsigned generateTime,
FunctionGenerator* fg)
{
MOZ_ASSERT(active_ == fg);
fg->func_->finish(funcIndex, sig, generateTime);
if (parallel_) {
if (!StartOffThreadWasmCompile(cx_, fg->task_))
return false;
outstanding_++;
} else {
if (!CompileFunction(fg->task_))
return false;
if (!finishTask(fg->task_))
return false;
}
fg->m_ = nullptr;
fg->task_ = nullptr;
fg->func_ = nullptr;
active_ = nullptr;
return true;
}
bool
ModuleGenerator::finish(frontend::TokenStream& ts, ScopedJSDeletePtr<AsmJSModule>* module,
SlowFunctionVector* slowFuncs)
{
MOZ_ASSERT(!active_);
while (outstanding_ > 0) {
if (!finishOutstandingTask())
return false;
}
module_->setFunctionBytes(masm_.size());
JitContext jitContext(CompileRuntime::get(args().runtime));
// Now that all function definitions have been compiled and their function-
// entry offsets are all known, patch inter-function calls and fill in the
// function-pointer table offsets.
if (!GenerateStubs(masm_, *module_, funcEntryOffsets_))
return false;
for (auto& cs : masm_.callSites()) {
if (!cs.isInternal())
continue;
MOZ_ASSERT(cs.kind() == CallSiteDesc::Relative);
uint32_t callerOffset = cs.returnAddressOffset();
uint32_t calleeOffset = funcEntryOffsets_[cs.targetIndex()];
masm_.patchCall(callerOffset, calleeOffset);
}
for (unsigned tableIndex = 0; tableIndex < funcPtrTables_.length(); tableIndex++) {
FuncPtrTable& table = funcPtrTables_[tableIndex];
AsmJSModule::OffsetVector entryOffsets;
for (uint32_t funcIndex : table.elems)
entryOffsets.append(funcEntryOffsets_[funcIndex]);
module_->funcPtrTable(tableIndex).define(Move(entryOffsets));
}
masm_.finish();
if (masm_.oom())
return false;
if (!module_->finish(cx_, ts, masm_))
return false;
*module = module_.forget();
*slowFuncs = Move(slowFuncs_);
globalBytes_ += pad;
*globalDataOffset = globalBytes_;
globalBytes_ += bytes;
return true;
}
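The arithmetic in allocateGlobalBytes, shown standalone: pad the running total up to 'align', hand back the aligned offset, then grow by 'bytes', failing if the uint32 total would overflow (PadTo is a local helper standing in for ComputeByteAlignment):
#include <cstdint>
// Padding needed to bring 'size' up to the next multiple of 'align'.
static uint32_t PadTo(uint32_t size, uint32_t align)
{
    return (align - size % align) % align;
}
// Mirrors ModuleGenerator::allocateGlobalBytes over a plain counter.
static bool AllocateGlobalBytes(uint32_t* globalBytes, uint32_t bytes, uint32_t align,
                                uint32_t* globalDataOffset)
{
    uint32_t pad = PadTo(*globalBytes, align);
    if (uint64_t(*globalBytes) + pad + bytes > UINT32_MAX)
        return false;
    *globalBytes += pad;
    *globalDataOffset = *globalBytes;
    *globalBytes += bytes;
    return true;
}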
@ -246,7 +166,7 @@ ModuleGenerator::finishOutstandingTask()
{
MOZ_ASSERT(parallel_);
CompileTask* task = nullptr;
IonCompileTask* task = nullptr;
{
AutoLockHelperThreadState lock;
while (true) {
@ -269,55 +189,51 @@ ModuleGenerator::finishOutstandingTask()
}
bool
ModuleGenerator::finishTask(CompileTask* task)
ModuleGenerator::finishTask(IonCompileTask* task)
{
const FuncIR& func = task->func();
FunctionCompileResults& results = task->results();
FuncCompileResults& results = task->results();
// Merge the compiled results into the whole-module masm.
size_t offset = masm_.size();
if (!masm_.asmMergeWith(results.masm()))
return false;
// Offset the recorded FuncOffsets by the offset of the function in the
// whole module's code segment.
uint32_t offsetInWhole = masm_.size();
results.offsets().offsetBy(offsetInWhole);
// Create the code range now that we know offset of results in whole masm.
AsmJSModule::CodeRange codeRange(func.line(), results.offsets());
codeRange.functionOffsetBy(offset);
if (!module_->addFunctionCodeRange(func.name(), codeRange))
return false;
// Compilation may complete out of order, so cannot simply append().
// Record the non-profiling entry for whole-module linking later.
if (func.index() >= funcEntryOffsets_.length()) {
if (!funcEntryOffsets_.resize(func.index() + 1))
return false;
}
funcEntryOffsets_[func.index()] = codeRange.entry();
funcEntryOffsets_[func.index()] = results.offsets().nonProfilingEntry;
// Merge the compiled results into the whole-module masm.
DebugOnly<size_t> sizeBefore = masm_.size();
if (!masm_.asmMergeWith(results.masm()))
return false;
MOZ_ASSERT(masm_.size() == offsetInWhole + results.masm().size());
// Add the CodeRange for this function.
CacheableChars funcName = StringToNewUTF8CharsZ(cx_, *func.name());
if (!funcName)
return false;
uint32_t nameIndex = funcNames_.length();
if (!funcNames_.emplaceBack(Move(funcName)))
return false;
if (!codeRanges_.emplaceBack(nameIndex, func.line(), results.offsets()))
return false;
// Keep a record of slow functions for printing in the final console message.
unsigned totalTime = func.generateTime() + results.compileTime();
if (totalTime >= SlowFunction::msThreshold) {
if (!slowFuncs_.append(SlowFunction(func.name(), totalTime, func.line(), func.column())))
if (!slowFuncs_.emplaceBack(func.name(), totalTime, func.line(), func.column()))
return false;
}
#if defined(MOZ_VTUNE) || defined(JS_ION_PERF)
AsmJSModule::ProfiledFunction pf(func.name(), codeRange.entry(), codeRange.end(),
func.line(), func.column());
if (!module().addProfiledFunction(pf))
return false;
#endif
task->reset();
freeTasks_.infallibleAppend(task);
return true;
}
CompileArgs
ModuleGenerator::args() const
{
return CompileArgs(cx_->compartment()->runtimeFromAnyThread(),
module().usesSignalHandlersForOOB());
}
const LifoSig*
ModuleGenerator::newLifoSig(const MallocSig& sig)
{
@ -333,7 +249,182 @@ ModuleGenerator::newLifoSig(const MallocSig& sig)
}
bool
ModuleGenerator::declareFuncPtrTable(uint32_t numElems, uint32_t* funcPtrTableIndex)
ModuleGenerator::allocateGlobalVar(ValType type, uint32_t* globalDataOffset)
{
unsigned width = 0;
switch (type) {
case wasm::ValType::I32:
case wasm::ValType::F32:
width = 4;
break;
case wasm::ValType::I64:
case wasm::ValType::F64:
width = 8;
break;
case wasm::ValType::I32x4:
case wasm::ValType::F32x4:
case wasm::ValType::B32x4:
width = 16;
break;
}
return allocateGlobalBytes(width, width, globalDataOffset);
}
bool
ModuleGenerator::declareImport(MallocSig&& sig, unsigned* index)
{
static_assert(Module::SizeOfImportExit % sizeof(void*) == 0, "word aligned");
uint32_t globalDataOffset;
if (!allocateGlobalBytes(Module::SizeOfImportExit, sizeof(void*), &globalDataOffset))
return false;
*index = unsigned(imports_.length());
return imports_.emplaceBack(Move(sig), globalDataOffset);
}
uint32_t
ModuleGenerator::numDeclaredImports() const
{
return imports_.length();
}
uint32_t
ModuleGenerator::importExitGlobalDataOffset(uint32_t index) const
{
return imports_[index].exitGlobalDataOffset();
}
const MallocSig&
ModuleGenerator::importSig(uint32_t index) const
{
return imports_[index].sig();
}
bool
ModuleGenerator::defineImport(uint32_t index, ProfilingOffsets interpExit, ProfilingOffsets jitExit)
{
Import& import = imports_[index];
import.initInterpExitOffset(interpExit.begin);
import.initJitExitOffset(jitExit.begin);
return codeRanges_.emplaceBack(CodeRange::ImportInterpExit, interpExit) &&
codeRanges_.emplaceBack(CodeRange::ImportJitExit, jitExit);
}
bool
ModuleGenerator::declareExport(MallocSig&& sig, uint32_t funcIndex, uint32_t* index)
{
*index = exports_.length();
return exports_.emplaceBack(Move(sig), funcIndex);
}
uint32_t
ModuleGenerator::exportFuncIndex(uint32_t index) const
{
return exports_[index].funcIndex();
}
const MallocSig&
ModuleGenerator::exportSig(uint32_t index) const
{
return exports_[index].sig();
}
uint32_t
ModuleGenerator::numDeclaredExports() const
{
return exports_.length();
}
bool
ModuleGenerator::defineExport(uint32_t index, Offsets offsets)
{
exports_[index].initStubOffset(offsets.begin);
return codeRanges_.emplaceBack(CodeRange::Entry, offsets);
}
bool
ModuleGenerator::startFunc(PropertyName* name, unsigned line, unsigned column,
FunctionGenerator* fg)
{
MOZ_ASSERT(!activeFunc_);
MOZ_ASSERT(!finishedFuncs_);
if (freeTasks_.empty() && !finishOutstandingTask())
return false;
IonCompileTask* task = freeTasks_.popCopy();
FuncIR* func = task->lifo().new_<FuncIR>(task->lifo(), name, line, column);
if (!func)
return false;
task->init(*func);
fg->m_ = this;
fg->task_ = task;
fg->func_ = func;
activeFunc_ = fg;
return true;
}
bool
ModuleGenerator::finishFunc(uint32_t funcIndex, const LifoSig& sig, unsigned generateTime,
FunctionGenerator* fg)
{
MOZ_ASSERT(activeFunc_ == fg);
fg->func_->finish(funcIndex, sig, generateTime);
if (parallel_) {
if (!StartOffThreadWasmCompile(cx_, fg->task_))
return false;
outstanding_++;
} else {
if (!IonCompileFunction(fg->task_))
return false;
if (!finishTask(fg->task_))
return false;
}
fg->m_ = nullptr;
fg->task_ = nullptr;
fg->func_ = nullptr;
activeFunc_ = nullptr;
return true;
}
bool
ModuleGenerator::finishFuncs()
{
MOZ_ASSERT(!activeFunc_);
MOZ_ASSERT(!finishedFuncs_);
while (outstanding_ > 0) {
if (!finishOutstandingTask())
return false;
}
// During codegen, all wasm->wasm (internal) calls use AsmJSInternalCallee
// as the call target, which contains the function-index of the target.
// These get recorded in a CallSiteAndTargetVector in the MacroAssembler
// so that we can patch them now that all the function entry offsets are
// known.
for (CallSiteAndTarget& cs : masm_.callSites()) {
if (!cs.isInternal())
continue;
MOZ_ASSERT(cs.kind() == CallSiteDesc::Relative);
uint32_t callerOffset = cs.returnAddressOffset();
uint32_t calleeOffset = funcEntryOffsets_[cs.targetIndex()];
masm_.patchCall(callerOffset, calleeOffset);
}
funcBytes_ = masm_.size();
finishedFuncs_ = true;
return true;
}
bool
ModuleGenerator::declareFuncPtrTable(uint32_t numElems, uint32_t* index)
{
// Here just add an uninitialized FuncPtrTable and claim space in the global
// data section. Later, 'defineFuncPtrTable' will be called with function
@ -343,25 +434,194 @@ ModuleGenerator::declareFuncPtrTable(uint32_t numElems, uint32_t* funcPtrTableIn
if (numElems > 1024 * 1024)
return false;
if (!module_->declareFuncPtrTable(numElems, funcPtrTableIndex))
uint32_t globalDataOffset;
if (!allocateGlobalBytes(numElems * sizeof(void*), sizeof(void*), &globalDataOffset))
return false;
MOZ_ASSERT(*funcPtrTableIndex == funcPtrTables_.length());
return funcPtrTables_.emplaceBack(numElems);
}
StaticLinkData::FuncPtrTableVector& tables = staticLinkData_->funcPtrTables;
bool
ModuleGenerator::defineFuncPtrTable(uint32_t funcPtrTableIndex, FuncIndexVector&& elems)
{
// The AsmJSModule needs to know the offsets in the code section which won't
// be known until 'finish'. So just remember the function indices for now
// and wait until 'finish' to hand over the offsets to the AsmJSModule.
FuncPtrTable& table = funcPtrTables_[funcPtrTableIndex];
if (table.numDeclared != elems.length() || !table.elems.empty())
*index = tables.length();
if (!tables.emplaceBack(globalDataOffset))
return false;
if (!tables.back().elemOffsets.resize(numElems))
return false;
table.elems = Move(elems);
return true;
}
uint32_t
ModuleGenerator::funcPtrTableGlobalDataOffset(uint32_t index) const
{
return staticLinkData_->funcPtrTables[index].globalDataOffset;
}
void
ModuleGenerator::defineFuncPtrTable(uint32_t index, const Vector<uint32_t>& elemFuncIndices)
{
MOZ_ASSERT(finishedFuncs_);
StaticLinkData::FuncPtrTable& table = staticLinkData_->funcPtrTables[index];
MOZ_ASSERT(table.elemOffsets.length() == elemFuncIndices.length());
for (size_t i = 0; i < elemFuncIndices.length(); i++)
table.elemOffsets[i] = funcEntryOffsets_[elemFuncIndices[i]];
}
bool
ModuleGenerator::defineInlineStub(Offsets offsets)
{
MOZ_ASSERT(finishedFuncs_);
return codeRanges_.emplaceBack(CodeRange::Inline, offsets);
}
bool
ModuleGenerator::defineSyncInterruptStub(ProfilingOffsets offsets)
{
MOZ_ASSERT(finishedFuncs_);
return codeRanges_.emplaceBack(CodeRange::Interrupt, offsets);
}
bool
ModuleGenerator::defineAsyncInterruptStub(Offsets offsets)
{
MOZ_ASSERT(finishedFuncs_);
staticLinkData_->pod.interruptOffset = offsets.begin;
return codeRanges_.emplaceBack(CodeRange::Inline, offsets);
}
bool
ModuleGenerator::defineOutOfBoundsStub(Offsets offsets)
{
MOZ_ASSERT(finishedFuncs_);
staticLinkData_->pod.outOfBoundsOffset = offsets.begin;
return codeRanges_.emplaceBack(CodeRange::Inline, offsets);
}
Module*
ModuleGenerator::finish(Module::HeapBool usesHeap,
Module::SharedBool sharedHeap,
UniqueChars filename,
UniqueStaticLinkData* staticLinkData,
SlowFunctionVector* slowFuncs)
{
MOZ_ASSERT(!activeFunc_);
MOZ_ASSERT(finishedFuncs_);
if (!GenerateStubs(*this, usesHeap))
return nullptr;
masm_.finish();
if (masm_.oom())
return nullptr;
// Start global data on a new page so JIT code may be given independent
// protection flags. Note assumption that global data starts right after
// code below.
uint32_t codeBytes = AlignBytes(masm_.bytesNeeded(), AsmJSPageSize);
// Inflate the global bytes up to page size so that the total bytes are a
// page size (as required by the allocator functions).
globalBytes_ = AlignBytes(globalBytes_, AsmJSPageSize);
uint32_t totalBytes = codeBytes + globalBytes_;
// Allocate the code (guarded by a UniquePtr until it is given to the Module).
UniqueCodePtr code = AllocateCode(cx_, totalBytes);
if (!code)
return nullptr;
// Delay flushing until Module::dynamicallyLink. The flush-inhibited range
// is set by executableCopy.
AutoFlushICache afc("ModuleGenerator::finish", /* inhibit = */ true);
masm_.executableCopy(code.get());
// c.f. JitCode::copyFrom
MOZ_ASSERT(masm_.jumpRelocationTableBytes() == 0);
MOZ_ASSERT(masm_.dataRelocationTableBytes() == 0);
MOZ_ASSERT(masm_.preBarrierTableBytes() == 0);
MOZ_ASSERT(!masm_.hasSelfReference());
// Convert the CallSiteAndTargetVector (needed during generation) to a
// CallSiteVector (what is stored in the Module).
CallSiteVector callSites;
if (!callSites.appendAll(masm_.callSites()))
return nullptr;
// Add links to absolute addresses identified symbolically.
StaticLinkData::SymbolicLinkArray& symbolicLinks = staticLinkData_->symbolicLinks;
for (size_t i = 0; i < masm_.numAsmJSAbsoluteAddresses(); i++) {
AsmJSAbsoluteAddress src = masm_.asmJSAbsoluteAddress(i);
if (!symbolicLinks[src.target].append(src.patchAt.offset()))
return nullptr;
}
// Relative link metadata: absolute addresses that refer to another point within
// the asm.js module.
// CodeLabels are used for switch cases and loads from floating-point /
// SIMD values in the constant pool.
for (size_t i = 0; i < masm_.numCodeLabels(); i++) {
CodeLabel cl = masm_.codeLabel(i);
StaticLinkData::InternalLink link(StaticLinkData::InternalLink::CodeLabel);
link.patchAtOffset = masm_.labelToPatchOffset(*cl.patchAt());
link.targetOffset = cl.target()->offset();
if (!staticLinkData_->internalLinks.append(link))
return nullptr;
}
#if defined(JS_CODEGEN_X86)
// Global data accesses in x86 need to be patched with the absolute
// address of the global. Globals are allocated sequentially after the
// code section so we can just use an InternalLink.
for (size_t i = 0; i < masm_.numAsmJSGlobalAccesses(); i++) {
AsmJSGlobalAccess a = masm_.asmJSGlobalAccess(i);
StaticLinkData::InternalLink link(StaticLinkData::InternalLink::RawPointer);
link.patchAtOffset = masm_.labelToPatchOffset(a.patchAt);
link.targetOffset = codeBytes + a.globalDataOffset;
if (!staticLinkData_->internalLinks.append(link))
return nullptr;
}
#endif
#if defined(JS_CODEGEN_MIPS32) || defined(JS_CODEGEN_MIPS64)
// On MIPS we need to update all the long jumps because they contain an
// absolute address. The values are correctly patched for the current address
// space, but not after serialization or profiling-mode toggling.
for (size_t i = 0; i < masm_.numLongJumps(); i++) {
size_t off = masm_.longJump(i);
StaticLinkData::InternalLink link(StaticLinkData::InternalLink::InstructionImmediate);
link.patchAtOffset = off;
link.targetOffset = Assembler::ExtractInstructionImmediate(code.get() + off) -
uintptr_t(code.get());
if (!staticLinkData_->internalLinks.append(link))
return nullptr;
}
#endif
#if defined(JS_CODEGEN_X64)
// Global data accesses on x64 use rip-relative addressing and thus do
// not need patching after deserialization.
uint8_t* globalData = code.get() + codeBytes;
for (size_t i = 0; i < masm_.numAsmJSGlobalAccesses(); i++) {
AsmJSGlobalAccess a = masm_.asmJSGlobalAccess(i);
masm_.patchAsmJSGlobalAccess(a.patchAt, code.get(), globalData, a.globalDataOffset);
}
#endif
*staticLinkData = Move(staticLinkData_);
*slowFuncs = Move(slowFuncs_);
return cx_->new_<Module>(args_,
funcBytes_,
codeBytes,
globalBytes_,
usesHeap,
sharedHeap,
Move(code),
Move(imports_),
Move(exports_),
masm_.extractHeapAccesses(),
Move(codeRanges_),
Move(callSites),
Move(funcNames_),
Move(filename));
}
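The layout computed in finish() above, reduced to its arithmetic: code is padded to a page boundary so the global data that follows it can carry different protection flags (4096 here is an assumption standing in for AsmJSPageSize):
#include <cstdint>
// Round 'bytes' up to a multiple of 'pageSize' (a power of two).
static uint32_t AlignToPage(uint32_t bytes, uint32_t pageSize)
{
    return (bytes + pageSize - 1) & ~(pageSize - 1);
}
struct ModuleLayout { uint32_t codeBytes; uint32_t globalBytes; uint32_t totalBytes; };
// [ code | global data ], each page-aligned, matching the finish() logic above.
static ModuleLayout ComputeLayout(uint32_t masmBytesNeeded, uint32_t globalBytes)
{
    const uint32_t pageSize = 4096;  // assumption; the real code uses AsmJSPageSize
    ModuleLayout layout;
    layout.codeBytes = AlignToPage(masmBytesNeeded, pageSize);
    layout.globalBytes = AlignToPage(globalBytes, pageSize);
    layout.totalBytes = layout.codeBytes + layout.globalBytes;
    return layout;
}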


@ -16,22 +16,21 @@
* limitations under the License.
*/
#ifndef asmjs_wasm_generator_h
#define asmjs_wasm_generator_h
#ifndef wasm_generator_h
#define wasm_generator_h
#include "asmjs/WasmIonCompile.h"
#include "asmjs/WasmStubs.h"
#include "asmjs/WasmIR.h"
#include "asmjs/WasmModule.h"
#include "jit/MacroAssembler.h"
namespace js {
class AsmJSModule;
namespace frontend { class TokenStream; }
namespace wasm {
class FunctionGenerator;
// A slow function describes a function that took longer than msThreshold to
// validate and compile.
struct SlowFunction
{
SlowFunction(PropertyName* name, unsigned ms, unsigned line, unsigned column)
@ -45,7 +44,6 @@ struct SlowFunction
unsigned line;
unsigned column;
};
typedef Vector<SlowFunction> SlowFunctionVector;
// A ModuleGenerator encapsulates the creation of a wasm module. During the
@ -55,19 +53,7 @@ typedef Vector<SlowFunction> SlowFunctionVector;
// compilation and extract the resulting wasm module.
class MOZ_STACK_CLASS ModuleGenerator
{
public:
typedef Vector<uint32_t, 0, SystemAllocPolicy> FuncIndexVector;
private:
struct FuncPtrTable
{
uint32_t numDeclared;
FuncIndexVector elems;
explicit FuncPtrTable(uint32_t numDeclared) : numDeclared(numDeclared) {}
FuncPtrTable(FuncPtrTable&& rhs) : numDeclared(rhs.numDeclared), elems(Move(rhs.elems)) {}
};
typedef Vector<FuncPtrTable> FuncPtrTableVector;
typedef Vector<uint32_t> FuncOffsetVector;
struct SigHashPolicy
{
@ -77,45 +63,95 @@ class MOZ_STACK_CLASS ModuleGenerator
};
typedef HashSet<const LifoSig*, SigHashPolicy> SigSet;
ExclusiveContext* cx_;
ScopedJSDeletePtr<AsmJSModule> module_;
ExclusiveContext* cx_;
CompileArgs args_;
LifoAlloc lifo_;
jit::TempAllocator alloc_;
jit::MacroAssembler masm_;
SigSet sigs_;
// Data handed over to the Module in finish()
uint32_t globalBytes_;
ImportVector imports_;
ExportVector exports_;
CodeRangeVector codeRanges_;
CacheableCharsVector funcNames_;
bool parallel_;
uint32_t outstanding_;
Vector<CompileTask> tasks_;
Vector<CompileTask*> freeTasks_;
// Data handed back to the caller in finish()
UniqueStaticLinkData staticLinkData_;
SlowFunctionVector slowFuncs_;
FuncOffsetVector funcEntryOffsets_;
FuncPtrTableVector funcPtrTables_;
// Data scoped to the ModuleGenerator's lifetime
LifoAlloc lifo_;
jit::JitContext jcx_;
jit::TempAllocator alloc_;
jit::MacroAssembler masm_;
SigSet sigs_;
SlowFunctionVector slowFuncs_;
mozilla::DebugOnly<FunctionGenerator*> active_;
// Parallel compilation
bool parallel_;
uint32_t outstanding_;
Vector<IonCompileTask> tasks_;
Vector<IonCompileTask*> freeTasks_;
// Function compilation
uint32_t funcBytes_;
FuncOffsetVector funcEntryOffsets_;
DebugOnly<FunctionGenerator*> activeFunc_;
DebugOnly<bool> finishedFuncs_;
bool allocateGlobalBytes(uint32_t bytes, uint32_t align, uint32_t* globalDataOffset);
bool finishOutstandingTask();
bool finishTask(CompileTask* task);
CompileArgs args() const;
bool finishTask(IonCompileTask* task);
public:
explicit ModuleGenerator(ExclusiveContext* cx);
~ModuleGenerator();
bool init(ScriptSource* ss, uint32_t srcStart, uint32_t srcBodyStart, bool strict);
AsmJSModule& module() const { return *module_; }
bool init();
CompileArgs args() const { return args_; }
jit::MacroAssembler& masm() { return masm_; }
const FuncOffsetVector& funcEntryOffsets() const { return funcEntryOffsets_; }
const LifoSig* newLifoSig(const MallocSig& sig);
bool declareFuncPtrTable(uint32_t numElems, uint32_t* funcPtrTableIndex);
bool defineFuncPtrTable(uint32_t funcPtrTableIndex, FuncIndexVector&& elems);
// Global data:
bool allocateGlobalVar(ValType type, uint32_t* globalDataOffset);
// Imports:
bool declareImport(MallocSig&& sig, uint32_t* index);
uint32_t numDeclaredImports() const;
uint32_t importExitGlobalDataOffset(uint32_t index) const;
const MallocSig& importSig(uint32_t index) const;
bool defineImport(uint32_t index, ProfilingOffsets interpExit, ProfilingOffsets jitExit);
// Exports:
bool declareExport(MallocSig&& sig, uint32_t funcIndex, uint32_t* index);
uint32_t numDeclaredExports() const;
uint32_t exportFuncIndex(uint32_t index) const;
const MallocSig& exportSig(uint32_t index) const;
bool defineExport(uint32_t index, Offsets offsets);
// Functions:
bool startFunc(PropertyName* name, unsigned line, unsigned column, FunctionGenerator* fg);
bool finishFunc(uint32_t funcIndex, const LifoSig& sig, unsigned generateTime, FunctionGenerator* fg);
bool finishFuncs();
bool finish(frontend::TokenStream& ts, ScopedJSDeletePtr<AsmJSModule>* module,
SlowFunctionVector* slowFuncs);
// Function-pointer tables:
bool declareFuncPtrTable(uint32_t numElems, uint32_t* index);
uint32_t funcPtrTableGlobalDataOffset(uint32_t index) const;
void defineFuncPtrTable(uint32_t index, const Vector<uint32_t>& elemFuncIndices);
// Stubs:
bool defineInlineStub(Offsets offsets);
bool defineSyncInterruptStub(ProfilingOffsets offsets);
bool defineAsyncInterruptStub(Offsets offsets);
bool defineOutOfBoundsStub(Offsets offsets);
// Null return indicates failure. The caller must immediately root a
// non-null return value.
Module* finish(Module::HeapBool usesHeap,
Module::SharedBool sharedHeap,
UniqueChars filename,
UniqueStaticLinkData* staticLinkData,
SlowFunctionVector* slowFuncs);
};
// A FunctionGenerator encapsulates the generation of a single function body.
@ -128,7 +164,7 @@ class MOZ_STACK_CLASS FunctionGenerator
friend class ModuleGenerator;
ModuleGenerator* m_;
CompileTask* task_;
IonCompileTask* task_;
FuncIR* func_;
public:
@ -139,4 +175,4 @@ class MOZ_STACK_CLASS FunctionGenerator
} // namespace wasm
} // namespace js
#endif // asmjs_wasm_generator_h
#endif // wasm_generator_h


@ -16,10 +16,10 @@
* limitations under the License.
*/
#ifndef asmjs_wasm_ir_h
#define asmjs_wasm_ir_h
#ifndef wasm_ir_h
#define wasm_ir_h
#include "asmjs/Wasm.h"
#include "asmjs/WasmTypes.h"
namespace js {
@ -434,8 +434,7 @@ class FuncIR
typedef Vector<uint8_t, 4096, LifoAllocPolicy<Fallible>> Bytecode;
// Note: this unrooted field assumes AutoKeepAtoms via TokenStream via
// asm.js compilation. Wasm compilation will require an alternative way to
// name CodeRanges (index).
// asm.js compilation.
PropertyName* name_;
unsigned line_;
unsigned column_;
@ -572,4 +571,4 @@ class FuncIR
} // namespace wasm
} // namespace js
#endif // asmjs_wasm_ir_h
#endif // wasm_ir_h


@ -40,33 +40,30 @@ class FunctionCompiler
typedef Vector<size_t, 4, SystemAllocPolicy> PositionStack;
typedef Vector<ValType, 4, SystemAllocPolicy> LocalTypes;
CompileArgs args_;
const FuncIR& func_;
size_t pc_;
const FuncIR& func_;
size_t pc_;
TempAllocator& alloc_;
MIRGraph& graph_;
const CompileInfo& info_;
MIRGenerator& mirGen_;
TempAllocator& alloc_;
MIRGraph& graph_;
const CompileInfo& info_;
MIRGenerator& mirGen_;
MBasicBlock* curBlock_;
MBasicBlock* curBlock_;
PositionStack loopStack_;
PositionStack breakableStack_;
UnlabeledBlockMap unlabeledBreaks_;
UnlabeledBlockMap unlabeledContinues_;
LabeledBlockMap labeledBreaks_;
LabeledBlockMap labeledContinues_;
PositionStack loopStack_;
PositionStack breakableStack_;
UnlabeledBlockMap unlabeledBreaks_;
UnlabeledBlockMap unlabeledContinues_;
LabeledBlockMap labeledBreaks_;
LabeledBlockMap labeledContinues_;
LocalTypes localTypes_;
LocalTypes localTypes_;
FunctionCompileResults& compileResults_;
FuncCompileResults& compileResults_;
public:
FunctionCompiler(CompileArgs args, const FuncIR& func, MIRGenerator& mirGen,
FunctionCompileResults& compileResults)
: args_(args),
func_(func),
FunctionCompiler(const FuncIR& func, MIRGenerator& mirGen, FuncCompileResults& compileResults)
: func_(func),
pc_(0),
alloc_(mirGen.alloc()),
graph_(mirGen.graph()),
@ -770,7 +767,7 @@ class FunctionCompiler
return callPrivate(MAsmJSCall::Callee(ptrFun), call, ret, def);
}
bool builtinCall(Builtin builtin, const Call& call, ValType type, MDefinition** def)
bool builtinCall(SymbolicAddress builtin, const Call& call, ValType type, MDefinition** def)
{
return callPrivate(MAsmJSCall::Callee(builtin), call, ToExprType(type), def);
}
@ -1648,7 +1645,7 @@ EmitMathBuiltinCall(FunctionCompiler& f, F32 f32, MDefinition** def)
f.finishCallArgs(&call);
Builtin callee = f32 == F32::Ceil ? Builtin::CeilF : Builtin::FloorF;
SymbolicAddress callee = f32 == F32::Ceil ? SymbolicAddress::CeilF : SymbolicAddress::FloorF;
return f.builtinCall(callee, call, ValType::F32, def);
}
@ -1671,20 +1668,20 @@ EmitMathBuiltinCall(FunctionCompiler& f, F64 f64, MDefinition** def)
return false;
}
Builtin callee;
SymbolicAddress callee;
switch (f64) {
case F64::Ceil: callee = Builtin::CeilD; break;
case F64::Floor: callee = Builtin::FloorD; break;
case F64::Sin: callee = Builtin::SinD; break;
case F64::Cos: callee = Builtin::CosD; break;
case F64::Tan: callee = Builtin::TanD; break;
case F64::Asin: callee = Builtin::ASinD; break;
case F64::Acos: callee = Builtin::ACosD; break;
case F64::Atan: callee = Builtin::ATanD; break;
case F64::Exp: callee = Builtin::ExpD; break;
case F64::Log: callee = Builtin::LogD; break;
case F64::Pow: callee = Builtin::PowD; break;
case F64::Atan2: callee = Builtin::ATan2D; break;
case F64::Ceil: callee = SymbolicAddress::CeilD; break;
case F64::Floor: callee = SymbolicAddress::FloorD; break;
case F64::Sin: callee = SymbolicAddress::SinD; break;
case F64::Cos: callee = SymbolicAddress::CosD; break;
case F64::Tan: callee = SymbolicAddress::TanD; break;
case F64::Asin: callee = SymbolicAddress::ASinD; break;
case F64::Acos: callee = SymbolicAddress::ACosD; break;
case F64::Atan: callee = SymbolicAddress::ATanD; break;
case F64::Exp: callee = SymbolicAddress::ExpD; break;
case F64::Log: callee = SymbolicAddress::LogD; break;
case F64::Pow: callee = SymbolicAddress::PowD; break;
case F64::Atan2: callee = SymbolicAddress::ATan2D; break;
default: MOZ_CRASH("unexpected double math builtin callee");
}
@ -3046,26 +3043,25 @@ EmitB32X4Expr(FunctionCompiler& f, MDefinition** def)
}
bool
wasm::CompileFunction(CompileTask* task)
wasm::IonCompileFunction(IonCompileTask* task)
{
int64_t before = PRMJ_Now();
CompileArgs args = task->args();
const FuncIR& func = task->func();
FunctionCompileResults& results = task->results();
FuncCompileResults& results = task->results();
JitContext jitContext(CompileRuntime::get(args.runtime), &results.alloc());
JitContext jitContext(CompileRuntime::get(task->runtime()), &results.alloc());
const JitCompileOptions options;
MIRGraph graph(&results.alloc());
CompileInfo compileInfo(func.numLocals());
MIRGenerator mir(nullptr, options, &results.alloc(), &graph, &compileInfo,
IonOptimizations.get(OptimizationLevel::AsmJS),
args.usesSignalHandlersForOOB);
task->args().useSignalHandlersForOOB);
// Build MIR graph
{
FunctionCompiler f(args, func, mir, results);
FunctionCompiler f(func, mir, results);
if (!f.init())
return false;


@ -16,60 +16,69 @@
* limitations under the License.
*/
#ifndef asmjs_wasm_ion_compile_h
#define asmjs_wasm_ion_compile_h
#ifndef wasm_ion_compile_h
#define wasm_ion_compile_h
#include "asmjs/AsmJSFrameIterator.h"
#include "asmjs/WasmCompileArgs.h"
#include "asmjs/WasmIR.h"
#include "jit/MacroAssembler.h"
namespace js {
namespace wasm {
class FunctionCompileResults
// The FuncCompileResults contains the results of compiling a single function
// body, ready to be merged into the whole-module MacroAssembler.
class FuncCompileResults
{
jit::TempAllocator alloc_;
jit::MacroAssembler masm_;
AsmJSFunctionOffsets offsets_;
FuncOffsets offsets_;
unsigned compileTime_;
FunctionCompileResults(const FunctionCompileResults&) = delete;
FunctionCompileResults& operator=(const FunctionCompileResults&) = delete;
FuncCompileResults(const FuncCompileResults&) = delete;
FuncCompileResults& operator=(const FuncCompileResults&) = delete;
public:
explicit FunctionCompileResults(LifoAlloc& lifo)
explicit FuncCompileResults(LifoAlloc& lifo)
: alloc_(&lifo),
masm_(jit::MacroAssembler::AsmJSToken(), &alloc_),
masm_(jit::MacroAssembler::AsmJSToken(), alloc_),
compileTime_(0)
{}
jit::TempAllocator& alloc() { return alloc_; }
jit::MacroAssembler& masm() { return masm_; }
AsmJSFunctionOffsets& offsets() { return offsets_; }
const AsmJSFunctionOffsets& offsets() const { return offsets_; }
FuncOffsets& offsets() { return offsets_; }
void setCompileTime(unsigned t) { MOZ_ASSERT(!compileTime_); compileTime_ = t; }
unsigned compileTime() const { return compileTime_; }
};
class CompileTask
// An IonCompileTask represents the task of compiling a single function body. An
// IonCompileTask is filled with the wasm code to be compiled on the main
// validation thread, sent off to an Ion compilation helper thread which creates
// the FuncCompileResults, and finally sent back to the validation thread. To
// save time allocating and freeing memory, IonCompileTasks are reset() and
// reused.
class IonCompileTask
{
LifoAlloc lifo_;
JSRuntime* const runtime_;
const CompileArgs args_;
LifoAlloc lifo_;
const FuncIR* func_;
mozilla::Maybe<FunctionCompileResults> results_;
mozilla::Maybe<FuncCompileResults> results_;
CompileTask(const CompileTask&) = delete;
CompileTask& operator=(const CompileTask&) = delete;
IonCompileTask(const IonCompileTask&) = delete;
IonCompileTask& operator=(const IonCompileTask&) = delete;
public:
CompileTask(size_t defaultChunkSize, CompileArgs args)
: lifo_(defaultChunkSize),
IonCompileTask(JSRuntime* runtime, CompileArgs args, size_t defaultChunkSize)
: runtime_(runtime),
args_(args),
lifo_(defaultChunkSize),
func_(nullptr)
{}
JSRuntime* runtime() const {
return runtime_;
}
LifoAlloc& lifo() {
return lifo_;
}
@ -84,7 +93,7 @@ class CompileTask
MOZ_ASSERT(func_);
return *func_;
}
FunctionCompileResults& results() {
FuncCompileResults& results() {
return *results_;
}
void reset() {
@ -95,9 +104,9 @@ class CompileTask
};
bool
CompileFunction(CompileTask* task);
IonCompileFunction(IonCompileTask* task);
} // namespace wasm
} // namespace js
#endif // asmjs_wasm_ion_compile_h
#endif // wasm_ion_compile_h
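As a hedged sketch of the split described above (the dispatch machinery and the task's function-body setter are elided and not part of this patch; CompileOnHelperThread is an invented name):

static bool
CompileOnHelperThread(js::wasm::IonCompileTask* task)
{
    // The validation thread has already filled *task with the FuncIR to compile.
    if (!js::wasm::IonCompileFunction(task))
        return false;                         // OOM or Ion codegen failure

    // Back on the validation thread, ModuleGenerator merges task->results()
    // (its MacroAssembler and FuncOffsets) into the whole-module assembler and
    // then calls task->reset() so the task's LifoAlloc can be reused.
    return true;
}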

js/src/asmjs/WasmModule.cpp (new file, 1368 lines): diff suppressed because it is too large.

js/src/asmjs/WasmModule.h (new file, 569 lines):

@ -0,0 +1,569 @@
/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
* vim: set ts=8 sts=4 et sw=4 tw=99:
*
* Copyright 2015 Mozilla Foundation
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef wasm_module_h
#define wasm_module_h
#include "asmjs/WasmTypes.h"
#include "gc/Barrier.h"
#include "vm/MallocProvider.h"
namespace js {
class AsmJSActivation;
namespace jit { struct BaselineScript; }
namespace wasm {
// A wasm Module and everything it contains must support serialization,
// deserialization and cloning. Some data can be simply copied as raw bytes and,
// as a convention, is stored in an inline CacheablePod struct. Everything else
// should implement the below methods which are called recusively by the
// containing Module. The implementation of all these methods are grouped
// together in WasmSerialize.cpp.
#define WASM_DECLARE_SERIALIZABLE(Type) \
size_t serializedSize() const; \
uint8_t* serialize(uint8_t* cursor) const; \
const uint8_t* deserialize(ExclusiveContext* cx, const uint8_t* cursor); \
bool clone(JSContext* cx, Type* out) const; \
size_t sizeOfExcludingThis(mozilla::MallocSizeOf mallocSizeOf) const;
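For illustration only (this type is invented, not part of the patch), a cacheable struct following that convention pairs a raw-copyable pod section with the macro-declared methods:

struct ExampleMetadata
{
    struct CacheablePod {
        uint32_t begin;     // plain bytes: can be copied with a raw byte write
        uint32_t end;
    } pod;
    Vector<uint32_t, 0, SystemAllocPolicy> offsets;   // needs per-element handling

    WASM_DECLARE_SERIALIZABLE(ExampleMetadata)
};

// The macro only declares serializedSize/serialize/deserialize/clone/
// sizeOfExcludingThis; the definitions would live with the others in
// WasmSerialize.cpp.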
// The StaticLinkData contains all the metadata necessary to perform
// Module::staticallyLink but is not necessary afterwards.
struct StaticLinkData
{
struct InternalLink {
enum Kind {
RawPointer,
CodeLabel,
InstructionImmediate
};
uint32_t patchAtOffset;
uint32_t targetOffset;
InternalLink() = default;
explicit InternalLink(Kind kind);
bool isRawPointerPatch();
};
typedef Vector<InternalLink, 0, SystemAllocPolicy> InternalLinkVector;
typedef Vector<uint32_t, 0, SystemAllocPolicy> OffsetVector;
struct SymbolicLinkArray : mozilla::EnumeratedArray<SymbolicAddress,
SymbolicAddress::Limit,
OffsetVector> {
WASM_DECLARE_SERIALIZABLE(SymbolicLinkArray)
};
struct FuncPtrTable {
uint32_t globalDataOffset;
OffsetVector elemOffsets;
explicit FuncPtrTable(uint32_t globalDataOffset) : globalDataOffset(globalDataOffset) {}
FuncPtrTable() = default;
FuncPtrTable(FuncPtrTable&& rhs)
: globalDataOffset(rhs.globalDataOffset), elemOffsets(Move(rhs.elemOffsets))
{}
WASM_DECLARE_SERIALIZABLE(FuncPtrTable)
};
typedef Vector<FuncPtrTable, 0, SystemAllocPolicy> FuncPtrTableVector;
struct CacheablePod {
uint32_t interruptOffset;
uint32_t outOfBoundsOffset;
} pod;
InternalLinkVector internalLinks;
SymbolicLinkArray symbolicLinks;
FuncPtrTableVector funcPtrTables;
WASM_DECLARE_SERIALIZABLE(StaticLinkData)
};
typedef UniquePtr<StaticLinkData, JS::DeletePolicy<StaticLinkData>> UniqueStaticLinkData;
// An Export describes an export from a wasm module. Currently only functions
// can be exported.
class Export
{
MallocSig sig_;
struct CacheablePod {
uint32_t funcIndex_;
uint32_t stubOffset_;
} pod;
public:
Export() = default;
Export(MallocSig&& sig, uint32_t funcIndex)
: sig_(Move(sig))
{
pod.funcIndex_ = funcIndex;
pod.stubOffset_ = UINT32_MAX;
}
Export(Export&& rhs)
: sig_(Move(rhs.sig_)),
pod(rhs.pod)
{}
void initStubOffset(uint32_t stubOffset) {
MOZ_ASSERT(pod.stubOffset_ == UINT32_MAX);
pod.stubOffset_ = stubOffset;
}
uint32_t funcIndex() const {
return pod.funcIndex_;
}
uint32_t stubOffset() const {
return pod.stubOffset_;
}
const MallocSig& sig() const {
return sig_;
}
WASM_DECLARE_SERIALIZABLE(Export)
};
typedef Vector<Export, 0, SystemAllocPolicy> ExportVector;
// An Import describes a wasm module import. Currently, only functions can be
// imported in wasm and a function import also includes the signature used
// within the module to call that import. An import is slightly different than
// an asm.js FFI function: a single asm.js FFI function can be called with many
// different signatures. When compiled to wasm, each unique FFI function paired
// with signature generates a wasm import.
class Import
{
MallocSig sig_;
struct CacheablePod {
uint32_t exitGlobalDataOffset_;
uint32_t interpExitCodeOffset_;
uint32_t jitExitCodeOffset_;
} pod;
public:
Import() {}
Import(Import&& rhs) : sig_(Move(rhs.sig_)), pod(rhs.pod) {}
Import(MallocSig&& sig, uint32_t exitGlobalDataOffset)
: sig_(Move(sig))
{
pod.exitGlobalDataOffset_ = exitGlobalDataOffset;
pod.interpExitCodeOffset_ = 0;
pod.jitExitCodeOffset_ = 0;
}
void initInterpExitOffset(uint32_t off) {
MOZ_ASSERT(!pod.interpExitCodeOffset_);
pod.interpExitCodeOffset_ = off;
}
void initJitExitOffset(uint32_t off) {
MOZ_ASSERT(!pod.jitExitCodeOffset_);
pod.jitExitCodeOffset_ = off;
}
const MallocSig& sig() const {
return sig_;
}
uint32_t exitGlobalDataOffset() const {
return pod.exitGlobalDataOffset_;
}
uint32_t interpExitCodeOffset() const {
MOZ_ASSERT(pod.interpExitCodeOffset_);
return pod.interpExitCodeOffset_;
}
uint32_t jitExitCodeOffset() const {
MOZ_ASSERT(pod.jitExitCodeOffset_);
return pod.jitExitCodeOffset_;
}
WASM_DECLARE_SERIALIZABLE(Import)
};
typedef Vector<Import, 0, SystemAllocPolicy> ImportVector;
// A CodeRange describes a single contiguous range of code within a wasm
// module's code segment. A CodeRange describes what the code does and, for
// function bodies, the name and source coordinates of the function.
class CodeRange
{
uint32_t nameIndex_;
uint32_t lineNumber_;
uint32_t begin_;
uint32_t profilingReturn_;
uint32_t end_;
union {
struct {
uint8_t kind_;
uint8_t beginToEntry_;
uint8_t profilingJumpToProfilingReturn_;
uint8_t profilingEpilogueToProfilingReturn_;
} func;
uint8_t kind_;
} u;
void assertValid();
public:
enum Kind { Function, Entry, ImportJitExit, ImportInterpExit, Interrupt, Inline };
CodeRange() = default;
CodeRange(Kind kind, Offsets offsets);
CodeRange(Kind kind, ProfilingOffsets offsets);
CodeRange(uint32_t nameIndex, uint32_t lineNumber, FuncOffsets offsets);
// All CodeRanges have a begin and end.
uint32_t begin() const {
return begin_;
}
uint32_t end() const {
return end_;
}
// Other fields are only available for certain CodeRange::Kinds.
Kind kind() const { return Kind(u.kind_); }
// Every CodeRange except entry and inline stubs has a profiling return
// which is used for asynchronous profiling to determine the frame pointer.
uint32_t profilingReturn() const {
MOZ_ASSERT(kind() != Entry && kind() != Inline);
return profilingReturn_;
}
// Functions have offsets which allow patching to selectively execute
// profiling prologues/epilogues.
bool isFunction() const {
return kind() == Function;
}
uint32_t funcProfilingEntry() const {
MOZ_ASSERT(isFunction());
return begin();
}
uint32_t funcNonProfilingEntry() const {
MOZ_ASSERT(isFunction());
return begin_ + u.func.beginToEntry_;
}
uint32_t functionProfilingJump() const {
MOZ_ASSERT(isFunction());
return profilingReturn_ - u.func.profilingJumpToProfilingReturn_;
}
uint32_t funcProfilingEpilogue() const {
MOZ_ASSERT(isFunction());
return profilingReturn_ - u.func.profilingEpilogueToProfilingReturn_;
}
uint32_t funcNameIndex() const {
MOZ_ASSERT(isFunction());
return nameIndex_;
}
uint32_t funcLineNumber() const {
MOZ_ASSERT(isFunction());
return lineNumber_;
}
// A sorted array of CodeRanges can be looked up via BinarySearch and PC.
struct PC {
size_t offset;
explicit PC(size_t offset) : offset(offset) {}
bool operator==(const CodeRange& rhs) const {
return offset >= rhs.begin() && offset < rhs.end();
}
bool operator<(const CodeRange& rhs) const {
return offset < rhs.begin();
}
};
};
typedef Vector<CodeRange, 0, SystemAllocPolicy> CodeRangeVector;
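The PC comparators above are intended for binary search over a sorted, non-overlapping CodeRangeVector; a minimal lookup sketch (hypothetical helper, assuming pcOffset is already relative to the module's code base):

static const CodeRange*
LookupCodeRangeSketch(const CodeRangeVector& codeRanges, size_t pcOffset)
{
    CodeRange::PC pc(pcOffset);
    size_t lo = 0;
    size_t hi = codeRanges.length();
    while (lo < hi) {
        size_t mid = lo + (hi - lo) / 2;
        if (pc == codeRanges[mid])
            return &codeRanges[mid];   // pcOffset is within [begin, end)
        if (pc < codeRanges[mid])
            hi = mid;                  // entirely before this range
        else
            lo = mid + 1;              // at or past this range's end
    }
    return nullptr;
}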
// A CacheableChars is used to cacheably store UniqueChars in Module.
struct CacheableChars : public UniqueChars
{
explicit CacheableChars(char* ptr) : UniqueChars(ptr) {}
MOZ_IMPLICIT CacheableChars(UniqueChars&& rhs) : UniqueChars(Move(rhs)) {}
CacheableChars() = default;
CacheableChars(CacheableChars&& rhs) : UniqueChars(Move(rhs)) {}
void operator=(CacheableChars&& rhs) { UniqueChars& base = *this; base = Move(rhs); }
WASM_DECLARE_SERIALIZABLE(CacheableChars)
};
typedef Vector<CacheableChars, 0, SystemAllocPolicy> CacheableCharsVector;
// A UniqueCodePtr owns allocated executable code. Code passed to the Module
// constructor must be allocated via AllocateCode.
class CodeDeleter
{
uint32_t bytes_;
public:
explicit CodeDeleter(uint32_t bytes) : bytes_(bytes) {}
void operator()(uint8_t* p);
};
typedef JS::UniquePtr<uint8_t, CodeDeleter> UniqueCodePtr;
UniqueCodePtr
AllocateCode(ExclusiveContext* cx, size_t bytes);
// Module represents a compiled WebAssembly module which lives until the last
// reference to any exported functions is dropped. Modules must be wrapped by a
// rooted JSObject immediately after creation so that Module::trace() is called
// during GC. Modules are created after compilation completes and start in a
// fully unlinked state. After creation, a module must be first statically
// linked and then dynamically linked:
//
// - Static linking patches code or global data that relies on absolute
// addresses. Static linking should happen after a module is serialized into
// a cache file so that the cached code is stored unlinked and ready to be
// statically linked after deserialization.
//
// - Dynamic linking patches code or global data that relies on the address of
// the heap and imports of a module. A module may only be dynamically linked
// once. However, a dynamically-linked module may be cloned so that the clone
// can be independently dynamically linked.
//
// Once fully dynamically linked, a module can have its exports invoked (via
// entryTrampoline). While executing, profiling may be enabled/disabled (when
// the Module is not active()) via setProfilingEnabled(). When profiling is
// enabled, a module's frames will be visible to wasm::ProfilingFrameIterator.
class Module
{
struct ImportExit {
void* code;
jit::BaselineScript* baselineScript;
HeapPtrFunction fun;
static_assert(sizeof(HeapPtrFunction) == sizeof(void*), "for JIT access");
};
struct FuncPtrTable {
uint32_t globalDataOffset;
uint32_t numElems;
explicit FuncPtrTable(const StaticLinkData::FuncPtrTable& table)
: globalDataOffset(table.globalDataOffset),
numElems(table.elemOffsets.length())
{}
};
typedef Vector<FuncPtrTable, 0, SystemAllocPolicy> FuncPtrTableVector;
typedef Vector<CacheableChars, 0, SystemAllocPolicy> FuncLabelVector;
typedef RelocatablePtrArrayBufferObjectMaybeShared BufferPtr;
// Initialized when constructed:
struct CacheablePod {
const uint32_t functionBytes_;
const uint32_t codeBytes_;
const uint32_t globalBytes_;
const bool usesHeap_;
const bool sharedHeap_;
const bool usesSignalHandlersForOOB_;
const bool usesSignalHandlersForInterrupt_;
} pod;
const UniqueCodePtr code_;
const ImportVector imports_;
const ExportVector exports_;
const HeapAccessVector heapAccesses_;
const CodeRangeVector codeRanges_;
const CallSiteVector callSites_;
const CacheableCharsVector funcNames_;
const CacheableChars filename_;
const bool loadedFromCache_;
// Initialized during staticallyLink:
bool staticallyLinked_;
uint8_t* interrupt_;
uint8_t* outOfBounds_;
FuncPtrTableVector funcPtrTables_;
// Initialized during dynamicallyLink:
bool dynamicallyLinked_;
BufferPtr maybeHeap_;
Module** prev_;
Module* next_;
// Mutated after dynamicallyLink:
bool profilingEnabled_;
FuncLabelVector funcLabels_;
bool interrupted_;
class AutoMutateCode;
uint32_t totalBytes() const;
uint8_t* rawHeapPtr() const;
uint8_t*& rawHeapPtr();
void specializeToHeap(ArrayBufferObjectMaybeShared* heap);
void despecializeFromHeap(ArrayBufferObjectMaybeShared* heap);
void sendCodeRangesToProfiler(JSContext* cx);
ImportExit& importToExit(const Import& import);
enum CacheBool { NotLoadedFromCache = false, LoadedFromCache = true };
enum ProfilingBool { ProfilingDisabled = false, ProfilingEnabled = true };
static CacheablePod zeroPod();
void init();
Module(const CacheablePod& pod,
UniqueCodePtr code,
ImportVector&& imports,
ExportVector&& exports,
HeapAccessVector&& heapAccesses,
CodeRangeVector&& codeRanges,
CallSiteVector&& callSites,
CacheableCharsVector&& funcNames,
CacheableChars filename,
CacheBool loadedFromCache,
ProfilingBool profilingEnabled,
FuncLabelVector&& funcLabels);
template <class> friend struct js::MallocProvider;
public:
static const unsigned SizeOfImportExit = sizeof(ImportExit);
static const unsigned OffsetOfImportExitFun = offsetof(ImportExit, fun);
enum HeapBool { DoesntUseHeap = false, UsesHeap = true };
enum SharedBool { UnsharedHeap = false, SharedHeap = true };
Module(CompileArgs args,
uint32_t functionBytes,
uint32_t codeBytes,
uint32_t globalBytes,
HeapBool usesHeap,
SharedBool sharedHeap,
UniqueCodePtr code,
ImportVector&& imports,
ExportVector&& exports,
HeapAccessVector&& heapAccesses,
CodeRangeVector&& codeRanges,
CallSiteVector&& callSites,
CacheableCharsVector&& funcNames,
CacheableChars filename);
~Module();
void trace(JSTracer* trc);
uint8_t* code() const { return code_.get(); }
uint8_t* globalData() const { return code() + pod.codeBytes_; }
uint32_t globalBytes() const { return pod.globalBytes_; }
bool usesHeap() const { return pod.usesHeap_; }
bool sharedHeap() const { return pod.sharedHeap_; }
CompileArgs compileArgs() const;
const ImportVector& imports() const { return imports_; }
const ExportVector& exports() const { return exports_; }
const char* functionName(uint32_t i) const { return funcNames_[i].get(); }
const char* filename() const { return filename_.get(); }
bool loadedFromCache() const { return loadedFromCache_; }
bool staticallyLinked() const { return staticallyLinked_; }
bool dynamicallyLinked() const { return dynamicallyLinked_; }
bool profilingEnabled() const { return profilingEnabled_; }
// The range [0, functionBytes) is a subrange of [0, codeBytes) that
// contains only function body code, not the stub code. This distinction is
// used by the async interrupt handler to only interrupt when the pc is in
// function code which, in turn, simplifies reasoning about how stubs
// enter/exit.
bool containsFunctionPC(void* pc) const;
bool containsCodePC(void* pc) const;
const CallSite* lookupCallSite(void* returnAddress) const;
const CodeRange* lookupCodeRange(void* pc) const;
const HeapAccess* lookupHeapAccess(void* pc) const;
// This function transitions the module from an unlinked state to a
// statically-linked state. The given StaticLinkData must have come from the
// compilation of this module.
bool staticallyLink(ExclusiveContext* cx, const StaticLinkData& linkData);
// This function transitions the module from a statically-linked state to a
// dynamically-linked state. If this module usesHeap(), a non-null heap
// buffer must be given. The given import vector must match the module's
// ImportVector.
bool dynamicallyLink(JSContext* cx, Handle<ArrayBufferObjectMaybeShared*> heap,
const AutoVectorRooter<JSFunction*>& imports);
Module* nextLinked() const;
// The wasm heap, established by dynamicallyLink.
ArrayBufferObjectMaybeShared* maybeBuffer() const;
SharedMem<uint8_t*> maybeHeap() const;
size_t heapLength() const;
// asm.js may detach and change the heap at any time. As an internal detail,
// the heap may not be changed while the module has been asynchronously
// interrupted.
bool hasDetachedHeap() const;
bool changeHeap(Handle<ArrayBufferObject*> newBuffer, JSContext* cx);
bool detachHeap(JSContext* cx);
void setInterrupted(bool interrupted);
// The exports of a wasm module are called by preparing an array of
// arguments (coerced to the corresponding types of the Export signature)
// and calling the export's entry trampoline. All such calls must be
// associated with a containing AsmJSActivation. The innermost
// AsmJSActivation must be maintained in the Module::activation field.
struct EntryArg {
uint64_t lo;
uint64_t hi;
};
typedef int32_t (*EntryFuncPtr)(EntryArg* args, uint8_t* global);
EntryFuncPtr entryTrampoline(const Export& func) const;
AsmJSActivation*& activation();
// Initially, calls to imports in wasm code call out through the generic
// callImport method. If the imported callee gets JIT compiled and the types
// match up, callImport will patch the code to instead call through a thunk
// directly into the JIT code. If the JIT code is released, the Module must
// be notified so it can go back to the generic callImport.
bool callImport(JSContext* cx, uint32_t importIndex, unsigned argc, const Value* argv,
MutableHandleValue rval);
void deoptimizeImportExit(uint32_t importIndex);
// At runtime, when $pc is in wasm function code (containsFunctionPC($pc)),
// $pc may be moved abruptly to interrupt() or outOfBounds() by a signal
// handler or SetContext() from another thread.
uint8_t* interrupt() const { MOZ_ASSERT(staticallyLinked_); return interrupt_; }
uint8_t* outOfBounds() const { MOZ_ASSERT(staticallyLinked_); return outOfBounds_; }
// When a module is inactive (no live activations), the profiling mode
// can be toggled. WebAssembly frames only show up in the
// ProfilingFrameIterator when profiling is enabled.
bool active() { return !!activation(); }
void setProfilingEnabled(bool enabled, JSContext* cx);
const char* profilingLabel(uint32_t funcIndex) const;
// See WASM_DECLARE_SERIALIZABLE.
size_t serializedSize() const;
uint8_t* serialize(uint8_t* cursor) const;
typedef UniquePtr<Module, JS::DeletePolicy<Module>> UniqueModule;
static const uint8_t* deserialize(ExclusiveContext* cx, const uint8_t* cursor,
UniqueModule* out);
UniqueModule clone(JSContext* cx, const StaticLinkData& linkData) const;
void addSizeOfMisc(MallocSizeOf mallocSizeOf, size_t* asmJSModuleCode,
size_t* asmJSModuleData);
};
} // namespace wasm
} // namespace js
#endif // wasm_module_h
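Pulling the lifecycle comments above together, a hedged sketch of driving a freshly compiled Module from the embedding side (activation setup, argument coercion against exp.sig(), rooting and exact namespace qualification are all approximated; the function name and fixed-size args array are invented):

static bool
RunExportSketch(JSContext* cx, js::wasm::Module& module,
                const js::wasm::StaticLinkData& linkData,
                JS::Handle<js::ArrayBufferObjectMaybeShared*> heap,
                const js::AutoVectorRooter<JSFunction*>& imports,
                const js::wasm::Export& exp)
{
    // 1. Patch absolute addresses; done once, e.g. right after deserialization.
    if (!module.staticallyLink(cx, linkData))
        return false;

    // 2. Bind the heap and import functions; allowed only once per Module.
    if (!module.dynamicallyLink(cx, heap, imports))
        return false;

    // 3. Box the arguments into EntryArgs and call through the export's stub.
    js::wasm::Module::EntryArg args[8];   // really sized and filled per exp.sig()
    js::wasm::Module::EntryFuncPtr entry = module.entryTrampoline(exp);
    return entry(args, module.globalData()) != 0;   // nonzero assumed to mean success
}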


@ -0,0 +1,350 @@
/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
* vim: set ts=8 sts=4 et sw=4 tw=99:
*
* Copyright 2015 Mozilla Foundation
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef wasm_serialize_h
#define wasm_serialize_h
#include "jit/MacroAssembler.h"
namespace js {
namespace wasm {
// Factor out common serialization, cloning and about:memory size-computation
// functions for reuse when serializing wasm and asm.js modules.
static inline uint8_t*
WriteBytes(uint8_t* dst, const void* src, size_t nbytes)
{
memcpy(dst, src, nbytes);
return dst + nbytes;
}
static inline const uint8_t*
ReadBytes(const uint8_t* src, void* dst, size_t nbytes)
{
memcpy(dst, src, nbytes);
return src + nbytes;
}
template <class T>
static inline uint8_t*
WriteScalar(uint8_t* dst, T t)
{
memcpy(dst, &t, sizeof(t));
return dst + sizeof(t);
}
template <class T>
static inline const uint8_t*
ReadScalar(const uint8_t* src, T* dst)
{
memcpy(dst, src, sizeof(*dst));
return src + sizeof(*dst);
}
static inline size_t
SerializedNameSize(PropertyName* name)
{
size_t s = sizeof(uint32_t);
if (name)
s += name->length() * (name->hasLatin1Chars() ? sizeof(Latin1Char) : sizeof(char16_t));
return s;
}
static inline uint8_t*
SerializeName(uint8_t* cursor, PropertyName* name)
{
MOZ_ASSERT_IF(name, !name->empty());
if (name) {
static_assert(JSString::MAX_LENGTH <= INT32_MAX, "String length must fit in 31 bits");
uint32_t length = name->length();
uint32_t lengthAndEncoding = (length << 1) | uint32_t(name->hasLatin1Chars());
cursor = WriteScalar<uint32_t>(cursor, lengthAndEncoding);
JS::AutoCheckCannotGC nogc;
if (name->hasLatin1Chars())
cursor = WriteBytes(cursor, name->latin1Chars(nogc), length * sizeof(Latin1Char));
else
cursor = WriteBytes(cursor, name->twoByteChars(nogc), length * sizeof(char16_t));
} else {
cursor = WriteScalar<uint32_t>(cursor, 0);
}
return cursor;
}
template <typename CharT>
static inline const uint8_t*
DeserializeChars(ExclusiveContext* cx, const uint8_t* cursor, size_t length, PropertyName** name)
{
Vector<CharT> tmp(cx);
CharT* src;
if ((size_t(cursor) & (sizeof(CharT) - 1)) != 0) {
// Align 'src' for AtomizeChars.
if (!tmp.resize(length))
return nullptr;
memcpy(tmp.begin(), cursor, length * sizeof(CharT));
src = tmp.begin();
} else {
src = (CharT*)cursor;
}
JSAtom* atom = AtomizeChars(cx, src, length);
if (!atom)
return nullptr;
*name = atom->asPropertyName();
return cursor + length * sizeof(CharT);
}
static inline const uint8_t*
DeserializeName(ExclusiveContext* cx, const uint8_t* cursor, PropertyName** name)
{
uint32_t lengthAndEncoding;
cursor = ReadScalar<uint32_t>(cursor, &lengthAndEncoding);
uint32_t length = lengthAndEncoding >> 1;
if (length == 0) {
*name = nullptr;
return cursor;
}
bool latin1 = lengthAndEncoding & 0x1;
return latin1
? DeserializeChars<Latin1Char>(cx, cursor, length, name)
: DeserializeChars<char16_t>(cx, cursor, length, name);
}
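To make the encoding concrete: SerializeName stores (length << 1) | isLatin1, so a five-character Latin1 name writes the word 0x0B followed by five bytes, a five-character two-byte name writes 0x0A followed by ten bytes, and a null name writes the single word 0; DeserializeName recovers the length as word >> 1 and treats length 0 as "no name".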
template <class T, size_t N>
static inline size_t
SerializedVectorSize(const mozilla::Vector<T, N, SystemAllocPolicy>& vec)
{
size_t size = sizeof(uint32_t);
for (size_t i = 0; i < vec.length(); i++)
size += vec[i].serializedSize();
return size;
}
template <class T, size_t N>
static inline uint8_t*
SerializeVector(uint8_t* cursor, const mozilla::Vector<T, N, SystemAllocPolicy>& vec)
{
cursor = WriteScalar<uint32_t>(cursor, vec.length());
for (size_t i = 0; i < vec.length(); i++)
cursor = vec[i].serialize(cursor);
return cursor;
}
template <class T, size_t N>
static inline const uint8_t*
DeserializeVector(ExclusiveContext* cx, const uint8_t* cursor,
mozilla::Vector<T, N, SystemAllocPolicy>* vec)
{
uint32_t length;
cursor = ReadScalar<uint32_t>(cursor, &length);
if (!vec->resize(length))
return nullptr;
for (size_t i = 0; i < vec->length(); i++) {
if (!(cursor = (*vec)[i].deserialize(cx, cursor)))
return nullptr;
}
return cursor;
}
template <class T, size_t N>
static inline bool
CloneVector(JSContext* cx, const mozilla::Vector<T, N, SystemAllocPolicy>& in,
mozilla::Vector<T, N, SystemAllocPolicy>* out)
{
if (!out->resize(in.length()))
return false;
for (size_t i = 0; i < in.length(); i++) {
if (!in[i].clone(cx, &(*out)[i]))
return false;
}
return true;
}
template <class T, size_t N>
static inline size_t
SizeOfVectorExcludingThis(const mozilla::Vector<T, N, SystemAllocPolicy>& vec,
MallocSizeOf mallocSizeOf)
{
size_t size = vec.sizeOfExcludingThis(mallocSizeOf);
for (const T& t : vec)
size += t.sizeOfExcludingThis(mallocSizeOf);
return size;
}
template <class T, size_t N>
static inline size_t
SerializedPodVectorSize(const mozilla::Vector<T, N, SystemAllocPolicy>& vec)
{
return sizeof(uint32_t) +
vec.length() * sizeof(T);
}
template <class T, size_t N>
static inline uint8_t*
SerializePodVector(uint8_t* cursor, const mozilla::Vector<T, N, SystemAllocPolicy>& vec)
{
cursor = WriteScalar<uint32_t>(cursor, vec.length());
cursor = WriteBytes(cursor, vec.begin(), vec.length() * sizeof(T));
return cursor;
}
template <class T, size_t N>
static inline const uint8_t*
DeserializePodVector(ExclusiveContext* cx, const uint8_t* cursor,
mozilla::Vector<T, N, SystemAllocPolicy>* vec)
{
uint32_t length;
cursor = ReadScalar<uint32_t>(cursor, &length);
if (!vec->resize(length))
return nullptr;
cursor = ReadBytes(cursor, vec->begin(), length * sizeof(T));
return cursor;
}
template <class T, size_t N>
static inline bool
ClonePodVector(JSContext* cx, const mozilla::Vector<T, N, SystemAllocPolicy>& in,
mozilla::Vector<T, N, SystemAllocPolicy>* out)
{
if (!out->resize(in.length()))
return false;
mozilla::PodCopy(out->begin(), in.begin(), in.length());
return true;
}
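A small usage sketch of the pod-vector helpers (the function and the Uint32Vector alias are invented for illustration):

typedef mozilla::Vector<uint32_t, 0, SystemAllocPolicy> Uint32Vector;

static bool
RoundTripPodVectorSketch(ExclusiveContext* cx, const Uint32Vector& in, Uint32Vector* out)
{
    mozilla::Vector<uint8_t, 0, SystemAllocPolicy> bytes;
    if (!bytes.resize(SerializedPodVectorSize(in)))
        return false;
    SerializePodVector(bytes.begin(), in);            // length word + raw elements
    return !!DeserializePodVector(cx, bytes.begin(), out);
}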
static inline bool
GetCPUID(uint32_t* cpuId)
{
enum Arch {
X86 = 0x1,
X64 = 0x2,
ARM = 0x3,
MIPS = 0x4,
MIPS64 = 0x5,
ARCH_BITS = 3
};
#if defined(JS_CODEGEN_X86)
MOZ_ASSERT(uint32_t(jit::CPUInfo::GetSSEVersion()) <= (UINT32_MAX >> ARCH_BITS));
*cpuId = X86 | (uint32_t(jit::CPUInfo::GetSSEVersion()) << ARCH_BITS);
return true;
#elif defined(JS_CODEGEN_X64)
MOZ_ASSERT(uint32_t(jit::CPUInfo::GetSSEVersion()) <= (UINT32_MAX >> ARCH_BITS));
*cpuId = X64 | (uint32_t(jit::CPUInfo::GetSSEVersion()) << ARCH_BITS);
return true;
#elif defined(JS_CODEGEN_ARM)
MOZ_ASSERT(jit::GetARMFlags() <= (UINT32_MAX >> ARCH_BITS));
*cpuId = ARM | (jit::GetARMFlags() << ARCH_BITS);
return true;
#elif defined(JS_CODEGEN_MIPS32)
MOZ_ASSERT(jit::GetMIPSFlags() <= (UINT32_MAX >> ARCH_BITS));
*cpuId = MIPS | (jit::GetMIPSFlags() << ARCH_BITS);
return true;
#elif defined(JS_CODEGEN_MIPS64)
MOZ_ASSERT(jit::GetMIPSFlags() <= (UINT32_MAX >> ARCH_BITS));
*cpuId = MIPS64 | (jit::GetMIPSFlags() << ARCH_BITS);
return true;
#else
return false;
#endif
}
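To make the packing concrete (illustrative numbers only): on x64, if GetSSEVersion() reported, say, 6, the identifier would be X64 | (6 << ARCH_BITS) = 0x2 | 0x30 = 0x32; the low three ARCH_BITS select the architecture and the remaining bits carry the per-architecture feature flags.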
class MachineId
{
uint32_t cpuId_;
JS::BuildIdCharVector buildId_;
public:
bool extractCurrentState(ExclusiveContext* cx) {
if (!cx->asmJSCacheOps().buildId)
return false;
if (!cx->asmJSCacheOps().buildId(&buildId_))
return false;
if (!GetCPUID(&cpuId_))
return false;
return true;
}
size_t serializedSize() const {
return sizeof(uint32_t) +
SerializedPodVectorSize(buildId_);
}
uint8_t* serialize(uint8_t* cursor) const {
cursor = WriteScalar<uint32_t>(cursor, cpuId_);
cursor = SerializePodVector(cursor, buildId_);
return cursor;
}
const uint8_t* deserialize(ExclusiveContext* cx, const uint8_t* cursor) {
(cursor = ReadScalar<uint32_t>(cursor, &cpuId_)) &&
(cursor = DeserializePodVector(cx, cursor, &buildId_));
return cursor;
}
bool operator==(const MachineId& rhs) const {
return cpuId_ == rhs.cpuId_ &&
buildId_.length() == rhs.buildId_.length() &&
mozilla::PodEqual(buildId_.begin(), rhs.buildId_.begin(), buildId_.length());
}
bool operator!=(const MachineId& rhs) const {
return !(*this == rhs);
}
};
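A hedged sketch of how the MachineId would gate cache reuse (invented helper name; error reporting elided):

static bool
CacheEntryMatchesMachine(ExclusiveContext* cx, const uint8_t** cursor)
{
    MachineId current, cached;
    if (!current.extractCurrentState(cx))
        return false;                     // no buildId callback: never reuse the cache
    *cursor = cached.deserialize(cx, *cursor);
    return *cursor && cached == current;  // mismatched build or CPU: recompile instead
}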
struct ScopedCacheEntryOpenedForWrite
{
ExclusiveContext* cx;
const size_t serializedSize;
uint8_t* memory;
intptr_t handle;
ScopedCacheEntryOpenedForWrite(ExclusiveContext* cx, size_t serializedSize)
: cx(cx), serializedSize(serializedSize), memory(nullptr), handle(-1)
{}
~ScopedCacheEntryOpenedForWrite() {
if (memory)
cx->asmJSCacheOps().closeEntryForWrite(serializedSize, memory, handle);
}
};
struct ScopedCacheEntryOpenedForRead
{
ExclusiveContext* cx;
size_t serializedSize;
const uint8_t* memory;
intptr_t handle;
explicit ScopedCacheEntryOpenedForRead(ExclusiveContext* cx)
: cx(cx), serializedSize(0), memory(nullptr), handle(0)
{}
~ScopedCacheEntryOpenedForRead() {
if (memory)
cx->asmJSCacheOps().closeEntryForRead(serializedSize, memory, handle);
}
};
} // namespace wasm
} // namespace js
#endif // wasm_serialize_h


@ -16,18 +16,20 @@
* limitations under the License.
*/
#include "asmjs/AsmJSSignalHandlers.h"
#include "asmjs/WasmSignalHandlers.h"
#include "mozilla/DebugOnly.h"
#include "mozilla/PodOperations.h"
#include "asmjs/AsmJSModule.h"
#include "asmjs/AsmJSValidate.h"
#include "jit/AtomicOperations.h"
#include "jit/Disassembler.h"
#include "vm/Runtime.h"
using namespace js;
using namespace js::jit;
using namespace js::wasm;
using JS::GenericNaN;
using mozilla::DebugOnly;
@ -600,12 +602,12 @@ ComputeAccessAddress(EMULATOR_CONTEXT* context, const Disassembler::ComplexAddre
MOZ_COLD static uint8_t*
EmulateHeapAccess(EMULATOR_CONTEXT* context, uint8_t* pc, uint8_t* faultingAddress,
const HeapAccess* heapAccess, const AsmJSModule& module)
const HeapAccess* heapAccess, const Module& module)
{
MOZ_RELEASE_ASSERT(module.containsFunctionPC(pc));
MOZ_RELEASE_ASSERT(module.usesSignalHandlersForOOB());
MOZ_RELEASE_ASSERT(module.compileArgs().useSignalHandlersForOOB);
MOZ_RELEASE_ASSERT(!heapAccess->hasLengthCheck());
MOZ_RELEASE_ASSERT(heapAccess->insnOffset() == (pc - module.codeBase()));
MOZ_RELEASE_ASSERT(heapAccess->insnOffset() == (pc - module.code()));
// Disassemble the instruction which caused the trap so that we can extract
// information about it and decide what to do.
@ -704,7 +706,7 @@ EmulateHeapAccess(EMULATOR_CONTEXT* context, uint8_t* pc, uint8_t* faultingAddre
// load/store that we should handle.
if (heapAccess->throwOnOOB())
return module.outOfBoundsExit();
return module.outOfBounds();
switch (access.kind()) {
case Disassembler::HeapAccess::Load:
@ -755,7 +757,7 @@ HandleFault(PEXCEPTION_POINTERS exception)
if (!activation)
return false;
const AsmJSModule& module = activation->module();
const Module& module = activation->module().wasm();
// These checks aren't necessary, but, since we can, check anyway to make
// sure we aren't covering up a real bug.
@ -772,18 +774,14 @@ HandleFault(PEXCEPTION_POINTERS exception)
// between a faulting heap access and the handling of the fault due
// to InterruptRunningCode's use of SuspendThread. When this happens,
// after ResumeThread, the exception handler is called with pc equal to
// module.interruptExit, which is logically wrong. The Right Thing would
// module.interrupt, which is logically wrong. The Right Thing would
// be for the OS to make fault-handling atomic (so that CONTEXT.pc was
// always the logically-faulting pc). Fortunately, we can detect this
// case and silence the exception ourselves (the exception will
// retrigger after the interrupt jumps back to resumePC).
if (pc == module.interruptExit() &&
module.containsFunctionPC(activation->resumePC()) &&
module.lookupHeapAccess(activation->resumePC()))
{
return true;
}
return false;
return pc == module.interrupt() &&
module.containsFunctionPC(activation->resumePC()) &&
module.lookupHeapAccess(activation->resumePC());
}
const HeapAccess* heapAccess = module.lookupHeapAccess(pc);
@ -902,7 +900,7 @@ HandleMachException(JSRuntime* rt, const ExceptionRequest& request)
if (!activation)
return false;
const AsmJSModule& module = activation->module();
const Module& module = activation->module().wasm();
if (!module.containsFunctionPC(pc))
return false;
@ -939,11 +937,11 @@ static const mach_msg_id_t sExceptionId = 2405;
// The choice of id here is arbitrary, the only constraint is that sQuitId != sExceptionId.
static const mach_msg_id_t sQuitId = 42;
void
AsmJSMachExceptionHandlerThread(void* threadArg)
static void
MachExceptionHandlerThread(void* threadArg)
{
JSRuntime* rt = reinterpret_cast<JSRuntime*>(threadArg);
mach_port_t port = rt->asmJSMachExceptionHandler.port();
mach_port_t port = rt->wasmMachExceptionHandler.port();
kern_return_t kret;
while(true) {
@ -954,7 +952,7 @@ AsmJSMachExceptionHandlerThread(void* threadArg)
// If we fail even receiving the message, we can't even send a reply!
// Rather than hanging the faulting thread (hanging the browser), crash.
if (kret != KERN_SUCCESS) {
fprintf(stderr, "AsmJSMachExceptionHandlerThread: mach_msg failed with %d\n", (int)kret);
fprintf(stderr, "MachExceptionHandlerThread: mach_msg failed with %d\n", (int)kret);
MOZ_CRASH();
}
@ -992,14 +990,14 @@ AsmJSMachExceptionHandlerThread(void* threadArg)
}
}
AsmJSMachExceptionHandler::AsmJSMachExceptionHandler()
MachExceptionHandler::MachExceptionHandler()
: installed_(false),
thread_(nullptr),
port_(MACH_PORT_NULL)
{}
void
AsmJSMachExceptionHandler::uninstall()
MachExceptionHandler::uninstall()
{
if (installed_) {
thread_port_t thread = mach_thread_self();
@ -1025,7 +1023,7 @@ AsmJSMachExceptionHandler::uninstall()
kern_return_t kret = mach_msg(&msg, MACH_SEND_MSG, sizeof(msg), 0, MACH_PORT_NULL,
MACH_MSG_TIMEOUT_NONE, MACH_PORT_NULL);
if (kret != KERN_SUCCESS) {
fprintf(stderr, "AsmJSMachExceptionHandler: failed to send quit message: %d\n", (int)kret);
fprintf(stderr, "MachExceptionHandler: failed to send quit message: %d\n", (int)kret);
MOZ_CRASH();
}
@ -1041,7 +1039,7 @@ AsmJSMachExceptionHandler::uninstall()
}
bool
AsmJSMachExceptionHandler::install(JSRuntime* rt)
MachExceptionHandler::install(JSRuntime* rt)
{
MOZ_ASSERT(!installed());
kern_return_t kret;
@ -1056,7 +1054,7 @@ AsmJSMachExceptionHandler::install(JSRuntime* rt)
goto error;
// Create a thread to block on reading port_.
thread_ = PR_CreateThread(PR_USER_THREAD, AsmJSMachExceptionHandlerThread, rt,
thread_ = PR_CreateThread(PR_USER_THREAD, MachExceptionHandlerThread, rt,
PR_PRIORITY_NORMAL, PR_GLOBAL_THREAD, PR_JOINABLE_THREAD, 0);
if (!thread_)
goto error;
@ -1112,7 +1110,7 @@ HandleFault(int signum, siginfo_t* info, void* ctx)
if (!activation)
return false;
const AsmJSModule& module = activation->module();
const Module& module = activation->module().wasm();
if (!module.containsFunctionPC(pc))
return false;
@ -1185,18 +1183,18 @@ RedirectJitCodeToInterruptCheck(JSRuntime* rt, CONTEXT* context)
RedirectIonBackedgesToInterruptCheck(rt);
if (AsmJSActivation* activation = rt->asmJSActivationStack()) {
const AsmJSModule& module = activation->module();
const Module& module = activation->module().wasm();
#ifdef JS_SIMULATOR
if (module.containsFunctionPC(rt->simulator()->get_pc_as<void*>()))
rt->simulator()->set_resume_pc(module.interruptExit());
rt->simulator()->set_resume_pc(module.interrupt());
#endif
uint8_t** ppc = ContextToPC(context);
uint8_t* pc = *ppc;
if (module.containsFunctionPC(pc)) {
activation->setResumePC(pc);
*ppc = module.interruptExit();
*ppc = module.interrupt();
return true;
}
}
@ -1223,11 +1221,11 @@ JitInterruptHandler(int signum, siginfo_t* info, void* context)
#endif
bool
js::EnsureSignalHandlersInstalled(JSRuntime* rt)
wasm::EnsureSignalHandlersInstalled(JSRuntime* rt)
{
#if defined(XP_DARWIN) && defined(ASMJS_MAY_USE_SIGNAL_HANDLERS_FOR_OOB)
// On OSX, each JSRuntime gets its own handler thread.
if (!rt->asmJSMachExceptionHandler.installed() && !rt->asmJSMachExceptionHandler.install(rt))
if (!rt->wasmMachExceptionHandler.installed() && !rt->wasmMachExceptionHandler.install(rt))
return false;
#endif


@ -16,8 +16,8 @@
* limitations under the License.
*/
#ifndef asmjs_AsmJSSignalHandlers_h
#define asmjs_AsmJSSignalHandlers_h
#ifndef wasm_signal_handlers_h
#define wasm_signal_handlers_h
#if defined(XP_DARWIN) && defined(ASMJS_MAY_USE_SIGNAL_HANDLERS_FOR_OOB)
# include <mach/mach.h>
@ -28,6 +28,12 @@ struct JSRuntime;
namespace js {
// Force any currently-executing asm.js/ion code to call HandleExecutionInterrupt.
extern void
InterruptRunningJitCode(JSRuntime* rt);
namespace wasm {
// Set up any signal/exception handlers needed to execute code in the given
// runtime. Return whether runtime can:
// - rely on fault handler support for avoiding asm.js heap bounds checks
@ -35,10 +41,6 @@ namespace js {
bool
EnsureSignalHandlersInstalled(JSRuntime* rt);
// Force any currently-executing asm.js code to call HandleExecutionInterrupt.
extern void
InterruptRunningJitCode(JSRuntime* rt);
#if defined(XP_DARWIN) && defined(ASMJS_MAY_USE_SIGNAL_HANDLERS_FOR_OOB)
// On OSX we are forced to use the lower-level Mach exception mechanism instead
// of Unix signals. Mach exceptions are not handled on the victim's stack but
@ -46,7 +48,7 @@ InterruptRunningJitCode(JSRuntime* rt);
// per JSRuntime (upon the first use of asm.js in the JSRuntime). This thread
// and related resources are owned by AsmJSMachExceptionHandler which is owned
// by JSRuntime.
class AsmJSMachExceptionHandler
class MachExceptionHandler
{
bool installed_;
PRThread* thread_;
@ -55,14 +57,15 @@ class AsmJSMachExceptionHandler
void uninstall();
public:
AsmJSMachExceptionHandler();
~AsmJSMachExceptionHandler() { uninstall(); }
MachExceptionHandler();
~MachExceptionHandler() { uninstall(); }
mach_port_t port() const { return port_; }
bool installed() const { return installed_; }
bool install(JSRuntime* rt);
};
#endif
} // namespace wasm
} // namespace js
#endif // asmjs_AsmJSSignalHandlers_h
#endif // wasm_signal_handlers_h


@ -21,8 +21,6 @@
#include "mozilla/ArrayUtils.h"
#include "mozilla/EnumeratedRange.h"
#include "asmjs/AsmJSModule.h"
#include "jit/MacroAssembler-inl.h"
using namespace js;
@ -97,20 +95,18 @@ static const unsigned FramePushedAfterSave = NonVolatileRegs.gprs().size() * siz
static const unsigned FramePushedForEntrySP = FramePushedAfterSave + sizeof(void*);
// Generate a stub that enters wasm from a C++ caller via the native ABI.
// The signature of the entry point is AsmJSModule::CodePtr. The exported wasm
// The signature of the entry point is Module::CodePtr. The exported wasm
// function has an ABI derived from its specific signature, so this function
// must map from the ABI of CodePtr to the export's signature's ABI.
static bool
GenerateEntry(MacroAssembler& masm, AsmJSModule& module, unsigned exportIndex,
const FuncOffsetVector& funcOffsets)
GenerateEntry(ModuleGenerator& mg, unsigned exportIndex, Module::HeapBool usesHeap)
{
AsmJSModule::ExportedFunction& exp = module.exportedFunction(exportIndex);
if (exp.isChangeHeap())
return true;
MacroAssembler& masm = mg.masm();
const MallocSig& sig = mg.exportSig(exportIndex);
masm.haltingAlign(CodeAlignment);
AsmJSOffsets offsets;
Offsets offsets;
offsets.begin = masm.currentOffset();
// Save the return address if it wasn't already saved by the call insn.
@ -139,7 +135,8 @@ GenerateEntry(MacroAssembler& masm, AsmJSModule& module, unsigned exportIndex,
// ARM, MIPS/MIPS64 and x64 have a globally-pinned HeapReg (x86 uses immediates in
// effective addresses). Loading the heap register depends on the global
// register already having been loaded.
masm.loadAsmJSHeapRegisterFromGlobalData();
if (usesHeap)
masm.loadAsmJSHeapRegisterFromGlobalData();
// Put the 'argv' argument into a non-argument/return register so that we
// can use 'argv' while we fill in the arguments for the asm.js callee.
@ -168,12 +165,12 @@ GenerateEntry(MacroAssembler& masm, AsmJSModule& module, unsigned exportIndex,
masm.andToStackPtr(Imm32(~(AsmJSStackAlignment - 1)));
// Bump the stack for the call.
masm.reserveStack(AlignBytes(StackArgBytes(exp.sig().args()), AsmJSStackAlignment));
masm.reserveStack(AlignBytes(StackArgBytes(sig.args()), AsmJSStackAlignment));
// Copy parameters out of argv and into the registers/stack-slots specified by
// the system ABI.
for (ABIArgValTypeIter iter(exp.sig().args()); !iter.done(); iter++) {
unsigned argOffset = iter.index() * sizeof(AsmJSModule::EntryArg);
for (ABIArgValTypeIter iter(sig.args()); !iter.done(); iter++) {
unsigned argOffset = iter.index() * sizeof(Module::EntryArg);
Address src(argv, argOffset);
MIRType type = iter.mirType();
switch (iter->kind()) {
@ -186,7 +183,7 @@ GenerateEntry(MacroAssembler& masm, AsmJSModule& module, unsigned exportIndex,
break;
#endif
case ABIArg::FPU: {
static_assert(sizeof(AsmJSModule::EntryArg) >= jit::Simd128DataSize,
static_assert(sizeof(Module::EntryArg) >= jit::Simd128DataSize,
"EntryArg must be big enough to store SIMD values");
switch (type) {
case MIRType_Int32x4:
@ -243,7 +240,7 @@ GenerateEntry(MacroAssembler& masm, AsmJSModule& module, unsigned exportIndex,
// Call into the real function.
masm.assertStackAlignment(AsmJSStackAlignment);
Label target;
target.bind(funcOffsets[exp.funcIndex()]);
target.bind(mg.funcEntryOffsets()[mg.exportFuncIndex(exportIndex)]);
masm.call(CallSiteDesc(CallSiteDesc::Relative), &target);
// Recover the stack pointer value before dynamic alignment.
@ -255,7 +252,7 @@ GenerateEntry(MacroAssembler& masm, AsmJSModule& module, unsigned exportIndex,
masm.Pop(argv);
// Store the return value in argv[0]
switch (exp.sig().ret()) {
switch (sig.ret()) {
case ExprType::Void:
break;
case ExprType::I32:
@ -291,117 +288,8 @@ GenerateEntry(MacroAssembler& masm, AsmJSModule& module, unsigned exportIndex,
if (masm.oom())
return false;
exp.initCodeOffset(offsets.begin);
offsets.end = masm.currentOffset();
return module.addCodeRange(AsmJSModule::CodeRange::Entry, offsets);
}
// Generate a thunk that updates fp before calling the given builtin so that
// both the builtin and the calling function show up in profiler stacks. (This
// thunk is dynamically patched in when profiling is enabled.) Since the thunk
// pushes an AsmJSFrame on the stack, that means we must rebuild the stack
// frame. Fortunately, these are low arity functions and everything is passed in
// regs on everything but x86 anyhow.
//
// NB: Since this thunk is being injected at system ABI callsites, it must
// preserve the argument registers (going in) and the return register
// (coming out) and preserve non-volatile registers.
static bool
GenerateBuiltinThunk(MacroAssembler& masm, AsmJSModule& module, Builtin builtin)
{
MIRTypeVector args;
switch (builtin) {
case Builtin::ToInt32:
MOZ_ALWAYS_TRUE(args.append(MIRType_Int32));
break;
#if defined(JS_CODEGEN_ARM)
case Builtin::aeabi_idivmod:
case Builtin::aeabi_uidivmod:
MOZ_ALWAYS_TRUE(args.append(MIRType_Int32));
MOZ_ALWAYS_TRUE(args.append(MIRType_Int32));
break;
case Builtin::AtomicCmpXchg:
MOZ_ALWAYS_TRUE(args.append(MIRType_Int32));
MOZ_ALWAYS_TRUE(args.append(MIRType_Int32));
MOZ_ALWAYS_TRUE(args.append(MIRType_Int32));
MOZ_ALWAYS_TRUE(args.append(MIRType_Int32));
break;
case Builtin::AtomicXchg:
case Builtin::AtomicFetchAdd:
case Builtin::AtomicFetchSub:
case Builtin::AtomicFetchAnd:
case Builtin::AtomicFetchOr:
case Builtin::AtomicFetchXor:
MOZ_ALWAYS_TRUE(args.append(MIRType_Int32));
MOZ_ALWAYS_TRUE(args.append(MIRType_Int32));
MOZ_ALWAYS_TRUE(args.append(MIRType_Int32));
break;
#endif
case Builtin::SinD:
case Builtin::CosD:
case Builtin::TanD:
case Builtin::ASinD:
case Builtin::ACosD:
case Builtin::ATanD:
case Builtin::CeilD:
case Builtin::FloorD:
case Builtin::ExpD:
case Builtin::LogD:
MOZ_ALWAYS_TRUE(args.append(MIRType_Double));
break;
case Builtin::ModD:
case Builtin::PowD:
case Builtin::ATan2D:
MOZ_ALWAYS_TRUE(args.append(MIRType_Double));
MOZ_ALWAYS_TRUE(args.append(MIRType_Double));
break;
case Builtin::CeilF:
case Builtin::FloorF:
MOZ_ALWAYS_TRUE(args.append(MIRType_Float32));
break;
case Builtin::Limit:
MOZ_CRASH("Bad builtin");
}
MOZ_ASSERT(args.length() <= 4);
static_assert(MIRTypeVector::InlineLength >= 4, "infallibility of append");
MOZ_ASSERT(masm.framePushed() == 0);
uint32_t framePushed = StackDecrementForCall(masm, ABIStackAlignment, args);
AsmJSProfilingOffsets offsets;
GenerateAsmJSExitPrologue(masm, framePushed, ExitReason(builtin), &offsets);
for (ABIArgMIRTypeIter i(args); !i.done(); i++) {
if (i->kind() != ABIArg::Stack)
continue;
#if !defined(JS_CODEGEN_ARM)
unsigned offsetToCallerStackArgs = sizeof(AsmJSFrame) + masm.framePushed();
Address srcAddr(masm.getStackPointer(), offsetToCallerStackArgs + i->offsetFromArgBase());
Address dstAddr(masm.getStackPointer(), i->offsetFromArgBase());
if (i.mirType() == MIRType_Int32 || i.mirType() == MIRType_Float32) {
masm.load32(srcAddr, ABIArgGenerator::NonArg_VolatileReg);
masm.store32(ABIArgGenerator::NonArg_VolatileReg, dstAddr);
} else {
MOZ_ASSERT(i.mirType() == MIRType_Double);
masm.loadDouble(srcAddr, ScratchDoubleReg);
masm.storeDouble(ScratchDoubleReg, dstAddr);
}
#else
MOZ_CRASH("Architecture should have enough registers for all builtin calls");
#endif
}
AssertStackAlignment(masm, ABIStackAlignment);
masm.call(BuiltinToImmediate(builtin));
GenerateAsmJSExitEpilogue(masm, framePushed, ExitReason(builtin), &offsets);
if (masm.oom())
return false;
offsets.end = masm.currentOffset();
return module.addBuiltinThunkCodeRange(builtin, offsets);
return mg.defineExport(exportIndex, offsets);
}
static void
@ -444,17 +332,13 @@ FillArgumentArray(MacroAssembler& masm, const MallocSig::ArgVector& args, unsign
}
}
// If an FFI detaches its heap (viz., via ArrayBuffer.transfer), it must
// If an import call detaches its heap (viz., via ArrayBuffer.transfer), it must
// call change-heap to another heap (viz., the new heap returned by transfer)
// before returning to asm.js code. If the application fails to do this (if the
// heap pointer is null), jump to a stub.
static void
CheckForHeapDetachment(MacroAssembler& masm, const AsmJSModule& module, Register scratch,
Label* onDetached)
CheckForHeapDetachment(MacroAssembler& masm, Register scratch, Label* onDetached)
{
if (!module.hasArrayView())
return;
MOZ_ASSERT(int(masm.framePushed()) >= int(ShadowStackSpace));
AssertStackAlignment(masm, ABIStackAlignment);
#if defined(JS_CODEGEN_X86)
@ -467,18 +351,19 @@ CheckForHeapDetachment(MacroAssembler& masm, const AsmJSModule& module, Register
}
// Generate a stub that is called via the internal ABI derived from the
// signature of the exit and calls into an appropriate InvokeFromAsmJS_* C++
// signature of the import and calls into an appropriate InvokeImport C++
// function, having boxed all the ABI arguments into a homogeneous Value array.
static bool
GenerateInterpExit(MacroAssembler& masm, AsmJSModule& module, unsigned exitIndex,
Label* throwLabel, Label* onDetached)
GenerateInterpExitStub(ModuleGenerator& mg, unsigned importIndex, Module::HeapBool usesHeap,
Label* throwLabel, Label* onDetached, ProfilingOffsets* offsets)
{
AsmJSModule::Exit& exit = module.exit(exitIndex);
MacroAssembler& masm = mg.masm();
const MallocSig& sig = mg.importSig(importIndex);
masm.setFramePushed(0);
// Argument types for InvokeFromAsmJS_*:
static const MIRType typeArray[] = { MIRType_Pointer, // exitDatum
// Argument types for InvokeImport_*:
static const MIRType typeArray[] = { MIRType_Pointer, // ImportExit
MIRType_Int32, // argc
MIRType_Pointer }; // argv
MIRTypeVector invokeArgTypes;
@ -489,29 +374,28 @@ GenerateInterpExit(MacroAssembler& masm, AsmJSModule& module, unsigned exitIndex
// The padding between stack args and argv ensures that argv is aligned. The
// padding between argv and retaddr ensures that sp is aligned.
unsigned argOffset = AlignBytes(StackArgBytes(invokeArgTypes), sizeof(double));
unsigned argBytes = Max<size_t>(1, exit.sig().args().length()) * sizeof(Value);
unsigned argBytes = Max<size_t>(1, sig.args().length()) * sizeof(Value);
unsigned framePushed = StackDecrementForCall(masm, ABIStackAlignment, argOffset + argBytes);
AsmJSProfilingOffsets offsets;
GenerateAsmJSExitPrologue(masm, framePushed, ExitReason::Slow, &offsets);
GenerateExitPrologue(masm, framePushed, ExitReason::ImportInterp, offsets);
// Fill the argument array.
unsigned offsetToCallerStackArgs = sizeof(AsmJSFrame) + masm.framePushed();
Register scratch = ABIArgGenerator::NonArgReturnReg0;
FillArgumentArray(masm, exit.sig().args(), argOffset, offsetToCallerStackArgs, scratch);
FillArgumentArray(masm, sig.args(), argOffset, offsetToCallerStackArgs, scratch);
// Prepare the arguments for the call to InvokeFromAsmJS_*.
// Prepare the arguments for the call to InvokeImport_*.
ABIArgMIRTypeIter i(invokeArgTypes);
// argument 0: exitIndex
// argument 0: importIndex
if (i->kind() == ABIArg::GPR)
masm.mov(ImmWord(exitIndex), i->gpr());
masm.mov(ImmWord(importIndex), i->gpr());
else
masm.store32(Imm32(exitIndex), Address(masm.getStackPointer(), i->offsetFromArgBase()));
masm.store32(Imm32(importIndex), Address(masm.getStackPointer(), i->offsetFromArgBase()));
i++;
// argument 1: argc
unsigned argc = exit.sig().args().length();
unsigned argc = sig.args().length();
if (i->kind() == ABIArg::GPR)
masm.mov(ImmWord(argc), i->gpr());
else
@ -531,13 +415,13 @@ GenerateInterpExit(MacroAssembler& masm, AsmJSModule& module, unsigned exitIndex
// Make the call, test whether it succeeded, and extract the return value.
AssertStackAlignment(masm, ABIStackAlignment);
switch (exit.sig().ret()) {
switch (sig.ret()) {
case ExprType::Void:
masm.call(SymbolicAddress::InvokeFromAsmJS_Ignore);
masm.call(SymbolicAddress::InvokeImport_Void);
masm.branchTest32(Assembler::Zero, ReturnReg, ReturnReg, throwLabel);
break;
case ExprType::I32:
masm.call(SymbolicAddress::InvokeFromAsmJS_ToInt32);
masm.call(SymbolicAddress::InvokeImport_I32);
masm.branchTest32(Assembler::Zero, ReturnReg, ReturnReg, throwLabel);
masm.unboxInt32(argv, ReturnReg);
break;
@ -546,7 +430,7 @@ GenerateInterpExit(MacroAssembler& masm, AsmJSModule& module, unsigned exitIndex
case ExprType::F32:
MOZ_CRASH("Float32 shouldn't be returned from a FFI");
case ExprType::F64:
masm.call(SymbolicAddress::InvokeFromAsmJS_ToNumber);
masm.call(SymbolicAddress::InvokeImport_F64);
masm.branchTest32(Assembler::Zero, ReturnReg, ReturnReg, throwLabel);
masm.loadDouble(argv, ReturnDoubleReg);
break;
@ -558,17 +442,18 @@ GenerateInterpExit(MacroAssembler& masm, AsmJSModule& module, unsigned exitIndex
// The heap pointer may have changed during the FFI, so reload it and test
// for detachment.
masm.loadAsmJSHeapRegisterFromGlobalData();
CheckForHeapDetachment(masm, module, ABIArgGenerator::NonReturn_VolatileReg0, onDetached);
if (usesHeap) {
masm.loadAsmJSHeapRegisterFromGlobalData();
CheckForHeapDetachment(masm, ABIArgGenerator::NonReturn_VolatileReg0, onDetached);
}
GenerateAsmJSExitEpilogue(masm, framePushed, ExitReason::Slow, &offsets);
GenerateExitEpilogue(masm, framePushed, ExitReason::ImportInterp, offsets);
if (masm.oom())
return false;
offsets.end = masm.currentOffset();
exit.initInterpOffset(offsets.begin);
return module.addCodeRange(AsmJSModule::CodeRange::SlowFFI, offsets);
offsets->end = masm.currentOffset();
return true;
}
#if defined(JS_CODEGEN_ARM) || defined(JS_CODEGEN_MIPS32) || defined(JS_CODEGEN_MIPS64)
@ -578,34 +463,35 @@ static const unsigned MaybeSavedGlobalReg = 0;
#endif
// Generate a stub that is called via the internal ABI derived from the
// signature of the exit and calls into a compatible Ion-compiled JIT function,
// having boxed all the ABI arguments into the Ion stack frame layout.
// signature of the import and calls into a compatible JIT function,
// having boxed all the ABI arguments into the JIT stack frame layout.
static bool
GenerateIonExit(MacroAssembler& masm, AsmJSModule& module, unsigned exitIndex,
Label* throwLabel, Label* onDetached)
GenerateJitExitStub(ModuleGenerator& mg, unsigned importIndex, Module::HeapBool usesHeap,
Label* throwLabel, Label* onDetached, ProfilingOffsets* offsets)
{
AsmJSModule::Exit& exit = module.exit(exitIndex);
MacroAssembler& masm = mg.masm();
const MallocSig& sig = mg.importSig(importIndex);
masm.setFramePushed(0);
// Ion calls use the following stack layout (sp grows to the left):
// JIT calls use the following stack layout (sp grows to the left):
// | retaddr | descriptor | callee | argc | this | arg1..N |
// After the Ion frame, the global register (if present) is saved since Ion
// does not preserve non-volatile regs. Also, unlike most ABIs, Ion requires
// that sp be JitStackAlignment-aligned *after* pushing the return address.
// After the JIT frame, the global register (if present) is saved since the
// JIT's ABI does not preserve non-volatile regs. Also, unlike most ABIs,
// the JIT ABI requires that sp be JitStackAlignment-aligned *after* pushing
// the return address.
static_assert(AsmJSStackAlignment >= JitStackAlignment, "subsumes");
unsigned sizeOfRetAddr = sizeof(void*);
unsigned ionFrameBytes = 3 * sizeof(void*) + (1 + exit.sig().args().length()) * sizeof(Value);
unsigned totalIonBytes = sizeOfRetAddr + ionFrameBytes + MaybeSavedGlobalReg;
unsigned ionFramePushed = StackDecrementForCall(masm, JitStackAlignment, totalIonBytes) -
unsigned jitFrameBytes = 3 * sizeof(void*) + (1 + sig.args().length()) * sizeof(Value);
unsigned totalJitFrameBytes = sizeOfRetAddr + jitFrameBytes + MaybeSavedGlobalReg;
unsigned jitFramePushed = StackDecrementForCall(masm, JitStackAlignment, totalJitFrameBytes) -
sizeOfRetAddr;
AsmJSProfilingOffsets offsets;
GenerateAsmJSExitPrologue(masm, ionFramePushed, ExitReason::Jit, &offsets);
GenerateExitPrologue(masm, jitFramePushed, ExitReason::ImportJit, offsets);
// 1. Descriptor
size_t argOffset = 0;
uint32_t descriptor = MakeFrameDescriptor(ionFramePushed, JitFrame_Entry);
uint32_t descriptor = MakeFrameDescriptor(jitFramePushed, JitFrame_Entry);
masm.storePtr(ImmWord(uintptr_t(descriptor)), Address(masm.getStackPointer(), argOffset));
argOffset += sizeof(size_t);
@ -614,17 +500,18 @@ GenerateIonExit(MacroAssembler& masm, AsmJSModule& module, unsigned exitIndex,
Register scratch = ABIArgGenerator::NonArgReturnReg1; // repeatedly clobbered
// 2.1. Get ExitDatum
unsigned globalDataOffset = module.exit(exitIndex).globalDataOffset();
unsigned globalDataOffset = mg.importExitGlobalDataOffset(importIndex);
#if defined(JS_CODEGEN_X64)
masm.append(AsmJSGlobalAccess(masm.leaRipRelative(callee), globalDataOffset));
#elif defined(JS_CODEGEN_X86)
masm.append(AsmJSGlobalAccess(masm.movlWithPatch(Imm32(0), callee), globalDataOffset));
#elif defined(JS_CODEGEN_ARM) || defined(JS_CODEGEN_MIPS32) || defined(JS_CODEGEN_MIPS64)
#elif defined(JS_CODEGEN_ARM) || defined(JS_CODEGEN_ARM64) || \
defined(JS_CODEGEN_MIPS32) || defined(JS_CODEGEN_MIPS64)
masm.computeEffectiveAddress(Address(GlobalReg, globalDataOffset - AsmJSGlobalRegBias), callee);
#endif
// 2.2. Get callee
masm.loadPtr(Address(callee, offsetof(AsmJSModule::ExitDatum, fun)), callee);
masm.loadPtr(Address(callee, Module::OffsetOfImportExitFun), callee);
// 2.3. Save callee
masm.storePtr(callee, Address(masm.getStackPointer(), argOffset));
@ -635,7 +522,7 @@ GenerateIonExit(MacroAssembler& masm, AsmJSModule& module, unsigned exitIndex,
masm.loadBaselineOrIonNoArgCheck(callee, callee, nullptr);
// 3. Argc
unsigned argc = exit.sig().args().length();
unsigned argc = sig.args().length();
masm.storePtr(ImmWord(uintptr_t(argc)), Address(masm.getStackPointer(), argOffset));
argOffset += sizeof(size_t);
@ -644,10 +531,10 @@ GenerateIonExit(MacroAssembler& masm, AsmJSModule& module, unsigned exitIndex,
argOffset += sizeof(Value);
// 5. Fill the arguments
unsigned offsetToCallerStackArgs = ionFramePushed + sizeof(AsmJSFrame);
FillArgumentArray(masm, exit.sig().args(), argOffset, offsetToCallerStackArgs, scratch);
argOffset += exit.sig().args().length() * sizeof(Value);
MOZ_ASSERT(argOffset == ionFrameBytes);
unsigned offsetToCallerStackArgs = jitFramePushed + sizeof(AsmJSFrame);
FillArgumentArray(masm, sig.args(), argOffset, offsetToCallerStackArgs, scratch);
argOffset += sig.args().length() * sizeof(Value);
MOZ_ASSERT(argOffset == jitFrameBytes);
// 6. Jit code will clobber all registers, even non-volatiles. GlobalReg and
// HeapReg are removed from the general register set for asm.js code, so
@ -657,7 +544,7 @@ GenerateIonExit(MacroAssembler& masm, AsmJSModule& module, unsigned exitIndex,
// heap may change during the FFI call.
#if defined(JS_CODEGEN_ARM) || defined(JS_CODEGEN_MIPS32) || defined(JS_CODEGEN_MIPS64)
static_assert(MaybeSavedGlobalReg == sizeof(void*), "stack frame accounting");
masm.storePtr(GlobalReg, Address(masm.getStackPointer(), ionFrameBytes));
masm.storePtr(GlobalReg, Address(masm.getStackPointer(), jitFrameBytes));
#endif
{
@ -770,13 +657,13 @@ GenerateIonExit(MacroAssembler& masm, AsmJSModule& module, unsigned exitIndex,
masm.storePtr(reg2, Address(reg0, offsetOfJitActivation));
}
// Reload the global register since Ion code can clobber any register.
// Reload the global register since JIT code can clobber any register.
#if defined(JS_CODEGEN_ARM) || defined(JS_CODEGEN_MIPS32) || defined(JS_CODEGEN_MIPS64)
static_assert(MaybeSavedGlobalReg == sizeof(void*), "stack frame accounting");
masm.loadPtr(Address(masm.getStackPointer(), ionFrameBytes), GlobalReg);
masm.loadPtr(Address(masm.getStackPointer(), jitFrameBytes), GlobalReg);
#endif
// As explained above, the frame was aligned for Ion such that
// As explained above, the frame was aligned for the JIT ABI such that
// (sp + sizeof(void*)) % JitStackAlignment == 0
// But now we possibly want to call one of several different C++ functions,
// so subtract the sizeof(void*) so that sp is aligned for an ABI call.
@ -788,7 +675,7 @@ GenerateIonExit(MacroAssembler& masm, AsmJSModule& module, unsigned exitIndex,
masm.branchTestMagic(Assembler::Equal, JSReturnOperand, throwLabel);
Label oolConvert;
switch (exit.sig().ret()) {
switch (sig.ret()) {
case ExprType::Void:
break;
case ExprType::I32:
@ -798,25 +685,27 @@ GenerateIonExit(MacroAssembler& masm, AsmJSModule& module, unsigned exitIndex,
case ExprType::I64:
MOZ_CRASH("no int64 in asm.js");
case ExprType::F32:
MOZ_CRASH("Float shouldn't be returned from a FFI");
MOZ_CRASH("Float shouldn't be returned from an import");
case ExprType::F64:
masm.convertValueToDouble(JSReturnOperand, ReturnDoubleReg, &oolConvert);
break;
case ExprType::I32x4:
case ExprType::F32x4:
case ExprType::B32x4:
MOZ_CRASH("SIMD types shouldn't be returned from a FFI");
MOZ_CRASH("SIMD types shouldn't be returned from an import");
}
Label done;
masm.bind(&done);
// The heap pointer has to be reloaded anyway since Ion could have clobbered
// it. Additionally, the FFI may have detached the heap buffer.
masm.loadAsmJSHeapRegisterFromGlobalData();
CheckForHeapDetachment(masm, module, ABIArgGenerator::NonReturn_VolatileReg0, onDetached);
// The heap pointer has to be reloaded anyway since JIT code could have
// clobbered it. Additionally, the import may have detached the heap buffer.
if (usesHeap) {
masm.loadAsmJSHeapRegisterFromGlobalData();
CheckForHeapDetachment(masm, ABIArgGenerator::NonReturn_VolatileReg0, onDetached);
}
GenerateAsmJSExitEpilogue(masm, masm.framePushed(), ExitReason::Jit, &offsets);
GenerateExitEpilogue(masm, masm.framePushed(), ExitReason::ImportJit, offsets);
if (oolConvert.used()) {
masm.bind(&oolConvert);
@ -847,7 +736,7 @@ GenerateIonExit(MacroAssembler& masm, AsmJSModule& module, unsigned exitIndex,
// Call coercion function
AssertStackAlignment(masm, ABIStackAlignment);
switch (exit.sig().ret()) {
switch (sig.ret()) {
case ExprType::I32:
masm.call(SymbolicAddress::CoerceInPlace_ToInt32);
masm.branchTest32(Assembler::Zero, ReturnReg, ReturnReg, throwLabel);
@ -871,9 +760,8 @@ GenerateIonExit(MacroAssembler& masm, AsmJSModule& module, unsigned exitIndex,
if (masm.oom())
return false;
offsets.end = masm.currentOffset();
exit.initJitOffset(offsets.begin);
return module.addCodeRange(AsmJSModule::CodeRange::JitFFI, offsets);
offsets->end = masm.currentOffset();
return true;
}
// Generate a stub that is called when returning from an exit where the module's
@ -881,11 +769,12 @@ GenerateIonExit(MacroAssembler& masm, AsmJSModule& module, unsigned exitIndex,
// exception and then jumps to the generic throw stub to pop everything off the
// stack.
static bool
GenerateOnDetachedExit(MacroAssembler& masm, AsmJSModule& module, Label* onDetached,
Label* throwLabel)
GenerateOnDetachedStub(ModuleGenerator& mg, Label* onDetached, Label* throwLabel)
{
MacroAssembler& masm = mg.masm();
masm.haltingAlign(CodeAlignment);
AsmJSOffsets offsets;
Offsets offsets;
offsets.begin = masm.currentOffset();
masm.bind(onDetached);
@ -898,17 +787,19 @@ GenerateOnDetachedExit(MacroAssembler& masm, AsmJSModule& module, Label* onDetac
return false;
offsets.end = masm.currentOffset();
return module.addCodeRange(AsmJSModule::CodeRange::Inline, offsets);
return mg.defineInlineStub(offsets);
}
// Generate a stub that is called immediately after the prologue when there is a
// stack overflow. This stub calls a C++ function to report the error and then
// jumps to the throw stub to pop the activation.
static bool
GenerateStackOverflowExit(MacroAssembler& masm, AsmJSModule& module, Label* throwLabel)
GenerateStackOverflowStub(ModuleGenerator& mg, Label* throwLabel)
{
MacroAssembler& masm = mg.masm();
masm.haltingAlign(CodeAlignment);
AsmJSOffsets offsets;
Offsets offsets;
offsets.begin = masm.currentOffset();
masm.bind(masm.asmStackOverflowLabel());
@ -935,43 +826,47 @@ GenerateStackOverflowExit(MacroAssembler& masm, AsmJSModule& module, Label* thro
return false;
offsets.end = masm.currentOffset();
return module.addCodeRange(AsmJSModule::CodeRange::Inline, offsets);
return mg.defineInlineStub(offsets);
}
// Generate a stub that is called from the synchronous, inline interrupt checks
// when the interrupt flag is set. This stub calls the C++ function to handle
// the interrupt, which returns whether execution may continue.
static bool
GenerateSyncInterruptExit(MacroAssembler& masm, AsmJSModule& module, Label* throwLabel)
GenerateSyncInterruptStub(ModuleGenerator& mg, Label* throwLabel)
{
MacroAssembler& masm = mg.masm();
masm.setFramePushed(0);
unsigned framePushed = StackDecrementForCall(masm, ABIStackAlignment, ShadowStackSpace);
AsmJSProfilingOffsets offsets;
GenerateAsmJSExitPrologue(masm, framePushed, ExitReason::Interrupt, &offsets,
masm.asmSyncInterruptLabel());
ProfilingOffsets offsets;
GenerateExitPrologue(masm, framePushed, ExitReason::Native, &offsets,
masm.asmSyncInterruptLabel());
AssertStackAlignment(masm, ABIStackAlignment);
masm.call(SymbolicAddress::HandleExecutionInterrupt);
masm.branchIfFalseBool(ReturnReg, throwLabel);
GenerateAsmJSExitEpilogue(masm, framePushed, ExitReason::Interrupt, &offsets);
GenerateExitEpilogue(masm, framePushed, ExitReason::Native, &offsets);
if (masm.oom())
return false;
offsets.end = masm.currentOffset();
return module.addCodeRange(AsmJSModule::CodeRange::Interrupt, offsets);
return mg.defineSyncInterruptStub(offsets);
}
// Generate a stub that is jumped to when a SIMD float-to-int conversion fails
// because the value cannot be represented exactly. This stub calls a C++
// function to report the error and then jumps to the throw stub to pop the
// activation.
static bool
GenerateConversionErrorExit(MacroAssembler& masm, AsmJSModule& module, Label* throwLabel)
GenerateConversionErrorStub(ModuleGenerator& mg, Label* throwLabel)
{
MacroAssembler& masm = mg.masm();
masm.haltingAlign(CodeAlignment);
AsmJSOffsets offsets;
Offsets offsets;
offsets.begin = masm.currentOffset();
masm.bind(masm.asmOnConversionErrorLabel());
@ -979,7 +874,7 @@ GenerateConversionErrorExit(MacroAssembler& masm, AsmJSModule& module, Label* th
// into C++. We unconditionally jump to throw so don't worry about restoring sp.
masm.andToStackPtr(Imm32(~(ABIStackAlignment - 1)));
// OnOutOfBounds always throws.
// OnImpreciseConversion always throws.
masm.assertStackAlignment(ABIStackAlignment);
masm.call(SymbolicAddress::OnImpreciseConversion);
masm.jump(throwLabel);
@ -988,18 +883,19 @@ GenerateConversionErrorExit(MacroAssembler& masm, AsmJSModule& module, Label* th
return false;
offsets.end = masm.currentOffset();
module.setOnOutOfBoundsExitOffset(offsets.begin);
return module.addCodeRange(AsmJSModule::CodeRange::Inline, offsets);
return mg.defineInlineStub(offsets);
}
// Generate a stub that is jumped to from an out-of-bounds heap access when
// there are throwing semantics. This stub calls a C++ function to report an
// error and then jumps to the throw stub to pop the activation.
static bool
GenerateOutOfBoundsExit(MacroAssembler& masm, AsmJSModule& module, Label* throwLabel)
GenerateOutOfBoundsStub(ModuleGenerator& mg, Label* throwLabel)
{
MacroAssembler& masm = mg.masm();
masm.haltingAlign(CodeAlignment);
AsmJSOffsets offsets;
Offsets offsets;
offsets.begin = masm.currentOffset();
masm.bind(masm.asmOnOutOfBoundsLabel());
@ -1016,8 +912,7 @@ GenerateOutOfBoundsExit(MacroAssembler& masm, AsmJSModule& module, Label* throwL
return false;
offsets.end = masm.currentOffset();
module.setOnOutOfBoundsExitOffset(offsets.begin);
return module.addCodeRange(AsmJSModule::CodeRange::Inline, offsets);
return mg.defineOutOfBoundsStub(offsets);
}
static const LiveRegisterSet AllRegsExceptSP(
@ -1034,10 +929,12 @@ static const LiveRegisterSet AllRegsExceptSP(
// after restoring all registers. To hack around this, push the resumePC on the
// stack so that it can be popped directly into PC.
static bool
GenerateAsyncInterruptExit(MacroAssembler& masm, AsmJSModule& module, Label* throwLabel)
GenerateAsyncInterruptStub(ModuleGenerator& mg, Module::HeapBool usesHeap, Label* throwLabel)
{
MacroAssembler& masm = mg.masm();
masm.haltingAlign(CodeAlignment);
AsmJSOffsets offsets;
Offsets offsets;
offsets.begin = masm.currentOffset();
#if defined(JS_CODEGEN_X86) || defined(JS_CODEGEN_X64)
@ -1187,8 +1084,7 @@ GenerateAsyncInterruptExit(MacroAssembler& masm, AsmJSModule& module, Label* thr
return false;
offsets.end = masm.currentOffset();
module.setAsyncInterruptOffset(offsets.begin);
return module.addCodeRange(AsmJSModule::CodeRange::Inline, offsets);
return mg.defineAsyncInterruptStub(offsets);
}
// If an exception is thrown, simply pop all frames (since asm.js does not
@ -1197,10 +1093,12 @@ GenerateAsyncInterruptExit(MacroAssembler& masm, AsmJSModule& module, Label* thr
// 2. PopRegsInMask to restore the caller's non-volatile registers.
// 3. Return (to CallAsmJS).
static bool
GenerateThrowStub(MacroAssembler& masm, AsmJSModule& module, Label* throwLabel)
GenerateThrowStub(ModuleGenerator& mg, Label* throwLabel)
{
MacroAssembler& masm = mg.masm();
masm.haltingAlign(CodeAlignment);
AsmJSOffsets offsets;
Offsets offsets;
offsets.begin = masm.currentOffset();
masm.bind(throwLabel);
@ -1224,19 +1122,14 @@ GenerateThrowStub(MacroAssembler& masm, AsmJSModule& module, Label* throwLabel)
return false;
offsets.end = masm.currentOffset();
return module.addCodeRange(AsmJSModule::CodeRange::Inline, offsets);
return mg.defineInlineStub(offsets);
}
bool
wasm::GenerateStubs(MacroAssembler& masm, AsmJSModule& module, const FuncOffsetVector& funcOffsets)
wasm::GenerateStubs(ModuleGenerator& mg, Module::HeapBool usesHeap)
{
for (unsigned i = 0; i < module.numExportedFunctions(); i++) {
if (!GenerateEntry(masm, module, i, funcOffsets))
return false;
}
for (auto builtin : MakeEnumeratedRange(Builtin::Limit)) {
if (!GenerateBuiltinThunk(masm, module, builtin))
for (unsigned i = 0; i < mg.numDeclaredExports(); i++) {
if (!GenerateEntry(mg, i, usesHeap))
return false;
}
@ -1245,45 +1138,51 @@ wasm::GenerateStubs(MacroAssembler& masm, AsmJSModule& module, const FuncOffsetV
{
Label onDetached;
for (size_t i = 0; i < module.numExits(); i++) {
if (!GenerateInterpExit(masm, module, i, &onThrow, &onDetached))
for (size_t i = 0; i < mg.numDeclaredImports(); i++) {
ProfilingOffsets interp;
if (!GenerateInterpExitStub(mg, i, usesHeap, &onThrow, &onDetached, &interp))
return false;
if (!GenerateIonExit(masm, module, i, &onThrow, &onDetached))
ProfilingOffsets jit;
if (!GenerateJitExitStub(mg, i, usesHeap, &onThrow, &onDetached, &jit))
return false;
if (!mg.defineImport(i, interp, jit))
return false;
}
if (onDetached.used()) {
if (!GenerateOnDetachedExit(masm, module, &onDetached, &onThrow))
if (!GenerateOnDetachedStub(mg, &onDetached, &onThrow))
return false;
}
}
if (masm.asmStackOverflowLabel()->used()) {
if (!GenerateStackOverflowExit(masm, module, &onThrow))
if (mg.masm().asmStackOverflowLabel()->used()) {
if (!GenerateStackOverflowStub(mg, &onThrow))
return false;
}
if (masm.asmSyncInterruptLabel()->used()) {
if (!GenerateSyncInterruptExit(masm, module, &onThrow))
if (mg.masm().asmSyncInterruptLabel()->used()) {
if (!GenerateSyncInterruptStub(mg, &onThrow))
return false;
}
if (masm.asmOnConversionErrorLabel()->used()) {
if (!GenerateConversionErrorExit(masm, module, &onThrow))
if (mg.masm().asmOnConversionErrorLabel()->used()) {
if (!GenerateConversionErrorStub(mg, &onThrow))
return false;
}
// Generate unconditionally: the out-of-bounds exit may be used later even
// if signal handling isn't used for out-of-bounds at the moment.
if (!GenerateOutOfBoundsExit(masm, module, &onThrow))
if (!GenerateOutOfBoundsStub(mg, &onThrow))
return false;
// Generate unconditionally: the async interrupt may be taken at any time.
if (!GenerateAsyncInterruptExit(masm, module, &onThrow))
if (!GenerateAsyncInterruptStub(mg, usesHeap, &onThrow))
return false;
if (onThrow.used()) {
if (!GenerateThrowStub(masm, module, &onThrow))
if (!GenerateThrowStub(mg, &onThrow))
return false;
}


@ -16,23 +16,18 @@
* limitations under the License.
*/
#ifndef asmjs_wasm_stubs_h
#define asmjs_wasm_stubs_h
#ifndef wasm_stubs_h
#define wasm_stubs_h
#include "asmjs/Wasm.h"
#include "asmjs/WasmGenerator.h"
namespace js {
class AsmJSModule;
namespace jit { class MacroAssembler; }
namespace wasm {
typedef Vector<uint32_t> FuncOffsetVector;
bool
GenerateStubs(jit::MacroAssembler& masm, AsmJSModule& module, const FuncOffsetVector& funcOffsets);
GenerateStubs(ModuleGenerator& mg, Module::HeapBool usesHeap);
} // namespace wasm
} // namespace js
#endif // asmjs_wasm_stubs_h
#endif // wasm_stubs_h

js/src/asmjs/WasmTypes.cpp (new file, 292 lines)

@ -0,0 +1,292 @@
/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
* vim: set ts=8 sts=4 et sw=4 tw=99:
*
* Copyright 2015 Mozilla Foundation
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "asmjs/WasmTypes.h"
#include "jslibmath.h"
#include "jsmath.h"
#include "asmjs/AsmJSModule.h"
#include "js/Conversions.h"
#include "vm/Interpreter.h"
#include "vm/Stack-inl.h"
using namespace js;
using namespace js::jit;
using namespace js::wasm;
#if defined(JS_CODEGEN_ARM)
extern "C" {
extern MOZ_EXPORT int64_t
__aeabi_idivmod(int, int);
extern MOZ_EXPORT int64_t
__aeabi_uidivmod(int, int);
}
#endif
namespace js {
namespace wasm {
void
ReportOverRecursed()
{
JSContext* cx = JSRuntime::innermostAsmJSActivation()->cx();
ReportOverRecursed(cx);
}
bool
HandleExecutionInterrupt()
{
AsmJSActivation* act = JSRuntime::innermostAsmJSActivation();
act->module().wasm().setInterrupted(true);
bool ret = CheckForInterrupt(act->cx());
act->module().wasm().setInterrupted(false);
return ret;
}
} // namespace wasm
} // namespace js
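
HandleExecutionInterrupt brackets the interrupt check with setInterrupted(true/false), presumably so that code inspecting the activation while the callback runs can tell that wasm execution is paused in C++. A hypothetical RAII helper expressing the same pattern (illustrative only, not part of this patch):

    struct AutoSetInterrupted
    {
        wasm::Module& module;
        explicit AutoSetInterrupted(wasm::Module& m) : module(m) { module.setInterrupted(true); }
        ~AutoSetInterrupted() { module.setInterrupted(false); }
    };
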
static void
OnDetached()
{
// See hasDetachedHeap comment in LinkAsmJS.
JSContext* cx = JSRuntime::innermostAsmJSActivation()->cx();
JS_ReportErrorNumber(cx, GetErrorMessage, nullptr, JSMSG_OUT_OF_MEMORY);
}
static void
OnOutOfBounds()
{
JSContext* cx = JSRuntime::innermostAsmJSActivation()->cx();
JS_ReportErrorNumber(cx, GetErrorMessage, nullptr, JSMSG_BAD_INDEX);
}
static void
OnImpreciseConversion()
{
JSContext* cx = JSRuntime::innermostAsmJSActivation()->cx();
JS_ReportErrorNumber(cx, GetErrorMessage, nullptr, JSMSG_SIMD_FAILED_CONVERSION);
}
static int32_t
CoerceInPlace_ToInt32(MutableHandleValue val)
{
JSContext* cx = JSRuntime::innermostAsmJSActivation()->cx();
int32_t i32;
if (!ToInt32(cx, val, &i32))
return false;
val.set(Int32Value(i32));
return true;
}
static int32_t
CoerceInPlace_ToNumber(MutableHandleValue val)
{
JSContext* cx = JSRuntime::innermostAsmJSActivation()->cx();
double dbl;
if (!ToNumber(cx, val, &dbl))
return false;
val.set(DoubleValue(dbl));
return true;
}
// Use an int32_t return type instead of bool since bool does not have a
// specified width and the caller is assuming a word-sized return.
static int32_t
InvokeImport_Void(int32_t importIndex, int32_t argc, Value* argv)
{
AsmJSActivation* activation = JSRuntime::innermostAsmJSActivation();
JSContext* cx = activation->cx();
Module& module = activation->module().wasm();
RootedValue rval(cx);
return module.callImport(cx, importIndex, argc, argv, &rval);
}
// Use an int32_t return type instead of bool since bool does not have a
// specified width and the caller is assuming a word-sized return.
static int32_t
InvokeImport_I32(int32_t importIndex, int32_t argc, Value* argv)
{
AsmJSActivation* activation = JSRuntime::innermostAsmJSActivation();
JSContext* cx = activation->cx();
Module& module = activation->module().wasm();
RootedValue rval(cx);
if (!module.callImport(cx, importIndex, argc, argv, &rval))
return false;
int32_t i32;
if (!ToInt32(cx, rval, &i32))
return false;
argv[0] = Int32Value(i32);
return true;
}
// Use an int32_t return type instead of bool since bool does not have a
// specified width and the caller is assuming a word-sized return.
static int32_t
InvokeImport_F64(int32_t importIndex, int32_t argc, Value* argv)
{
AsmJSActivation* activation = JSRuntime::innermostAsmJSActivation();
JSContext* cx = activation->cx();
Module& module = activation->module().wasm();
RootedValue rval(cx);
if (!module.callImport(cx, importIndex, argc, argv, &rval))
return false;
double dbl;
if (!ToNumber(cx, rval, &dbl))
return false;
argv[0] = DoubleValue(dbl);
return true;
}
template <class F>
static inline void*
FuncCast(F* pf, ABIFunctionType type)
{
void *pv = JS_FUNC_TO_DATA_PTR(void*, pf);
#ifdef JS_SIMULATOR
pv = Simulator::RedirectNativeFunction(pv, type);
#endif
return pv;
}
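
FuncCast erases the C++ function type (and, under the simulator, redirects the pointer so simulated code can call it). An illustrative use, mirroring the calls in AddressOf below; MyHelper is a made-up native whose signature matches Args_Double_Double:

    static double MyHelper(double x) { return x + 1.0; }

    void* addr = FuncCast<double (double)>(MyHelper, Args_Double_Double);
    // 'addr' is the kind of pointer that static linking patches over the
    // placeholder recorded for a SymbolicAddress.
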
void*
wasm::AddressOf(SymbolicAddress imm, ExclusiveContext* cx)
{
switch (imm) {
case SymbolicAddress::Runtime:
return cx->runtimeAddressForJit();
case SymbolicAddress::RuntimeInterruptUint32:
return cx->runtimeAddressOfInterruptUint32();
case SymbolicAddress::StackLimit:
return cx->stackLimitAddressForJitCode(StackForUntrustedScript);
case SymbolicAddress::ReportOverRecursed:
return FuncCast(wasm::ReportOverRecursed, Args_General0);
case SymbolicAddress::OnDetached:
return FuncCast(OnDetached, Args_General0);
case SymbolicAddress::OnOutOfBounds:
return FuncCast(OnOutOfBounds, Args_General0);
case SymbolicAddress::OnImpreciseConversion:
return FuncCast(OnImpreciseConversion, Args_General0);
case SymbolicAddress::HandleExecutionInterrupt:
return FuncCast(wasm::HandleExecutionInterrupt, Args_General0);
case SymbolicAddress::InvokeImport_Void:
return FuncCast(InvokeImport_Void, Args_General3);
case SymbolicAddress::InvokeImport_I32:
return FuncCast(InvokeImport_I32, Args_General3);
case SymbolicAddress::InvokeImport_F64:
return FuncCast(InvokeImport_F64, Args_General3);
case SymbolicAddress::CoerceInPlace_ToInt32:
return FuncCast(CoerceInPlace_ToInt32, Args_General1);
case SymbolicAddress::CoerceInPlace_ToNumber:
return FuncCast(CoerceInPlace_ToNumber, Args_General1);
case SymbolicAddress::ToInt32:
return FuncCast<int32_t (double)>(JS::ToInt32, Args_Int_Double);
#if defined(JS_CODEGEN_ARM)
case SymbolicAddress::aeabi_idivmod:
return FuncCast(__aeabi_idivmod, Args_General2);
case SymbolicAddress::aeabi_uidivmod:
return FuncCast(__aeabi_uidivmod, Args_General2);
case SymbolicAddress::AtomicCmpXchg:
return FuncCast<int32_t (int32_t, int32_t, int32_t, int32_t)>(js::atomics_cmpxchg_asm_callout, Args_General4);
case SymbolicAddress::AtomicXchg:
return FuncCast<int32_t (int32_t, int32_t, int32_t)>(js::atomics_xchg_asm_callout, Args_General3);
case SymbolicAddress::AtomicFetchAdd:
return FuncCast<int32_t (int32_t, int32_t, int32_t)>(js::atomics_add_asm_callout, Args_General3);
case SymbolicAddress::AtomicFetchSub:
return FuncCast<int32_t (int32_t, int32_t, int32_t)>(js::atomics_sub_asm_callout, Args_General3);
case SymbolicAddress::AtomicFetchAnd:
return FuncCast<int32_t (int32_t, int32_t, int32_t)>(js::atomics_and_asm_callout, Args_General3);
case SymbolicAddress::AtomicFetchOr:
return FuncCast<int32_t (int32_t, int32_t, int32_t)>(js::atomics_or_asm_callout, Args_General3);
case SymbolicAddress::AtomicFetchXor:
return FuncCast<int32_t (int32_t, int32_t, int32_t)>(js::atomics_xor_asm_callout, Args_General3);
#endif
case SymbolicAddress::ModD:
return FuncCast(NumberMod, Args_Double_DoubleDouble);
case SymbolicAddress::SinD:
#ifdef _WIN64
// Workaround a VS 2013 sin issue, see math_sin_uncached.
return FuncCast<double (double)>(js::math_sin_uncached, Args_Double_Double);
#else
return FuncCast<double (double)>(sin, Args_Double_Double);
#endif
case SymbolicAddress::CosD:
return FuncCast<double (double)>(cos, Args_Double_Double);
case SymbolicAddress::TanD:
return FuncCast<double (double)>(tan, Args_Double_Double);
case SymbolicAddress::ASinD:
return FuncCast<double (double)>(asin, Args_Double_Double);
case SymbolicAddress::ACosD:
return FuncCast<double (double)>(acos, Args_Double_Double);
case SymbolicAddress::ATanD:
return FuncCast<double (double)>(atan, Args_Double_Double);
case SymbolicAddress::CeilD:
return FuncCast<double (double)>(ceil, Args_Double_Double);
case SymbolicAddress::CeilF:
return FuncCast<float (float)>(ceilf, Args_Float32_Float32);
case SymbolicAddress::FloorD:
return FuncCast<double (double)>(floor, Args_Double_Double);
case SymbolicAddress::FloorF:
return FuncCast<float (float)>(floorf, Args_Float32_Float32);
case SymbolicAddress::ExpD:
return FuncCast<double (double)>(exp, Args_Double_Double);
case SymbolicAddress::LogD:
return FuncCast<double (double)>(log, Args_Double_Double);
case SymbolicAddress::PowD:
return FuncCast(ecmaPow, Args_Double_DoubleDouble);
case SymbolicAddress::ATan2D:
return FuncCast(ecmaAtan2, Args_Double_DoubleDouble);
case SymbolicAddress::Limit:
break;
}
MOZ_CRASH("Bad SymbolicAddress");
}
CompileArgs::CompileArgs(ExclusiveContext* cx)
:
#if defined(ASMJS_MAY_USE_SIGNAL_HANDLERS_FOR_OOB)
useSignalHandlersForOOB(cx->canUseSignalHandlers()),
#else
useSignalHandlersForOOB(false),
#endif
useSignalHandlersForInterrupt(cx->canUseSignalHandlers())
{}
bool
CompileArgs::operator==(CompileArgs rhs) const
{
return useSignalHandlersForOOB == rhs.useSignalHandlersForOOB &&
useSignalHandlersForInterrupt == rhs.useSignalHandlersForInterrupt;
}
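
The comparison operators suggest one plausible use, not shown in this hunk: deciding whether code compiled earlier, with its CompileArgs recorded at compile time, is still usable in the current context. A hedged sketch, where cachedArgs is a hypothetical saved copy:

    CompileArgs current(cx);
    if (cachedArgs != current) {
        // The code was built under different signal-handler settings (e.g.
        // explicit bounds checks vs. signal-based OOB handling), so it cannot
        // be reused as-is and must be recompiled.
    }
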


@ -16,10 +16,13 @@
* limitations under the License.
*/
#ifndef asmjs_wasm_h
#define asmjs_wasm_h
#ifndef wasm_types_h
#define wasm_types_h
#include "mozilla/DebugOnly.h"
#include "mozilla/HashFunctions.h"
#include "mozilla/Move.h"
#include "mozilla/UniquePtr.h"
#include "ds/LifoAlloc.h"
#include "jit/IonTypes.h"
@ -27,9 +30,15 @@
#include "js/Vector.h"
namespace js {
class PropertyName;
namespace wasm {
using mozilla::Move;
using mozilla::DebugOnly;
using mozilla::UniquePtr;
using mozilla::MallocSizeOf;
// The ValType enum represents the WebAssembly "value type", which is used to
// specify the types of locals and parameters.
@ -264,6 +273,76 @@ class LifoSig : public Sig<LifoAllocPolicy<Fallible>>
}
};
// The (,Profiling,Func)Offsets classes are used to record the offsets of
// different key points in a CodeRange during compilation.
struct Offsets
{
MOZ_IMPLICIT Offsets(uint32_t begin = 0, uint32_t end = 0)
: begin(begin), end(end)
{}
// These define a [begin, end) contiguous range of instructions compiled
// into a CodeRange.
uint32_t begin;
uint32_t end;
void offsetBy(uint32_t offset) {
begin += offset;
end += offset;
}
};
struct ProfilingOffsets : Offsets
{
MOZ_IMPLICIT ProfilingOffsets(uint32_t profilingReturn = 0)
: Offsets(), profilingReturn(profilingReturn)
{}
// For CodeRanges with ProfilingOffsets, 'begin' is the offset of the
// profiling entry.
uint32_t profilingEntry() const { return begin; }
// The profiling return is the offset of the return instruction, which
// precedes the 'end' by a variable number of instructions due to
// out-of-line codegen.
uint32_t profilingReturn;
void offsetBy(uint32_t offset) {
Offsets::offsetBy(offset);
profilingReturn += offset;
}
};
struct FuncOffsets : ProfilingOffsets
{
MOZ_IMPLICIT FuncOffsets(uint32_t nonProfilingEntry = 0,
uint32_t profilingJump = 0,
uint32_t profilingEpilogue = 0)
: ProfilingOffsets(),
nonProfilingEntry(nonProfilingEntry),
profilingJump(profilingJump),
profilingEpilogue(profilingEpilogue)
{}
// Function CodeRanges have an additional non-profiling entry that comes
// after the profiling entry and a non-profiling epilogue that comes before
// the profiling epilogue.
uint32_t nonProfilingEntry;
// When profiling is enabled, the 'nop' at offset 'profilingJump' is
// overwritten to be a jump to 'profilingEpilogue'.
uint32_t profilingJump;
uint32_t profilingEpilogue;
void offsetBy(uint32_t offset) {
ProfilingOffsets::offsetBy(offset);
nonProfilingEntry += offset;
profilingJump += offset;
profilingEpilogue += offset;
}
};
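
The stub generators above all use these structs the same way: record begin at the current assembler offset, emit the body, record end, and rebase once the code's final position is known. A minimal sketch, where codeBaseOffset is hypothetical:

    Offsets offsets;
    offsets.begin = masm.currentOffset();
    // ... emit the stub body ...
    offsets.end = masm.currentOffset();
    offsets.offsetBy(codeBaseOffset);  // rebase after placing the code in the module image
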
// While the frame-pointer chain allows the stack to be unwound without
// metadata, Error.stack still needs to know the line/column of every call in
// the chain. A CallSiteDesc describes a single callsite to which CallSite adds
@ -438,10 +517,14 @@ class HeapAccess {
typedef Vector<HeapAccess, 0, SystemAllocPolicy> HeapAccessVector;
// A wasm::Builtin represents a function implemented by the engine that is
// called directly from wasm code and should show up in the callstack.
// A wasm::SymbolicAddress represents a pointer to a well-known function or
// object that is embedded in wasm code. Since wasm code is serialized and
// later deserialized into a different address space, symbolic addresses must be
// used for *all* pointers into the address space. The MacroAssembler records a
// list of all SymbolicAddresses and the offsets of their use in the code for
// later patching during static linking.
enum class Builtin : uint16_t
enum class SymbolicAddress
{
ToInt32,
#if defined(JS_CODEGEN_ARM)
@ -470,45 +553,6 @@ enum class Builtin : uint16_t
LogD,
PowD,
ATan2D,
Limit
};
// A wasm::SymbolicAddress represents a pointer to a well-known function or
// object that is embedded in wasm code. Since wasm code is serialized and
// later deserialized into a different address space, symbolic addresses must be
// used for *all* pointers into the address space. The MacroAssembler records a
// list of all SymbolicAddresses and the offsets of their use in the code for
// later patching during static linking.
enum class SymbolicAddress
{
ToInt32 = unsigned(Builtin::ToInt32),
#if defined(JS_CODEGEN_ARM)
aeabi_idivmod = unsigned(Builtin::aeabi_idivmod),
aeabi_uidivmod = unsigned(Builtin::aeabi_uidivmod),
AtomicCmpXchg = unsigned(Builtin::AtomicCmpXchg),
AtomicXchg = unsigned(Builtin::AtomicXchg),
AtomicFetchAdd = unsigned(Builtin::AtomicFetchAdd),
AtomicFetchSub = unsigned(Builtin::AtomicFetchSub),
AtomicFetchAnd = unsigned(Builtin::AtomicFetchAnd),
AtomicFetchOr = unsigned(Builtin::AtomicFetchOr),
AtomicFetchXor = unsigned(Builtin::AtomicFetchXor),
#endif
ModD = unsigned(Builtin::ModD),
SinD = unsigned(Builtin::SinD),
CosD = unsigned(Builtin::CosD),
TanD = unsigned(Builtin::TanD),
ASinD = unsigned(Builtin::ASinD),
ACosD = unsigned(Builtin::ACosD),
ATanD = unsigned(Builtin::ATanD),
CeilD = unsigned(Builtin::CeilD),
CeilF = unsigned(Builtin::CeilF),
FloorD = unsigned(Builtin::FloorD),
FloorF = unsigned(Builtin::FloorF),
ExpD = unsigned(Builtin::ExpD),
LogD = unsigned(Builtin::LogD),
PowD = unsigned(Builtin::PowD),
ATan2D = unsigned(Builtin::ATan2D),
Runtime,
RuntimeInterruptUint32,
StackLimit,
@ -517,80 +561,41 @@ enum class SymbolicAddress
OnOutOfBounds,
OnImpreciseConversion,
HandleExecutionInterrupt,
InvokeFromAsmJS_Ignore,
InvokeFromAsmJS_ToInt32,
InvokeFromAsmJS_ToNumber,
InvokeImport_Void,
InvokeImport_I32,
InvokeImport_F64,
CoerceInPlace_ToInt32,
CoerceInPlace_ToNumber,
Limit
};
static inline SymbolicAddress
BuiltinToImmediate(Builtin b)
void*
AddressOf(SymbolicAddress imm, ExclusiveContext* cx);
// The CompileArgs struct captures global parameters that affect all wasm code
// generation. It is also currently the single source of truth for whether or
// not to use signal handlers for different purposes.
struct CompileArgs
{
return SymbolicAddress(b);
}
bool useSignalHandlersForOOB;
bool useSignalHandlersForInterrupt;
static inline bool
ImmediateIsBuiltin(SymbolicAddress imm, Builtin* builtin)
{
if (uint32_t(imm) < uint32_t(Builtin::Limit)) {
*builtin = Builtin(imm);
return true;
}
return false;
}
// An ExitReason describes the possible reasons for leaving compiled wasm code
// or the state of not having left compiled wasm code (ExitReason::None).
class ExitReason
{
public:
// List of reasons for execution leaving compiled wasm code (or None, if
// control hasn't exited).
enum Kind
{
None, // default state, the pc is in wasm code
Jit, // fast-path exit to JIT code
Slow, // general case exit to C++ Invoke
Interrupt, // executing an interrupt callback
Builtin // calling into a builtin (native) function
};
private:
Kind kind_;
wasm::Builtin builtin_;
public:
ExitReason() = default;
MOZ_IMPLICIT ExitReason(Kind kind) : kind_(kind) { MOZ_ASSERT(kind != Builtin); }
MOZ_IMPLICIT ExitReason(wasm::Builtin builtin) : kind_(Builtin), builtin_(builtin) {}
Kind kind() const { return kind_; }
wasm::Builtin builtin() const { MOZ_ASSERT(kind_ == Builtin); return builtin_; }
uint32_t pack() const {
static_assert(sizeof(wasm::Builtin) == 2, "fits");
return uint16_t(kind_) | (uint16_t(builtin_) << 16);
}
static ExitReason unpack(uint32_t u32) {
static_assert(sizeof(wasm::Builtin) == 2, "fits");
ExitReason r;
r.kind_ = Kind(uint16_t(u32));
r.builtin_ = wasm::Builtin(uint16_t(u32 >> 16));
return r;
}
CompileArgs() = default;
explicit CompileArgs(ExclusiveContext* cx);
bool operator==(CompileArgs rhs) const;
bool operator!=(CompileArgs rhs) const { return !(*this == rhs); }
};
// A hoisting of constants that would otherwise require #including WasmModule.h
// everywhere. Values are asserted in WasmModule.h.
// Constants:
static const unsigned ActivationGlobalDataOffset = 0;
static const unsigned HeapGlobalDataOffset = sizeof(void*);
static const unsigned NaN64GlobalDataOffset = 2 * sizeof(void*);
static const unsigned NaN32GlobalDataOffset = 2 * sizeof(void*) + sizeof(double);
static const unsigned HeapGlobalDataOffset = ActivationGlobalDataOffset + sizeof(void*);
static const unsigned NaN64GlobalDataOffset = HeapGlobalDataOffset + sizeof(void*);
static const unsigned NaN32GlobalDataOffset = NaN64GlobalDataOffset + sizeof(double);
static const unsigned InitialGlobalDataBytes = NaN32GlobalDataOffset + sizeof(float);
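
Expressing each offset relative to the previous one preserves the old absolute layout; the equivalences can be checked directly (illustrative sanity checks, not part of the patch):

    static_assert(ActivationGlobalDataOffset == 0, "activation pointer comes first");
    static_assert(HeapGlobalDataOffset == sizeof(void*), "heap pointer follows it");
    static_assert(NaN64GlobalDataOffset == 2 * sizeof(void*), "double NaN next");
    static_assert(NaN32GlobalDataOffset == 2 * sizeof(void*) + sizeof(double), "float NaN next");
    static_assert(InitialGlobalDataBytes == NaN32GlobalDataOffset + sizeof(float), "fixed prefix size");
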
} // namespace wasm
} // namespace js
#endif // asmjs_wasm_h
#endif // wasm_types_h


@ -523,9 +523,9 @@ static void
GetCurrentAsmJSHeap(SharedMem<void*>* heap, size_t* length)
{
JSRuntime* rt = js::TlsPerThreadData.get()->runtimeFromMainThread();
AsmJSModule& mod = rt->asmJSActivationStack()->module();
*heap = mod.maybeHeap().cast<void*>();
*length = mod.heapLength();
wasm::Module& module = rt->asmJSActivationStack()->module().wasm();
*heap = module.maybeHeap().cast<void*>();
*length = module.heapLength();
}
int32_t


@ -11,6 +11,7 @@
#include "jsiter.h"
#include "builtin/SelfHostingDefines.h"
#include "builtin/WeakMapObject.h"
#include "vm/GlobalObject.h"
#include "vm/SelfHosting.h"


@ -9,6 +9,7 @@
#include "mozilla/Attributes.h"
#include "builtin/ModuleObject.h"
#include "frontend/TokenStream.h"
namespace js {


@ -108,11 +108,12 @@ function testBuiltinD2D(name) {
enableSingleStepProfiling();
assertEq(f(.1), eval("Math." + name + "(.1)"));
var stacks = disableSingleStepProfiling();
assertStackContainsSeq(stacks, ">,f,>,Math." + name + ",f,>,f,>,>");
assertStackContainsSeq(stacks, ">,f,>,native call,>,f,>,>");
}
}
for (name of ['sin', 'cos', 'tan', 'asin', 'acos', 'atan', 'ceil', 'floor', 'exp', 'log'])
testBuiltinD2D(name);
function testBuiltinF2F(name) {
var m = asmCompile('g', USE_ASM + "var tof=g.Math.fround; var fun=g.Math." + name + "; function f(d) { d=tof(d); return tof(fun(d)) } return f");
for (var i = 0; i < 3; i++) {
@ -120,11 +121,12 @@ function testBuiltinF2F(name) {
enableSingleStepProfiling();
assertEq(f(.1), eval("Math.fround(Math." + name + "(Math.fround(.1)))"));
var stacks = disableSingleStepProfiling();
assertStackContainsSeq(stacks, ">,f,>,Math." + name + ",f,>,f,>,>");
assertStackContainsSeq(stacks, ">,f,>,native call,>,f,>,>");
}
}
for (name of ['ceil', 'floor'])
testBuiltinF2F(name);
function testBuiltinDD2D(name) {
var m = asmCompile('g', USE_ASM + "var fun=g.Math." + name + "; function f(d, e) { d=+d; e=+e; return +fun(d,e) } return f");
for (var i = 0; i < 3; i++) {
@ -132,7 +134,7 @@ function testBuiltinDD2D(name) {
enableSingleStepProfiling();
assertEq(f(.1, .2), eval("Math." + name + "(.1, .2)"));
var stacks = disableSingleStepProfiling();
assertStackContainsSeq(stacks, ">,f,>,Math." + name + ",f,>,f,>,>");
assertStackContainsSeq(stacks, ">,f,>,native call,>,f,>,>");
}
}
for (name of ['atan2', 'pow'])


@ -8,7 +8,7 @@
#include "mozilla/MemoryReporting.h"
#include "asmjs/AsmJSModule.h"
#include "asmjs/WasmModule.h"
#include "jit/BaselineCompiler.h"
#include "jit/BaselineIC.h"
#include "jit/CompileInfo.h"
@ -51,7 +51,7 @@ BaselineScript::BaselineScript(uint32_t prologueOffset, uint32_t epilogueOffset,
: method_(nullptr),
templateScope_(nullptr),
fallbackStubSpace_(),
dependentAsmJSModules_(nullptr),
dependentWasmModules_(nullptr),
prologueOffset_(prologueOffset),
epilogueOffset_(epilogueOffset),
profilerEnterToggleOffset_(profilerEnterToggleOffset),
@ -485,60 +485,57 @@ BaselineScript::Destroy(FreeOp* fop, BaselineScript* script)
MOZ_ASSERT(!script->hasPendingIonBuilder());
script->unlinkDependentAsmJSModules(fop);
script->unlinkDependentWasmModules(fop);
fop->delete_(script);
}
void
BaselineScript::clearDependentAsmJSModules()
BaselineScript::clearDependentWasmModules()
{
// Remove any links from AsmJSModules that contain optimized FFI calls into
// Remove any links from wasm::Modules that contain optimized import calls into
// this BaselineScript.
if (dependentAsmJSModules_) {
for (size_t i = 0; i < dependentAsmJSModules_->length(); i++) {
DependentAsmJSModuleExit exit = (*dependentAsmJSModules_)[i];
exit.module->exit(exit.exitIndex).deoptimize(*exit.module);
}
dependentAsmJSModules_->clear();
if (dependentWasmModules_) {
for (DependentWasmModuleImport dep : *dependentWasmModules_)
dep.module->deoptimizeImportExit(dep.importIndex);
dependentWasmModules_->clear();
}
}
void
BaselineScript::unlinkDependentAsmJSModules(FreeOp* fop)
BaselineScript::unlinkDependentWasmModules(FreeOp* fop)
{
// Remove any links from AsmJSModules that contain optimized FFI calls into
// Remove any links from wasm::Modules that contain optimized import calls into
// this BaselineScript.
clearDependentAsmJSModules();
if (dependentAsmJSModules_) {
fop->delete_(dependentAsmJSModules_);
dependentAsmJSModules_ = nullptr;
clearDependentWasmModules();
if (dependentWasmModules_) {
fop->delete_(dependentWasmModules_);
dependentWasmModules_ = nullptr;
}
}
bool
BaselineScript::addDependentAsmJSModule(JSContext* cx, DependentAsmJSModuleExit exit)
BaselineScript::addDependentWasmModule(JSContext* cx, wasm::Module& module, uint32_t importIndex)
{
if (!dependentAsmJSModules_) {
dependentAsmJSModules_ = cx->new_<Vector<DependentAsmJSModuleExit> >(cx);
if (!dependentAsmJSModules_)
if (!dependentWasmModules_) {
dependentWasmModules_ = cx->new_<Vector<DependentWasmModuleImport> >(cx);
if (!dependentWasmModules_)
return false;
}
return dependentAsmJSModules_->append(exit);
return dependentWasmModules_->emplaceBack(&module, importIndex);
}
void
BaselineScript::removeDependentAsmJSModule(DependentAsmJSModuleExit exit)
BaselineScript::removeDependentWasmModule(wasm::Module& module, uint32_t importIndex)
{
if (!dependentAsmJSModules_)
if (!dependentWasmModules_)
return;
for (size_t i = 0; i < dependentAsmJSModules_->length(); i++) {
if ((*dependentAsmJSModules_)[i].module == exit.module &&
(*dependentAsmJSModules_)[i].exitIndex == exit.exitIndex)
for (size_t i = 0; i < dependentWasmModules_->length(); i++) {
if ((*dependentWasmModules_)[i].module == &module &&
(*dependentWasmModules_)[i].importIndex == importIndex)
{
dependentAsmJSModules_->erase(dependentAsmJSModules_->begin() + i);
dependentWasmModules_->erase(dependentWasmModules_->begin() + i);
break;
}
}


@ -94,16 +94,16 @@ struct PCMappingIndexEntry
uint32_t bufferOffset;
};
// Describes a single AsmJSModule which jumps (via an FFI exit with the given
// index) directly to a BaselineScript or IonScript.
struct DependentAsmJSModuleExit
// Describes a single wasm::Module::ImportExit which jumps (via an import with
// the given index) directly to a BaselineScript or IonScript.
struct DependentWasmModuleImport
{
const AsmJSModule* module;
size_t exitIndex;
wasm::Module* module;
size_t importIndex;
DependentAsmJSModuleExit(const AsmJSModule* module, size_t exitIndex)
DependentWasmModuleImport(wasm::Module* module, size_t importIndex)
: module(module),
exitIndex(exitIndex)
importIndex(importIndex)
{ }
};
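
A hedged sketch of the intended flow (the actual call sites are outside this hunk): when an import exit in a wasm::Module is patched to jump straight into a script's JIT code, the script records the (module, importIndex) pair so the exit can be deoptimized if the script is discarded.

    // 'module' and 'importIndex' identify the optimized import exit; 'script'
    // is the JSScript whose JIT code it now targets (all names hypothetical).
    if (!script->baselineScript()->addDependentWasmModule(cx, module, importIndex)) {
        module.deoptimizeImportExit(importIndex);  // fall back to the slow path on OOM
        return false;
    }
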
@ -129,9 +129,9 @@ struct BaselineScript
// Allocated space for fallback stubs.
FallbackICStubSpace fallbackStubSpace_;
// If non-null, the list of AsmJSModules that contain an optimized call
// If non-null, the list of wasm::Modules that contain an optimized call
// directly to this script.
Vector<DependentAsmJSModuleExit>* dependentAsmJSModules_;
Vector<DependentWasmModuleImport>* dependentWasmModules_;
// Native code offset right before the scope chain is initialized.
uint32_t prologueOffset_;
@ -400,10 +400,10 @@ struct BaselineScript
// the result may not be accurate.
jsbytecode* approximatePcForNativeAddress(JSScript* script, uint8_t* nativeAddress);
bool addDependentAsmJSModule(JSContext* cx, DependentAsmJSModuleExit exit);
void unlinkDependentAsmJSModules(FreeOp* fop);
void clearDependentAsmJSModules();
void removeDependentAsmJSModule(DependentAsmJSModuleExit exit);
bool addDependentWasmModule(JSContext* cx, wasm::Module& module, uint32_t importIndex);
void unlinkDependentWasmModules(FreeOp* fop);
void clearDependentWasmModules();
void removeDependentWasmModule(wasm::Module& module, uint32_t importIndex);
// Toggle debug traps (used for breakpoints and step mode) in the script.
// If |pc| is nullptr, toggle traps for all ops in the script. Else, only
@ -480,7 +480,7 @@ struct BaselineScript
pendingBuilder_ = builder;
// lazy linking cannot happen during asmjs to ion.
clearDependentAsmJSModules();
clearDependentWasmModules();
script->updateBaselineOrIonRaw(maybecx);
}


@ -161,7 +161,7 @@ CodeGenerator::CodeGenerator(MIRGenerator* gen, LIRGraph* graph, MacroAssembler*
CodeGenerator::~CodeGenerator()
{
MOZ_ASSERT_IF(!gen->compilingAsmJS(), masm.numAsmJSAbsoluteLinks() == 0);
MOZ_ASSERT_IF(!gen->compilingAsmJS(), masm.numAsmJSAbsoluteAddresses() == 0);
js_delete(scriptCounts_);
}
@ -7873,11 +7873,11 @@ CodeGenerator::visitRest(LRest* lir)
}
bool
CodeGenerator::generateAsmJS(AsmJSFunctionOffsets* offsets)
CodeGenerator::generateAsmJS(wasm::FuncOffsets* offsets)
{
JitSpew(JitSpew_Codegen, "# Emitting asm.js code");
GenerateAsmJSFunctionPrologue(masm, frameSize(), offsets);
wasm::GenerateFunctionPrologue(masm, frameSize(), offsets);
// Overflow checks are omitted by CodeGenerator in some cases (leaf
// functions with small framePushed). Perform overflow-checking after
@ -7897,7 +7897,7 @@ CodeGenerator::generateAsmJS(AsmJSFunctionOffsets* offsets)
return false;
masm.bind(&returnLabel_);
GenerateAsmJSFunctionEpilogue(masm, frameSize(), offsets);
wasm::GenerateFunctionEpilogue(masm, frameSize(), offsets);
if (onOverflow.used()) {
// The stack overflow stub assumes that only sizeof(AsmJSFrame) bytes have


@ -61,7 +61,7 @@ class CodeGenerator : public CodeGeneratorSpecific
public:
bool generate();
bool generateAsmJS(AsmJSFunctionOffsets *offsets);
bool generateAsmJS(wasm::FuncOffsets *offsets);
bool link(JSContext* cx, CompilerConstraintList* constraints);
bool linkSharedStubs(JSContext* cx);


@ -3290,6 +3290,7 @@ AutoFlushICache::flush(uintptr_t start, size_t len)
PerThreadData* pt = TlsPerThreadData.get();
AutoFlushICache* afc = pt ? pt->PerThreadData::autoFlushICache() : nullptr;
if (!afc) {
MOZ_ASSERT(!IsCompilingAsmJS(), "asm.js should always create an AutoFlushICache");
JitSpewCont(JitSpew_CacheFlush, "#");
ExecutableAllocator::cacheFlush((void*)start, len);
MOZ_ASSERT(len <= 32);
@ -3303,6 +3304,7 @@ AutoFlushICache::flush(uintptr_t start, size_t len)
return;
}
MOZ_ASSERT(!IsCompilingAsmJS(), "asm.js should always flush within the range");
JitSpewCont(JitSpew_CacheFlush, afc->inhibit_ ? "x" : "*");
ExecutableAllocator::cacheFlush((void*)start, len);
#endif


@ -38,7 +38,7 @@ class Linker
template <AllowGC allowGC>
JitCode* newCode(JSContext* cx, CodeKind kind, bool hasPatchableBackedges = false) {
MOZ_ASSERT(masm.numAsmJSAbsoluteLinks() == 0);
MOZ_ASSERT(masm.numAsmJSAbsoluteAddresses() == 0);
MOZ_ASSERT_IF(hasPatchableBackedges, kind == ION_CODE);
gc::AutoSuppressGC suppressGC(cx);


@ -13916,17 +13916,17 @@ class MAsmJSCall final
union {
AsmJSInternalCallee internal_;
MDefinition* dynamic_;
wasm::Builtin builtin_;
wasm::SymbolicAddress builtin_;
} u;
public:
Callee() {}
explicit Callee(AsmJSInternalCallee callee) : which_(Internal) { u.internal_ = callee; }
explicit Callee(MDefinition* callee) : which_(Dynamic) { u.dynamic_ = callee; }
explicit Callee(wasm::Builtin callee) : which_(Builtin) { u.builtin_ = callee; }
explicit Callee(wasm::SymbolicAddress callee) : which_(Builtin) { u.builtin_ = callee; }
Which which() const { return which_; }
AsmJSInternalCallee internal() const { MOZ_ASSERT(which_ == Internal); return u.internal_; }
MDefinition* dynamic() const { MOZ_ASSERT(which_ == Dynamic); return u.dynamic_; }
wasm::Builtin builtin() const { MOZ_ASSERT(which_ == Builtin); return u.builtin_; }
wasm::SymbolicAddress builtin() const { MOZ_ASSERT(which_ == Builtin); return u.builtin_; }
};
private:


@ -6,7 +6,6 @@
#include "jit/MIRGraph.h"
#include "asmjs/AsmJSValidate.h"
#include "jit/BytecodeAnalysis.h"
#include "jit/Ion.h"
#include "jit/JitSpewer.h"


@ -388,15 +388,14 @@ class MacroAssembler : public MacroAssemblerSpecific
// asm.js compilation handles its own JitContext-pushing
struct AsmJSToken {};
explicit MacroAssembler(AsmJSToken, TempAllocator *alloc)
explicit MacroAssembler(AsmJSToken, TempAllocator& alloc)
: framePushed_(0),
#ifdef DEBUG
inCall_(false),
#endif
emitProfilingInstrumentation_(false)
{
if (alloc)
moveResolver_.setAllocator(*alloc);
moveResolver_.setAllocator(alloc);
#if defined(JS_CODEGEN_ARM)
initWithAllocator();


@ -3334,7 +3334,7 @@ void Assembler::UpdateBoundsCheck(uint32_t heapSize, Instruction* inst)
*inst = InstALU(InvalidReg, index, imm8, OpCmp, SetCC, Always);
// NOTE: we don't update the Auto Flush Cache! this function is currently
// only called from within AsmJSModule::patchHeapAccesses, which does that
// only called from within ModuleGenerator::finish, which does that
// for us. Don't call this!
}


@ -1928,7 +1928,7 @@ MacroAssemblerARMCompat::movePtr(ImmPtr imm, Register dest)
void
MacroAssemblerARMCompat::movePtr(wasm::SymbolicAddress imm, Register dest)
{
append(AsmJSAbsoluteLink(CodeOffset(currentOffset()), imm));
append(AsmJSAbsoluteAddress(CodeOffset(currentOffset()), imm));
ma_movPatchable(Imm32(-1), dest, Always);
}


@ -778,7 +778,7 @@ class MacroAssemblerCompat : public vixl::MacroAssembler
}
void movePtr(wasm::SymbolicAddress imm, Register dest) {
BufferOffset off = movePatchablePtr(ImmWord(0xffffffffffffffffULL), dest);
append(AsmJSAbsoluteLink(CodeOffset(off.getOffset()), imm));
append(AsmJSAbsoluteAddress(CodeOffset(off.getOffset()), imm));
}
void movePtr(ImmGCPtr imm, Register dest) {
BufferOffset load = movePatchablePtr(ImmPtr(imm.value), dest);


@ -756,7 +756,7 @@ MacroAssemblerMIPSCompat::movePtr(ImmPtr imm, Register dest)
void
MacroAssemblerMIPSCompat::movePtr(wasm::SymbolicAddress imm, Register dest)
{
append(AsmJSAbsoluteLink(CodeOffset(nextOffset().getOffset()), imm));
append(AsmJSAbsoluteAddress(CodeOffset(nextOffset().getOffset()), imm));
ma_liPatchable(dest, ImmWord(-1));
}


@ -35,7 +35,6 @@
#include <float.h>
#include "asmjs/AsmJSValidate.h"
#include "jit/mips32/Assembler-mips32.h"
#include "vm/Runtime.h"


@ -895,7 +895,7 @@ MacroAssemblerMIPS64Compat::movePtr(ImmPtr imm, Register dest)
void
MacroAssemblerMIPS64Compat::movePtr(wasm::SymbolicAddress imm, Register dest)
{
append(AsmJSAbsoluteLink(CodeOffset(nextOffset().getOffset()), imm));
append(AsmJSAbsoluteAddress(CodeOffset(nextOffset().getOffset()), imm));
ma_liPatchable(dest, ImmWord(-1));
}


@ -36,7 +36,6 @@
#include <float.h>
#include "asmjs/AsmJSValidate.h"
#include "jit/mips64/Assembler-mips64.h"
#include "vm/Runtime.h"


@ -11,7 +11,7 @@
#include <limits.h>
#include "asmjs/AsmJSFrameIterator.h"
#include "asmjs/WasmTypes.h"
#include "jit/JitAllocPolicy.h"
#include "jit/Label.h"
#include "jit/Registers.h"
@ -681,10 +681,10 @@ struct AsmJSGlobalAccess
// Represents an instruction to be patched and the intended pointee. These
// links are accumulated in the MacroAssembler, but patching is done outside
// the MacroAssembler (in AsmJSModule::staticallyLink).
struct AsmJSAbsoluteLink
// the MacroAssembler (in Module::staticallyLink).
struct AsmJSAbsoluteAddress
{
AsmJSAbsoluteLink(CodeOffset patchAt, wasm::SymbolicAddress target)
AsmJSAbsoluteAddress(CodeOffset patchAt, wasm::SymbolicAddress target)
: patchAt(patchAt), target(target) {}
CodeOffset patchAt;
@ -711,7 +711,7 @@ class AssemblerShared
wasm::CallSiteAndTargetVector callsites_;
wasm::HeapAccessVector heapAccesses_;
Vector<AsmJSGlobalAccess, 0, SystemAllocPolicy> asmJSGlobalAccesses_;
Vector<AsmJSAbsoluteLink, 0, SystemAllocPolicy> asmJSAbsoluteLinks_;
Vector<AsmJSAbsoluteAddress, 0, SystemAllocPolicy> asmJSAbsoluteAddresses_;
protected:
Vector<CodeLabel, 0, SystemAllocPolicy> codeLabels_;
@ -758,9 +758,9 @@ class AssemblerShared
size_t numAsmJSGlobalAccesses() const { return asmJSGlobalAccesses_.length(); }
AsmJSGlobalAccess asmJSGlobalAccess(size_t i) const { return asmJSGlobalAccesses_[i]; }
void append(AsmJSAbsoluteLink link) { enoughMemory_ &= asmJSAbsoluteLinks_.append(link); }
size_t numAsmJSAbsoluteLinks() const { return asmJSAbsoluteLinks_.length(); }
AsmJSAbsoluteLink asmJSAbsoluteLink(size_t i) const { return asmJSAbsoluteLinks_[i]; }
void append(AsmJSAbsoluteAddress link) { enoughMemory_ &= asmJSAbsoluteAddresses_.append(link); }
size_t numAsmJSAbsoluteAddresses() const { return asmJSAbsoluteAddresses_.length(); }
AsmJSAbsoluteAddress asmJSAbsoluteAddress(size_t i) const { return asmJSAbsoluteAddresses_[i]; }
static bool canUseInSingleByteInstruction(Register reg) { return true; }
@ -792,10 +792,10 @@ class AssemblerShared
for (; i < asmJSGlobalAccesses_.length(); i++)
asmJSGlobalAccesses_[i].patchAt.offsetBy(delta);
i = asmJSAbsoluteLinks_.length();
enoughMemory_ &= asmJSAbsoluteLinks_.appendAll(other.asmJSAbsoluteLinks_);
for (; i < asmJSAbsoluteLinks_.length(); i++)
asmJSAbsoluteLinks_[i].patchAt.offsetBy(delta);
i = asmJSAbsoluteAddresses_.length();
enoughMemory_ &= asmJSAbsoluteAddresses_.appendAll(other.asmJSAbsoluteAddresses_);
for (; i < asmJSAbsoluteAddresses_.length(); i++)
asmJSAbsoluteAddresses_[i].patchAt.offsetBy(delta);
i = codeLabels_.length();
enoughMemory_ &= codeLabels_.appendAll(other.codeLabels_);


@ -1521,7 +1521,7 @@ CodeGeneratorShared::emitAsmJSCall(LAsmJSCall* ins)
masm.call(mir->desc(), ToRegister(ins->getOperand(mir->dynamicCalleeOperandIndex())));
break;
case MAsmJSCall::Callee::Builtin:
masm.call(BuiltinToImmediate(callee.builtin()));
masm.call(callee.builtin());
break;
}


@ -601,7 +601,7 @@ class Assembler : public AssemblerX86Shared
}
void mov(wasm::SymbolicAddress imm, Register dest) {
masm.movq_i64r(-1, dest.encoding());
append(AsmJSAbsoluteLink(CodeOffset(masm.currentOffset()), imm));
append(AsmJSAbsoluteAddress(CodeOffset(masm.currentOffset()), imm));
}
void mov(const Operand& src, Register dest) {
movq(src, dest);


@ -239,16 +239,6 @@ void
CodeGeneratorX64::visitAsmJSCall(LAsmJSCall* ins)
{
emitAsmJSCall(ins);
#ifdef DEBUG
Register scratch = ABIArgGenerator::NonReturn_VolatileReg0;
masm.movePtr(HeapReg, scratch);
masm.loadAsmJSHeapRegisterFromGlobalData();
Label ok;
masm.branchPtr(Assembler::Equal, HeapReg, scratch, &ok);
masm.breakpoint();
masm.bind(&ok);
#endif
}
void


@ -288,7 +288,7 @@ class Assembler : public AssemblerX86Shared
}
void mov(wasm::SymbolicAddress imm, Register dest) {
masm.movl_i32r(-1, dest.encoding());
append(AsmJSAbsoluteLink(CodeOffset(masm.currentOffset()), imm));
append(AsmJSAbsoluteAddress(CodeOffset(masm.currentOffset()), imm));
}
void mov(const Operand& src, Register dest) {
movl(src, dest);
@ -367,11 +367,11 @@ class Assembler : public AssemblerX86Shared
}
void cmpl(Register rhs, wasm::SymbolicAddress lhs) {
masm.cmpl_rm_disp32(rhs.encoding(), (void*)-1);
append(AsmJSAbsoluteLink(CodeOffset(masm.currentOffset()), lhs));
append(AsmJSAbsoluteAddress(CodeOffset(masm.currentOffset()), lhs));
}
void cmpl(Imm32 rhs, wasm::SymbolicAddress lhs) {
JmpSrc src = masm.cmpl_im_disp32(rhs.value, (void*)-1);
append(AsmJSAbsoluteLink(CodeOffset(src.offset()), lhs));
append(AsmJSAbsoluteAddress(CodeOffset(src.offset()), lhs));
}
void adcl(Imm32 imm, Register dest) {


@ -33,7 +33,6 @@
#include "jstypes.h"
#include "jsutil.h"
#include "asmjs/AsmJSModule.h"
#include "frontend/BytecodeCompiler.h"
#include "frontend/SourceNotes.h"
#include "gc/GCInternals.h"


@ -825,6 +825,9 @@ class ScriptSourceHolder
{
ss->decref();
}
ScriptSource* get() const {
return ss;
}
};
struct CompressedSourceHasher


@ -142,14 +142,16 @@ EXPORTS.js += [
]
UNIFIED_SOURCES += [
'asmjs/AsmJSFrameIterator.cpp',
'asmjs/AsmJSLink.cpp',
'asmjs/AsmJSModule.cpp',
'asmjs/AsmJSSignalHandlers.cpp',
'asmjs/AsmJSValidate.cpp',
'asmjs/WasmFrameIterator.cpp',
'asmjs/WasmGenerator.cpp',
'asmjs/WasmIonCompile.cpp',
'asmjs/WasmModule.cpp',
'asmjs/WasmSignalHandlers.cpp',
'asmjs/WasmStubs.cpp',
'asmjs/WasmTypes.cpp',
'builtin/AtomicsObject.cpp',
'builtin/Eval.cpp',
'builtin/Intl.cpp',


@ -79,7 +79,7 @@ js::SetFakeCPUCount(size_t count)
}
bool
js::StartOffThreadWasmCompile(ExclusiveContext* cx, wasm::CompileTask* task)
js::StartOffThreadWasmCompile(ExclusiveContext* cx, wasm::IonCompileTask* task)
{
AutoLockHelperThreadState lock;
@ -737,7 +737,7 @@ GlobalHelperThreadState::canStartWasmCompile()
// Honor the maximum allowed threads to compile wasm jobs at once,
// to avoid oversaturating the machine.
if (!checkTaskThreadLimit<wasm::CompileTask*>(maxWasmCompilationThreads()))
if (!checkTaskThreadLimit<wasm::IonCompileTask*>(maxWasmCompilationThreads()))
return false;
return true;
@ -1201,11 +1201,11 @@ HelperThread::handleWasmWorkload()
currentTask.emplace(HelperThreadState().wasmWorklist().popCopy());
bool success = false;
wasm::CompileTask* task = wasmTask();
wasm::IonCompileTask* task = wasmTask();
{
AutoUnlockHelperThreadState unlock;
PerThreadData::AutoEnterRuntime enter(threadData.ptr(), task->args().runtime);
success = wasm::CompileFunction(task);
PerThreadData::AutoEnterRuntime enter(threadData.ptr(), task->runtime());
success = wasm::IonCompileFunction(task);
}
// On success, try to move work to the finished list.
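The HelperThreads.cpp hunks rename wasm::CompileTask to wasm::IonCompileTask; handleWasmWorkload() pops a task from the locked worklist, releases the lock while compiling, then files the result. A minimal standalone sketch of that shape, with std::mutex standing in for the helper-thread lock (not the actual SpiderMonkey implementation):

#include <mutex>
#include <vector>

struct IonCompileTask { bool compiled = false; };

struct HelperState {
    std::mutex lock;
    std::vector<IonCompileTask*> worklist;
    std::vector<IonCompileTask*> finished;
};

bool compileFunction(IonCompileTask* task) {    // stand-in for wasm::IonCompileFunction
    task->compiled = true;
    return true;
}

void handleWasmWorkload(HelperState& state) {
    IonCompileTask* task;
    {
        std::lock_guard<std::mutex> guard(state.lock);
        if (state.worklist.empty())
            return;
        task = state.worklist.back();
        state.worklist.pop_back();
    }
    bool success = compileFunction(task);       // compile with the lock released
    std::lock_guard<std::mutex> guard(state.lock);
    if (success)
        state.finished.push_back(task);         // move work to the finished list
}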

View File

@ -20,7 +20,6 @@
#include "jscntxt.h"
#include "jslock.h"
#include "asmjs/WasmCompileArgs.h"
#include "frontend/TokenStream.h"
#include "jit/Ion.h"
@ -32,11 +31,10 @@ namespace jit {
class IonBuilder;
} // namespace jit
namespace wasm {
struct CompileArgs;
class CompileTask;
class FuncIR;
class FunctionCompileResults;
typedef Vector<CompileTask*, 0, SystemAllocPolicy> CompileTaskVector;
class IonCompileTask;
typedef Vector<IonCompileTask*, 0, SystemAllocPolicy> IonCompileTaskVector;
} // namespace wasm
// Per-process state for off thread work items.
@ -70,7 +68,7 @@ class GlobalHelperThreadState
IonBuilderList ionLazyLinkList_;
// wasm worklist and finished jobs.
wasm::CompileTaskVector wasmWorklist_, wasmFinishedList_;
wasm::IonCompileTaskVector wasmWorklist_, wasmFinishedList_;
public:
// For now, only allow a single parallel asm.js compilation to happen at a
@ -153,11 +151,11 @@ class GlobalHelperThreadState
return ionLazyLinkList_;
}
wasm::CompileTaskVector& wasmWorklist() {
wasm::IonCompileTaskVector& wasmWorklist() {
MOZ_ASSERT(isLocked());
return wasmWorklist_;
}
wasm::CompileTaskVector& wasmFinishedList() {
wasm::IonCompileTaskVector& wasmFinishedList() {
MOZ_ASSERT(isLocked());
return wasmFinishedList_;
}
@ -296,7 +294,7 @@ struct HelperThread
/* The current task being executed by this thread, if any. */
mozilla::Maybe<mozilla::Variant<jit::IonBuilder*,
wasm::CompileTask*,
wasm::IonCompileTask*,
ParseTask*,
SourceCompressionTask*,
GCHelperState*,
@ -312,8 +310,8 @@ struct HelperThread
}
/* Any wasm data currently being optimized by Ion on this thread. */
wasm::CompileTask* wasmTask() {
return maybeCurrentTaskAs<wasm::CompileTask*>();
wasm::IonCompileTask* wasmTask() {
return maybeCurrentTaskAs<wasm::IonCompileTask*>();
}
/* Any source being parsed/emitted on this thread. */
@ -383,7 +381,7 @@ PauseCurrentHelperThread();
/* Perform MIR optimization and LIR generation on a single function. */
bool
StartOffThreadWasmCompile(ExclusiveContext* cx, wasm::CompileTask* task);
StartOffThreadWasmCompile(ExclusiveContext* cx, wasm::IonCompileTask* task);
/*
* Schedule an Ion compilation for a script, given a builder which has been
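In the HelperThreads.h hunks above, the thread's current job is held as a Maybe<Variant<...>> and read back through typed accessors such as wasmTask(). A minimal standalone sketch of maybeCurrentTaskAs<T>(), using std::optional and std::variant as stand-ins for mozilla::Maybe and mozilla::Variant:

#include <optional>
#include <variant>

struct IonCompileTask {};
struct ParseTask {};

struct HelperThread {
    std::optional<std::variant<IonCompileTask*, ParseTask*>> currentTask;

    template <typename T>
    T maybeCurrentTaskAs() const {
        if (currentTask && std::holds_alternative<T>(*currentTask))
            return std::get<T>(*currentTask);
        return nullptr;
    }

    IonCompileTask* wasmTask() const { return maybeCurrentTaskAs<IonCompileTask*>(); }
};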

View File

@ -39,7 +39,7 @@
#include "jswin.h"
#include "jswrapper.h"
#include "asmjs/AsmJSSignalHandlers.h"
#include "asmjs/WasmSignalHandlers.h"
#include "jit/arm/Simulator-arm.h"
#include "jit/arm64/vixl/Simulator-vixl.h"
#include "jit/JitCompartment.h"
@ -202,7 +202,7 @@ JSRuntime::JSRuntime(JSRuntime* parentRuntime)
destroyPrincipals(nullptr),
readPrincipals(nullptr),
errorReporter(nullptr),
linkedAsmJSModules(nullptr),
linkedWasmModules(nullptr),
propertyRemovals(0),
#if !EXPOSE_INTL_API
thousandsSeparator(0),
@ -346,7 +346,7 @@ JSRuntime::init(uint32_t maxbytes, uint32_t maxNurseryBytes)
jitSupportsFloatingPoint = js::jit::JitSupportsFloatingPoint();
jitSupportsSimd = js::jit::JitSupportsSimd();
signalHandlersInstalled_ = EnsureSignalHandlersInstalled(this);
signalHandlersInstalled_ = wasm::EnsureSignalHandlersInstalled(this);
canUseSignalHandlers_ = signalHandlersInstalled_ && !SignalBasedTriggersDisabled();
if (!spsProfiler.init())

View File

@ -24,7 +24,7 @@
#include "jsscript.h"
#ifdef XP_DARWIN
# include "asmjs/AsmJSSignalHandlers.h"
# include "asmjs/WasmSignalHandlers.h"
#endif
#include "builtin/AtomicsObject.h"
#include "ds/FixedSizeHash.h"
@ -89,7 +89,6 @@ ReportOverRecursed(ExclusiveContext* cx);
class Activation;
class ActivationIterator;
class AsmJSActivation;
class AsmJSModule;
class MathCache;
namespace jit {
@ -106,6 +105,10 @@ class Simulator;
#endif
} // namespace jit
namespace wasm {
class Module;
} // namespace wasm
/*
* GetSrcNote cache to avoid O(n^2) growth in finding a source note for a
* given pc in a script. We use the script->code pointer to tag the cache,
@ -1146,7 +1149,7 @@ struct JSRuntime : public JS::shadow::Runtime,
void* data;
#if defined(XP_DARWIN) && defined(ASMJS_MAY_USE_SIGNAL_HANDLERS_FOR_OOB)
js::AsmJSMachExceptionHandler asmJSMachExceptionHandler;
js::wasm::MachExceptionHandler wasmMachExceptionHandler;
#endif
private:
@ -1187,8 +1190,8 @@ struct JSRuntime : public JS::shadow::Runtime,
/* AsmJSCache callbacks are runtime-wide. */
JS::AsmJSCacheOps asmJSCacheOps;
/* Head of the linked list of linked asm.js modules. */
js::AsmJSModule* linkedAsmJSModules;
/* Head of the linked list of linked wasm modules. */
js::wasm::Module* linkedWasmModules;
/*
* The propertyRemovals counter is incremented for every JSObject::clear,
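The Runtime.h hunks above replace the linkedAsmJSModules head with js::wasm::Module* linkedWasmModules: the runtime stores only the head of an intrusive singly-linked list and each module carries the link to the next. A minimal standalone sketch of that list, assuming a hypothetical link field on the module (the real member name differs):

struct Module {
    Module* nextLinked = nullptr;    // hypothetical link field for illustration
};

struct Runtime {
    Module* linkedWasmModules = nullptr;

    void addLinkedModule(Module* m) {
        m->nextLinked = linkedWasmModules;   // push onto the head of the list
        linkedWasmModules = m;
    }

    template <typename F>
    void forEachLinkedModule(F&& f) {
        for (Module* m = linkedWasmModules; m; m = m->nextLinked)
            f(m);
    }
};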

View File

@ -28,6 +28,8 @@
#include "jsobjinlines.h"
#include "vm/NativeObject-inl.h"
using namespace js;
static inline void*

View File

@ -1023,12 +1023,6 @@ InterpreterActivation::resumeGeneratorFrame(HandleFunction callee, HandleValue n
return true;
}
inline JSContext*
AsmJSActivation::cx()
{
return cx_->asJSContext();
}
inline bool
FrameIter::hasCachedSavedFrame() const
{

View File

@ -10,8 +10,8 @@
#include "jscntxt.h"
#include "asmjs/AsmJSFrameIterator.h"
#include "asmjs/AsmJSModule.h"
#include "asmjs/WasmFrameIterator.h"
#include "gc/Marking.h"
#include "jit/BaselineFrame.h"
#include "jit/JitcodeMap.h"
@ -608,7 +608,7 @@ FrameIter::settleOnActivation()
}
if (activation->isAsmJS()) {
data_.asmJSFrames_ = AsmJSFrameIterator(*data_.activations_->asAsmJS());
data_.asmJSFrames_ = wasm::FrameIterator(*data_.activations_->asAsmJS());
if (data_.asmJSFrames_.done()) {
++data_.activations_;
@ -986,7 +986,7 @@ FrameIter::scriptFilename() const
case JIT:
return script()->filename();
case ASMJS:
return data_.activations_->asAsmJS()->module().scriptSource()->filename();
return data_.activations_->asAsmJS()->module().wasm().filename();
}
MOZ_CRASH("Unexpected state");
@ -1744,12 +1744,12 @@ AsmJSActivation::AsmJSActivation(JSContext* cx, AsmJSModule& module)
entrySP_(nullptr),
resumePC_(nullptr),
fp_(nullptr),
packedExitReason_(wasm::ExitReason(wasm::ExitReason::None).pack())
exitReason_(wasm::ExitReason::None)
{
(void) entrySP_; // squelch GCC warning
prevAsmJSForModule_ = module.activation();
module.activation() = this;
prevAsmJSForModule_ = module.wasm().activation();
module.wasm().activation() = this;
prevAsmJS_ = cx->runtime()->asmJSActivationStack_;
cx->runtime()->asmJSActivationStack_ = this;
@ -1766,8 +1766,8 @@ AsmJSActivation::~AsmJSActivation()
MOZ_ASSERT(fp_ == nullptr);
MOZ_ASSERT(module_.activation() == this);
module_.activation() = prevAsmJSForModule_;
MOZ_ASSERT(module_.wasm().activation() == this);
module_.wasm().activation() = prevAsmJSForModule_;
JSContext* cx = cx_->asJSContext();
MOZ_ASSERT(cx->runtime()->asmJSActivationStack_ == this);
@ -1860,7 +1860,7 @@ JS::ProfilingFrameIterator::ProfilingFrameIterator(JSRuntime* rt, const Register
MOZ_ASSERT(activation_->isProfiling());
static_assert(sizeof(AsmJSProfilingFrameIterator) <= StorageSpace &&
static_assert(sizeof(wasm::ProfilingFrameIterator) <= StorageSpace &&
sizeof(jit::JitProfilingFrameIterator) <= StorageSpace,
"Need to increase storage");
@ -1916,7 +1916,7 @@ JS::ProfilingFrameIterator::iteratorConstruct(const RegisterState& state)
MOZ_ASSERT(activation_->isAsmJS() || activation_->isJit());
if (activation_->isAsmJS()) {
new (storage_.addr()) AsmJSProfilingFrameIterator(*activation_->asAsmJS(), state);
new (storage_.addr()) wasm::ProfilingFrameIterator(*activation_->asAsmJS(), state);
// Set savedPrevJitTop_ to the actual jitTop_ from the runtime.
savedPrevJitTop_ = activation_->cx()->runtime()->jitTop;
return;
@ -1933,7 +1933,7 @@ JS::ProfilingFrameIterator::iteratorConstruct()
MOZ_ASSERT(activation_->isAsmJS() || activation_->isJit());
if (activation_->isAsmJS()) {
new (storage_.addr()) AsmJSProfilingFrameIterator(*activation_->asAsmJS());
new (storage_.addr()) wasm::ProfilingFrameIterator(*activation_->asAsmJS());
return;
}
@ -1949,7 +1949,7 @@ JS::ProfilingFrameIterator::iteratorDestroy()
MOZ_ASSERT(activation_->isAsmJS() || activation_->isJit());
if (activation_->isAsmJS()) {
asmJSIter().~AsmJSProfilingFrameIterator();
asmJSIter().~ProfilingFrameIterator();
return;
}
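The Stack.cpp hunks above construct the wasm::ProfilingFrameIterator with placement new into inline AlignedStorage, guard the buffer size with a static_assert, and tear it down with an explicit destructor call in iteratorDestroy(). A minimal standalone sketch of that pattern, with a dummy iterator type in place of the real one:

#include <cstddef>
#include <new>

struct WasmIter { int depth = 0; };   // stand-in for wasm::ProfilingFrameIterator

class ProfilingFrameIterator {
    static const size_t StorageSpace = 8 * sizeof(void*);
    alignas(std::max_align_t) unsigned char storage_[StorageSpace];
    bool constructed_ = false;

    WasmIter& iter() { return *reinterpret_cast<WasmIter*>(storage_); }

  public:
    ProfilingFrameIterator() {
        static_assert(sizeof(WasmIter) <= StorageSpace, "Need to increase storage");
        new (storage_) WasmIter();    // placement new into the inline buffer
        constructed_ = true;
    }
    ~ProfilingFrameIterator() {
        if (constructed_)
            iter().~WasmIter();       // explicit destructor call, as in iteratorDestroy()
    }
};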

View File

@ -16,7 +16,7 @@
#include "jsscript.h"
#include "jsutil.h"
#include "asmjs/AsmJSFrameIterator.h"
#include "asmjs/WasmFrameIterator.h"
#include "gc/Rooting.h"
#include "jit/JitFrameIterator.h"
#ifdef CHECK_OSIPOINT_REGISTERS
@ -1804,13 +1804,12 @@ class AsmJSActivation : public Activation
void* entrySP_;
void* resumePC_;
uint8_t* fp_;
uint32_t packedExitReason_;
wasm::ExitReason exitReason_;
public:
AsmJSActivation(JSContext* cx, AsmJSModule& module);
~AsmJSActivation();
inline JSContext* cx();
AsmJSModule& module() const { return module_; }
AsmJSActivation* prevAsmJS() const { return prevAsmJS_; }
@ -1823,7 +1822,7 @@ class AsmJSActivation : public Activation
uint8_t* fp() const { return fp_; }
// Returns the reason why asm.js code called out of asm.js code.
wasm::ExitReason exitReason() const { return wasm::ExitReason::unpack(packedExitReason_); }
wasm::ExitReason exitReason() const { return exitReason_; }
// Read by JIT code:
static unsigned offsetOfContext() { return offsetof(AsmJSActivation, cx_); }
@ -1832,7 +1831,7 @@ class AsmJSActivation : public Activation
// Written by JIT code:
static unsigned offsetOfEntrySP() { return offsetof(AsmJSActivation, entrySP_); }
static unsigned offsetOfFP() { return offsetof(AsmJSActivation, fp_); }
static unsigned offsetOfPackedExitReason() { return offsetof(AsmJSActivation, packedExitReason_); }
static unsigned offsetOfExitReason() { return offsetof(AsmJSActivation, exitReason_); }
// Read/written from SIGSEGV handler:
void setResumePC(void* pc) { resumePC_ = pc; }
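The Stack.h hunks above swap offsetOfPackedExitReason for offsetOfExitReason; these static offsetOf*() helpers exist so JIT-generated code can load and store AsmJSActivation fields by byte offset. A minimal standalone sketch of the offsetof-accessor pattern (field names illustrative):

#include <cstddef>
#include <cstdint>

class Activation {
    void* entrySP_;
    uint8_t* fp_;
    uint32_t exitReason_;

  public:
    // Offsets handed to the code generator so emitted code can address fields directly.
    static unsigned offsetOfEntrySP()    { return offsetof(Activation, entrySP_); }
    static unsigned offsetOfFP()         { return offsetof(Activation, fp_); }
    static unsigned offsetOfExitReason() { return offsetof(Activation, exitReason_); }
};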
@ -1889,7 +1888,7 @@ class FrameIter
jit::JitFrameIterator jitFrames_;
unsigned ionInlineFrameNo_;
AsmJSFrameIterator asmJSFrames_;
wasm::FrameIterator asmJSFrames_;
Data(JSContext* cx, SavedOption savedOption, ContextOption contextOption,
DebuggerEvalOption debuggerEvalOption, JSPrincipals* principals);