From 7963ee4e6d10682f0eaa07519af654e46d853391 Mon Sep 17 00:00:00 2001
From: Luke Wagner
Date: Mon, 28 Dec 2015 17:39:21 -0600
Subject: [PATCH] Bug 1229642 - Split wasm::Module out of AsmJSModule (r=bbouvier)
---
 js/public/ProfilingFrameIterator.h | 12 +-
 js/src/asmjs/AsmJSFrameIterator.h | 160 --
 js/src/asmjs/AsmJSLink.cpp | 294 +--
 js/src/asmjs/AsmJSModule.cpp | 1945 +----------------
 js/src/asmjs/AsmJSModule.h | 995 +--------
 js/src/asmjs/AsmJSValidate.cpp | 353 +--
 js/src/asmjs/AsmJSValidate.h | 1 -
 js/src/asmjs/WasmCompileArgs.h | 42 -
 ...rameIterator.cpp => WasmFrameIterator.cpp} | 484 ++--
 js/src/asmjs/WasmFrameIterator.h | 121 +
 js/src/asmjs/WasmGenerator.cpp | 598 +++--
 js/src/asmjs/WasmGenerator.h | 126 +-
 js/src/asmjs/WasmIR.h | 11 +-
 js/src/asmjs/WasmIonCompile.cpp | 78 +-
 js/src/asmjs/WasmIonCompile.h | 55 +-
 js/src/asmjs/WasmModule.cpp | 1368 ++++++++++++
 js/src/asmjs/WasmModule.h | 569 +++++
 js/src/asmjs/WasmSerialize.h | 350 +++
 ...nalHandlers.cpp => WasmSignalHandlers.cpp} | 58 +-
 ...SSignalHandlers.h => WasmSignalHandlers.h} | 23 +-
 js/src/asmjs/WasmStubs.cpp | 391 ++--
 js/src/asmjs/WasmStubs.h | 17 +-
 js/src/asmjs/WasmTypes.cpp | 292 +++
 js/src/asmjs/{Wasm.h => WasmTypes.h} | 217 +-
 js/src/builtin/AtomicsObject.cpp | 6 +-
 js/src/builtin/WeakSetObject.cpp | 1 +
 js/src/frontend/ParseNode.h | 1 +
 js/src/jit-test/tests/asm.js/testProfiling.js | 8 +-
 js/src/jit/BaselineJIT.cpp | 55 +-
 js/src/jit/BaselineJIT.h | 28 +-
 js/src/jit/CodeGenerator.cpp | 8 +-
 js/src/jit/CodeGenerator.h | 2 +-
 js/src/jit/Ion.cpp | 2 +
 js/src/jit/Linker.h | 2 +-
 js/src/jit/MIR.h | 6 +-
 js/src/jit/MIRGraph.cpp | 1 -
 js/src/jit/MacroAssembler.h | 5 +-
 js/src/jit/arm/Assembler-arm.cpp | 2 +-
 js/src/jit/arm/MacroAssembler-arm.cpp | 2 +-
 js/src/jit/arm64/MacroAssembler-arm64.h | 2 +-
 js/src/jit/mips32/MacroAssembler-mips32.cpp | 2 +-
 js/src/jit/mips32/Simulator-mips32.cpp | 1 -
 js/src/jit/mips64/MacroAssembler-mips64.cpp | 2 +-
 js/src/jit/mips64/Simulator-mips64.cpp | 1 -
 js/src/jit/shared/Assembler-shared.h | 24 +-
 js/src/jit/shared/CodeGenerator-shared.cpp | 2 +-
 js/src/jit/x64/Assembler-x64.h | 2 +-
 js/src/jit/x64/CodeGenerator-x64.cpp | 10 -
 js/src/jit/x86/Assembler-x86.h | 6 +-
 js/src/jsopcode.cpp | 1 -
 js/src/jsscript.h | 3 +
 js/src/moz.build | 6 +-
 js/src/vm/HelperThreads.cpp | 10 +-
 js/src/vm/HelperThreads.h | 20 +-
 js/src/vm/Runtime.cpp | 6 +-
 js/src/vm/Runtime.h | 13 +-
 js/src/vm/SharedArrayObject.cpp | 2 +
 js/src/vm/Stack-inl.h | 6 -
 js/src/vm/Stack.cpp | 24 +-
 js/src/vm/Stack.h | 11 +-
 60 files changed, 4535 insertions(+), 4308 deletions(-)
 delete mode 100644 js/src/asmjs/AsmJSFrameIterator.h
 delete mode 100644 js/src/asmjs/WasmCompileArgs.h
 rename js/src/asmjs/{AsmJSFrameIterator.cpp => WasmFrameIterator.cpp} (58%)
 create mode 100644 js/src/asmjs/WasmFrameIterator.h
 create mode 100644 js/src/asmjs/WasmModule.cpp
 create mode 100644 js/src/asmjs/WasmModule.h
 create mode 100644 js/src/asmjs/WasmSerialize.h
 rename js/src/asmjs/{AsmJSSignalHandlers.cpp => WasmSignalHandlers.cpp} (96%)
 rename js/src/asmjs/{AsmJSSignalHandlers.h => WasmSignalHandlers.h} (86%)
 create mode 100644 js/src/asmjs/WasmTypes.cpp
 rename js/src/asmjs/{Wasm.h => WasmTypes.h} (81%)

diff --git a/js/public/ProfilingFrameIterator.h b/js/public/ProfilingFrameIterator.h
index 1a79687eac2..4c22e568c3a 100644
--- a/js/public/ProfilingFrameIterator.h
+++ b/js/public/ProfilingFrameIterator.h
@@ -19,12 +19,14 @@ class JSScript; namespace js { class Activation; - class AsmJSProfilingFrameIterator; namespace jit { class JitActivation; class JitProfilingFrameIterator; class JitcodeGlobalEntry; } // namespace jit + namespace wasm { + class ProfilingFrameIterator; + } // namespace wasm } // namespace js namespace JS {
@@ -49,15 +51,15 @@ class JS_PUBLIC_API(ProfilingFrameIterator) static const unsigned StorageSpace = 8 * sizeof(void*); mozilla::AlignedStorage<StorageSpace> storage_; - js::AsmJSProfilingFrameIterator& asmJSIter() { + js::wasm::ProfilingFrameIterator& asmJSIter() { MOZ_ASSERT(!done()); MOZ_ASSERT(isAsmJS()); - return *reinterpret_cast<js::AsmJSProfilingFrameIterator*>(storage_.addr()); + return *reinterpret_cast<js::wasm::ProfilingFrameIterator*>(storage_.addr()); } - const js::AsmJSProfilingFrameIterator& asmJSIter() const { + const js::wasm::ProfilingFrameIterator& asmJSIter() const { MOZ_ASSERT(!done()); MOZ_ASSERT(isAsmJS()); - return *reinterpret_cast<const js::AsmJSProfilingFrameIterator*>(storage_.addr()); + return *reinterpret_cast<const js::wasm::ProfilingFrameIterator*>(storage_.addr()); } js::jit::JitProfilingFrameIterator& jitIter() {
diff --git a/js/src/asmjs/AsmJSFrameIterator.h b/js/src/asmjs/AsmJSFrameIterator.h
deleted file mode 100644
index 4bf1d888be6..00000000000
--- a/js/src/asmjs/AsmJSFrameIterator.h
+++ /dev/null
@@ -1,160 +0,0 @@ -/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*- - * vim: set ts=8 sts=4 et sw=4 tw=99: - * - * Copyright 2014 Mozilla Foundation - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#ifndef asmjs_AsmJSFrameIterator_h -#define asmjs_AsmJSFrameIterator_h - -#include <stdint.h> - -#include "asmjs/Wasm.h" -#include "js/ProfilingFrameIterator.h" - -class JSAtom; - -namespace js { - -class AsmJSActivation; -class AsmJSModule; -namespace jit { class MacroAssembler; class Label; } -namespace wasm { class CallSite; } - -// Iterates over the frames of a single AsmJSActivation, called synchronously -// from C++ in the thread of the asm.js. The one exception is that this iterator -// may be called from the interrupt callback which may be called asynchronously -// from asm.js code; in this case, the backtrace may not be correct. -class AsmJSFrameIterator -{ - const AsmJSModule* module_; - const wasm::CallSite* callsite_; - uint8_t* fp_; - - // Really, a const AsmJSModule::CodeRange*, but no forward declarations of - // nested classes, so use void* to avoid pulling in all of AsmJSModule.h. - const void* codeRange_; - - void settle(); - - public: - explicit AsmJSFrameIterator() : module_(nullptr) {} - explicit AsmJSFrameIterator(const AsmJSActivation& activation); - void operator++(); - bool done() const { return !fp_; } - JSAtom* functionDisplayAtom() const; - unsigned computeLine(uint32_t* column) const; -}; - -// Iterates over the frames of a single AsmJSActivation, given an -// asynchrously-interrupted thread's state. If the activation's -// module is not in profiling mode, the activation is skipped.
-class AsmJSProfilingFrameIterator -{ - const AsmJSModule* module_; - uint8_t* callerFP_; - void* callerPC_; - void* stackAddress_; - wasm::ExitReason exitReason_; - - // Really, a const AsmJSModule::CodeRange*, but no forward declarations of - // nested classes, so use void* to avoid pulling in all of AsmJSModule.h. - const void* codeRange_; - - void initFromFP(const AsmJSActivation& activation); - - public: - AsmJSProfilingFrameIterator() : codeRange_(nullptr) {} - explicit AsmJSProfilingFrameIterator(const AsmJSActivation& activation); - AsmJSProfilingFrameIterator(const AsmJSActivation& activation, - const JS::ProfilingFrameIterator::RegisterState& state); - void operator++(); - bool done() const { return !codeRange_; } - - void* stackAddress() const { MOZ_ASSERT(!done()); return stackAddress_; } - const char* label() const; -}; - -/******************************************************************************/ -// Prologue/epilogue code generation. - -struct AsmJSOffsets -{ - MOZ_IMPLICIT AsmJSOffsets(uint32_t begin = 0, - uint32_t end = 0) - : begin(begin), end(end) - {} - - // These define a [begin, end) contiguous range of instructions compiled - // into an AsmJSModule::CodeRange. - uint32_t begin; - uint32_t end; -}; - -struct AsmJSProfilingOffsets : AsmJSOffsets -{ - MOZ_IMPLICIT AsmJSProfilingOffsets(uint32_t profilingReturn = 0) - : AsmJSOffsets(), profilingReturn(profilingReturn) - {} - - // For CodeRanges with AsmJSProfilingOffsets, 'begin' is the offset of the - // profiling entry. - uint32_t profilingEntry() const { return begin; } - - // The profiling return is the offset of the return instruction, which - // precedes the 'end' by a variable number of instructions due to - // out-of-line codegen. - uint32_t profilingReturn; -}; - -struct AsmJSFunctionOffsets : AsmJSProfilingOffsets -{ - MOZ_IMPLICIT AsmJSFunctionOffsets(uint32_t nonProfilingEntry = 0, - uint32_t profilingJump = 0, - uint32_t profilingEpilogue = 0) - : AsmJSProfilingOffsets(), - nonProfilingEntry(nonProfilingEntry), - profilingJump(profilingJump), - profilingEpilogue(profilingEpilogue) - {} - - // Function CodeRanges have an additional non-profiling entry that comes - // after the profiling entry and a non-profiling epilogue that comes before - // the profiling epilogue. - uint32_t nonProfilingEntry; - - // When profiling is enabled, the 'nop' at offset 'profilingJump' is - // overwritten to be a jump to 'profilingEpilogue'. 
- uint32_t profilingJump; - uint32_t profilingEpilogue; }; - -void -GenerateAsmJSExitPrologue(jit::MacroAssembler& masm, unsigned framePushed, wasm::ExitReason reason, - AsmJSProfilingOffsets* offsets, jit::Label* maybeEntry = nullptr); -void -GenerateAsmJSExitEpilogue(jit::MacroAssembler& masm, unsigned framePushed, wasm::ExitReason reason, - AsmJSProfilingOffsets* offsets); - -void -GenerateAsmJSFunctionPrologue(jit::MacroAssembler& masm, unsigned framePushed, - AsmJSFunctionOffsets* offsets); -void -GenerateAsmJSFunctionEpilogue(jit::MacroAssembler& masm, unsigned framePushed, - AsmJSFunctionOffsets* offsets); - -} // namespace js - -#endif // asmjs_AsmJSFrameIterator_h
diff --git a/js/src/asmjs/AsmJSLink.cpp b/js/src/asmjs/AsmJSLink.cpp
index 7dc0276feba..aa13cb9623e 100644
--- a/js/src/asmjs/AsmJSLink.cpp
+++ b/js/src/asmjs/AsmJSLink.cpp
@@ -20,10 +20,6 @@ #include "mozilla/PodOperations.h" -#ifdef MOZ_VTUNE -# include "vtune/VTuneWrapper.h" -#endif - #include "jscntxt.h" #include "jsmath.h" #include "jsprf.h"
@@ -35,9 +31,6 @@ #include "frontend/BytecodeCompiler.h" #include "jit/Ion.h" #include "jit/JitCommon.h" -#ifdef JS_ION_PERF -# include "jit/PerfSpewer.h" -#endif #include "vm/ArrayBufferObject.h" #include "vm/SharedArrayObject.h" #include "vm/StringBuffer.h"
@@ -54,21 +47,6 @@ using namespace js::wasm; using mozilla::IsNaN; using mozilla::PodZero; -static bool -CloneModule(JSContext* cx, MutableHandle<AsmJSModuleObject*> moduleObj) -{ - ScopedJSDeletePtr<AsmJSModule> module; - if (!moduleObj->module().clone(cx, &module)) - return false; - - AsmJSModuleObject* newModuleObj = AsmJSModuleObject::create(cx, &module); - if (!newModuleObj) - return false; - - moduleObj.set(newModuleObj); - return true; -} - static bool LinkFail(JSContext* cx, const char* str) {
@@ -127,10 +105,10 @@ HasPureCoercion(JSContext* cx, HandleValue v) } static bool -ValidateGlobalVariable(JSContext* cx, const AsmJSModule& module, AsmJSModule::Global& global, +ValidateGlobalVariable(JSContext* cx, const AsmJSModule::Global& global, uint8_t* globalData, HandleValue importVal) { - void* datum = module.globalData() + global.varGlobalDataOffset(); + void* datum = globalData + global.varGlobalDataOffset(); switch (global.varInitKind()) { case AsmJSModule::Global::InitConstant: {
@@ -214,8 +192,8 @@ ValidateGlobalVariable(JSContext* cx, const AsmJSModule& module, AsmJSModule::Gl } static bool -ValidateFFI(JSContext* cx, AsmJSModule::Global& global, HandleValue importVal, - AutoObjectVector* ffis) +ValidateFFI(JSContext* cx, const AsmJSModule::Global& global, HandleValue importVal, + AutoVectorRooter<JSFunction*>* ffis) { RootedPropertyName field(cx, global.ffiField()); RootedValue v(cx);
@@ -230,7 +208,7 @@ ValidateFFI(JSContext* cx, AsmJSModule::Global& global, HandleValue importVal, } static bool -ValidateArrayView(JSContext* cx, AsmJSModule::Global& global, HandleValue globalVal) +ValidateArrayView(JSContext* cx, const AsmJSModule::Global& global, HandleValue globalVal) { RootedPropertyName field(cx, global.maybeViewName()); if (!field)
@@ -272,7 +250,7 @@ ValidateByteLength(JSContext* cx, HandleValue globalVal) } static bool -ValidateMathBuiltinFunction(JSContext* cx, AsmJSModule::Global& global, HandleValue globalVal) +ValidateMathBuiltinFunction(JSContext* cx, const AsmJSModule::Global& global, HandleValue globalVal) { RootedValue v(cx); if (!GetDataProperty(cx, globalVal, cx->names().Math, &v))
@@ -334,7 +312,7 @@ AsmJSSimdTypeToTypeDescrType(AsmJSSimdType type) } static bool -ValidateSimdType(JSContext* cx, AsmJSModule::Global& global, HandleValue globalVal, +ValidateSimdType(JSContext* cx, const AsmJSModule::Global& global, HandleValue globalVal, MutableHandleValue out) { RootedValue v(cx);
@@ -366,14 +344,14 @@ ValidateSimdType(JSContext* cx, AsmJSModule::Global& global, HandleValue globalV } static bool -ValidateSimdType(JSContext* cx, AsmJSModule::Global& global, HandleValue globalVal) +ValidateSimdType(JSContext* cx, const AsmJSModule::Global& global, HandleValue globalVal) { RootedValue _(cx); return ValidateSimdType(cx, global, globalVal, &_); } static bool -ValidateSimdOperation(JSContext* cx, AsmJSModule::Global& global, HandleValue globalVal) +ValidateSimdOperation(JSContext* cx, const AsmJSModule::Global& global, HandleValue globalVal) { // SIMD operations are loaded from the SIMD type, so the type must have been // validated before the operation.
@@ -426,7 +404,7 @@ ValidateSimdOperation(JSContext* cx, AsmJSModule::Global& global, HandleValue gl } static bool -ValidateAtomicsBuiltinFunction(JSContext* cx, AsmJSModule::Global& global, HandleValue globalVal) +ValidateAtomicsBuiltinFunction(JSContext* cx, const AsmJSModule::Global& global, HandleValue globalVal) { RootedValue v(cx); if (!GetDataProperty(cx, globalVal, cx->names().Atomics, &v))
@@ -457,7 +435,7 @@ ValidateAtomicsBuiltinFunction(JSContext* cx, AsmJSModule::Global& global, Handl } static bool -ValidateConstant(JSContext* cx, AsmJSModule::Global& global, HandleValue globalVal) +ValidateConstant(JSContext* cx, const AsmJSModule::Global& global, HandleValue globalVal) { RootedPropertyName field(cx, global.constantName()); RootedValue v(cx, globalVal);
@@ -486,12 +464,20 @@ ValidateConstant(JSContext* cx, AsmJSModule::Global& global, HandleValue globalV } static bool -LinkModuleToHeap(JSContext* cx, AsmJSModule& module, Handle<ArrayBufferObjectMaybeShared*> heap) +CheckBuffer(JSContext* cx, AsmJSModule& module, HandleValue bufferVal, + MutableHandle<ArrayBufferObjectMaybeShared*> buffer) { - uint32_t heapLength = heap->byteLength(); + if (module.isSharedView() && !IsSharedArrayBuffer(bufferVal)) + return LinkFail(cx, "shared views can only be constructed onto SharedArrayBuffer"); + + if (!module.isSharedView() && !IsArrayBuffer(bufferVal)) + return LinkFail(cx, "unshared views can only be constructed onto ArrayBuffer"); + + buffer.set(&AsAnyArrayBuffer(bufferVal)); + uint32_t heapLength = buffer->byteLength(); if (!IsValidAsmJSHeapLength(heapLength)) { - ScopedJSFreePtr<char> msg( + UniqueChars msg( JS_smprintf("ArrayBuffer byteLength 0x%x is not a valid heap length. The next " "valid length is 0x%x", heapLength,
@@ -503,7 +489,7 @@ LinkModuleToHeap(JSContext* cx, AsmJSModule& module, Handle if (heapLength < module.minHeapLength()) { - ScopedJSFreePtr<char> msg( + UniqueChars msg( JS_smprintf("ArrayBuffer byteLength of 0x%x is less than 0x%x (the size implied " "by const heap accesses and/or change-heap minimum-length requirements).", heapLength,
@@ -512,60 +498,48 @@ LinkModuleToHeap(JSContext* cx, AsmJSModule& module, Handle if (heapLength > module.maxHeapLength()) { - ScopedJSFreePtr<char> msg( + UniqueChars msg( JS_smprintf("ArrayBuffer byteLength 0x%x is greater than maximum length of 0x%x", heapLength, module.maxHeapLength())); return LinkFail(cx, msg.get()); } - // If we've generated the code with signal handlers in mind (for bounds - // checks on x64 and for interrupt callback requesting on all platforms), - // we need to be able to use signals at runtime. In particular, a module - // can have been created using signals and cached, and executed without - // signals activated.
- if (module.usesSignalHandlersForInterrupt() && !cx->canUseSignalHandlers()) - return LinkFail(cx, "Code generated with signal handlers but signals are deactivated"); + // Shell builtins may have disabled signal handlers since the module we're + // cloning was compiled. LookupAsmJSModuleInCache checks for signal handlers + // as well for the caching case. + if (module.wasm().compileArgs() != CompileArgs(cx)) + return LinkFail(cx, "Signals have been toggled since compilation"); - if (heap->is<ArrayBufferObject>()) { - Rooted<ArrayBufferObject*> abheap(cx, &heap->as<ArrayBufferObject>()); - if (!ArrayBufferObject::prepareForAsmJS(cx, abheap, module.usesSignalHandlersForOOB())) + if (buffer->is<ArrayBufferObject>()) { + Rooted<ArrayBufferObject*> abheap(cx, &buffer->as<ArrayBufferObject>()); + bool useSignalHandlers = module.wasm().compileArgs().useSignalHandlersForOOB; + if (!ArrayBufferObject::prepareForAsmJS(cx, abheap, useSignalHandlers)) return LinkFail(cx, "Unable to prepare ArrayBuffer for asm.js use"); } - module.initHeap(heap, cx); return true; } static bool DynamicallyLinkModule(JSContext* cx, const CallArgs& args, AsmJSModule& module) { - module.setIsDynamicallyLinked(cx->runtime()); - HandleValue globalVal = args.get(0); HandleValue importVal = args.get(1); HandleValue bufferVal = args.get(2); - Rooted<ArrayBufferObjectMaybeShared*> heap(cx); - if (module.hasArrayView()) { - if (module.isSharedView() && !IsSharedArrayBuffer(bufferVal)) - return LinkFail(cx, "shared views can only be constructed onto SharedArrayBuffer"); - if (!module.isSharedView() && !IsArrayBuffer(bufferVal)) - return LinkFail(cx, "unshared views can only be constructed onto ArrayBuffer"); - heap = &AsAnyArrayBuffer(bufferVal); - if (!LinkModuleToHeap(cx, module, heap)) - return false; - } + Rooted<ArrayBufferObjectMaybeShared*> buffer(cx); + if (module.hasArrayView() && !CheckBuffer(cx, module, bufferVal, &buffer)) + return false; - AutoObjectVector ffis(cx); + AutoVectorRooter<JSFunction*> ffis(cx); if (!ffis.resize(module.numFFIs())) return false; - for (unsigned i = 0; i < module.numGlobals(); i++) { - AsmJSModule::Global& global = module.global(i); + for (const AsmJSModule::Global& global : module.globals()) { switch (global.which()) { case AsmJSModule::Global::Variable: - if (!ValidateGlobalVariable(cx, module, global, importVal)) + if (!ValidateGlobalVariable(cx, global, module.wasm().globalData(), importVal)) return false; break; case AsmJSModule::Global::FFI:
@@ -604,15 +578,13 @@ DynamicallyLinkModule(JSContext* cx, const CallArgs& args, AsmJSModule& module) } } - for (unsigned i = 0; i < module.numExits(); i++) { - const AsmJSModule::Exit& exit = module.exit(i); - exit.datum(module).fun = &ffis[exit.ffiIndex()]->as<JSFunction>(); + AutoVectorRooter<JSFunction*> imports(cx); + for (const AsmJSModule::Import& import : module.imports()) { + if (!imports.append(ffis[import.ffiIndex()])) + return false; } - // See the comment in AllocateExecutableMemory.
- ExecutableAllocator::makeExecutable(module.codeBase(), module.codeBytes()); - - return true; + return module.wasm().dynamicallyLink(cx, buffer, imports); } static bool
@@ -641,10 +613,11 @@ ChangeHeap(JSContext* cx, AsmJSModule& module, const CallArgs& args) MOZ_ASSERT(IsValidAsmJSHeapLength(heapLength)); - if (!ArrayBufferObject::prepareForAsmJS(cx, newBuffer, module.usesSignalHandlersForOOB())) + bool useSignalHandlers = module.wasm().compileArgs().useSignalHandlersForOOB; + if (!ArrayBufferObject::prepareForAsmJS(cx, newBuffer, useSignalHandlers)) return false; - args.rval().set(BooleanValue(module.changeHeap(newBuffer, cx))); + args.rval().set(BooleanValue(module.wasm().changeHeap(newBuffer, cx))); return true; }
@@ -655,20 +628,13 @@ static const unsigned ASM_MODULE_SLOT = 0; static const unsigned ASM_EXPORT_INDEX_SLOT = 1; static unsigned -FunctionToExportedFunctionIndex(HandleFunction fun) +FunctionToExportIndex(HandleFunction fun) { MOZ_ASSERT(IsAsmJSFunction(fun)); Value v = fun->getExtendedSlot(ASM_EXPORT_INDEX_SLOT); return v.toInt32(); } -static const AsmJSModule::ExportedFunction& -FunctionToExportedFunction(HandleFunction fun, AsmJSModule& module) -{ - unsigned funIndex = FunctionToExportedFunctionIndex(fun); - return module.exportedFunction(funIndex); -} - static AsmJSModule& FunctionToEnclosingModule(HandleFunction fun) {
@@ -681,12 +647,15 @@ CallAsmJS(JSContext* cx, unsigned argc, Value* vp) { CallArgs callArgs = CallArgsFromVp(argc, vp); RootedFunction callee(cx, &callArgs.callee().as<JSFunction>()); - AsmJSModule& module = FunctionToEnclosingModule(callee); - const AsmJSModule::ExportedFunction& func = FunctionToExportedFunction(callee, module); // The heap-changing function is a special-case and is implemented by C++. - if (func.isChangeHeap()) - return ChangeHeap(cx, module, callArgs); + AsmJSModule& asmJSModule = FunctionToEnclosingModule(callee); + const AsmJSModule::Export& asmJSFunc = asmJSModule.exports()[FunctionToExportIndex(callee)]; + if (asmJSFunc.isChangeHeap()) + return ChangeHeap(cx, asmJSModule, callArgs); + + Module& module = asmJSModule.wasm(); + const Export& func = module.exports()[asmJSFunc.wasmIndex()]; // Enable/disable profiling in the asm.js module to match the current global // profiling state. Don't do this if the module is already active on the
@@ -703,7 +672,7 @@ CallAsmJS(JSContext* cx, unsigned argc, Value* vp) // registers and stack memory and then calls into the internal entry point. // The return value is stored in the first element of the array (which, // therefore, must have length >= 1). - js::Vector<AsmJSModule::EntryArg, 8> coercedArgs(cx); + Vector<Module::EntryArg, 8> coercedArgs(cx); if (!coercedArgs.resize(Max(1, func.sig().args().length()))) return false;
@@ -767,11 +736,11 @@ CallAsmJS(JSContext* cx, unsigned argc, Value* vp) // that the optimized asm.js-to-Ion FFI call path (which we want to be // very fast) can avoid doing so. The JitActivation is marked as // inactive so stack iteration will skip over it. - AsmJSActivation activation(cx, module); + AsmJSActivation activation(cx, asmJSModule); JitActivation jitActivation(cx, /* active */ false); // Call the per-exported-function trampoline created by GenerateEntry.
- AsmJSModule::CodePtr enter = module.entryTrampoline(func); + Module::EntryFuncPtr enter = module.entryTrampoline(func); if (!CALL_GENERATED_2(enter, coercedArgs.begin(), module.globalData())) return false; }
@@ -826,11 +795,14 @@ CallAsmJS(JSContext* cx, unsigned argc, Value* vp) } static JSFunction* -NewExportedFunction(JSContext* cx, const AsmJSModule::ExportedFunction& func, +NewExportedFunction(JSContext* cx, const AsmJSModule& module, const AsmJSModule::Export& func, HandleObject moduleObj, unsigned exportIndex) { + unsigned numArgs = func.isChangeHeap() + ? 1 + : module.wasm().exports()[func.wasmIndex()].sig().args().length(); + RootedPropertyName name(cx, func.name()); - unsigned numArgs = func.isChangeHeap() ? 1 : func.sig().args().length(); JSFunction* fun = NewNativeConstructor(cx, CallAsmJS, numArgs, name, gc::AllocKind::FUNCTION_EXTENDED, GenericObject,
@@ -850,10 +822,12 @@ HandleDynamicLinkFailure(JSContext* cx, const CallArgs& args, AsmJSModule& modul if (cx->isExceptionPending()) return false; + ScriptSource* source = module.scriptSource(); + // Source discarding is allowed to affect JS semantics because it is never // enabled for normal JS content. - bool haveSource = module.scriptSource()->hasSourceData(); - if (!haveSource && !JSScript::loadSource(cx, module.scriptSource(), &haveSource)) + bool haveSource = source->hasSourceData(); + if (!haveSource && !JSScript::loadSource(cx, source, &haveSource)) return false; if (!haveSource) { JS_ReportError(cx, "asm.js link failure with source discarding enabled");
@@ -862,7 +836,7 @@ HandleDynamicLinkFailure(JSContext* cx, const CallArgs& args, AsmJSModul uint32_t begin = module.srcBodyStart(); // starts right after 'use asm' uint32_t end = module.srcEndBeforeCurly(); - Rooted<JSFlatString*> src(cx, module.scriptSource()->substringDontDeflate(cx, begin, end)); + Rooted<JSFlatString*> src(cx, source->substringDontDeflate(cx, begin, end)); if (!src) return false;
@@ -884,8 +858,8 @@ HandleDynamicLinkFailure(JSContext* cx, const CallArgs& args, AsmJSModul formals.infallibleAppend(module.bufferArgumentName()); CompileOptions options(cx); - options.setMutedErrors(module.scriptSource()->mutedErrors()) - .setFile(module.scriptSource()->filename()) + options.setMutedErrors(source->mutedErrors()) + .setFile(source->filename()) .setNoScriptRval(false); // The exported function inherits an implicit strict context if the module
@@ -910,112 +884,27 @@ HandleDynamicLinkFailure(JSContext* cx, const CallArgs& args, AsmJSModul return Invoke(cx, args, args.isConstructing() ? CONSTRUCT : NO_CONSTRUCT); } -#ifdef MOZ_VTUNE -static bool -SendFunctionsToVTune(JSContext* cx, AsmJSModule& module) -{ - uint8_t* base = module.codeBase(); - - for (unsigned i = 0; i < module.numProfiledFunctions(); i++) { - const AsmJSModule::ProfiledFunction& func = module.profiledFunction(i); - - uint8_t* start = base + func.pod.startCodeOffset; - uint8_t* end = base + func.pod.endCodeOffset; - MOZ_ASSERT(end >= start); - - unsigned method_id = iJIT_GetNewMethodID(); - if (method_id == 0) - return false; - - JSAutoByteString bytes; - const char* method_name = AtomToPrintableString(cx, func.name, &bytes); - if (!method_name) - return false; - - iJIT_Method_Load method; - method.method_id = method_id; - method.method_name = const_cast<char*>(method_name); - method.method_load_address = (void*)start; - method.method_size = unsigned(end - start); - method.line_number_size = 0; - method.line_number_table = nullptr; - method.class_id = 0; - method.class_file_name = nullptr; - method.source_file_name = nullptr; - - iJIT_NotifyEvent(iJVM_EVENT_TYPE_METHOD_LOAD_FINISHED, (void*)&method); - } - - return true; -} -#endif - -#ifdef JS_ION_PERF -static bool -SendFunctionsToPerf(JSContext* cx, AsmJSModule& module) -{ - if (!PerfFuncEnabled()) - return true; - - uintptr_t base = (uintptr_t) module.codeBase(); - const char* filename = module.scriptSource()->filename(); - - for (unsigned i = 0; i < module.numProfiledFunctions(); i++) { - const AsmJSModule::ProfiledFunction& func = module.profiledFunction(i); - uintptr_t start = base + (unsigned long) func.pod.startCodeOffset; - uintptr_t end = base + (unsigned long) func.pod.endCodeOffset; - MOZ_ASSERT(end >= start); - size_t size = end - start; - - JSAutoByteString bytes; - const char* name = AtomToPrintableString(cx, func.name, &bytes); - if (!name) - return false; - - writePerfSpewerAsmJSFunctionMap(start, size, filename, func.pod.lineno, - func.pod.columnIndex, name); - } - - return true; -} -#endif - -static bool -SendModuleToAttachedProfiler(JSContext* cx, AsmJSModule& module) -{ -#if defined(MOZ_VTUNE) - if (IsVTuneProfilingActive() && !SendFunctionsToVTune(cx, module)) - return false; -#endif -#if defined(JS_ION_PERF) - if (!SendFunctionsToPerf(cx, module)) - return false; -#endif - - return true; -} - - static JSObject* -CreateExportObject(JSContext* cx, Handle<AsmJSModuleObject*> moduleObj) +CreateExportObject(JSContext* cx, HandleAsmJSModule moduleObj) { AsmJSModule& module = moduleObj->module(); + const AsmJSModule::ExportVector& exports = module.exports(); - if (module.numExportedFunctions() == 1) { - const AsmJSModule::ExportedFunction& func = module.exportedFunction(0); + if (exports.length() == 1) { + const AsmJSModule::Export& func = exports[0]; if (!func.maybeFieldName()) - return NewExportedFunction(cx, func, moduleObj, 0); + return NewExportedFunction(cx, module, func, moduleObj, 0); } - gc::AllocKind allocKind = gc::GetGCObjectKind(module.numExportedFunctions()); + gc::AllocKind allocKind = gc::GetGCObjectKind(exports.length()); RootedPlainObject obj(cx, NewBuiltinClassInstance<PlainObject>(cx, allocKind)); if (!obj) return nullptr; - for (unsigned i = 0; i < module.numExportedFunctions(); i++) { - const AsmJSModule::ExportedFunction& func = module.exportedFunction(i); + for (unsigned i = 0; i < exports.length(); i++) { + const AsmJSModule::Export& func = exports[i]; - RootedFunction fun(cx, NewExportedFunction(cx, func, moduleObj, i)); + RootedFunction fun(cx, NewExportedFunction(cx, module, func, moduleObj, i)); if (!fun) return nullptr;
@@ -1048,19 +937,23 @@ LinkAsmJS(JSContext* cx, unsigned argc, JS::Value* vp) RootedFunction fun(cx, &args.callee().as<JSFunction>()); Rooted<AsmJSModuleObject*> moduleObj(cx, &ModuleFunctionToModuleObject(fun)); - // When a module is linked, it is dynamically specialized to the given // arguments (buffer, ffis). Thus, if the module is linked again (it is just // a function so it can be called multiple times), we need to clone a new // module. - if (moduleObj->module().isDynamicallyLinked() && !CloneModule(cx, &moduleObj)) - return false; + if (moduleObj->module().wasm().dynamicallyLinked()) { + Rooted<AsmJSModuleObject*> clone(cx, AsmJSModuleObject::create(cx)); + if (!clone) + return false; + + if (!moduleObj->module().clone(cx, clone)) + return false; + + moduleObj = clone; + } AsmJSModule& module = moduleObj->module(); - AutoFlushICache afc("LinkAsmJS"); - module.setAutoFlushICacheRange(); - // Link the module by performing the link-time validation checks in the // asm.js spec and then patching the generated module to associate it with // the given heap (ArrayBuffer) and a new global data segment (the closure
@@ -1072,11 +965,6 @@ LinkAsmJS(JSContext* cx, unsigned argc, JS::Value* vp) return HandleDynamicLinkFailure(cx, args, module, name); } - // Notify profilers so that asm.js generated code shows up with JS function - // names and lines in native (i.e., not SPS) profilers. - if (!SendModuleToAttachedProfiler(cx, module)) - return false; - // Link-time validation succeeded, so wrap all the exported functions with // CallAsmJS builtins that trampoline into the generated code. JSObject* obj = CreateExportObject(cx, moduleObj);
@@ -1252,7 +1140,7 @@ js::IsAsmJSModuleLoadedFromCache(JSContext* cx, unsigned argc, Value* vp) return false; } - bool loadedFromCache = ModuleFunctionToModuleObject(fun).module().loadedFromCache(); + bool loadedFromCache = ModuleFunctionToModuleObject(fun).module().wasm().loadedFromCache(); args.rval().set(BooleanValue(loadedFromCache)); return true;
@@ -1277,7 +1165,7 @@ JSString* js::AsmJSFunctionToString(JSContext* cx, HandleFunction fun) { AsmJSModule& module = FunctionToEnclosingModule(fun); - const AsmJSModule::ExportedFunction& f = FunctionToExportedFunction(fun, module); + const AsmJSModule::Export& f = module.exports()[FunctionToExportIndex(fun)]; uint32_t begin = module.srcStart() + f.startOffsetInModule(); uint32_t end = module.srcStart() + f.endOffsetInModule();
diff --git a/js/src/asmjs/AsmJSModule.cpp b/js/src/asmjs/AsmJSModule.cpp
index 350827042ee..79b041ff6b9 100644
--- a/js/src/asmjs/AsmJSModule.cpp
+++ b/js/src/asmjs/AsmJSModule.cpp
@@ -18,937 +18,103 @@ #include "asmjs/AsmJSModule.h" -#include "mozilla/BinarySearch.h" #include "mozilla/Compression.h" #include "mozilla/EnumeratedRange.h" #include "mozilla/PodOperations.h" -#include "mozilla/TaggedAnonymousMemory.h" -#include "mozilla/Vector.h" -#include "jslibmath.h" -#include "jsmath.h" #include "jsprf.h" -#include "builtin/AtomicsObject.h" +#include "asmjs/WasmSerialize.h" #include "frontend/Parser.h" -#include "jit/IonCode.h" -#ifdef JS_ION_PERF -# include "jit/PerfSpewer.h" -#endif #include "js/Class.h" -#include "js/Conversions.h" #include "js/MemoryMetrics.h" -#include "vm/Time.h" #include "jsobjinlines.h" #include "frontend/ParseNode-inl.h" -#include "jit/MacroAssembler-inl.h" -#include "vm/ArrayBufferObject-inl.h" -#include "vm/Stack-inl.h" using namespace js; +using namespace js::frontend; using namespace js::jit; using namespace js::wasm; -using namespace js::frontend; -using mozilla::BinarySearch; -using mozilla::Compression::LZ4; -using
mozilla::MakeEnumeratedRange; -using mozilla::MallocSizeOf; -using mozilla::PodCopy; -using mozilla::PodEqual; using mozilla::PodZero; -using mozilla::Swap; -using JS::GenericNaN; - -static uint8_t* -AllocateExecutableMemory(ExclusiveContext* cx, size_t bytes) -{ - // On most platforms, this will allocate RWX memory. On iOS, or when - // --non-writable-jitcode is used, this will allocate RW memory. In this - // case, DynamicallyLinkModule will reprotect the code as RX. - unsigned permissions = - ExecutableAllocator::initialProtectionFlags(ExecutableAllocator::Writable); - void* p = AllocateExecutableMemory(nullptr, bytes, permissions, "asm-js-code", AsmJSPageSize); - if (!p) - ReportOutOfMemory(cx); - return (uint8_t*)p; -} +using mozilla::PodEqual; +using mozilla::Compression::LZ4; AsmJSModule::AsmJSModule(ScriptSource* scriptSource, uint32_t srcStart, uint32_t srcBodyStart, - bool strict, bool canUseSignalHandlers) - : srcStart_(srcStart), + bool strict) + : scriptSource_(scriptSource), + srcStart_(srcStart), srcBodyStart_(srcBodyStart), - scriptSource_(scriptSource), globalArgumentName_(nullptr), importArgumentName_(nullptr), - bufferArgumentName_(nullptr), - code_(nullptr), - interruptExit_(nullptr), - prevLinked_(nullptr), - nextLinked_(nullptr), - dynamicallyLinked_(false), - loadedFromCache_(false), - profilingEnabled_(false), - interrupted_(false) + bufferArgumentName_(nullptr) { mozilla::PodZero(&pod); - pod.globalBytes_ = sInitialGlobalDataBytes; pod.minHeapLength_ = RoundUpToNextValidAsmJSHeapLength(0); pod.maxHeapLength_ = 0x80000000; pod.strict_ = strict; - pod.canUseSignalHandlers_ = canUseSignalHandlers; // AsmJSCheckedImmediateRange should be defined to be at most the minimum // heap length so that offsets can be folded into bounds checks. 
MOZ_ASSERT(pod.minHeapLength_ - AsmJSCheckedImmediateRange <= pod.minHeapLength_); - - scriptSource_->incref(); -} - -AsmJSModule::~AsmJSModule() -{ - MOZ_ASSERT(!interrupted_); - - scriptSource_->decref(); - - if (code_) { - for (unsigned i = 0; i < numExits(); i++) { - AsmJSModule::ExitDatum& exitDatum = exit(i).datum(*this); - if (!exitDatum.baselineScript) - continue; - - jit::DependentAsmJSModuleExit exit(this, i); - exitDatum.baselineScript->removeDependentAsmJSModule(exit); - } - - DeallocateExecutableMemory(code_, pod.totalBytes_, AsmJSPageSize); - } - - if (prevLinked_) - *prevLinked_ = nextLinked_; - if (nextLinked_) - nextLinked_->prevLinked_ = prevLinked_; } void AsmJSModule::trace(JSTracer* trc) { + if (wasm_) + wasm_->trace(trc); for (Global& global : globals_) global.trace(trc); - for (Exit& exit : exits_) { - if (exit.datum(*this).fun) - TraceEdge(trc, &exit.datum(*this).fun, "asm.js imported function"); - } - for (ExportedFunction& exp : exports_) + for (Export& exp : exports_) exp.trace(trc); - for (Name& name : names_) - TraceManuallyBarrieredEdge(trc, &name.name(), "asm.js module function name"); -#if defined(MOZ_VTUNE) || defined(JS_ION_PERF) - for (ProfiledFunction& profiledFunction : profiledFunctions_) - profiledFunction.trace(trc); -#endif if (globalArgumentName_) TraceManuallyBarrieredEdge(trc, &globalArgumentName_, "asm.js global argument name"); if (importArgumentName_) TraceManuallyBarrieredEdge(trc, &importArgumentName_, "asm.js import argument name"); if (bufferArgumentName_) TraceManuallyBarrieredEdge(trc, &bufferArgumentName_, "asm.js buffer argument name"); - if (maybeHeap_) - TraceEdge(trc, &maybeHeap_, "asm.js heap"); } void AsmJSModule::addSizeOfMisc(MallocSizeOf mallocSizeOf, size_t* asmJSModuleCode, size_t* asmJSModuleData) { - *asmJSModuleCode += pod.totalBytes_; + if (wasm_) + wasm_->addSizeOfMisc(mallocSizeOf, asmJSModuleCode, asmJSModuleData); + + if (linkData_) + *asmJSModuleData += linkData_->sizeOfExcludingThis(mallocSizeOf); + *asmJSModuleData += mallocSizeOf(this) + globals_.sizeOfExcludingThis(mallocSizeOf) + - exits_.sizeOfExcludingThis(mallocSizeOf) + - exports_.sizeOfExcludingThis(mallocSizeOf) + - callSites_.sizeOfExcludingThis(mallocSizeOf) + - codeRanges_.sizeOfExcludingThis(mallocSizeOf) + - names_.sizeOfExcludingThis(mallocSizeOf) + - heapAccesses_.sizeOfExcludingThis(mallocSizeOf) + -#if defined(MOZ_VTUNE) || defined(JS_ION_PERF) - profiledFunctions_.sizeOfExcludingThis(mallocSizeOf) + -#endif - staticLinkData_.sizeOfExcludingThis(mallocSizeOf); + imports_.sizeOfExcludingThis(mallocSizeOf) + + exports_.sizeOfExcludingThis(mallocSizeOf); } -struct CallSiteRetAddrOffset -{ - const CallSiteVector& callSites; - explicit CallSiteRetAddrOffset(const CallSiteVector& callSites) : callSites(callSites) {} - uint32_t operator[](size_t index) const { - return callSites[index].returnAddressOffset(); - } -}; - -const CallSite* -AsmJSModule::lookupCallSite(void* returnAddress) const -{ - MOZ_ASSERT(isFinished()); - - uint32_t target = ((uint8_t*)returnAddress) - code_; - size_t lowerBound = 0; - size_t upperBound = callSites_.length(); - - size_t match; - if (!BinarySearch(CallSiteRetAddrOffset(callSites_), lowerBound, upperBound, target, &match)) - return nullptr; - - return &callSites_[match]; -} - -namespace js { - -// Create an ordering on CodeRange and pc offsets suitable for BinarySearch. -// Stick these in the same namespace as AsmJSModule so that argument-dependent -// lookup will find it. 
-bool -operator==(size_t pcOffset, const AsmJSModule::CodeRange& rhs) -{ - return pcOffset >= rhs.begin() && pcOffset < rhs.end(); -} -bool -operator<=(const AsmJSModule::CodeRange& lhs, const AsmJSModule::CodeRange& rhs) -{ - return lhs.begin() <= rhs.begin(); -} -bool -operator<(size_t pcOffset, const AsmJSModule::CodeRange& rhs) -{ - return pcOffset < rhs.begin(); -} - -} // namespace js - -const AsmJSModule::CodeRange* -AsmJSModule::lookupCodeRange(void* pc) const -{ - MOZ_ASSERT(isFinished()); - - uint32_t target = ((uint8_t*)pc) - code_; - size_t lowerBound = 0; - size_t upperBound = codeRanges_.length(); - - size_t match; - if (!BinarySearch(codeRanges_, lowerBound, upperBound, target, &match)) - return nullptr; - - return &codeRanges_[match]; -} - -struct HeapAccessOffset -{ - const HeapAccessVector& accesses; - explicit HeapAccessOffset(const HeapAccessVector& accesses) : accesses(accesses) {} - uintptr_t operator[](size_t index) const { - return accesses[index].insnOffset(); - } -}; - -const HeapAccess* -AsmJSModule::lookupHeapAccess(void* pc) const -{ - MOZ_ASSERT(isFinished()); - MOZ_ASSERT(containsFunctionPC(pc)); - - uint32_t target = ((uint8_t*)pc) - code_; - size_t lowerBound = 0; - size_t upperBound = heapAccesses_.length(); - - size_t match; - if (!BinarySearch(HeapAccessOffset(heapAccesses_), lowerBound, upperBound, target, &match)) - return nullptr; - - return &heapAccesses_[match]; -} - -bool -AsmJSModule::finish(ExclusiveContext* cx, TokenStream& tokenStream, MacroAssembler& masm) +void +AsmJSModule::finish(Module* wasm, wasm::UniqueStaticLinkData linkData, + uint32_t endBeforeCurly, uint32_t endAfterCurly) { MOZ_ASSERT(!isFinished()); - uint32_t endBeforeCurly = tokenStream.currentToken().pos.end; - TokenPos pos; - if (!tokenStream.peekTokenPos(&pos, TokenStream::Operand)) - return false; - uint32_t endAfterCurly = pos.end; + wasm_.reset(wasm); + linkData_ = Move(linkData); + MOZ_ASSERT(endBeforeCurly >= srcBodyStart_); MOZ_ASSERT(endAfterCurly >= srcBodyStart_); pod.srcLength_ = endBeforeCurly - srcStart_; pod.srcLengthWithRightBrace_ = endAfterCurly - srcStart_; - // Start global data on a new page so JIT code may be given independent - // protection flags. - pod.codeBytes_ = AlignBytes(masm.bytesNeeded(), AsmJSPageSize); - MOZ_ASSERT(pod.functionBytes_ <= pod.codeBytes_); - - // The entire region is allocated via mmap/VirtualAlloc which requires - // units of pages. - pod.totalBytes_ = AlignBytes(pod.codeBytes_ + pod.globalBytes_, AsmJSPageSize); - - MOZ_ASSERT(!code_); - code_ = AllocateExecutableMemory(cx, pod.totalBytes_); - if (!code_) - return false; - - // Delay flushing until dynamic linking. The flush-inhibited range is set within - // masm.executableCopy. - AutoFlushICache afc("CheckModule", /* inhibit = */ true); - - // Copy the code from the MacroAssembler into its final resting place in the - // AsmJSModule. - MOZ_ASSERT(uintptr_t(code_) % AsmJSPageSize == 0); - masm.executableCopy(code_); - - // c.f. JitCode::copyFrom - MOZ_ASSERT(masm.jumpRelocationTableBytes() == 0); - MOZ_ASSERT(masm.dataRelocationTableBytes() == 0); - MOZ_ASSERT(masm.preBarrierTableBytes() == 0); - MOZ_ASSERT(!masm.hasSelfReference()); - - // Heap-access metadata used for link-time patching and fault-handling. - heapAccesses_ = masm.extractHeapAccesses(); - - // Call-site metadata used for stack unwinding. 
- const CallSiteAndTargetVector& callSites = masm.callSites(); - if (!callSites_.appendAll(callSites)) - return false; - - // Absolute link metadata: absolute addresses that refer to some fixed - // address in the address space. - AbsoluteLinkArray& absoluteLinks = staticLinkData_.absoluteLinks; - for (size_t i = 0; i < masm.numAsmJSAbsoluteLinks(); i++) { - AsmJSAbsoluteLink src = masm.asmJSAbsoluteLink(i); - if (!absoluteLinks[src.target].append(src.patchAt.offset())) - return false; - } - - // Relative link metadata: absolute addresses that refer to another point within - // the asm.js module. - - // CodeLabels are used for switch cases and loads from floating-point / - // SIMD values in the constant pool. - for (size_t i = 0; i < masm.numCodeLabels(); i++) { - CodeLabel cl = masm.codeLabel(i); - RelativeLink link(RelativeLink::CodeLabel); - link.patchAtOffset = masm.labelToPatchOffset(*cl.patchAt()); - link.targetOffset = cl.target()->offset(); - if (!staticLinkData_.relativeLinks.append(link)) - return false; - } - -#if defined(JS_CODEGEN_X86) - // Global data accesses in x86 need to be patched with the absolute - // address of the global. Globals are allocated sequentially after the - // code section so we can just use an RelativeLink. - for (size_t i = 0; i < masm.numAsmJSGlobalAccesses(); i++) { - AsmJSGlobalAccess a = masm.asmJSGlobalAccess(i); - RelativeLink link(RelativeLink::RawPointer); - link.patchAtOffset = masm.labelToPatchOffset(a.patchAt); - link.targetOffset = offsetOfGlobalData() + a.globalDataOffset; - if (!staticLinkData_.relativeLinks.append(link)) - return false; - } -#endif - -#if defined(JS_CODEGEN_MIPS32) || defined(JS_CODEGEN_MIPS64) - // On MIPS we need to update all the long jumps because they contain an - // absolute adress. The values are correctly patched for the current address - // space, but not after serialization or profiling-mode toggling. - for (size_t i = 0; i < masm.numLongJumps(); i++) { - size_t off = masm.longJump(i); - RelativeLink link(RelativeLink::InstructionImmediate); - link.patchAtOffset = off; - link.targetOffset = Assembler::ExtractInstructionImmediate(code_ + off) - uintptr_t(code_); - if (!staticLinkData_.relativeLinks.append(link)) - return false; - } -#endif - -#if defined(JS_CODEGEN_X64) - // Global data accesses on x64 use rip-relative addressing and thus do - // not need patching after deserialization. - for (size_t i = 0; i < masm.numAsmJSGlobalAccesses(); i++) { - AsmJSGlobalAccess a = masm.asmJSGlobalAccess(i); - masm.patchAsmJSGlobalAccess(a.patchAt, code_, globalData(), a.globalDataOffset); - } -#endif - - return true; -} - -void -AsmJSModule::setAutoFlushICacheRange() -{ MOZ_ASSERT(isFinished()); - AutoFlushICache::setRange(uintptr_t(code_), pod.codeBytes_); -} - -static void -AsmJSReportOverRecursed() -{ - JSContext* cx = JSRuntime::innermostAsmJSActivation()->cx(); - ReportOverRecursed(cx); -} - -static void -OnDetached() -{ - // See hasDetachedHeap comment in LinkAsmJS. 
- JSContext* cx = JSRuntime::innermostAsmJSActivation()->cx(); - JS_ReportErrorNumber(cx, GetErrorMessage, nullptr, JSMSG_OUT_OF_MEMORY); -} - -static void -OnOutOfBounds() -{ - JSContext* cx = JSRuntime::innermostAsmJSActivation()->cx(); - JS_ReportErrorNumber(cx, GetErrorMessage, nullptr, JSMSG_BAD_INDEX); -} - -static void -OnImpreciseConversion() -{ - JSContext* cx = JSRuntime::innermostAsmJSActivation()->cx(); - JS_ReportErrorNumber(cx, GetErrorMessage, nullptr, JSMSG_SIMD_FAILED_CONVERSION); -} - -static bool -AsmJSHandleExecutionInterrupt() -{ - AsmJSActivation* act = JSRuntime::innermostAsmJSActivation(); - act->module().setInterrupted(true); - bool ret = CheckForInterrupt(act->cx()); - act->module().setInterrupted(false); - return ret; -} - -static int32_t -CoerceInPlace_ToInt32(MutableHandleValue val) -{ - JSContext* cx = JSRuntime::innermostAsmJSActivation()->cx(); - - int32_t i32; - if (!ToInt32(cx, val, &i32)) - return false; - val.set(Int32Value(i32)); - - return true; -} - -static int32_t -CoerceInPlace_ToNumber(MutableHandleValue val) -{ - JSContext* cx = JSRuntime::innermostAsmJSActivation()->cx(); - - double dbl; - if (!ToNumber(cx, val, &dbl)) - return false; - val.set(DoubleValue(dbl)); - - return true; -} - -static bool -TryEnablingJit(JSContext* cx, AsmJSModule& module, HandleFunction fun, uint32_t exitIndex, - int32_t argc, Value* argv) -{ - if (!fun->hasScript()) - return true; - - // Test if the function is JIT compiled. - JSScript* script = fun->nonLazyScript(); - if (!script->hasBaselineScript()) { - MOZ_ASSERT(!script->hasIonScript()); - return true; - } - - // Don't enable jit entry when we have a pending ion builder. - // Take the interpreter path which will link it and enable - // the fast path on the next call. - if (script->baselineScript()->hasPendingIonBuilder()) - return true; - - // Currently we can't rectify arguments. Therefore disabling if argc is too low. - if (fun->nargs() > size_t(argc)) - return true; - - // Ensure the argument types are included in the argument TypeSets stored in - // the TypeScript. This is necessary for Ion, because the FFI exit will - // use the skip-arg-checks entry point. - // - // Note that the TypeScript is never discarded while the script has a - // BaselineScript, so if those checks hold now they must hold at least until - // the BaselineScript is discarded and when that happens the FFI exit is - // patched back. - if (!TypeScript::ThisTypes(script)->hasType(TypeSet::UndefinedType())) - return true; - for (uint32_t i = 0; i < fun->nargs(); i++) { - StackTypeSet* typeset = TypeScript::ArgTypes(script, i); - TypeSet::Type type = TypeSet::DoubleType(); - if (!argv[i].isDouble()) - type = TypeSet::PrimitiveType(argv[i].extractNonDoubleType()); - if (!typeset->hasType(type)) - return true; - } - - // The exit may have become optimized while executing the FFI. 
- AsmJSModule::Exit& exit = module.exit(exitIndex); - if (exit.isOptimized(module)) - return true; - - BaselineScript* baselineScript = script->baselineScript(); - if (!baselineScript->addDependentAsmJSModule(cx, DependentAsmJSModuleExit(&module, exitIndex))) - return false; - - exit.optimize(module, baselineScript); - return true; -} - -static bool -InvokeFromAsmJS(AsmJSActivation* activation, int32_t exitIndex, int32_t argc, Value* argv, - MutableHandleValue rval) -{ - JSContext* cx = activation->cx(); - AsmJSModule& module = activation->module(); - - RootedFunction fun(cx, module.exit(exitIndex).datum(module).fun); - RootedValue fval(cx, ObjectValue(*fun)); - if (!Invoke(cx, UndefinedValue(), fval, argc, argv, rval)) - return false; - - return TryEnablingJit(cx, module, fun, exitIndex, argc, argv); -} - -// Use an int32_t return type instead of bool since bool does not have a -// specified width and the caller is assuming a word-sized return. -static int32_t -InvokeFromAsmJS_Ignore(int32_t exitIndex, int32_t argc, Value* argv) -{ - AsmJSActivation* activation = JSRuntime::innermostAsmJSActivation(); - JSContext* cx = activation->cx(); - - RootedValue rval(cx); - return InvokeFromAsmJS(activation, exitIndex, argc, argv, &rval); -} - -// Use an int32_t return type instead of bool since bool does not have a -// specified width and the caller is assuming a word-sized return. -static int32_t -InvokeFromAsmJS_ToInt32(int32_t exitIndex, int32_t argc, Value* argv) -{ - AsmJSActivation* activation = JSRuntime::innermostAsmJSActivation(); - JSContext* cx = activation->cx(); - - RootedValue rval(cx); - if (!InvokeFromAsmJS(activation, exitIndex, argc, argv, &rval)) - return false; - - int32_t i32; - if (!ToInt32(cx, rval, &i32)) - return false; - - argv[0] = Int32Value(i32); - return true; -} - -// Use an int32_t return type instead of bool since bool does not have a -// specified width and the caller is assuming a word-sized return. 
-static int32_t -InvokeFromAsmJS_ToNumber(int32_t exitIndex, int32_t argc, Value* argv) -{ - AsmJSActivation* activation = JSRuntime::innermostAsmJSActivation(); - JSContext* cx = activation->cx(); - - RootedValue rval(cx); - if (!InvokeFromAsmJS(activation, exitIndex, argc, argv, &rval)) - return false; - - double dbl; - if (!ToNumber(cx, rval, &dbl)) - return false; - - argv[0] = DoubleValue(dbl); - return true; -} - -#if defined(JS_CODEGEN_ARM) -extern "C" { - -extern MOZ_EXPORT int64_t -__aeabi_idivmod(int, int); - -extern MOZ_EXPORT int64_t -__aeabi_uidivmod(int, int); - -} -#endif - -template <class F> -static inline void* -FuncCast(F* pf) -{ - return JS_FUNC_TO_DATA_PTR(void*, pf); -} - -static void* -RedirectCall(void* fun, ABIFunctionType type) -{ -#ifdef JS_SIMULATOR - fun = Simulator::RedirectNativeFunction(fun, type); -#endif - return fun; -} - -static void* -AddressOf(SymbolicAddress imm, ExclusiveContext* cx) -{ - switch (imm) { - case SymbolicAddress::Runtime: - return cx->runtimeAddressForJit(); - case SymbolicAddress::RuntimeInterruptUint32: - return cx->runtimeAddressOfInterruptUint32(); - case SymbolicAddress::StackLimit: - return cx->stackLimitAddressForJitCode(StackForUntrustedScript); - case SymbolicAddress::ReportOverRecursed: - return RedirectCall(FuncCast(AsmJSReportOverRecursed), Args_General0); - case SymbolicAddress::OnDetached: - return RedirectCall(FuncCast(OnDetached), Args_General0); - case SymbolicAddress::OnOutOfBounds: - return RedirectCall(FuncCast(OnOutOfBounds), Args_General0); - case SymbolicAddress::OnImpreciseConversion: - return RedirectCall(FuncCast(OnImpreciseConversion), Args_General0); - case SymbolicAddress::HandleExecutionInterrupt: - return RedirectCall(FuncCast(AsmJSHandleExecutionInterrupt), Args_General0); - case SymbolicAddress::InvokeFromAsmJS_Ignore: - return RedirectCall(FuncCast(InvokeFromAsmJS_Ignore), Args_General3); - case SymbolicAddress::InvokeFromAsmJS_ToInt32: - return RedirectCall(FuncCast(InvokeFromAsmJS_ToInt32), Args_General3); - case SymbolicAddress::InvokeFromAsmJS_ToNumber: - return RedirectCall(FuncCast(InvokeFromAsmJS_ToNumber), Args_General3); - case SymbolicAddress::CoerceInPlace_ToInt32: - return RedirectCall(FuncCast(CoerceInPlace_ToInt32), Args_General1); - case SymbolicAddress::CoerceInPlace_ToNumber: - return RedirectCall(FuncCast(CoerceInPlace_ToNumber), Args_General1); - case SymbolicAddress::ToInt32: - return RedirectCall(FuncCast(JS::ToInt32), Args_Int_Double); -#if defined(JS_CODEGEN_ARM) - case SymbolicAddress::aeabi_idivmod: - return RedirectCall(FuncCast(__aeabi_idivmod), Args_General2); - case SymbolicAddress::aeabi_uidivmod: - return RedirectCall(FuncCast(__aeabi_uidivmod), Args_General2); - case SymbolicAddress::AtomicCmpXchg: - return RedirectCall(FuncCast(js::atomics_cmpxchg_asm_callout), Args_General4); - case SymbolicAddress::AtomicXchg: - return RedirectCall(FuncCast(js::atomics_xchg_asm_callout), Args_General3); - case SymbolicAddress::AtomicFetchAdd: - return RedirectCall(FuncCast(js::atomics_add_asm_callout), Args_General3); - case SymbolicAddress::AtomicFetchSub: - return RedirectCall(FuncCast(js::atomics_sub_asm_callout), Args_General3); - case SymbolicAddress::AtomicFetchAnd: - return RedirectCall(FuncCast(js::atomics_and_asm_callout), Args_General3); - case SymbolicAddress::AtomicFetchOr: - return RedirectCall(FuncCast(js::atomics_or_asm_callout), Args_General3); - case SymbolicAddress::AtomicFetchXor: - return RedirectCall(FuncCast(js::atomics_xor_asm_callout), Args_General3); -#endif - case
SymbolicAddress::ModD: - return RedirectCall(FuncCast(NumberMod), Args_Double_DoubleDouble); - case SymbolicAddress::SinD: -#ifdef _WIN64 - // Workaround a VS 2013 sin issue, see math_sin_uncached. - return RedirectCall(FuncCast(js::math_sin_uncached), Args_Double_Double); -#else - return RedirectCall(FuncCast(sin), Args_Double_Double); -#endif - case SymbolicAddress::CosD: - return RedirectCall(FuncCast(cos), Args_Double_Double); - case SymbolicAddress::TanD: - return RedirectCall(FuncCast(tan), Args_Double_Double); - case SymbolicAddress::ASinD: - return RedirectCall(FuncCast(asin), Args_Double_Double); - case SymbolicAddress::ACosD: - return RedirectCall(FuncCast(acos), Args_Double_Double); - case SymbolicAddress::ATanD: - return RedirectCall(FuncCast(atan), Args_Double_Double); - case SymbolicAddress::CeilD: - return RedirectCall(FuncCast(ceil), Args_Double_Double); - case SymbolicAddress::CeilF: - return RedirectCall(FuncCast(ceilf), Args_Float32_Float32); - case SymbolicAddress::FloorD: - return RedirectCall(FuncCast(floor), Args_Double_Double); - case SymbolicAddress::FloorF: - return RedirectCall(FuncCast(floorf), Args_Float32_Float32); - case SymbolicAddress::ExpD: - return RedirectCall(FuncCast(exp), Args_Double_Double); - case SymbolicAddress::LogD: - return RedirectCall(FuncCast(log), Args_Double_Double); - case SymbolicAddress::PowD: - return RedirectCall(FuncCast(ecmaPow), Args_Double_DoubleDouble); - case SymbolicAddress::ATan2D: - return RedirectCall(FuncCast(ecmaAtan2), Args_Double_DoubleDouble); - case SymbolicAddress::Limit: - break; - } - - MOZ_CRASH("Bad SymbolicAddress"); -} - -void -AsmJSModule::staticallyLink(ExclusiveContext* cx) -{ - MOZ_ASSERT(isFinished()); - - // Process staticLinkData_ - - MOZ_ASSERT(staticLinkData_.pod.interruptExitOffset != 0); - interruptExit_ = code_ + staticLinkData_.pod.interruptExitOffset; - - MOZ_ASSERT(staticLinkData_.pod.outOfBoundsExitOffset != 0); - outOfBoundsExit_ = code_ + staticLinkData_.pod.outOfBoundsExitOffset; - - for (size_t i = 0; i < staticLinkData_.relativeLinks.length(); i++) { - RelativeLink link = staticLinkData_.relativeLinks[i]; - uint8_t* patchAt = code_ + link.patchAtOffset; - uint8_t* target = code_ + link.targetOffset; - - // In the case of long-jumps on MIPS and possibly future cases, a - // RelativeLink is used to patch a pointer to the function entry. If - // profiling is enabled (by cloning a module with profiling enabled), - // the target should be the profiling entry. - if (profilingEnabled_) { - const CodeRange* codeRange = lookupCodeRange(target); - if (codeRange && codeRange->isFunction() && link.targetOffset == codeRange->entry()) - target = code_ + codeRange->profilingEntry(); - } - - if (link.isRawPointerPatch()) - *(uint8_t**)(patchAt) = target; - else - Assembler::PatchInstructionImmediate(patchAt, PatchedImmPtr(target)); - } - - for (auto imm : MakeEnumeratedRange(SymbolicAddress::Limit)) { - const OffsetVector& offsets = staticLinkData_.absoluteLinks[imm]; - for (size_t i = 0; i < offsets.length(); i++) { - uint8_t* patchAt = code_ + offsets[i]; - void* target = AddressOf(imm, cx); - - // Builtin calls are another case where, when profiling is enabled, - // we must point to the profiling entry. 
- Builtin builtin; - if (profilingEnabled_ && ImmediateIsBuiltin(imm, &builtin)) { - const CodeRange* codeRange = lookupCodeRange(patchAt); - if (codeRange->isFunction()) - target = code_ + staticLinkData_.pod.builtinThunkOffsets[builtin]; - } - - Assembler::PatchDataWithValueCheck(CodeLocationLabel(patchAt), - PatchedImmPtr(target), - PatchedImmPtr((void*)-1)); - } - } - - // Initialize global data segment - - *(double*)(globalData() + NaN64GlobalDataOffset) = GenericNaN(); - *(float*)(globalData() + NaN32GlobalDataOffset) = GenericNaN(); - - for (size_t tableIndex = 0; tableIndex < staticLinkData_.funcPtrTables.length(); tableIndex++) { - FuncPtrTable& funcPtrTable = staticLinkData_.funcPtrTables[tableIndex]; - const OffsetVector& offsets = funcPtrTable.elemOffsets(); - auto array = reinterpret_cast<void**>(globalData() + funcPtrTable.globalDataOffset()); - for (size_t elemIndex = 0; elemIndex < offsets.length(); elemIndex++) { - uint8_t* target = code_ + offsets[elemIndex]; - if (profilingEnabled_) - target = code_ + lookupCodeRange(target)->profilingEntry(); - array[elemIndex] = target; - } - } - - for (AsmJSModule::Exit& exit : exits_) - exit.initDatum(*this); -} - -void -AsmJSModule::initHeap(Handle<ArrayBufferObjectMaybeShared*> heap, JSContext* cx) -{ - MOZ_ASSERT_IF(heap->is<ArrayBufferObject>(), heap->as<ArrayBufferObject>().isAsmJS()); - MOZ_ASSERT(IsValidAsmJSHeapLength(heap->byteLength())); - MOZ_ASSERT(dynamicallyLinked_); - MOZ_ASSERT(!maybeHeap_); - - maybeHeap_ = heap; - // heapDatum() may point to shared memory but that memory is only - // accessed from maybeHeap(), which wraps it, and from - // hasDetachedHeap(), which checks it for null. - heapDatum() = heap->dataPointerEither().unwrap(/*safe - explained above*/); - -#if defined(JS_CODEGEN_X86) - uint8_t* heapOffset = heap->dataPointerEither().unwrap(/*safe - used for value*/); - uint32_t heapLength = heap->byteLength(); - for (unsigned i = 0; i < heapAccesses_.length(); i++) { - const HeapAccess& access = heapAccesses_[i]; - // An access is out-of-bounds iff - // ptr + offset + data-type-byte-size > heapLength - // i.e. ptr > heapLength - data-type-byte-size - offset. - // data-type-byte-size and offset are already included in the addend - // so we just have to add the heap length here. - if (access.hasLengthCheck()) - X86Encoding::AddInt32(access.patchLengthAt(code_), heapLength); - void* addr = access.patchHeapPtrImmAt(code_); - uint32_t disp = reinterpret_cast<uint32_t>(X86Encoding::GetPointer(addr)); - MOZ_ASSERT(disp <= INT32_MAX); - X86Encoding::SetPointer(addr, (void*)(heapOffset + disp)); - } -#elif defined(JS_CODEGEN_X64) - // Even with signal handling being used for most bounds checks, there may be - // atomic operations that depend on explicit checks. - // - // If we have any explicit bounds checks, we need to patch the heap length - // checks at the right places. All accesses that have been recorded are the - // only ones that need bound checks (see also - // CodeGeneratorX64::visitAsmJS{Load,Store,CompareExchange,Exchange,AtomicBinop}Heap) - uint32_t heapLength = heap->byteLength(); - for (size_t i = 0; i < heapAccesses_.length(); i++) { - const HeapAccess& access = heapAccesses_[i]; - // See comment above for x86 codegen.
- if (access.hasLengthCheck()) - X86Encoding::AddInt32(access.patchLengthAt(code_), heapLength); - } -#elif defined(JS_CODEGEN_ARM) || defined(JS_CODEGEN_MIPS32) || defined(JS_CODEGEN_MIPS64) - uint32_t heapLength = heap->byteLength(); - for (unsigned i = 0; i < heapAccesses_.length(); i++) { - jit::Assembler::UpdateBoundsCheck(heapLength, - (jit::Instruction*)(heapAccesses_[i].insnOffset() + code_)); - } -#endif -} - -void -AsmJSModule::restoreHeapToInitialState(ArrayBufferObjectMaybeShared* maybePrevBuffer) -{ -#if defined(JS_CODEGEN_X86) - if (maybePrevBuffer) { - // Subtract out the base-pointer added by AsmJSModule::initHeap. - uint8_t* ptrBase = maybePrevBuffer->dataPointerEither().unwrap(/*safe - used for value*/); - uint32_t heapLength = maybePrevBuffer->byteLength(); - for (unsigned i = 0; i < heapAccesses_.length(); i++) { - const HeapAccess& access = heapAccesses_[i]; - // Subtract the heap length back out, leaving the raw displacement in place. - if (access.hasLengthCheck()) - X86Encoding::AddInt32(access.patchLengthAt(code_), -heapLength); - void* addr = access.patchHeapPtrImmAt(code_); - uint8_t* ptr = reinterpret_cast(X86Encoding::GetPointer(addr)); - MOZ_ASSERT(ptr >= ptrBase); - X86Encoding::SetPointer(addr, (void*)(ptr - ptrBase)); - } - } -#elif defined(JS_CODEGEN_X64) - if (maybePrevBuffer) { - uint32_t heapLength = maybePrevBuffer->byteLength(); - for (unsigned i = 0; i < heapAccesses_.length(); i++) { - const HeapAccess& access = heapAccesses_[i]; - // See comment above for x86 codegen. - if (access.hasLengthCheck()) - X86Encoding::AddInt32(access.patchLengthAt(code_), -heapLength); - } - } -#endif - - maybeHeap_ = nullptr; - heapDatum() = nullptr; -} - -void -AsmJSModule::restoreToInitialState(ArrayBufferObjectMaybeShared* maybePrevBuffer, - uint8_t* prevCode, - ExclusiveContext* cx) -{ -#ifdef DEBUG - // Put the absolute links back to -1 so PatchDataWithValueCheck assertions - // in staticallyLink are valid. - for (auto imm : MakeEnumeratedRange(SymbolicAddress::Limit)) { - void* callee = AddressOf(imm, cx); - - // If we are in profiling mode, calls to builtins will have been patched - // by setProfilingEnabled to be calls to thunks. - Builtin builtin; - void* profilingCallee = profilingEnabled_ && ImmediateIsBuiltin(imm, &builtin) - ? prevCode + staticLinkData_.pod.builtinThunkOffsets[builtin] - : nullptr; - - const AsmJSModule::OffsetVector& offsets = staticLinkData_.absoluteLinks[imm]; - for (size_t i = 0; i < offsets.length(); i++) { - uint8_t* caller = code_ + offsets[i]; - void* originalValue = profilingCallee && !lookupCodeRange(caller)->isThunk() - ? profilingCallee - : callee; - Assembler::PatchDataWithValueCheck(CodeLocationLabel(caller), - PatchedImmPtr((void*)-1), - PatchedImmPtr(originalValue)); - } - } -#endif - - restoreHeapToInitialState(maybePrevBuffer); -} - -namespace { - -class MOZ_STACK_CLASS AutoMutateCode -{ - AutoWritableJitCode awjc_; - AutoFlushICache afc_; - - public: - AutoMutateCode(JSContext* cx, AsmJSModule& module, const char* name) - : awjc_(cx->runtime(), module.codeBase(), module.codeBytes()), - afc_(name) - { - module.setAutoFlushICacheRange(); - } -}; - -} // namespace - -bool -AsmJSModule::detachHeap(JSContext* cx) -{ - MOZ_ASSERT(isDynamicallyLinked()); - MOZ_ASSERT(maybeHeap_); - - // Content JS should not be able to run (and detach heap) from within an - // interrupt callback, but in case it does, fail. Otherwise, the heap can - // change at an arbitrary instruction and break the assumption below. 
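// Aside: a hedged sketch of the x86 link/unlink symmetry implemented by
// initHeap and restoreHeapToInitialState above. Linking rewrites each heap
// immediate from a raw displacement to base + displacement; unlinking
// subtracts the base back out, restoring the cache-invariant form.
static void*
LinkHeapImmediate(uint8_t* base, uintptr_t disp)
{
    return base + disp;             // what X86Encoding::SetPointer stores
}

static uintptr_t
UnlinkHeapImmediate(uint8_t* base, void* patched)
{
    uint8_t* p = static_cast<uint8_t*>(patched);
    return uintptr_t(p - base);     // mirrors MOZ_ASSERT(ptr >= ptrBase)
}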
- if (interrupted_) { - JS_ReportError(cx, "attempt to detach from inside interrupt handler"); - return false; - } - - // Even if this->active(), to reach here, the activation must have called - // out via an FFI stub. FFI stubs check if heapDatum() is null on reentry - // and throw an exception if so. - MOZ_ASSERT_IF(active(), activation()->exitReason().kind() == ExitReason::Jit || - activation()->exitReason().kind() == ExitReason::Slow); - - AutoMutateCode amc(cx, *this, "AsmJSModule::detachHeap"); - restoreHeapToInitialState(maybeHeap_); - - MOZ_ASSERT(hasDetachedHeap()); - return true; } bool js::OnDetachAsmJSArrayBuffer(JSContext* cx, Handle buffer) { - for (AsmJSModule* m = cx->runtime()->linkedAsmJSModules; m; m = m->nextLinked()) { - if (buffer == m->maybeHeapBufferObject() && !m->detachHeap(cx)) + for (Module* m = cx->runtime()->linkedWasmModules; m; m = m->nextLinked()) { + if (buffer == m->maybeBuffer() && !m->detachHeap(cx)) return false; } return true; @@ -957,13 +123,17 @@ js::OnDetachAsmJSArrayBuffer(JSContext* cx, Handle buffer) static void AsmJSModuleObject_finalize(FreeOp* fop, JSObject* obj) { - fop->delete_(&obj->as().module()); + AsmJSModuleObject& moduleObj = obj->as(); + if (moduleObj.hasModule()) + fop->delete_(&moduleObj.module()); } static void AsmJSModuleObject_trace(JSTracer* trc, JSObject* obj) { - obj->as().module().trace(trc); + AsmJSModuleObject& moduleObj = obj->as(); + if (moduleObj.hasModule()) + moduleObj.module().trace(trc); } const Class AsmJSModuleObject::class_ = { @@ -985,17 +155,29 @@ const Class AsmJSModuleObject::class_ = { }; AsmJSModuleObject* -AsmJSModuleObject::create(ExclusiveContext* cx, ScopedJSDeletePtr* module) +AsmJSModuleObject::create(ExclusiveContext* cx) { AutoSetNewObjectMetadata metadata(cx); JSObject* obj = NewObjectWithGivenProto(cx, &AsmJSModuleObject::class_, nullptr); if (!obj) return nullptr; - AsmJSModuleObject* nobj = &obj->as(); + return &obj->as(); +} - nobj->setReservedSlot(MODULE_SLOT, PrivateValue(module->forget())); +bool +AsmJSModuleObject::hasModule() const +{ + MOZ_ASSERT(is()); + return !getReservedSlot(MODULE_SLOT).isUndefined(); +} - return nobj; +void +AsmJSModuleObject::setModule(AsmJSModule* newModule) +{ + MOZ_ASSERT(is()); + if (hasModule()) + js_delete(&module()); + setReservedSlot(MODULE_SLOT, PrivateValue(newModule)); } AsmJSModule& @@ -1005,264 +187,6 @@ AsmJSModuleObject::module() const return *(AsmJSModule*)getReservedSlot(MODULE_SLOT).toPrivate(); } -static inline uint8_t* -WriteBytes(uint8_t* dst, const void* src, size_t nbytes) -{ - memcpy(dst, src, nbytes); - return dst + nbytes; -} - -static inline const uint8_t* -ReadBytes(const uint8_t* src, void* dst, size_t nbytes) -{ - memcpy(dst, src, nbytes); - return src + nbytes; -} - -template -static inline uint8_t* -WriteScalar(uint8_t* dst, T t) -{ - memcpy(dst, &t, sizeof(t)); - return dst + sizeof(t); -} - -template -static inline const uint8_t* -ReadScalar(const uint8_t* src, T* dst) -{ - memcpy(dst, src, sizeof(*dst)); - return src + sizeof(*dst); -} - -static size_t -SerializedNameSize(PropertyName* name) -{ - size_t s = sizeof(uint32_t); - if (name) - s += name->length() * (name->hasLatin1Chars() ? 
sizeof(Latin1Char) : sizeof(char16_t)); - return s; -} - -size_t -AsmJSModule::Name::serializedSize() const -{ - return SerializedNameSize(name_); -} - -static uint8_t* -SerializeName(uint8_t* cursor, PropertyName* name) -{ - MOZ_ASSERT_IF(name, !name->empty()); - if (name) { - static_assert(JSString::MAX_LENGTH <= INT32_MAX, "String length must fit in 31 bits"); - uint32_t length = name->length(); - uint32_t lengthAndEncoding = (length << 1) | uint32_t(name->hasLatin1Chars()); - cursor = WriteScalar(cursor, lengthAndEncoding); - JS::AutoCheckCannotGC nogc; - if (name->hasLatin1Chars()) - cursor = WriteBytes(cursor, name->latin1Chars(nogc), length * sizeof(Latin1Char)); - else - cursor = WriteBytes(cursor, name->twoByteChars(nogc), length * sizeof(char16_t)); - } else { - cursor = WriteScalar(cursor, 0); - } - return cursor; -} - -uint8_t* -AsmJSModule::Name::serialize(uint8_t* cursor) const -{ - return SerializeName(cursor, name_); -} - -template -static const uint8_t* -DeserializeChars(ExclusiveContext* cx, const uint8_t* cursor, size_t length, PropertyName** name) -{ - Vector tmp(cx); - CharT* src; - if ((size_t(cursor) & (sizeof(CharT) - 1)) != 0) { - // Align 'src' for AtomizeChars. - if (!tmp.resize(length)) - return nullptr; - memcpy(tmp.begin(), cursor, length * sizeof(CharT)); - src = tmp.begin(); - } else { - src = (CharT*)cursor; - } - - JSAtom* atom = AtomizeChars(cx, src, length); - if (!atom) - return nullptr; - - *name = atom->asPropertyName(); - return cursor + length * sizeof(CharT); -} - -static const uint8_t* -DeserializeName(ExclusiveContext* cx, const uint8_t* cursor, PropertyName** name) -{ - uint32_t lengthAndEncoding; - cursor = ReadScalar(cursor, &lengthAndEncoding); - - uint32_t length = lengthAndEncoding >> 1; - if (length == 0) { - *name = nullptr; - return cursor; - } - - bool latin1 = lengthAndEncoding & 0x1; - return latin1 - ? 
DeserializeChars(cx, cursor, length, name) - : DeserializeChars(cx, cursor, length, name); -} - -const uint8_t* -AsmJSModule::Name::deserialize(ExclusiveContext* cx, const uint8_t* cursor) -{ - return DeserializeName(cx, cursor, &name_); -} - -bool -AsmJSModule::Name::clone(ExclusiveContext* cx, Name* out) const -{ - out->name_ = name_; - return true; -} - -template -size_t -SerializedVectorSize(const mozilla::Vector& vec) -{ - size_t size = sizeof(uint32_t); - for (size_t i = 0; i < vec.length(); i++) - size += vec[i].serializedSize(); - return size; -} - -template -uint8_t* -SerializeVector(uint8_t* cursor, const mozilla::Vector& vec) -{ - cursor = WriteScalar(cursor, vec.length()); - for (size_t i = 0; i < vec.length(); i++) - cursor = vec[i].serialize(cursor); - return cursor; -} - -template -const uint8_t* -DeserializeVector(ExclusiveContext* cx, const uint8_t* cursor, - mozilla::Vector* vec) -{ - uint32_t length; - cursor = ReadScalar(cursor, &length); - if (!vec->resize(length)) - return nullptr; - for (size_t i = 0; i < vec->length(); i++) { - if (!(cursor = (*vec)[i].deserialize(cx, cursor))) - return nullptr; - } - return cursor; -} - -template -bool -CloneVector(ExclusiveContext* cx, const mozilla::Vector& in, - mozilla::Vector* out) -{ - if (!out->resize(in.length())) - return false; - for (size_t i = 0; i < in.length(); i++) { - if (!in[i].clone(cx, &(*out)[i])) - return false; - } - return true; -} - -template -size_t -SerializedPodVectorSize(const mozilla::Vector& vec) -{ - return sizeof(uint32_t) + - vec.length() * sizeof(T); -} - -template -uint8_t* -SerializePodVector(uint8_t* cursor, const mozilla::Vector& vec) -{ - cursor = WriteScalar(cursor, vec.length()); - cursor = WriteBytes(cursor, vec.begin(), vec.length() * sizeof(T)); - return cursor; -} - -template -const uint8_t* -DeserializePodVector(ExclusiveContext* cx, const uint8_t* cursor, - mozilla::Vector* vec) -{ - uint32_t length; - cursor = ReadScalar(cursor, &length); - if (!vec->resize(length)) - return nullptr; - cursor = ReadBytes(cursor, vec->begin(), length * sizeof(T)); - return cursor; -} - -template -bool -ClonePodVector(ExclusiveContext* cx, const mozilla::Vector& in, - mozilla::Vector* out) -{ - if (!out->resize(in.length())) - return false; - PodCopy(out->begin(), in.begin(), in.length()); - return true; -} - -size_t -SerializedSigSize(const MallocSig& sig) -{ - return sizeof(ExprType) + - SerializedPodVectorSize(sig.args()); -} - -uint8_t* -SerializeSig(uint8_t* cursor, const MallocSig& sig) -{ - cursor = WriteScalar(cursor, sig.ret()); - cursor = SerializePodVector(cursor, sig.args()); - return cursor; -} - -const uint8_t* -DeserializeSig(ExclusiveContext* cx, const uint8_t* cursor, MallocSig* sig) -{ - ExprType ret; - cursor = ReadScalar(cursor, &ret); - - MallocSig::ArgVector args; - cursor = DeserializePodVector(cx, cursor, &args); - if (!cursor) - return nullptr; - - sig->init(Move(args), ret); - return cursor; -} - -bool -CloneSig(ExclusiveContext* cx, const MallocSig& sig, MallocSig* out) -{ - MallocSig::ArgVector args; - if (!ClonePodVector(cx, sig.args(), &args)) - return false; - - out->init(Move(args), sig.ret()); - return true; -} - uint8_t* AsmJSModule::Global::serialize(uint8_t* cursor) const { @@ -1287,746 +211,134 @@ AsmJSModule::Global::deserialize(ExclusiveContext* cx, const uint8_t* cursor) } bool -AsmJSModule::Global::clone(ExclusiveContext* cx, Global* out) const +AsmJSModule::Global::clone(JSContext* cx, Global* out) const { *out = *this; return true; } uint8_t* 
-AsmJSModule::Exit::serialize(uint8_t* cursor) const -{ - cursor = SerializeSig(cursor, sig_); - cursor = WriteBytes(cursor, &pod, sizeof(pod)); - return cursor; -} - -size_t -AsmJSModule::Exit::serializedSize() const -{ - return SerializedSigSize(sig_) + - sizeof(pod); -} - -const uint8_t* -AsmJSModule::Exit::deserialize(ExclusiveContext* cx, const uint8_t* cursor) -{ - (cursor = DeserializeSig(cx, cursor, &sig_)) && - (cursor = ReadBytes(cursor, &pod, sizeof(pod))); - return cursor; -} - -bool -AsmJSModule::Exit::clone(ExclusiveContext* cx, Exit* out) const -{ - out->pod = pod; - return CloneSig(cx, sig_, &out->sig_); -} - -uint8_t* -AsmJSModule::ExportedFunction::serialize(uint8_t* cursor) const +AsmJSModule::Export::serialize(uint8_t* cursor) const { cursor = SerializeName(cursor, name_); cursor = SerializeName(cursor, maybeFieldName_); - cursor = SerializeSig(cursor, sig_); cursor = WriteBytes(cursor, &pod, sizeof(pod)); return cursor; } size_t -AsmJSModule::ExportedFunction::serializedSize() const +AsmJSModule::Export::serializedSize() const { return SerializedNameSize(name_) + SerializedNameSize(maybeFieldName_) + - SerializedSigSize(sig_) + sizeof(pod); } const uint8_t* -AsmJSModule::ExportedFunction::deserialize(ExclusiveContext* cx, const uint8_t* cursor) +AsmJSModule::Export::deserialize(ExclusiveContext* cx, const uint8_t* cursor) { (cursor = DeserializeName(cx, cursor, &name_)) && (cursor = DeserializeName(cx, cursor, &maybeFieldName_)) && - (cursor = DeserializeSig(cx, cursor, &sig_)) && (cursor = ReadBytes(cursor, &pod, sizeof(pod))); return cursor; } bool -AsmJSModule::ExportedFunction::clone(ExclusiveContext* cx, ExportedFunction* out) const +AsmJSModule::Export::clone(JSContext* cx, Export* out) const { out->name_ = name_; out->maybeFieldName_ = maybeFieldName_; out->pod = pod; - return CloneSig(cx, sig_, &out->sig_); -} - -AsmJSModule::CodeRange::CodeRange(uint32_t lineNumber, AsmJSFunctionOffsets offsets) - : nameIndex_(UINT32_MAX), - lineNumber_(lineNumber) -{ - PodZero(&u); // zero padding for Valgrind - u.kind_ = Function; - - MOZ_ASSERT(offsets.nonProfilingEntry - offsets.begin <= UINT8_MAX); - begin_ = offsets.begin; - u.func.beginToEntry_ = offsets.nonProfilingEntry - begin_; - - MOZ_ASSERT(offsets.nonProfilingEntry < offsets.profilingReturn); - MOZ_ASSERT(offsets.profilingReturn - offsets.profilingJump <= UINT8_MAX); - MOZ_ASSERT(offsets.profilingReturn - offsets.profilingEpilogue <= UINT8_MAX); - profilingReturn_ = offsets.profilingReturn; - u.func.profilingJumpToProfilingReturn_ = profilingReturn_ - offsets.profilingJump; - u.func.profilingEpilogueToProfilingReturn_ = profilingReturn_ - offsets.profilingEpilogue; - - MOZ_ASSERT(offsets.nonProfilingEntry < offsets.end); - end_ = offsets.end; -} - -AsmJSModule::CodeRange::CodeRange(Kind kind, AsmJSOffsets offsets) - : nameIndex_(0), - lineNumber_(0), - begin_(offsets.begin), - profilingReturn_(0), - end_(offsets.end) -{ - PodZero(&u); // zero padding for Valgrind - u.kind_ = kind; - - MOZ_ASSERT(begin_ <= end_); - MOZ_ASSERT(u.kind_ == Entry || u.kind_ == Inline); -} - -AsmJSModule::CodeRange::CodeRange(Kind kind, AsmJSProfilingOffsets offsets) - : nameIndex_(0), - lineNumber_(0), - begin_(offsets.begin), - profilingReturn_(offsets.profilingReturn), - end_(offsets.end) -{ - PodZero(&u); // zero padding for Valgrind - u.kind_ = kind; - - MOZ_ASSERT(begin_ < profilingReturn_); - MOZ_ASSERT(profilingReturn_ < end_); - MOZ_ASSERT(u.kind_ == JitFFI || u.kind_ == SlowFFI || u.kind_ == Interrupt); -} - 
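// Aside: a hedged sketch of the delta compression these CodeRange
// constructors rely on. Only begin/profilingReturn/end are stored as full
// offsets; nearby points are uint8_t deltas, which the asserts above
// guarantee fit.
struct PackedFuncRange {
    uint32_t begin, profilingReturn, end;
    uint8_t  beginToEntry;              // entry lies just after begin
    uint8_t  profilingJumpToReturn;     // jump lies just before the return
    uint8_t  profilingEpilogueToReturn;

    uint32_t entry() const             { return begin + beginToEntry; }
    uint32_t profilingJump() const     { return profilingReturn - profilingJumpToReturn; }
    uint32_t profilingEpilogue() const { return profilingReturn - profilingEpilogueToReturn; }
};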
-AsmJSModule::CodeRange::CodeRange(Builtin builtin, AsmJSProfilingOffsets offsets) - : nameIndex_(0), - lineNumber_(0), - begin_(offsets.begin), - profilingReturn_(offsets.profilingReturn), - end_(offsets.end) -{ - PodZero(&u); // zero padding for Valgrind - u.kind_ = Thunk; - u.thunk.target_ = uint16_t(builtin); - - MOZ_ASSERT(begin_ < profilingReturn_); - MOZ_ASSERT(profilingReturn_ < end_); -} - -#if defined(MOZ_VTUNE) || defined(JS_ION_PERF) -size_t -AsmJSModule::ProfiledFunction::serializedSize() const -{ - return SerializedNameSize(name) + - sizeof(pod); -} - -uint8_t* -AsmJSModule::ProfiledFunction::serialize(uint8_t* cursor) const -{ - cursor = SerializeName(cursor, name); - cursor = WriteBytes(cursor, &pod, sizeof(pod)); - return cursor; -} - -const uint8_t* -AsmJSModule::ProfiledFunction::deserialize(ExclusiveContext* cx, const uint8_t* cursor) -{ - (cursor = DeserializeName(cx, cursor, &name)) && - (cursor = ReadBytes(cursor, &pod, sizeof(pod))); - return cursor; -} -#endif - -size_t -AsmJSModule::AbsoluteLinkArray::serializedSize() const -{ - size_t size = 0; - for (const OffsetVector& offsets : *this) - size += SerializedPodVectorSize(offsets); - return size; -} - -uint8_t* -AsmJSModule::AbsoluteLinkArray::serialize(uint8_t* cursor) const -{ - for (const OffsetVector& offsets : *this) - cursor = SerializePodVector(cursor, offsets); - return cursor; -} - -const uint8_t* -AsmJSModule::AbsoluteLinkArray::deserialize(ExclusiveContext* cx, const uint8_t* cursor) -{ - for (OffsetVector& offsets : *this) { - cursor = DeserializePodVector(cx, cursor, &offsets); - if (!cursor) - return nullptr; - } - return cursor; -} - -bool -AsmJSModule::AbsoluteLinkArray::clone(ExclusiveContext* cx, AbsoluteLinkArray* out) const -{ - for (auto imm : MakeEnumeratedRange(SymbolicAddress::Limit)) { - if (!ClonePodVector(cx, (*this)[imm], &(*out)[imm])) - return false; - } return true; } -size_t -AsmJSModule::AbsoluteLinkArray::sizeOfExcludingThis(MallocSizeOf mallocSizeOf) const -{ - size_t size = 0; - for (const OffsetVector& offsets : *this) - size += offsets.sizeOfExcludingThis(mallocSizeOf); - return size; -} - -size_t -AsmJSModule::FuncPtrTable::serializedSize() const -{ - return sizeof(pod) + - SerializedPodVectorSize(elemOffsets_); -} - -uint8_t* -AsmJSModule::FuncPtrTable::serialize(uint8_t* cursor) const -{ - cursor = WriteBytes(cursor, &pod, sizeof(pod)); - cursor = SerializePodVector(cursor, elemOffsets_); - return cursor; -} - -const uint8_t* -AsmJSModule::FuncPtrTable::deserialize(ExclusiveContext* cx, const uint8_t* cursor) -{ - (cursor = ReadBytes(cursor, &pod, sizeof(pod))) && - (cursor = DeserializePodVector(cx, cursor, &elemOffsets_)); - return cursor; -} - -bool -AsmJSModule::FuncPtrTable::clone(ExclusiveContext* cx, FuncPtrTable* out) const -{ - out->pod = pod; - return ClonePodVector(cx, elemOffsets_, &out->elemOffsets_); -} - -size_t -AsmJSModule::FuncPtrTable::sizeOfExcludingThis(MallocSizeOf mallocSizeOf) const -{ - return elemOffsets_.sizeOfExcludingThis(mallocSizeOf); -} - -size_t -AsmJSModule::StaticLinkData::serializedSize() const -{ - return sizeof(pod) + - SerializedPodVectorSize(relativeLinks) + - absoluteLinks.serializedSize() + - SerializedVectorSize(funcPtrTables); -} - -uint8_t* -AsmJSModule::StaticLinkData::serialize(uint8_t* cursor) const -{ - cursor = WriteBytes(cursor, &pod, sizeof(pod)); - cursor = SerializePodVector(cursor, relativeLinks); - cursor = absoluteLinks.serialize(cursor); - cursor = SerializeVector(cursor, funcPtrTables); - return cursor; -} - -const 
uint8_t* -AsmJSModule::StaticLinkData::deserialize(ExclusiveContext* cx, const uint8_t* cursor) -{ - (cursor = ReadBytes(cursor, &pod, sizeof(pod))) && - (cursor = DeserializePodVector(cx, cursor, &relativeLinks)) && - (cursor = absoluteLinks.deserialize(cx, cursor)) && - (cursor = DeserializeVector(cx, cursor, &funcPtrTables)); - return cursor; -} - -bool -AsmJSModule::StaticLinkData::clone(ExclusiveContext* cx, StaticLinkData* out) const -{ - out->pod = pod; - return ClonePodVector(cx, relativeLinks, &out->relativeLinks) && - absoluteLinks.clone(cx, &out->absoluteLinks) && - CloneVector(cx, funcPtrTables, &out->funcPtrTables); -} - -size_t -AsmJSModule::StaticLinkData::sizeOfExcludingThis(MallocSizeOf mallocSizeOf) const -{ - size_t size = relativeLinks.sizeOfExcludingThis(mallocSizeOf) + - absoluteLinks.sizeOfExcludingThis(mallocSizeOf) + - funcPtrTables.sizeOfExcludingThis(mallocSizeOf); - - for (const FuncPtrTable& table : funcPtrTables) - size += table.sizeOfExcludingThis(mallocSizeOf); - - return size; -} - size_t AsmJSModule::serializedSize() const { - return sizeof(pod) + - pod.codeBytes_ + + MOZ_ASSERT(isFinished()); + return wasm_->serializedSize() + + linkData_->serializedSize() + + sizeof(pod) + + SerializedVectorSize(globals_) + + SerializedPodVectorSize(imports_) + + SerializedVectorSize(exports_) + SerializedNameSize(globalArgumentName_) + SerializedNameSize(importArgumentName_) + - SerializedNameSize(bufferArgumentName_) + - SerializedVectorSize(globals_) + - SerializedVectorSize(exits_) + - SerializedVectorSize(exports_) + - SerializedPodVectorSize(callSites_) + - SerializedPodVectorSize(codeRanges_) + - SerializedVectorSize(names_) + - SerializedPodVectorSize(heapAccesses_) + -#if defined(MOZ_VTUNE) || defined(JS_ION_PERF) - SerializedVectorSize(profiledFunctions_) + -#endif - staticLinkData_.serializedSize(); + SerializedNameSize(bufferArgumentName_); } uint8_t* AsmJSModule::serialize(uint8_t* cursor) const { - MOZ_ASSERT(!dynamicallyLinked_); - MOZ_ASSERT(!loadedFromCache_); - MOZ_ASSERT(!profilingEnabled_); - MOZ_ASSERT(!interrupted_); - + MOZ_ASSERT(isFinished()); + cursor = wasm_->serialize(cursor); + cursor = linkData_->serialize(cursor); cursor = WriteBytes(cursor, &pod, sizeof(pod)); - cursor = WriteBytes(cursor, code_, pod.codeBytes_); + cursor = SerializeVector(cursor, globals_); + cursor = SerializePodVector(cursor, imports_); + cursor = SerializeVector(cursor, exports_); cursor = SerializeName(cursor, globalArgumentName_); cursor = SerializeName(cursor, importArgumentName_); cursor = SerializeName(cursor, bufferArgumentName_); - cursor = SerializeVector(cursor, globals_); - cursor = SerializeVector(cursor, exits_); - cursor = SerializeVector(cursor, exports_); - cursor = SerializePodVector(cursor, callSites_); - cursor = SerializePodVector(cursor, codeRanges_); - cursor = SerializeVector(cursor, names_); - cursor = SerializePodVector(cursor, heapAccesses_); -#if defined(MOZ_VTUNE) || defined(JS_ION_PERF) - cursor = SerializeVector(cursor, profiledFunctions_); -#endif - cursor = staticLinkData_.serialize(cursor); return cursor; } const uint8_t* AsmJSModule::deserialize(ExclusiveContext* cx, const uint8_t* cursor) { + linkData_ = cx->make_unique(); + if (!linkData_) + return nullptr; + // To avoid GC-during-deserialization corner cases, prevent atoms from // being collected. 
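// Aside: a hedged sketch of the cursor idiom chained below. Each step
// returns the advanced cursor on success and nullptr on failure, so the
// && chain short-circuits and a single null check suffices. (ReadU32 is
// illustrative and assumes <string.h>/<stdint.h>; the patch's real
// helpers are ReadScalar/ReadBytes and friends.)
static const uint8_t*
ReadU32(const uint8_t* cursor, uint32_t* out)
{
    memcpy(out, cursor, sizeof(*out));
    return cursor + sizeof(*out);
}
// Usage shape:
//   (cursor = ReadU32(cursor, &a)) &&
//   (cursor = stepThatMayFail(cursor)) &&   // nullptr stops the chain
//   (cursor = ReadU32(cursor, &b));
//   if (!cursor) return nullptr;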
AutoKeepAtoms aka(cx->perThreadData); + (cursor = Module::deserialize(cx, cursor, &wasm_)) && + (cursor = linkData_->deserialize(cx, cursor)) && (cursor = ReadBytes(cursor, &pod, sizeof(pod))) && - (code_ = AllocateExecutableMemory(cx, pod.totalBytes_)) && - (cursor = ReadBytes(cursor, code_, pod.codeBytes_)) && + (cursor = DeserializeVector(cx, cursor, &globals_)) && + (cursor = DeserializePodVector(cx, cursor, &imports_)) && + (cursor = DeserializeVector(cx, cursor, &exports_)) && (cursor = DeserializeName(cx, cursor, &globalArgumentName_)) && (cursor = DeserializeName(cx, cursor, &importArgumentName_)) && - (cursor = DeserializeName(cx, cursor, &bufferArgumentName_)) && - (cursor = DeserializeVector(cx, cursor, &globals_)) && - (cursor = DeserializeVector(cx, cursor, &exits_)) && - (cursor = DeserializeVector(cx, cursor, &exports_)) && - (cursor = DeserializePodVector(cx, cursor, &callSites_)) && - (cursor = DeserializePodVector(cx, cursor, &codeRanges_)) && - (cursor = DeserializeVector(cx, cursor, &names_)) && - (cursor = DeserializePodVector(cx, cursor, &heapAccesses_)) && -#if defined(MOZ_VTUNE) || defined(JS_ION_PERF) - (cursor = DeserializeVector(cx, cursor, &profiledFunctions_)) && -#endif - (cursor = staticLinkData_.deserialize(cx, cursor)); - - loadedFromCache_ = true; + (cursor = DeserializeName(cx, cursor, &bufferArgumentName_)); return cursor; } bool -AsmJSModule::clone(JSContext* cx, ScopedJSDeletePtr* moduleOut) const +AsmJSModule::clone(JSContext* cx, HandleAsmJSModule obj) const { - *moduleOut = cx->new_(scriptSource_, srcStart_, srcBodyStart_, pod.strict_, - pod.canUseSignalHandlers_); - if (!*moduleOut) + auto out = cx->new_(scriptSource(), srcStart_, srcBodyStart_, pod.strict_); + if (!out) return false; - AsmJSModule& out = **moduleOut; + obj->setModule(out); - // Mirror the order of serialize/deserialize in cloning: - - out.pod = pod; - - out.code_ = AllocateExecutableMemory(cx, pod.totalBytes_); - if (!out.code_) + out->wasm_ = wasm_->clone(cx, *linkData_); + if (!out->wasm_) return false; - memcpy(out.code_, code_, pod.codeBytes_); + out->linkData_ = cx->make_unique(); + if (!out->linkData_ || !linkData_->clone(cx, out->linkData_.get())) + return false; - out.globalArgumentName_ = globalArgumentName_; - out.importArgumentName_ = importArgumentName_; - out.bufferArgumentName_ = bufferArgumentName_; + out->pod = pod; - if (!CloneVector(cx, globals_, &out.globals_) || - !CloneVector(cx, exits_, &out.exits_) || - !CloneVector(cx, exports_, &out.exports_) || - !ClonePodVector(cx, callSites_, &out.callSites_) || - !ClonePodVector(cx, codeRanges_, &out.codeRanges_) || - !CloneVector(cx, names_, &out.names_) || - !ClonePodVector(cx, heapAccesses_, &out.heapAccesses_) || - !staticLinkData_.clone(cx, &out.staticLinkData_)) + if (!CloneVector(cx, globals_, &out->globals_) || + !ClonePodVector(cx, imports_, &out->imports_) || + !CloneVector(cx, exports_, &out->exports_)) { return false; } - out.loadedFromCache_ = loadedFromCache_; - out.profilingEnabled_ = profilingEnabled_; - - if (profilingEnabled_) { - if (!out.profilingLabels_.resize(profilingLabels_.length())) - return false; - for (size_t i = 0; i < profilingLabels_.length(); i++) { - out.profilingLabels_[i] = DuplicateString(cx, profilingLabels_[i].get()); - if (!out.profilingLabels_[i]) - return false; - } - } - - - // Delay flushing until dynamic linking. 
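// Aside: a hedged sketch (with hypothetical helpers) of the ownership
// handoff clone() now uses. The half-built module is stored in its object
// before being populated, so the hasModule()-guarded finalize/trace hooks
// keep every early-return failure path GC-safe and leak-free.
static bool
CloneInto(JSContext* cx, HandleAsmJSModule obj, const AsmJSModule& src)
{
    AsmJSModule* out = NewEmptyModuleLike(cx, src);   // hypothetical helper
    if (!out)
        return false;
    obj->setModule(out);            // object now owns (and will finalize) out
    return PopulateFromSource(cx, out, src);          // hypothetical helper
}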
- AutoFlushICache afc("AsmJSModule::clone", /* inhibit = */ true); - out.setAutoFlushICacheRange(); - - out.restoreToInitialState(maybeHeap_, code_, cx); - out.staticallyLink(cx); + out->globalArgumentName_ = globalArgumentName_; + out->importArgumentName_ = importArgumentName_; + out->bufferArgumentName_ = bufferArgumentName_; return true; } -bool -AsmJSModule::changeHeap(Handle newHeap, JSContext* cx) -{ - MOZ_ASSERT(hasArrayView()); - - // Content JS should not be able to run (and change heap) from within an - // interrupt callback, but in case it does, fail to change heap. Otherwise, - // the heap can change at every single instruction which would prevent - // future optimizations like heap-base hoisting. - if (interrupted_) - return false; - - AutoMutateCode amc(cx, *this, "AsmJSModule::changeHeap"); - restoreHeapToInitialState(maybeHeap_); - initHeap(newHeap, cx); - return true; -} - -size_t -AsmJSModule::heapLength() const -{ - MOZ_ASSERT(isDynamicallyLinked()); - return maybeHeap_ ? maybeHeap_->byteLength() : 0; -} - -void -AsmJSModule::setProfilingEnabled(bool enabled, JSContext* cx) -{ - MOZ_ASSERT(isDynamicallyLinked()); - - if (profilingEnabled_ == enabled) - return; - - // When enabled, generate profiling labels for every name in names_ that is - // the name of some Function CodeRange. This involves malloc() so do it now - // since, once we start sampling, we'll be in a signal-handing context where - // we cannot malloc. - if (enabled) { - profilingLabels_.resize(names_.length()); - const char* filename = scriptSource_->filename(); - JS::AutoCheckCannotGC nogc; - for (size_t i = 0; i < codeRanges_.length(); i++) { - CodeRange& cr = codeRanges_[i]; - if (!cr.isFunction()) - continue; - unsigned lineno = cr.functionLineNumber(); - PropertyName* name = names_[cr.functionNameIndex()].name(); - profilingLabels_[cr.functionNameIndex()].reset( - name->hasLatin1Chars() - ? 
JS_smprintf("%s (%s:%u)", name->latin1Chars(nogc), filename, lineno) - : JS_smprintf("%hs (%s:%u)", name->twoByteChars(nogc), filename, lineno)); - } - } else { - profilingLabels_.clear(); - } - - AutoMutateCode amc(cx, *this, "AsmJSModule::setProfilingEnabled"); - - // Patch all internal (asm.js->asm.js) callsites to call the profiling - // prologues: - for (size_t i = 0; i < callSites_.length(); i++) { - CallSite& cs = callSites_[i]; - if (cs.kind() != CallSite::Relative) - continue; - - uint8_t* callerRetAddr = code_ + cs.returnAddressOffset(); -#if defined(JS_CODEGEN_X86) || defined(JS_CODEGEN_X64) - void* callee = X86Encoding::GetRel32Target(callerRetAddr); -#elif defined(JS_CODEGEN_ARM) - uint8_t* caller = callerRetAddr - 4; - Instruction* callerInsn = reinterpret_cast(caller); - BOffImm calleeOffset; - callerInsn->as()->extractImm(&calleeOffset); - void* callee = calleeOffset.getDest(callerInsn); -#elif defined(JS_CODEGEN_ARM64) - MOZ_CRASH(); - void* callee = nullptr; - (void)callerRetAddr; -#elif defined(JS_CODEGEN_MIPS32) || defined(JS_CODEGEN_MIPS64) - uint8_t* instr = callerRetAddr - Assembler::PatchWrite_NearCallSize(); - void* callee = (void*)Assembler::ExtractInstructionImmediate(instr); -#elif defined(JS_CODEGEN_NONE) - MOZ_CRASH(); - void* callee = nullptr; -#else -# error "Missing architecture" -#endif - - const CodeRange* codeRange = lookupCodeRange(callee); - if (codeRange->kind() != CodeRange::Function) - continue; - - uint8_t* profilingEntry = code_ + codeRange->profilingEntry(); - uint8_t* entry = code_ + codeRange->entry(); - MOZ_ASSERT_IF(profilingEnabled_, callee == profilingEntry); - MOZ_ASSERT_IF(!profilingEnabled_, callee == entry); - uint8_t* newCallee = enabled ? profilingEntry : entry; - -#if defined(JS_CODEGEN_X86) || defined(JS_CODEGEN_X64) - X86Encoding::SetRel32(callerRetAddr, newCallee); -#elif defined(JS_CODEGEN_ARM) - new (caller) InstBLImm(BOffImm(newCallee - caller), Assembler::Always); -#elif defined(JS_CODEGEN_ARM64) - (void)newCallee; - MOZ_CRASH(); -#elif defined(JS_CODEGEN_MIPS32) || defined(JS_CODEGEN_MIPS64) - Assembler::PatchInstructionImmediate(instr, PatchedImmPtr(newCallee)); -#elif defined(JS_CODEGEN_NONE) - MOZ_CRASH(); -#else -# error "Missing architecture" -#endif - } - - // Update all the addresses in the function-pointer tables to point to the - // profiling prologues: - for (FuncPtrTable& funcPtrTable : staticLinkData_.funcPtrTables) { - auto array = reinterpret_cast(globalData() + funcPtrTable.globalDataOffset()); - for (size_t i = 0; i < funcPtrTable.elemOffsets().length(); i++) { - void* callee = array[i]; - const CodeRange* codeRange = lookupCodeRange(callee); - void* profilingEntry = code_ + codeRange->profilingEntry(); - void* entry = code_ + codeRange->entry(); - MOZ_ASSERT_IF(profilingEnabled_, callee == profilingEntry); - MOZ_ASSERT_IF(!profilingEnabled_, callee == entry); - if (enabled) - array[i] = profilingEntry; - else - array[i] = entry; - } - } - - // Replace all the nops in all the epilogues of asm.js functions with jumps - // to the profiling epilogues. - for (size_t i = 0; i < codeRanges_.length(); i++) { - CodeRange& cr = codeRanges_[i]; - if (!cr.isFunction()) - continue; - uint8_t* jump = code_ + cr.profilingJump(); - uint8_t* profilingEpilogue = code_ + cr.profilingEpilogue(); -#if defined(JS_CODEGEN_X86) || defined(JS_CODEGEN_X64) - // An unconditional jump with a 1 byte offset immediate has the opcode - // 0x90. The offset is relative to the address of the instruction after - // the jump. 
0x66 0x90 is the canonical two-byte nop. - ptrdiff_t jumpImmediate = profilingEpilogue - jump - 2; - MOZ_ASSERT(jumpImmediate > 0 && jumpImmediate <= 127); - if (enabled) { - MOZ_ASSERT(jump[0] == 0x66); - MOZ_ASSERT(jump[1] == 0x90); - jump[0] = 0xeb; - jump[1] = jumpImmediate; - } else { - MOZ_ASSERT(jump[0] == 0xeb); - MOZ_ASSERT(jump[1] == jumpImmediate); - jump[0] = 0x66; - jump[1] = 0x90; - } -#elif defined(JS_CODEGEN_ARM) - if (enabled) { - MOZ_ASSERT(reinterpret_cast(jump)->is()); - new (jump) InstBImm(BOffImm(profilingEpilogue - jump), Assembler::Always); - } else { - MOZ_ASSERT(reinterpret_cast(jump)->is()); - new (jump) InstNOP(); - } -#elif defined(JS_CODEGEN_ARM64) - (void)jump; - (void)profilingEpilogue; - MOZ_CRASH(); -#elif defined(JS_CODEGEN_MIPS32) - Instruction* instr = (Instruction*)jump; - if (enabled) { - Assembler::WriteLuiOriInstructions(instr, instr->next(), - ScratchRegister, (uint32_t)profilingEpilogue); - instr[2] = InstReg(op_special, ScratchRegister, zero, zero, ff_jr); - } else { - instr[0].makeNop(); - instr[1].makeNop(); - instr[2].makeNop(); - } -#elif defined(JS_CODEGEN_MIPS64) - Instruction* instr = (Instruction*)jump; - if (enabled) { - Assembler::WriteLoad64Instructions(instr, ScratchRegister, (uint64_t)profilingEpilogue); - instr[4] = InstReg(op_special, ScratchRegister, zero, zero, ff_jr); - } else { - instr[0].makeNop(); - instr[1].makeNop(); - instr[2].makeNop(); - instr[3].makeNop(); - instr[4].makeNop(); - } -#elif defined(JS_CODEGEN_NONE) - MOZ_CRASH(); -#else -# error "Missing architecture" -#endif - } - - // Replace all calls to builtins with calls to profiling thunks that push a - // frame pointer. Since exit unwinding always starts at the caller of fp, - // this avoids losing the innermost asm.js function. 
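// Aside: a hedged sketch of the x86 epilogue toggle above. The epilogue
// slot holds the canonical two-byte nop (0x66 0x90); enabling profiling
// rewrites it to a short jump (0xeb rel8) to the profiling epilogue,
// where rel8 is measured from the end of the two-byte instruction.
static void
ToggleProfilingJump(uint8_t* jump, uint8_t* profilingEpilogue, bool enabled)
{
    ptrdiff_t rel8 = profilingEpilogue - jump - 2;  // relative to next insn
    // The patch asserts rel8 > 0 && rel8 <= 127, so it fits a short jump.
    if (enabled) {
        jump[0] = 0xeb;             // JMP rel8
        jump[1] = uint8_t(rel8);
    } else {
        jump[0] = 0x66;             // canonical two-byte nop
        jump[1] = 0x90;
    }
}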
- for (auto builtin : MakeEnumeratedRange(Builtin::Limit)) { - auto imm = BuiltinToImmediate(builtin); - const OffsetVector& offsets = staticLinkData_.absoluteLinks[imm]; - void* from = AddressOf(imm, nullptr); - void* to = code_ + staticLinkData_.pod.builtinThunkOffsets[builtin]; - if (!enabled) - Swap(from, to); - for (size_t j = 0; j < offsets.length(); j++) { - uint8_t* caller = code_ + offsets[j]; - const AsmJSModule::CodeRange* codeRange = lookupCodeRange(caller); - if (codeRange->isThunk()) - continue; - MOZ_ASSERT(codeRange->isFunction()); - Assembler::PatchDataWithValueCheck(CodeLocationLabel(caller), - PatchedImmPtr(to), - PatchedImmPtr(from)); - } - } - - profilingEnabled_ = enabled; -} - -static bool -GetCPUID(uint32_t* cpuId) -{ - enum Arch { - X86 = 0x1, - X64 = 0x2, - ARM = 0x3, - MIPS = 0x4, - MIPS64 = 0x5, - ARCH_BITS = 3 - }; - -#if defined(JS_CODEGEN_X86) - MOZ_ASSERT(uint32_t(CPUInfo::GetSSEVersion()) <= (UINT32_MAX >> ARCH_BITS)); - *cpuId = X86 | (uint32_t(CPUInfo::GetSSEVersion()) << ARCH_BITS); - return true; -#elif defined(JS_CODEGEN_X64) - MOZ_ASSERT(uint32_t(CPUInfo::GetSSEVersion()) <= (UINT32_MAX >> ARCH_BITS)); - *cpuId = X64 | (uint32_t(CPUInfo::GetSSEVersion()) << ARCH_BITS); - return true; -#elif defined(JS_CODEGEN_ARM) - MOZ_ASSERT(GetARMFlags() <= (UINT32_MAX >> ARCH_BITS)); - *cpuId = ARM | (GetARMFlags() << ARCH_BITS); - return true; -#elif defined(JS_CODEGEN_MIPS32) - MOZ_ASSERT(GetMIPSFlags() <= (UINT32_MAX >> ARCH_BITS)); - *cpuId = MIPS | (GetMIPSFlags() << ARCH_BITS); - return true; -#elif defined(JS_CODEGEN_MIPS64) - MOZ_ASSERT(GetMIPSFlags() <= (UINT32_MAX >> ARCH_BITS)); - *cpuId = MIPS64 | (GetMIPSFlags() << ARCH_BITS); - return true; -#else - return false; -#endif -} - -class MachineId -{ - uint32_t cpuId_; - JS::BuildIdCharVector buildId_; - - public: - bool extractCurrentState(ExclusiveContext* cx) { - if (!cx->asmJSCacheOps().buildId) - return false; - if (!cx->asmJSCacheOps().buildId(&buildId_)) - return false; - if (!GetCPUID(&cpuId_)) - return false; - return true; - } - - size_t serializedSize() const { - return sizeof(uint32_t) + - SerializedPodVectorSize(buildId_); - } - - uint8_t* serialize(uint8_t* cursor) const { - cursor = WriteScalar(cursor, cpuId_); - cursor = SerializePodVector(cursor, buildId_); - return cursor; - } - - const uint8_t* deserialize(ExclusiveContext* cx, const uint8_t* cursor) { - (cursor = ReadScalar(cursor, &cpuId_)) && - (cursor = DeserializePodVector(cx, cursor, &buildId_)); - return cursor; - } - - bool operator==(const MachineId& rhs) const { - return cpuId_ == rhs.cpuId_ && - buildId_.length() == rhs.buildId_.length() && - PodEqual(buildId_.begin(), rhs.buildId_.begin(), buildId_.length()); - } - bool operator!=(const MachineId& rhs) const { - return !(*this == rhs); - } -}; - struct PropertyNameWrapper { PropertyName* name; @@ -2198,27 +510,8 @@ class ModuleCharsForLookup : ModuleChars } }; -struct ScopedCacheEntryOpenedForWrite -{ - ExclusiveContext* cx; - const size_t serializedSize; - uint8_t* memory; - intptr_t handle; - - ScopedCacheEntryOpenedForWrite(ExclusiveContext* cx, size_t serializedSize) - : cx(cx), serializedSize(serializedSize), memory(nullptr), handle(-1) - {} - - ~ScopedCacheEntryOpenedForWrite() { - if (memory) - cx->asmJSCacheOps().closeEntryForWrite(serializedSize, memory, handle); - } -}; - JS::AsmJSCacheResult -js::StoreAsmJSModuleInCache(AsmJSParser& parser, - const AsmJSModule& module, - ExclusiveContext* cx) +js::StoreAsmJSModuleInCache(AsmJSParser& parser, const AsmJSModule& 
module, ExclusiveContext* cx) { MachineId machineId; if (!machineId.extractCurrentState(cx)) @@ -2255,31 +548,14 @@ js::StoreAsmJSModuleInCache(AsmJSParser& parser, return JS::AsmJSCache_Success; } -struct ScopedCacheEntryOpenedForRead -{ - ExclusiveContext* cx; - size_t serializedSize; - const uint8_t* memory; - intptr_t handle; - - explicit ScopedCacheEntryOpenedForRead(ExclusiveContext* cx) - : cx(cx), serializedSize(0), memory(nullptr), handle(0) - {} - - ~ScopedCacheEntryOpenedForRead() { - if (memory) - cx->asmJSCacheOps().closeEntryForRead(serializedSize, memory, handle); - } -}; - bool -js::LookupAsmJSModuleInCache(ExclusiveContext* cx, - AsmJSParser& parser, - ScopedJSDeletePtr* moduleOut, - ScopedJSFreePtr* compilationTimeReport) +js::LookupAsmJSModuleInCache(ExclusiveContext* cx, AsmJSParser& parser, HandleAsmJSModule moduleObj, + bool* loadedFromCache, UniqueChars* compilationTimeReport) { int64_t usecBefore = PRMJ_Now(); + *loadedFromCache = false; + MachineId machineId; if (!machineId.extractCurrentState(cx)) return true; @@ -2313,13 +589,12 @@ js::LookupAsmJSModuleInCache(ExclusiveContext* cx, uint32_t srcBodyStart = parser.tokenStream.currentToken().pos.end; bool strict = parser.pc->sc->strict() && !parser.pc->sc->hasExplicitUseStrict(); - // canUseSignalHandlers will be clobbered when deserializing and checked below - ScopedJSDeletePtr module( - cx->new_(parser.ss, srcStart, srcBodyStart, strict, - /* canUseSignalHandlers = */ false)); + AsmJSModule* module = cx->new_(parser.ss, srcStart, srcBodyStart, strict); if (!module) return false; + moduleObj->setModule(module); + cursor = module->deserialize(cx, cursor); if (!cursor) return false; @@ -2329,23 +604,19 @@ js::LookupAsmJSModuleInCache(ExclusiveContext* cx, if (!atEnd) return true; - if (module->canUseSignalHandlers() != cx->canUseSignalHandlers()) + if (module->wasm().compileArgs() != CompileArgs(cx)) return true; + module->staticallyLink(cx); + if (!parser.tokenStream.advance(module->srcEndBeforeCurly())) return false; - { - // Delay flushing until dynamic linking. - AutoFlushICache afc("LookupAsmJSModuleInCache", /* inhibit = */ true); - module->setAutoFlushICacheRange(); - - module->staticallyLink(cx); - } + *loadedFromCache = true; int64_t usecAfter = PRMJ_Now(); int ms = (usecAfter - usecBefore) / PRMJ_USEC_PER_MSEC; - *compilationTimeReport = JS_smprintf("loaded from cache in %dms", ms); - *moduleOut = module.forget(); + *compilationTimeReport = UniqueChars(JS_smprintf("loaded from cache in %dms", ms)); return true; } + diff --git a/js/src/asmjs/AsmJSModule.h b/js/src/asmjs/AsmJSModule.h index f850679bbbc..549aa912347 100644 --- a/js/src/asmjs/AsmJSModule.h +++ b/js/src/asmjs/AsmJSModule.h @@ -24,22 +24,16 @@ #include "mozilla/Move.h" #include "mozilla/PodOperations.h" -#include "jsscript.h" - -#include "asmjs/AsmJSFrameIterator.h" #include "asmjs/AsmJSValidate.h" -#include "asmjs/Wasm.h" +#include "asmjs/WasmModule.h" #include "builtin/SIMD.h" #include "gc/Tracer.h" -#ifdef JS_ION_PERF -# include "jit/PerfSpewer.h" -#endif #include "vm/TypedArrayObject.h" namespace js { -namespace frontend { class TokenStream; } -namespace jit { struct BaselineScript; class MacroAssembler; } +class AsmJSModuleObject; +typedef Handle HandleAsmJSModule; // The asm.js spec recognizes this set of builtin Math functions. enum AsmJSMathBuiltinFunction @@ -98,16 +92,8 @@ enum AsmJSSimdOperation #undef ASMJSSIMDOPERATION }; -// An asm.js module represents the collection of functions nested inside a -// single outer "use asm" function. 
For example, this asm.js module: -// function() { "use asm"; function f() {} function g() {} return f } -// contains the functions 'f' and 'g'. -// -// An asm.js module contains both the jit-code produced by compiling all the -// functions in the module as well all the data required to perform the -// link-time validation step in the asm.js spec. -// -// NB: this means that AsmJSModule must be GC-safe. +// An AsmJSModule extends (via containment) a wasm::Module with the extra persistent state +// necessary to represent a compiled asm.js module. class AsmJSModule { public: @@ -120,7 +106,7 @@ class AsmJSModule enum ConstantKind { GlobalConstant, MathConstant }; private: - struct Pod { + struct CacheablePod { Which which_; union { struct { @@ -259,141 +245,42 @@ class AsmJSModule return pod.u.constant.value_; } - size_t serializedSize() const; - uint8_t* serialize(uint8_t* cursor) const; - const uint8_t* deserialize(ExclusiveContext* cx, const uint8_t* cursor); - bool clone(ExclusiveContext* cx, Global* out) const; + WASM_DECLARE_SERIALIZABLE(Global); }; - // An Exit holds bookkeeping information about an exit; the ExitDatum - // struct overlays the actual runtime data stored in the global data - // section. + typedef Vector GlobalVector; - struct ExitDatum + class Import { - uint8_t* exit; - jit::BaselineScript* baselineScript; - HeapPtrFunction fun; - }; - - class Exit - { - wasm::MallocSig sig_; - struct Pod { - unsigned ffiIndex_; - unsigned globalDataOffset_; - unsigned interpCodeOffset_; - unsigned jitCodeOffset_; - } pod; - + uint32_t ffiIndex_; public: - Exit() {} - Exit(Exit&& rhs) : sig_(Move(rhs.sig_)), pod(rhs.pod) {} - Exit(wasm::MallocSig&& sig, unsigned ffiIndex, unsigned globalDataOffset) - : sig_(Move(sig)) - { - pod.ffiIndex_ = ffiIndex; - pod.globalDataOffset_ = globalDataOffset; - pod.interpCodeOffset_ = 0; - pod.jitCodeOffset_ = 0; - } - const wasm::MallocSig& sig() const { - return sig_; - } - unsigned ffiIndex() const { - return pod.ffiIndex_; - } - unsigned globalDataOffset() const { - return pod.globalDataOffset_; - } - void initInterpOffset(unsigned off) { - MOZ_ASSERT(!pod.interpCodeOffset_); - pod.interpCodeOffset_ = off; - } - void initJitOffset(unsigned off) { - MOZ_ASSERT(!pod.jitCodeOffset_); - pod.jitCodeOffset_ = off; - } - ExitDatum& datum(const AsmJSModule& module) const { - return *reinterpret_cast(module.globalData() + pod.globalDataOffset_); - } - void initDatum(const AsmJSModule& module) const { - MOZ_ASSERT(pod.interpCodeOffset_); - ExitDatum& d = datum(module); - d.exit = module.codeBase() + pod.interpCodeOffset_; - d.baselineScript = nullptr; - d.fun = nullptr; - } - bool isOptimized(const AsmJSModule& module) const { - return datum(module).exit == module.codeBase() + pod.jitCodeOffset_; - } - void optimize(const AsmJSModule& module, jit::BaselineScript* baselineScript) const { - ExitDatum& d = datum(module); - d.exit = module.codeBase() + pod.jitCodeOffset_; - d.baselineScript = baselineScript; - } - void deoptimize(const AsmJSModule& module) const { - ExitDatum& d = datum(module); - d.exit = module.codeBase() + pod.interpCodeOffset_; - d.baselineScript = nullptr; - } - - size_t serializedSize() const; - uint8_t* serialize(uint8_t* cursor) const; - const uint8_t* deserialize(ExclusiveContext* cx, const uint8_t* cursor); - bool clone(ExclusiveContext* cx, Exit* out) const; + Import() = default; + explicit Import(uint32_t ffiIndex) : ffiIndex_(ffiIndex) {} + uint32_t ffiIndex() const { return ffiIndex_; } }; - struct EntryArg { - uint64_t lo; - 
uint64_t hi; - }; + typedef Vector ImportVector; - typedef int32_t (*CodePtr)(EntryArg* args, uint8_t* global); - - class ExportedFunction + class Export { PropertyName* name_; PropertyName* maybeFieldName_; - wasm::MallocSig sig_; - struct Pod { - bool isChangeHeap_; - uint32_t funcIndex_; - uint32_t codeOffset_; + struct CacheablePod { + uint32_t wasmIndex_; uint32_t startOffsetInModule_; // Store module-start-relative offsets uint32_t endOffsetInModule_; // so preserved by serialization. } pod; - friend class AsmJSModule; - - ExportedFunction(PropertyName* name, uint32_t funcIndex, - uint32_t startOffsetInModule, uint32_t endOffsetInModule, - PropertyName* maybeFieldName, - wasm::MallocSig&& sig) - : name_(name), - maybeFieldName_(maybeFieldName), - sig_(Move(sig)) - { - MOZ_ASSERT(name_->isTenured()); - MOZ_ASSERT_IF(maybeFieldName_, maybeFieldName_->isTenured()); - mozilla::PodZero(&pod); // zero padding for Valgrind - pod.funcIndex_ = funcIndex; - pod.isChangeHeap_ = false; - pod.codeOffset_ = UINT32_MAX; - pod.startOffsetInModule_ = startOffsetInModule; - pod.endOffsetInModule_ = endOffsetInModule; - } - - ExportedFunction(PropertyName* name, - uint32_t startOffsetInModule, uint32_t endOffsetInModule, - PropertyName* maybeFieldName) + public: + Export() {} + Export(PropertyName* name, PropertyName* maybeFieldName, uint32_t wasmIndex, + uint32_t startOffsetInModule, uint32_t endOffsetInModule) : name_(name), maybeFieldName_(maybeFieldName) { MOZ_ASSERT(name_->isTenured()); MOZ_ASSERT_IF(maybeFieldName_, maybeFieldName_->isTenured()); - mozilla::PodZero(&pod); // zero padding for Valgrind - pod.isChangeHeap_ = true; + pod.wasmIndex_ = wasmIndex; pod.startOffsetInModule_ = startOffsetInModule; pod.endOffsetInModule_ = endOffsetInModule; } @@ -404,17 +291,6 @@ class AsmJSModule TraceManuallyBarrieredEdge(trc, &maybeFieldName_, "asm.js export field"); } - public: - ExportedFunction() {} - ExportedFunction(ExportedFunction&& rhs) - : name_(rhs.name_), - maybeFieldName_(rhs.maybeFieldName_), - sig_(mozilla::Move(rhs.sig_)) - { - mozilla::PodZero(&pod); // zero padding for Valgrind - pod = rhs.pod; - } - PropertyName* name() const { return name_; } @@ -427,384 +303,61 @@ class AsmJSModule uint32_t endOffsetInModule() const { return pod.endOffsetInModule_; } + static const uint32_t ChangeHeap = UINT32_MAX; bool isChangeHeap() const { - return pod.isChangeHeap_; + return pod.wasmIndex_ == ChangeHeap; } - uint32_t funcIndex() const { + uint32_t wasmIndex() const { MOZ_ASSERT(!isChangeHeap()); - return pod.funcIndex_; - } - void initCodeOffset(unsigned off) { - MOZ_ASSERT(!isChangeHeap()); - MOZ_ASSERT(pod.codeOffset_ == UINT32_MAX); - pod.codeOffset_ = off; - } - const wasm::MallocSig& sig() const { - MOZ_ASSERT(!isChangeHeap()); - return sig_; + return pod.wasmIndex_; } - size_t serializedSize() const; - uint8_t* serialize(uint8_t* cursor) const; - const uint8_t* deserialize(ExclusiveContext* cx, const uint8_t* cursor); - bool clone(ExclusiveContext* cx, ExportedFunction* out) const; + WASM_DECLARE_SERIALIZABLE(Export) }; - class CodeRange - { - protected: - uint32_t nameIndex_; + typedef Vector ExportVector; - private: - uint32_t lineNumber_; - uint32_t begin_; - uint32_t profilingReturn_; - uint32_t end_; - union { - struct { - uint8_t kind_; - uint8_t beginToEntry_; - uint8_t profilingJumpToProfilingReturn_; - uint8_t profilingEpilogueToProfilingReturn_; - } func; - struct { - uint8_t kind_; - uint16_t target_; - } thunk; - uint8_t kind_; - } u; - - void assertValid(); - - public: - enum 
Kind { Function, Entry, JitFFI, SlowFFI, Interrupt, Thunk, Inline }; - - CodeRange() {} - CodeRange(Kind kind, AsmJSOffsets offsets); - CodeRange(Kind kind, AsmJSProfilingOffsets offsets); - CodeRange(wasm::Builtin builtin, AsmJSProfilingOffsets offsets); - CodeRange(uint32_t lineNumber, AsmJSFunctionOffsets offsets); - - Kind kind() const { return Kind(u.kind_); } - bool isFunction() const { return kind() == Function; } - bool isEntry() const { return kind() == Entry; } - bool isFFI() const { return kind() == JitFFI || kind() == SlowFFI; } - bool isInterrupt() const { return kind() == Interrupt; } - bool isThunk() const { return kind() == Thunk; } - - uint32_t begin() const { - return begin_; - } - uint32_t profilingEntry() const { - return begin(); - } - uint32_t entry() const { - MOZ_ASSERT(isFunction()); - return begin_ + u.func.beginToEntry_; - } - uint32_t end() const { - return end_; - } - uint32_t profilingJump() const { - MOZ_ASSERT(isFunction()); - return profilingReturn_ - u.func.profilingJumpToProfilingReturn_; - } - uint32_t profilingEpilogue() const { - MOZ_ASSERT(isFunction()); - return profilingReturn_ - u.func.profilingEpilogueToProfilingReturn_; - } - uint32_t profilingReturn() const { - MOZ_ASSERT(isFunction() || isFFI() || isInterrupt() || isThunk()); - return profilingReturn_; - } - void initNameIndex(uint32_t nameIndex) { - MOZ_ASSERT(nameIndex_ == UINT32_MAX); - nameIndex_ = nameIndex; - } - uint32_t functionNameIndex() const { - MOZ_ASSERT(isFunction()); - MOZ_ASSERT(nameIndex_ != UINT32_MAX); - return nameIndex_; - } - PropertyName* functionName(const AsmJSModule& module) const { - return module.names_[functionNameIndex()].name(); - } - const char* functionProfilingLabel(const AsmJSModule& module) const { - MOZ_ASSERT(isFunction()); - return module.profilingLabels_[nameIndex_].get(); - } - uint32_t functionLineNumber() const { - MOZ_ASSERT(isFunction()); - return lineNumber_; - } - void functionOffsetBy(uint32_t offset) { - MOZ_ASSERT(isFunction()); - begin_ += offset; - profilingReturn_ += offset; - end_ += offset; - } - wasm::Builtin thunkTarget() const { - MOZ_ASSERT(isThunk()); - return wasm::Builtin(u.thunk.target_); - } - }; - - class Name - { - PropertyName* name_; - public: - Name() : name_(nullptr) {} - MOZ_IMPLICIT Name(PropertyName* name) : name_(name) {} - PropertyName* name() const { return name_; } - PropertyName*& name() { return name_; } - size_t serializedSize() const; - uint8_t* serialize(uint8_t* cursor) const; - const uint8_t* deserialize(ExclusiveContext* cx, const uint8_t* cursor); - bool clone(ExclusiveContext* cx, Name* out) const; - }; - - typedef mozilla::UniquePtr ProfilingLabel; - -#if defined(MOZ_VTUNE) || defined(JS_ION_PERF) - // Function information to add to the VTune JIT profiler following linking. 
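// Aside: a hedged sketch of the header word SerializeName/DeserializeName
// (earlier in this patch) use for the Name entries above. Length and
// character width share one uint32: bit 0 flags Latin-1, the upper 31
// bits hold the length, and 0 encodes "no name".
static uint32_t
PackNameHeader(uint32_t length, bool latin1)
{
    // length <= INT32_MAX, mirroring the JSString::MAX_LENGTH static_assert.
    return (length << 1) | uint32_t(latin1);
}

static void
UnpackNameHeader(uint32_t header, uint32_t* length, bool* latin1)
{
    *length = header >> 1;
    *latin1 = (header & 1) != 0;
}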
- struct ProfiledFunction - { - PropertyName* name; - struct Pod { - unsigned startCodeOffset; - unsigned endCodeOffset; - unsigned lineno; - unsigned columnIndex; - } pod; - - explicit ProfiledFunction() - : name(nullptr) - { } - - ProfiledFunction(PropertyName* name, unsigned start, unsigned end, - unsigned line = 0, unsigned column = 0) - : name(name) - { - MOZ_ASSERT(name->isTenured()); - - pod.startCodeOffset = start; - pod.endCodeOffset = end; - pod.lineno = line; - pod.columnIndex = column; - } - - void trace(JSTracer* trc) { - if (name) - TraceManuallyBarrieredEdge(trc, &name, "asm.js profiled function name"); - } - - size_t serializedSize() const; - uint8_t* serialize(uint8_t* cursor) const; - const uint8_t* deserialize(ExclusiveContext* cx, const uint8_t* cursor); - }; -#endif - - struct RelativeLink - { - enum Kind - { - RawPointer, - CodeLabel, - InstructionImmediate - }; - - RelativeLink() - { } - -#if defined(JS_CODEGEN_MIPS32) || defined(JS_CODEGEN_MIPS64) - // On MIPS, CodeLabels are instruction immediates so RelativeLinks only - // patch instruction immediates. - explicit RelativeLink(Kind kind) { - MOZ_ASSERT(kind == CodeLabel || kind == InstructionImmediate); - } - bool isRawPointerPatch() { - return false; - } -#else - // On the rest, CodeLabels are raw pointers so RelativeLinks only patch - // raw pointers. - explicit RelativeLink(Kind kind) { - MOZ_ASSERT(kind == CodeLabel || kind == RawPointer); - } - bool isRawPointerPatch() { - return true; - } -#endif - - uint32_t patchAtOffset; - uint32_t targetOffset; - }; - - typedef Vector RelativeLinkVector; - - typedef mozilla::EnumeratedArray BuiltinThunkOffsetArray; - - typedef Vector OffsetVector; - typedef mozilla::EnumeratedArray OffsetVectorArray; - - struct AbsoluteLinkArray : public OffsetVectorArray - { - size_t serializedSize() const; - uint8_t* serialize(uint8_t* cursor) const; - const uint8_t* deserialize(ExclusiveContext* cx, const uint8_t* cursor); - bool clone(ExclusiveContext* cx, AbsoluteLinkArray* out) const; - - size_t sizeOfExcludingThis(mozilla::MallocSizeOf mallocSizeOf) const; - }; - - class FuncPtrTable - { - struct Pod { - uint32_t globalDataOffset_; - } pod; - OffsetVector elemOffsets_; - - public: - FuncPtrTable() {} - FuncPtrTable(FuncPtrTable&& rhs) : pod(rhs.pod), elemOffsets_(Move(rhs.elemOffsets_)) {} - explicit FuncPtrTable(uint32_t globalDataOffset) { pod.globalDataOffset_ = globalDataOffset; } - void define(OffsetVector&& elemOffsets) { elemOffsets_ = Move(elemOffsets); } - uint32_t globalDataOffset() const { return pod.globalDataOffset_; } - const OffsetVector& elemOffsets() const { return elemOffsets_; } - - size_t serializedSize() const; - uint8_t* serialize(uint8_t* cursor) const; - const uint8_t* deserialize(ExclusiveContext* cx, const uint8_t* cursor); - bool clone(ExclusiveContext* cx, FuncPtrTable* out) const; - - size_t sizeOfExcludingThis(mozilla::MallocSizeOf mallocSizeOf) const; - }; - - typedef Vector FuncPtrTableVector; - - // Static-link data is used to patch a module either after it has been - // compiled or deserialized with various absolute addresses (of code or - // data in the process) or relative addresses (of code or data in the same - // AsmJSModule). 
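// Aside: a hedged sketch of the two patch flavors recorded below. A
// relative link joins two offsets inside the same module image; an
// absolute link stamps a process address (a SymbolicAddress target) into
// the image. staticallyLink() replays both after compilation or
// deserialization. Only the raw-pointer flavor is shown; the real code
// also routes instruction immediates through Assembler patching.
static void
ApplyRelativeLink(uint8_t* code, uint32_t patchAt, uint32_t target)
{
    *reinterpret_cast<uint8_t**>(code + patchAt) = code + target;
}

static void
ApplyAbsoluteLink(uint8_t* code, uint32_t patchAt, void* hostAddress)
{
    *reinterpret_cast<void**>(code + patchAt) = hostAddress;  // e.g. &sin
}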
- struct StaticLinkData - { - StaticLinkData() { mozilla::PodZero(&pod); } - - struct Pod { - uint32_t interruptExitOffset; - uint32_t outOfBoundsExitOffset; - BuiltinThunkOffsetArray builtinThunkOffsets; - } pod; - - RelativeLinkVector relativeLinks; - AbsoluteLinkArray absoluteLinks; - FuncPtrTableVector funcPtrTables; - - size_t serializedSize() const; - uint8_t* serialize(uint8_t* cursor) const; - const uint8_t* deserialize(ExclusiveContext* cx, const uint8_t* cursor); - bool clone(ExclusiveContext* cx, StaticLinkData* out) const; - - size_t sizeOfExcludingThis(mozilla::MallocSizeOf mallocSizeOf) const; - }; + typedef JS::UniquePtr> UniqueWasmModule; private: - struct Pod { - uint32_t functionBytes_; - uint32_t codeBytes_; - uint32_t globalBytes_; - uint32_t totalBytes_; - uint32_t minHeapLength_; - uint32_t maxHeapLength_; - uint32_t heapLengthMask_; - uint32_t numFFIs_; - uint32_t srcLength_; - uint32_t srcLengthWithRightBrace_; - bool strict_; - bool hasArrayView_; - bool isSharedView_; - bool hasFixedMinHeapLength_; - bool canUseSignalHandlers_; + UniqueWasmModule wasm_; + wasm::UniqueStaticLinkData linkData_; + struct CacheablePod { + uint32_t minHeapLength_; + uint32_t maxHeapLength_; + uint32_t heapLengthMask_; + uint32_t numFFIs_; + uint32_t srcLength_; + uint32_t srcLengthWithRightBrace_; + bool strict_; + bool hasArrayView_; + bool isSharedView_; + bool hasFixedMinHeapLength_; } pod; - - // These two fields need to be kept out pod as they depend on the position - // of the module within the ScriptSource and thus aren't invariant with - // respect to caching. - const uint32_t srcStart_; - const uint32_t srcBodyStart_; - - Vector globals_; - Vector exits_; - Vector exports_; - Vector callSites_; - Vector codeRanges_; - Vector names_; - Vector profilingLabels_; - Vector heapAccesses_; -#if defined(MOZ_VTUNE) || defined(JS_ION_PERF) - Vector profiledFunctions_; -#endif - - ScriptSource * scriptSource_; - PropertyName * globalArgumentName_; - PropertyName * importArgumentName_; - PropertyName * bufferArgumentName_; - uint8_t * code_; - uint8_t * interruptExit_; - uint8_t * outOfBoundsExit_; - StaticLinkData staticLinkData_; - RelocatablePtrArrayBufferObjectMaybeShared maybeHeap_; - AsmJSModule ** prevLinked_; - AsmJSModule * nextLinked_; - bool dynamicallyLinked_; - bool loadedFromCache_; - bool profilingEnabled_; - bool interrupted_; - - void restoreHeapToInitialState(ArrayBufferObjectMaybeShared* maybePrevBuffer); - void restoreToInitialState(ArrayBufferObjectMaybeShared* maybePrevBuffer, uint8_t* prevCode, - ExclusiveContext* cx); + const ScriptSourceHolder scriptSource_; + const uint32_t srcStart_; + const uint32_t srcBodyStart_; + GlobalVector globals_; + ImportVector imports_; + ExportVector exports_; + PropertyName* globalArgumentName_; + PropertyName* importArgumentName_; + PropertyName* bufferArgumentName_; public: explicit AsmJSModule(ScriptSource* scriptSource, uint32_t srcStart, uint32_t srcBodyStart, - bool strict, bool canUseSignalHandlers); + bool strict); void trace(JSTracer* trc); - ~AsmJSModule(); - - // An AsmJSModule transitions from !finished to finished to dynamically linked. 
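// Aside: a hedged sketch of why srcStart_/srcBodyStart_ stay outside the
// CacheablePod above. Offsets placed in the cache are stored
// module-relative, so a cache hit for the same source at a different file
// position still resolves (addExport() below stores funcSrcBegin - srcStart_):
static uint32_t
ToCacheableOffset(uint32_t absOffset, uint32_t srcStart)
{
    return absOffset - srcStart;   // position-invariant, safe to serialize
}

static uint32_t
FromCacheableOffset(uint32_t relOffset, uint32_t srcStart)
{
    return relOffset + srcStart;   // rebased against the deserializing script
}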
- bool isFinished() const { return !!code_; } - bool isDynamicallyLinked() const { return dynamicallyLinked_; } /*************************************************************************/ // These functions may be used as soon as the module is constructed: ScriptSource* scriptSource() const { - MOZ_ASSERT(scriptSource_); - return scriptSource_; + return scriptSource_.get(); } bool strict() const { return pod.strict_; } - bool canUseSignalHandlers() const { - return pod.canUseSignalHandlers_; - } - bool usesSignalHandlersForInterrupt() const { - return pod.canUseSignalHandlers_; - } - bool usesSignalHandlersForOOB() const { -#if defined(ASMJS_MAY_USE_SIGNAL_HANDLERS_FOR_OOB) - return pod.canUseSignalHandlers_; -#else - return false; -#endif - } - bool loadedFromCache() const { - return loadedFromCache_; - } // srcStart() refers to the offset in the ScriptSource to the beginning of // the asm.js module function. If the function has been created with the @@ -834,14 +387,6 @@ class AsmJSModule return pod.heapLengthMask_; } - // about:memory reporting - void addSizeOfMisc(mozilla::MallocSizeOf mallocSizeOf, size_t* asmJSModuleCode, - size_t* asmJSModuleData); - - /*************************************************************************/ - // These functions build the global scope of the module while parsing the - // module prologue (before the function bodies): - void initGlobalArgumentName(PropertyName* n) { MOZ_ASSERT(!isFinished()); MOZ_ASSERT_IF(n, n->isTenured()); @@ -867,59 +412,20 @@ class AsmJSModule return bufferArgumentName_; } - /*************************************************************************/ - // These functions may only be called before finish(): - - private: - bool allocateGlobalBytes(uint32_t bytes, uint32_t align, uint32_t* globalDataOffset) { + bool addGlobalVarInit(const wasm::Val& v, uint32_t globalDataOffset) { MOZ_ASSERT(!isFinished()); - uint32_t pad = ComputeByteAlignment(pod.globalBytes_, align); - if (UINT32_MAX - pod.globalBytes_ < pad + bytes) - return false; - pod.globalBytes_ += pad; - *globalDataOffset = pod.globalBytes_; - pod.globalBytes_ += bytes; - return true; - } - bool addGlobalVar(wasm::ValType type, uint32_t* globalDataOffset) { - MOZ_ASSERT(!isFinished()); - unsigned width = 0; - switch (type) { - case wasm::ValType::I32: - case wasm::ValType::F32: - width = 4; - break; - case wasm::ValType::I64: - case wasm::ValType::F64: - width = 8; - break; - case wasm::ValType::I32x4: - case wasm::ValType::F32x4: - case wasm::ValType::B32x4: - width = 16; - break; - } - return allocateGlobalBytes(width, width, globalDataOffset); - } - public: - bool addGlobalVarInit(const wasm::Val& v, uint32_t* globalDataOffset) { - MOZ_ASSERT(!isFinished()); - if (!addGlobalVar(v.type(), globalDataOffset)) - return false; Global g(Global::Variable, nullptr); g.pod.u.var.initKind_ = Global::InitConstant; g.pod.u.var.u.val_ = v; - g.pod.u.var.globalDataOffset_ = *globalDataOffset; + g.pod.u.var.globalDataOffset_ = globalDataOffset; return globals_.append(g); } - bool addGlobalVarImport(PropertyName* name, wasm::ValType importType, uint32_t* globalDataOffset) { + bool addGlobalVarImport(PropertyName* name, wasm::ValType importType, uint32_t globalDataOffset) { MOZ_ASSERT(!isFinished()); - if (!addGlobalVar(importType, globalDataOffset)) - return false; Global g(Global::Variable, name); g.pod.u.var.initKind_ = Global::InitImport; g.pod.u.var.u.importType_ = importType; - g.pod.u.var.globalDataOffset_ = *globalDataOffset; + g.pod.u.var.globalDataOffset_ = globalDataOffset; 
return globals_.append(g); } bool addFFI(PropertyName* field, uint32_t* ffiIndex) { @@ -990,28 +496,23 @@ class AsmJSModule g.pod.u.constant.kind_ = Global::GlobalConstant; return globals_.append(g); } - unsigned numGlobals() const { - return globals_.length(); + bool addImport(uint32_t ffiIndex, uint32_t importIndex) { + MOZ_ASSERT(imports_.length() == importIndex); + return imports_.emplaceBack(ffiIndex); } - Global& global(unsigned i) { - return globals_[i]; + bool addExport(PropertyName* name, PropertyName* maybeFieldName, uint32_t wasmIndex, + uint32_t funcSrcBegin, uint32_t funcSrcEnd) + { + // NB: funcSrcBegin/funcSrcEnd are given relative to the ScriptSource + // (the entire file) and ExportedFunctions store offsets relative to + // the beginning of the module (so that they are caching-invariant). + MOZ_ASSERT(!isFinished()); + MOZ_ASSERT(srcStart_ < funcSrcBegin); + MOZ_ASSERT(funcSrcBegin < funcSrcEnd); + return exports_.emplaceBack(name, maybeFieldName, wasmIndex, + funcSrcBegin - srcStart_, funcSrcEnd - srcStart_); } - void setViewsAreShared() { - if (pod.hasArrayView_) - pod.isSharedView_ = true; - } - - /*************************************************************************/ - // These functions are called while parsing/compiling function bodies: - - bool hasArrayView() const { - return pod.hasArrayView_; - } - bool isSharedView() const { - MOZ_ASSERT(pod.hasArrayView_); - return pod.isSharedView_; - } - void addChangeHeap(uint32_t mask, uint32_t min, uint32_t max) { + bool addChangeHeap(uint32_t mask, uint32_t min, uint32_t max) { MOZ_ASSERT(!isFinished()); MOZ_ASSERT(!pod.hasFixedMinHeapLength_); MOZ_ASSERT(IsValidAsmJSHeapLength(mask + 1)); @@ -1022,6 +523,28 @@ class AsmJSModule pod.minHeapLength_ = min; pod.maxHeapLength_ = max; pod.hasFixedMinHeapLength_ = true; + return true; + } + + const GlobalVector& globals() const { + return globals_; + } + const ImportVector& imports() const { + return imports_; + } + const ExportVector& exports() const { + return exports_; + } + + void setViewsAreShared() { + if (pod.hasArrayView_) + pod.isSharedView_ = true; + } + bool hasArrayView() const { + return pod.hasArrayView_; + } + bool isSharedView() const { + return pod.isSharedView_; } bool tryRequireHeapLengthToBeAtLeast(uint32_t len) { MOZ_ASSERT(!isFinished()); @@ -1034,164 +557,25 @@ class AsmJSModule pod.minHeapLength_ = len; return true; } - bool addCodeRange(CodeRange::Kind kind, AsmJSOffsets offsets) { - return codeRanges_.append(CodeRange(kind, offsets)); - } - bool addCodeRange(CodeRange::Kind kind, AsmJSProfilingOffsets offsets) { - return codeRanges_.append(CodeRange(kind, offsets)); - } - bool addFunctionCodeRange(PropertyName* name, CodeRange codeRange) { - MOZ_ASSERT(!isFinished()); - MOZ_ASSERT(name->isTenured()); - if (names_.length() >= UINT32_MAX) - return false; - codeRange.initNameIndex(names_.length()); - return names_.append(name) && codeRanges_.append(codeRange); - } - bool addBuiltinThunkCodeRange(wasm::Builtin builtin, AsmJSProfilingOffsets offsets) { - MOZ_ASSERT(staticLinkData_.pod.builtinThunkOffsets[builtin] == 0); - staticLinkData_.pod.builtinThunkOffsets[builtin] = offsets.begin; - return codeRanges_.append(CodeRange(builtin, offsets)); - } - bool addExit(wasm::MallocSig&& sig, unsigned ffiIndex, unsigned* exitIndex) { - MOZ_ASSERT(!isFinished()); - static_assert(sizeof(ExitDatum) % sizeof(void*) == 0, "word aligned"); - uint32_t globalDataOffset; - if (!allocateGlobalBytes(sizeof(ExitDatum), sizeof(void*), &globalDataOffset)) - return false; - 
*exitIndex = unsigned(exits_.length()); - return exits_.append(Exit(Move(sig), ffiIndex, globalDataOffset)); - } - unsigned numExits() const { - return exits_.length(); - } - Exit& exit(unsigned i) { - return exits_[i]; - } - const Exit& exit(unsigned i) const { - return exits_[i]; - } - bool declareFuncPtrTable(unsigned numElems, uint32_t* funcPtrTableIndex) { - MOZ_ASSERT(!isFinished()); - MOZ_ASSERT(IsPowerOfTwo(numElems)); - uint32_t globalDataOffset; - if (!allocateGlobalBytes(numElems * sizeof(void*), sizeof(void*), &globalDataOffset)) - return false; - *funcPtrTableIndex = staticLinkData_.funcPtrTables.length(); - return staticLinkData_.funcPtrTables.append(FuncPtrTable(globalDataOffset)); - } - FuncPtrTable& funcPtrTable(uint32_t funcPtrTableIndex) { - return staticLinkData_.funcPtrTables[funcPtrTableIndex]; - } -#if defined(MOZ_VTUNE) || defined(JS_ION_PERF) - bool addProfiledFunction(ProfiledFunction func) { - MOZ_ASSERT(!isFinished()); - return profiledFunctions_.append(func); - } - unsigned numProfiledFunctions() const { - return profiledFunctions_.length(); - } - ProfiledFunction& profiledFunction(unsigned i) { - return profiledFunctions_[i]; - } -#endif - - bool addExportedFunction(PropertyName* name, - uint32_t funcIndex, - uint32_t funcSrcBegin, - uint32_t funcSrcEnd, - PropertyName* maybeFieldName, - wasm::MallocSig&& sig) - { - // NB: funcSrcBegin/funcSrcEnd are given relative to the ScriptSource - // (the entire file) and ExportedFunctions store offsets relative to - // the beginning of the module (so that they are caching-invariant). - MOZ_ASSERT(!isFinished()); - MOZ_ASSERT(srcStart_ < funcSrcBegin); - MOZ_ASSERT(funcSrcBegin < funcSrcEnd); - ExportedFunction func(name, funcIndex, funcSrcBegin - srcStart_, funcSrcEnd - srcStart_, - maybeFieldName, mozilla::Move(sig)); - return exports_.length() < UINT32_MAX && exports_.append(mozilla::Move(func)); - } - bool addExportedChangeHeap(PropertyName* name, - uint32_t funcSrcBegin, - uint32_t funcSrcEnd, - PropertyName* maybeFieldName) - { - // See addExportedFunction. - MOZ_ASSERT(!isFinished()); - MOZ_ASSERT(srcStart_ < funcSrcBegin); - MOZ_ASSERT(funcSrcBegin < funcSrcEnd); - ExportedFunction func(name, funcSrcBegin - srcStart_, funcSrcEnd - srcStart_, - maybeFieldName); - return exports_.length() < UINT32_MAX && exports_.append(mozilla::Move(func)); - } - unsigned numExportedFunctions() const { - return exports_.length(); - } - const ExportedFunction& exportedFunction(unsigned i) const { - return exports_[i]; - } - ExportedFunction& exportedFunction(unsigned i) { - return exports_[i]; - } - void setAsyncInterruptOffset(uint32_t o) { - staticLinkData_.pod.interruptExitOffset = o; - } - void setOnOutOfBoundsExitOffset(uint32_t o) { - staticLinkData_.pod.outOfBoundsExitOffset = o; - } /*************************************************************************/ + // A module isFinished() when compilation completes. After being finished, + // a module must be statically and dynamically linked before execution. - // finish() is called once the entire module has been parsed (via - // tokenStream) and all function and entry/exit trampolines have been - // generated (via masm). After this function, the module must still be - // statically and dynamically linked before code can be run. 
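Note: exit data and function-pointer tables are carved out of the global data section by allocateGlobalBytes, removed above and reincarnated in ModuleGenerator. Its overflow-checked, aligned bump allocation, restated as a standalone sketch (assumes align is a power of two, as all callers guarantee):

    #include <cstdint>

    static bool AllocateGlobalBytes(uint32_t* globalBytes, uint32_t bytes,
                                    uint32_t align, uint32_t* globalDataOffset)
    {
        uint32_t pad = (align - (*globalBytes & (align - 1))) & (align - 1);
        if (UINT32_MAX - *globalBytes < pad + bytes)
            return false;                    // 32-bit section would overflow
        *globalBytes += pad;                 // skip padding up to alignment
        *globalDataOffset = *globalBytes;    // offset of this allocation
        *globalBytes += bytes;
        return true;
    }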
- bool finish(ExclusiveContext* cx, frontend::TokenStream& ts, jit::MacroAssembler& masm); + bool isFinished() const { + return !!wasm_; + } + void finish(wasm::Module* wasm, wasm::UniqueStaticLinkData linkData, + uint32_t endBeforeCurly, uint32_t endAfterCurly); /*************************************************************************/ - // These accessor functions can be used after finish(): + // These accessor functions can only be used after finish(): - uint8_t* codeBase() const { + wasm::Module& wasm() const { MOZ_ASSERT(isFinished()); - MOZ_ASSERT(uintptr_t(code_) % AsmJSPageSize == 0); - return code_; + return *wasm_; } - uint32_t codeBytes() const { - MOZ_ASSERT(isFinished()); - return pod.codeBytes_; - } - bool containsCodePC(void* pc) const { - MOZ_ASSERT(isFinished()); - return pc >= code_ && pc < (code_ + codeBytes()); - } - - // The range [0, functionBytes) is a subrange of [0, codeBytes) that - // contains only function body code, not the stub code. This distinction is - // used by the async interrupt handler to only interrupt when the pc is in - // function code which, in turn, simplifies reasoning about how stubs - // enter/exit. - void setFunctionBytes(uint32_t functionBytes) { - MOZ_ASSERT(!isFinished()); - MOZ_ASSERT(!pod.functionBytes_); - pod.functionBytes_ = functionBytes; - } - uint32_t functionBytes() const { - MOZ_ASSERT(isFinished()); - return pod.functionBytes_; - } - bool containsFunctionPC(void* pc) const { - MOZ_ASSERT(isFinished()); - return pc >= code_ && pc < (code_ + functionBytes()); - } - - uint32_t globalBytes() const { - MOZ_ASSERT(isFinished()); - return pod.globalBytes_; - } - - unsigned numFFIs() const { + uint32_t numFFIs() const { MOZ_ASSERT(isFinished()); return pod.numFFIs_; } @@ -1203,156 +587,17 @@ class AsmJSModule MOZ_ASSERT(isFinished()); return srcStart_ + pod.srcLengthWithRightBrace_; } - - // Lookup a callsite by the return pc (from the callee to the caller). - // Return null if no callsite was found. - const wasm::CallSite* lookupCallSite(void* returnAddress) const; - - // Lookup the name the code range containing the given pc. Return null if no - // code range was found. - const CodeRange* lookupCodeRange(void* pc) const; - - // Lookup a heap access site by the pc which performs the access. Return - // null if no heap access was found. - const wasm::HeapAccess* lookupHeapAccess(void* pc) const; - - // The global data section is placed after the executable code (i.e., at - // offset codeBytes_) in the module's linear allocation. The global data - // starts with some fixed allocations followed by interleaved global, - // function-pointer table and exit allocations. 
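Note: lookupCallSite/lookupCodeRange/lookupHeapAccess move to wasm::Module unchanged in spirit: the tables are kept sorted by offset, so each query can be a single binary search. A sketch under that assumption (std::vector stands in for the module's own vectors; not the real implementation):

    #include <algorithm>
    #include <cstdint>
    #include <vector>

    struct Range { uint32_t begin; uint32_t end; };  // sorted, disjoint

    const Range* LookupCodeRange(const std::vector<Range>& ranges, uint32_t pc) {
        auto it = std::upper_bound(ranges.begin(), ranges.end(), pc,
                                   [](uint32_t v, const Range& r) { return v < r.begin; });
        if (it == ranges.begin())
            return nullptr;                  // pc precedes every range
        --it;                                // last range with begin <= pc
        return pc < it->end ? &*it : nullptr;
    }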
- uint32_t offsetOfGlobalData() const { - MOZ_ASSERT(isFinished()); - return pod.codeBytes_; - } - uint8_t* globalData() const { - MOZ_ASSERT(isFinished()); - return codeBase() + offsetOfGlobalData(); - } - static void assertGlobalDataOffsets() { - static_assert(wasm::ActivationGlobalDataOffset == 0, - "an AsmJSActivation* data goes first"); - static_assert(wasm::HeapGlobalDataOffset == wasm::ActivationGlobalDataOffset + sizeof(void*), - "then a pointer to the heap*"); - static_assert(wasm::NaN64GlobalDataOffset == wasm::HeapGlobalDataOffset + sizeof(uint8_t*), - "then a 64-bit NaN"); - static_assert(wasm::NaN32GlobalDataOffset == wasm::NaN64GlobalDataOffset + sizeof(double), - "then a 32-bit NaN"); - static_assert(sInitialGlobalDataBytes == wasm::NaN32GlobalDataOffset + sizeof(float), - "then all the normal global data (globals, exits, func-ptr-tables)"); - } - static const uint32_t sInitialGlobalDataBytes = wasm::NaN32GlobalDataOffset + sizeof(float); - - AsmJSActivation*& activation() const { - MOZ_ASSERT(isFinished()); - return *(AsmJSActivation**)(globalData() + wasm::ActivationGlobalDataOffset); - } - bool active() const { - return activation() != nullptr; - } - private: - // The pointer may reference shared memory, use with care. - // Generally you want to use maybeHeap(), not heapDatum(). - uint8_t*& heapDatum() const { - MOZ_ASSERT(isFinished()); - return *(uint8_t**)(globalData() + wasm::HeapGlobalDataOffset); - } - public: - - /*************************************************************************/ - // These functions are called after finish() but before staticallyLink(): - - bool addRelativeLink(RelativeLink link) { - MOZ_ASSERT(isFinished()); - return staticLinkData_.relativeLinks.append(link); + bool staticallyLink(ExclusiveContext* cx) { + return wasm_->staticallyLink(cx, *linkData_); } - // A module is serialized after it is finished but before it is statically - // linked. (Technically, it could be serialized after static linking, but it - // would still need to be statically linked on deserialization.) + // See WASM_DECLARE_SERIALIZABLE. size_t serializedSize() const; uint8_t* serialize(uint8_t* cursor) const; const uint8_t* deserialize(ExclusiveContext* cx, const uint8_t* cursor); - - // Additionally, this function is called to flush the i-cache after - // deserialization and cloning (but still before static linking, to prevent - // a bunch of expensive micro-flushes). - void setAutoFlushICacheRange(); - - /*************************************************************************/ - - // After a module is finished compiling or deserializing, it is "statically - // linked" which specializes the code to its current address (this allows - // code to be relocated between serialization and deserialization). - void staticallyLink(ExclusiveContext* cx); - - // After a module is statically linked, it is "dynamically linked" which - // specializes it to a particular set of arguments. In particular, this - // binds the code to a particular heap (via initHeap) and set of global - // variables. A given asm.js module cannot be dynamically linked more than - // once so, if JS tries, the module is cloned. When linked, an asm.js module - // is kept in a list so that it can be updated if the linked buffer is - // detached. 
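Note: the removed static asserts pin down the fixed head of the global data section; restated as constants (the values follow directly from the asserts above):

    #include <cstdint>

    constexpr uint32_t ActivationGlobalDataOffset = 0;                // AsmJSActivation*
    constexpr uint32_t HeapGlobalDataOffset =
        ActivationGlobalDataOffset + sizeof(void*);                   // heap pointer
    constexpr uint32_t NaN64GlobalDataOffset =
        HeapGlobalDataOffset + sizeof(uint8_t*);                      // canonical double NaN
    constexpr uint32_t NaN32GlobalDataOffset =
        NaN64GlobalDataOffset + sizeof(double);                       // canonical float NaN
    constexpr uint32_t InitialGlobalDataBytes =
        NaN32GlobalDataOffset + sizeof(float);                        // then globals/exits/tables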
-    void setIsDynamicallyLinked(JSRuntime* rt) {
-        MOZ_ASSERT(isFinished());
-        MOZ_ASSERT(!isDynamicallyLinked());
-        dynamicallyLinked_ = true;
-        nextLinked_ = rt->linkedAsmJSModules;
-        prevLinked_ = &rt->linkedAsmJSModules;
-        if (nextLinked_)
-            nextLinked_->prevLinked_ = &nextLinked_;
-        rt->linkedAsmJSModules = this;
-        MOZ_ASSERT(isDynamicallyLinked());
-    }
-
-    void initHeap(Handle<ArrayBufferObjectMaybeShared*> heap, JSContext* cx);
-    bool changeHeap(Handle<ArrayBufferObjectMaybeShared*> newHeap, JSContext* cx);
-    bool detachHeap(JSContext* cx);
-
-    bool clone(JSContext* cx, ScopedJSDeletePtr<AsmJSModule>* moduleOut) const;
-
-    /*************************************************************************/
-    // Functions that can be called after dynamic linking succeeds:
-
-    AsmJSModule* nextLinked() const {
-        MOZ_ASSERT(isDynamicallyLinked());
-        return nextLinked_;
-    }
-    bool hasDetachedHeap() const {
-        MOZ_ASSERT(isDynamicallyLinked());
-        return hasArrayView() && !heapDatum();
-    }
-    CodePtr entryTrampoline(const ExportedFunction& func) const {
-        MOZ_ASSERT(isDynamicallyLinked());
-        MOZ_ASSERT(!func.isChangeHeap());
-        return JS_DATA_TO_FUNC_PTR(CodePtr, code_ + func.pod.codeOffset_);
-    }
-    uint8_t* interruptExit() const {
-        MOZ_ASSERT(isDynamicallyLinked());
-        return interruptExit_;
-    }
-    uint8_t* outOfBoundsExit() const {
-        MOZ_ASSERT(isDynamicallyLinked());
-        return outOfBoundsExit_;
-    }
-    SharedMem<uint8_t*> maybeHeap() const {
-        MOZ_ASSERT(isDynamicallyLinked());
-        return hasArrayView() && isSharedView() ? SharedMem<uint8_t*>::shared(heapDatum())
-                                                : SharedMem<uint8_t*>::unshared(heapDatum());
-    }
-    ArrayBufferObjectMaybeShared* maybeHeapBufferObject() const {
-        MOZ_ASSERT(isDynamicallyLinked());
-        return maybeHeap_;
-    }
-    size_t heapLength() const;
-    bool profilingEnabled() const {
-        MOZ_ASSERT(isDynamicallyLinked());
-        return profilingEnabled_;
-    }
-    void setProfilingEnabled(bool enabled, JSContext* cx);
-    void setInterrupted(bool interrupted) {
-        MOZ_ASSERT(isDynamicallyLinked());
-        interrupted_ = interrupted;
-    }
+    bool clone(JSContext* cx, HandleAsmJSModule moduleObj) const;
+    void addSizeOfMisc(mozilla::MallocSizeOf mallocSizeOf, size_t* asmJSModuleCode,
+                       size_t* asmJSModuleData);
 };

 // Store the just-parsed module in the cache using AsmJSCacheOps.
@@ -1362,14 +607,12 @@ StoreAsmJSModuleInCache(AsmJSParser& parser, ExclusiveContext* cx);

 // Attempt to load the asm.js module that is about to be parsed from the cache
-// using AsmJSCacheOps. On cache hit, *module will be non-null. Note: the
-// return value indicates whether or not an error was encountered, not whether
-// there was a cache hit.
+// using AsmJSCacheOps. The return value indicates whether an error was
+// reported. The loadedFromCache outparam indicates whether the module was
+// successfully loaded and stored in moduleObj.
 extern bool
-LookupAsmJSModuleInCache(ExclusiveContext* cx,
-                         AsmJSParser& parser,
-                         ScopedJSDeletePtr<AsmJSModule>* module,
-                         ScopedJSFreePtr<char>* compilationTimeReport);
+LookupAsmJSModuleInCache(ExclusiveContext* cx, AsmJSParser& parser, HandleAsmJSModule moduleObj,
+                         bool* loadedFromCache, UniqueChars* compilationTimeReport);

 // This function must be called for every detached ArrayBuffer.
 extern bool
@@ -1386,10 +629,10 @@ class AsmJSModuleObject : public NativeObject
   public:
     static const unsigned RESERVED_SLOTS = 1;

-    // On success, return an AsmJSModuleClass JSObject that has taken ownership
-    // (and release()ed) the given module.
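Note: the removed setIsDynamicallyLinked threads the module onto a runtime-wide intrusive list (prevLinked_ points at whichever pointer currently points at us) so that detaching an ArrayBuffer can visit every linked module. The idiom, reduced to a sketch:

    struct Linked {
        Linked** prev = nullptr;  // address of the pointer that points at us
        Linked*  next = nullptr;
    };

    void LinkAtHead(Linked** head, Linked* m) {
        m->next = *head;
        m->prev = head;
        if (m->next)
            m->next->prev = &m->next;
        *head = m;
    }

    void Unlink(Linked* m) {      // O(1); no list head needed
        if (m->next)
            m->next->prev = m->prev;
        *m->prev = m->next;
    }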
- static AsmJSModuleObject* create(ExclusiveContext* cx, ScopedJSDeletePtr* module); + static AsmJSModuleObject* create(ExclusiveContext* cx); + bool hasModule() const; + void setModule(AsmJSModule* module); AsmJSModule& module() const; void addSizeOfMisc(mozilla::MallocSizeOf mallocSizeOf, size_t* asmJSModuleCode, diff --git a/js/src/asmjs/AsmJSValidate.cpp b/js/src/asmjs/AsmJSValidate.cpp index 7fd3cdeacc5..98b25a9bde4 100644 --- a/js/src/asmjs/AsmJSValidate.cpp +++ b/js/src/asmjs/AsmJSValidate.cpp @@ -19,7 +19,6 @@ #include "asmjs/AsmJSValidate.h" #include "mozilla/Move.h" -#include "mozilla/UniquePtr.h" #include "jsmath.h" #include "jsprf.h" @@ -49,7 +48,6 @@ using mozilla::IsNaN; using mozilla::IsNegativeZero; using mozilla::Move; using mozilla::PositiveInfinity; -using mozilla::UniquePtr; using JS::AsmJSOption; using JS::GenericNaN; @@ -1164,13 +1162,13 @@ class MOZ_STACK_CLASS ModuleValidator Scalar::Type type; }; - class ExitDescriptor + class ImportDescriptor { PropertyName* name_; const LifoSig* sig_; public: - ExitDescriptor(PropertyName* name, const LifoSig& sig) + ImportDescriptor(PropertyName* name, const LifoSig& sig) : name_(name), sig_(&sig) {} @@ -1189,7 +1187,7 @@ class MOZ_STACK_CLASS ModuleValidator static HashNumber hash(const Lookup& l) { return HashGeneric(l.name_, l.sig_.hash()); } - static bool match(const ExitDescriptor& lhs, const Lookup& rhs) { + static bool match(const ImportDescriptor& lhs, const Lookup& rhs) { return lhs.name_ == rhs.name_ && *lhs.sig_ == rhs.sig_; } }; @@ -1202,36 +1200,37 @@ class MOZ_STACK_CLASS ModuleValidator typedef Vector ArrayViewVector; public: - typedef HashMap ExitMap; + typedef HashMap ImportMap; private: - ExclusiveContext* cx_; - AsmJSParser& parser_; + ExclusiveContext* cx_; + AsmJSParser& parser_; - ModuleGenerator mg_; + ModuleGenerator mg_; + AsmJSModule* module_; - LifoAlloc validationLifo_; - FuncVector functions_; - FuncPtrTableVector funcPtrTables_; - GlobalMap globals_; - ArrayViewVector arrayViews_; - ExitMap exits_; + LifoAlloc validationLifo_; + FuncVector functions_; + FuncPtrTableVector funcPtrTables_; + GlobalMap globals_; + ArrayViewVector arrayViews_; + ImportMap imports_; - MathNameMap standardLibraryMathNames_; - AtomicsNameMap standardLibraryAtomicsNames_; - SimdOperationNameMap standardLibrarySimdOpNames_; + MathNameMap standardLibraryMathNames_; + AtomicsNameMap standardLibraryAtomicsNames_; + SimdOperationNameMap standardLibrarySimdOpNames_; - ParseNode* moduleFunctionNode_; - PropertyName* moduleFunctionName_; + ParseNode* moduleFunctionNode_; + PropertyName* moduleFunctionName_; - UniquePtr errorString_; - uint32_t errorOffset_; - bool errorOverRecursed_; + UniqueChars errorString_; + uint32_t errorOffset_; + bool errorOverRecursed_; - bool canValidateChangeHeap_; - bool hasChangeHeap_; - bool supportsSimd_; - bool atomicsPresent_; + bool canValidateChangeHeap_; + bool hasChangeHeap_; + bool supportsSimd_; + bool atomicsPresent_; public: ModuleValidator(ExclusiveContext* cx, AsmJSParser& parser) @@ -1243,7 +1242,7 @@ class MOZ_STACK_CLASS ModuleValidator funcPtrTables_(cx), globals_(cx), arrayViews_(cx), - exits_(cx), + imports_(cx), standardLibraryMathNames_(cx), standardLibraryAtomicsNames_(cx), standardLibrarySimdOpNames_(cx), @@ -1303,8 +1302,8 @@ class MOZ_STACK_CLASS ModuleValidator public: - bool init() { - if (!globals_.init() || !exits_.init()) + bool init(HandleAsmJSModule moduleObj) { + if (!globals_.init() || !imports_.init()) return false; if (!standardLibraryMathNames_.init() || @@ 
-1371,11 +1370,34 @@ class MOZ_STACK_CLASS ModuleValidator // js::FunctionToString. bool strict = parser_.pc->sc->strict() && !parser_.pc->sc->hasExplicitUseStrict(); - return mg_.init(parser_.ss, srcStart, srcBodyStart, strict); + module_ = cx_->new_(parser_.ss, srcStart, srcBodyStart, strict); + if (!module_) + return false; + + moduleObj->setModule(module_); + + return mg_.init(); } - bool finish(ScopedJSDeletePtr* module, SlowFunctionVector* slowFuncs) { - return mg_.finish(parser_.tokenStream, module, slowFuncs); + bool finish(SlowFunctionVector* slowFuncs) { + uint32_t endBeforeCurly = tokenStream().currentToken().pos.end; + TokenPos pos; + JS_ALWAYS_TRUE(tokenStream().peekTokenPos(&pos, TokenStream::Operand)); + uint32_t endAfterCurly = pos.end; + + auto usesHeap = Module::HeapBool(module_->hasArrayView()); + auto sharedHeap = Module::SharedBool(module_->isSharedView()); + UniqueChars filename = make_string_copy(parser_.ss->filename()); + if (!filename) + return false; + + UniqueStaticLinkData linkData; + Module* wasm = mg_.finish(usesHeap, sharedHeap, Move(filename), &linkData, slowFuncs); + if (!wasm) + return false; + + module_->finish(wasm, Move(linkData), endBeforeCurly, endAfterCurly); + return true; } // Mutable interface. @@ -1384,140 +1406,127 @@ class MOZ_STACK_CLASS ModuleValidator void initImportArgumentName(PropertyName* n) { module().initImportArgumentName(n); } void initBufferArgumentName(PropertyName* n) { module().initBufferArgumentName(n); } - bool addGlobalVarInit(PropertyName* varName, const NumLit& lit, bool isConst) { - // The type of a const is the exact type of the literal (since its value - // cannot change) which is more precise than the corresponding vartype. - Type type = isConst ? Type::lit(lit) : Type::var(lit.type()); + bool addGlobalVarInit(PropertyName* var, const NumLit& lit, bool isConst) { uint32_t globalDataOffset; - if (!module().addGlobalVarInit(lit.value(), &globalDataOffset)) + if (!mg_.allocateGlobalVar(lit.type(), &globalDataOffset)) return false; Global::Which which = isConst ? Global::ConstantLiteral : Global::Variable; Global* global = validationLifo_.new_(which); if (!global) return false; global->u.varOrConst.globalDataOffset_ = globalDataOffset; - global->u.varOrConst.type_ = type.which(); + global->u.varOrConst.type_ = (isConst ? Type::lit(lit) : Type::var(lit.type())).which(); if (isConst) global->u.varOrConst.literalValue_ = lit; - return globals_.putNew(varName, global); + return globals_.putNew(var, global) && + module().addGlobalVarInit(lit.value(), globalDataOffset); } - bool addGlobalVarImport(PropertyName* varName, PropertyName* fieldName, ValType importType, - bool isConst) - { + bool addGlobalVarImport(PropertyName* var, PropertyName* field, ValType type, bool isConst) { uint32_t globalDataOffset; - if (!module().addGlobalVarImport(fieldName, importType, &globalDataOffset)) + if (!mg_.allocateGlobalVar(type, &globalDataOffset)) return false; Global::Which which = isConst ? 
Global::ConstantImport : Global::Variable; Global* global = validationLifo_.new_(which); if (!global) return false; global->u.varOrConst.globalDataOffset_ = globalDataOffset; - global->u.varOrConst.type_ = Type::var(importType).which(); - return globals_.putNew(varName, global); + global->u.varOrConst.type_ = Type::var(type).which(); + return globals_.putNew(var, global) && + module().addGlobalVarImport(field, type, globalDataOffset); } - bool addArrayView(PropertyName* varName, Scalar::Type vt, PropertyName* maybeField) - { - if (!arrayViews_.append(ArrayView(varName, vt))) + bool addArrayView(PropertyName* var, Scalar::Type vt, PropertyName* maybeField) { + if (!arrayViews_.append(ArrayView(var, vt))) return false; Global* global = validationLifo_.new_(Global::ArrayView); if (!global) return false; - if (!module().addArrayView(vt, maybeField)) - return false; global->u.viewInfo.viewType_ = vt; - return globals_.putNew(varName, global); + return globals_.putNew(var, global) && + module().addArrayView(vt, maybeField); } - bool addMathBuiltinFunction(PropertyName* varName, AsmJSMathBuiltinFunction func, - PropertyName* fieldName) + bool addMathBuiltinFunction(PropertyName* var, AsmJSMathBuiltinFunction func, + PropertyName* field) { - if (!module().addMathBuiltinFunction(func, fieldName)) - return false; Global* global = validationLifo_.new_(Global::MathBuiltinFunction); if (!global) return false; global->u.mathBuiltinFunc_ = func; - return globals_.putNew(varName, global); + return globals_.putNew(var, global) && + module().addMathBuiltinFunction(func, field); } private: - bool addGlobalDoubleConstant(PropertyName* varName, double constant) { + bool addGlobalDoubleConstant(PropertyName* var, double constant) { Global* global = validationLifo_.new_(Global::ConstantLiteral); if (!global) return false; global->u.varOrConst.type_ = Type::Double; global->u.varOrConst.literalValue_ = NumLit(NumLit::Double, DoubleValue(constant)); - return globals_.putNew(varName, global); + return globals_.putNew(var, global); } public: - bool addMathBuiltinConstant(PropertyName* varName, double constant, PropertyName* fieldName) { - if (!module().addMathBuiltinConstant(constant, fieldName)) - return false; - return addGlobalDoubleConstant(varName, constant); + bool addMathBuiltinConstant(PropertyName* var, double constant, PropertyName* field) { + return addGlobalDoubleConstant(var, constant) && + module().addMathBuiltinConstant(constant, field); } - bool addGlobalConstant(PropertyName* varName, double constant, PropertyName* fieldName) { - if (!module().addGlobalConstant(constant, fieldName)) - return false; - return addGlobalDoubleConstant(varName, constant); + bool addGlobalConstant(PropertyName* var, double constant, PropertyName* field) { + return addGlobalDoubleConstant(var, constant) && + module().addGlobalConstant(constant, field); } - bool addAtomicsBuiltinFunction(PropertyName* varName, AsmJSAtomicsBuiltinFunction func, - PropertyName* fieldName) + bool addAtomicsBuiltinFunction(PropertyName* var, AsmJSAtomicsBuiltinFunction func, + PropertyName* field) { - if (!module().addAtomicsBuiltinFunction(func, fieldName)) - return false; Global* global = validationLifo_.new_(Global::AtomicsBuiltinFunction); if (!global) return false; atomicsPresent_ = true; global->u.atomicsBuiltinFunc_ = func; - return globals_.putNew(varName, global); + return globals_.putNew(var, global) && + module().addAtomicsBuiltinFunction(func, field); } - bool addSimdCtor(PropertyName* varName, AsmJSSimdType type, PropertyName* 
fieldName) { - if (!module().addSimdCtor(type, fieldName)) - return false; + bool addSimdCtor(PropertyName* var, AsmJSSimdType type, PropertyName* field) { Global* global = validationLifo_.new_(Global::SimdCtor); if (!global) return false; global->u.simdCtorType_ = type; - return globals_.putNew(varName, global); + return globals_.putNew(var, global) && + module().addSimdCtor(type, field); } - bool addSimdOperation(PropertyName* varName, AsmJSSimdType type, AsmJSSimdOperation op, - PropertyName* typeVarName, PropertyName* opName) + bool addSimdOperation(PropertyName* var, AsmJSSimdType type, AsmJSSimdOperation op, + PropertyName* opName) { - if (!module().addSimdOperation(type, op, opName)) - return false; Global* global = validationLifo_.new_(Global::SimdOperation); if (!global) return false; global->u.simdOp.type_ = type; global->u.simdOp.which_ = op; - return globals_.putNew(varName, global); + return globals_.putNew(var, global) && + module().addSimdOperation(type, op, opName); } bool addByteLength(PropertyName* name) { canValidateChangeHeap_ = true; - if (!module().addByteLength()) - return false; Global* global = validationLifo_.new_(Global::ByteLength); - return global && globals_.putNew(name, global); + return global && globals_.putNew(name, global) && + module().addByteLength(); } bool addChangeHeap(PropertyName* name, ParseNode* fn, uint32_t mask, uint32_t min, uint32_t max) { hasChangeHeap_ = true; - module().addChangeHeap(mask, min, max); Global* global = validationLifo_.new_(Global::ChangeHeap); if (!global) return false; global->u.changeHeap.srcBegin_ = fn->pn_pos.begin; global->u.changeHeap.srcEnd_ = fn->pn_pos.end; - return globals_.putNew(name, global); + return globals_.putNew(name, global) && + module().addChangeHeap(mask, min, max); } - bool addArrayViewCtor(PropertyName* varName, Scalar::Type vt, PropertyName* fieldName) { + bool addArrayViewCtor(PropertyName* var, Scalar::Type vt, PropertyName* field) { Global* global = validationLifo_.new_(Global::ArrayViewCtor); if (!global) return false; - if (!module().addArrayViewCtor(vt, fieldName)) - return false; global->u.viewInfo.viewType_ = vt; - return globals_.putNew(varName, global); + return globals_.putNew(var, global) && + module().addArrayViewCtor(vt, field); } - bool addFFI(PropertyName* varName, PropertyName* field) { + bool addFFI(PropertyName* var, PropertyName* field) { Global* global = validationLifo_.new_(Global::FFI); if (!global) return false; @@ -1525,19 +1534,22 @@ class MOZ_STACK_CLASS ModuleValidator if (!module().addFFI(field, &index)) return false; global->u.ffiIndex_ = index; - return globals_.putNew(varName, global); + return globals_.putNew(var, global); } - bool addExportedFunction(const Func& func, PropertyName* maybeFieldName) { + bool addExport(const Func& func, PropertyName* maybeFieldName) { MallocSig::ArgVector args; if (!args.appendAll(func.sig().args())) return false; MallocSig sig(Move(args), func.sig().ret()); - return module().addExportedFunction(func.name(), func.index(), func.srcBegin(), - func.srcEnd(), maybeFieldName, Move(sig)); + uint32_t wasmIndex; + if (!mg_.declareExport(Move(sig), func.index(), &wasmIndex)) + return false; + return module().addExport(func.name(), maybeFieldName, wasmIndex, + func.srcBegin(), func.srcEnd()); } - bool addExportedChangeHeap(PropertyName* name, const Global& g, PropertyName* maybeFieldName) { - return module().addExportedChangeHeap(name, g.changeHeapSrcBegin(), g.changeHeapSrcEnd(), - maybeFieldName); + bool addChangeHeapExport(PropertyName* name, 
const Global& g, PropertyName* maybeFieldName) { + return module().addExport(name, maybeFieldName, AsmJSModule::Export::ChangeHeap, + g.changeHeapSrcBegin(), g.changeHeapSrcEnd()); } private: const LifoSig* getLifoSig(const LifoSig& sig) { @@ -1582,29 +1594,31 @@ class MOZ_STACK_CLASS ModuleValidator FuncPtrTable* t = validationLifo_.new_(cx_, name, firstUse, *lifoSig, mask); return t && funcPtrTables_.append(t); } - bool defineFuncPtrTable(uint32_t funcPtrTableIndex, ModuleGenerator::FuncIndexVector&& elems) { + bool defineFuncPtrTable(uint32_t funcPtrTableIndex, const Vector& elems) { FuncPtrTable& table = *funcPtrTables_[funcPtrTableIndex]; if (table.defined()) return false; table.define(); - return mg_.defineFuncPtrTable(funcPtrTableIndex, Move(elems)); + mg_.defineFuncPtrTable(funcPtrTableIndex, elems); + return true; } - bool addExit(PropertyName* name, MallocSig&& sig, unsigned ffiIndex, unsigned* exitIndex, + bool addImport(PropertyName* name, MallocSig&& sig, unsigned ffiIndex, unsigned* importIndex, const LifoSig** lifoSig) { - ExitDescriptor::Lookup lookup(name, sig); - ExitMap::AddPtr p = exits_.lookupForAdd(lookup); + ImportDescriptor::Lookup lookup(name, sig); + ImportMap::AddPtr p = imports_.lookupForAdd(lookup); if (p) { *lifoSig = &p->key().sig(); - *exitIndex = p->value(); + *importIndex = p->value(); return true; } *lifoSig = getLifoSig(sig); if (!*lifoSig) return false; - if (!module().addExit(Move(sig), ffiIndex, exitIndex)) + if (!mg_.declareImport(Move(sig), importIndex)) return false; - return exits_.add(p, ExitDescriptor(name, **lifoSig), *exitIndex); + return imports_.add(p, ImportDescriptor(name, **lifoSig), *importIndex) && + module().addImport(ffiIndex, *importIndex); } bool tryOnceToValidateChangeHeap() { @@ -1636,7 +1650,7 @@ class MOZ_STACK_CLASS ModuleValidator MOZ_ASSERT(errorOffset_ == UINT32_MAX); MOZ_ASSERT(str); errorOffset_ = offset; - errorString_ = DuplicateString(cx_, str); + errorString_ = make_string_copy(str); return false; } @@ -1692,7 +1706,7 @@ class MOZ_STACK_CLASS ModuleValidator ParseNode* moduleFunctionNode() const { return moduleFunctionNode_; } PropertyName* moduleFunctionName() const { return moduleFunctionName_; } ModuleGenerator& mg() { return mg_; } - AsmJSModule& module() const { return mg_.module(); } + AsmJSModule& module() const { return *module_; } AsmJSParser& parser() const { return parser_; } TokenStream& tokenStream() const { return parser_.tokenStream; } bool supportsSimd() const { return supportsSimd_; } @@ -1753,9 +1767,19 @@ class MOZ_STACK_CLASS ModuleValidator return false; } - void startFunctionBodies() { - if (atomicsPresent_) + bool startFunctionBodies() { + if (atomicsPresent_) { +#if defined(ENABLE_SHARED_ARRAY_BUFFER) module().setViewsAreShared(); +#else + return failOffset(parser_.tokenStream.currentToken().pos.begin, + "shared memory and atomics not supported by this build"); +#endif + } + return true; + } + bool finishFunctionBodies() { + return mg_.finishFuncs(); } }; @@ -2680,8 +2704,7 @@ CheckGlobalSimdImport(ModuleValidator& m, ParseNode* initNode, PropertyName* var static bool CheckGlobalSimdOperationImport(ModuleValidator& m, const ModuleValidator::Global* global, - ParseNode* initNode, PropertyName* varName, PropertyName* ctorVarName, - PropertyName* opName) + ParseNode* initNode, PropertyName* varName, PropertyName* opName) { AsmJSSimdType simdType = global->simdCtorType(); AsmJSSimdOperation simdOp; @@ -2689,7 +2712,7 @@ CheckGlobalSimdOperationImport(ModuleValidator& m, const ModuleValidator::Global 
return m.failName(initNode, "'%s' is not a standard SIMD operation", opName); if (!IsSimdValidOperationType(simdType, simdOp)) return m.failName(initNode, "'%s' is not an operation supported by the SIMD type", opName); - return m.addSimdOperation(varName, simdType, simdOp, ctorVarName, opName); + return m.addSimdOperation(varName, simdType, simdOp, opName); } static bool @@ -2751,7 +2774,7 @@ CheckGlobalDotImport(ModuleValidator& m, PropertyName* varName, ParseNode* initN if (!global->isSimdCtor()) return m.failName(base, "expecting SIMD constructor name, got %s", field); - return CheckGlobalSimdOperationImport(m, global, initNode, varName, base->name(), field); + return CheckGlobalSimdOperationImport(m, global, initNode, varName, field); } static bool @@ -4054,8 +4077,7 @@ CheckFuncPtrCall(FunctionValidator& f, ParseNode* callNode, ExprType ret, Type* if (!CheckFuncPtrTableAgainstExisting(f.m(), tableNode, name, sig, mask, &funcPtrTableIndex)) return false; - uint32_t globalDataOffset = f.m().module().funcPtrTable(funcPtrTableIndex).globalDataOffset(); - f.patch32(globalDataOffsetAt, globalDataOffset); + f.patch32(globalDataOffsetAt, f.m().mg().funcPtrTableGlobalDataOffset(funcPtrTableIndex)); f.patchSig(sigAt, &f.m().funcPtrTable(funcPtrTableIndex).sig()); *type = Type::ret(ret); @@ -4099,7 +4121,7 @@ CheckFFICall(FunctionValidator& f, ParseNode* callNode, unsigned ffiIndex, ExprT // Global data offset size_t offsetAt = f.temp32(); - // Pointer to the exit's signature in the module's lifo + // Pointer to the import's signature in the module's lifo size_t sigAt = f.tempPtr(); // Call node position (asm.js specific) WriteCallLineCol(f, callNode); @@ -4110,13 +4132,12 @@ CheckFFICall(FunctionValidator& f, ParseNode* callNode, unsigned ffiIndex, ExprT MallocSig sig(Move(args), ret); - unsigned exitIndex = 0; + unsigned importIndex = 0; const LifoSig* lifoSig = nullptr; - if (!f.m().addExit(calleeName, Move(sig), ffiIndex, &exitIndex, &lifoSig)) + if (!f.m().addImport(calleeName, Move(sig), ffiIndex, &importIndex, &lifoSig)) return false; - JS_STATIC_ASSERT(offsetof(AsmJSModule::ExitDatum, exit) == 0); - f.patch32(offsetAt, f.module().exit(exitIndex).globalDataOffset()); + f.patch32(offsetAt, f.m().mg().importExitGlobalDataOffset(importIndex)); f.patchSig(sigAt, lifoSig); *type = Type::ret(ret); return true; @@ -5856,7 +5877,7 @@ enum class InterruptCheckPosition { static void MaybeAddInterruptCheck(FunctionValidator& f, InterruptCheckPosition pos, ParseNode* pn) { - if (f.m().module().usesSignalHandlersForInterrupt()) + if (f.m().mg().args().useSignalHandlersForInterrupt) return; switch (pos) { @@ -6685,7 +6706,7 @@ CheckFuncPtrTable(ModuleValidator& m, ParseNode* var) unsigned mask = length - 1; - ModuleGenerator::FuncIndexVector elems; + Vector elemFuncIndices(m.cx()); const LifoSig* sig = nullptr; for (ParseNode* elem = ListHead(arrayLiteral); elem; elem = NextNode(elem)) { if (!elem->isKind(PNK_NAME)) @@ -6703,7 +6724,7 @@ CheckFuncPtrTable(ModuleValidator& m, ParseNode* var) sig = &func->sig(); } - if (!elems.append(func->index())) + if (!elemFuncIndices.append(func->index())) return false; } @@ -6711,7 +6732,7 @@ CheckFuncPtrTable(ModuleValidator& m, ParseNode* var) if (!CheckFuncPtrTableAgainstExisting(m, var, var->name(), *sig, mask, &funcPtrTableIndex)) return false; - if (!m.defineFuncPtrTable(funcPtrTableIndex, Move(elems))) + if (!m.defineFuncPtrTable(funcPtrTableIndex, elemFuncIndices)) return m.fail(var, "duplicate function-pointer definition"); return true; @@ -6756,10 
+6777,10 @@ CheckModuleExportFunction(ModuleValidator& m, ParseNode* pn, PropertyName* maybe return m.failName(pn, "exported function name '%s' not found", funcName); if (global->which() == ModuleValidator::Global::Function) - return m.addExportedFunction(m.function(global->funcIndex()), maybeFieldName); + return m.addExport(m.function(global->funcIndex()), maybeFieldName); if (global->which() == ModuleValidator::Global::ChangeHeap) - return m.addExportedChangeHeap(funcName, *global, maybeFieldName); + return m.addChangeHeapExport(funcName, *global, maybeFieldName); return m.failName(pn, "'%s' is not a function", funcName); } @@ -6842,14 +6863,13 @@ CheckModuleEnd(ModuleValidator &m) } static bool -CheckModule(ExclusiveContext* cx, AsmJSParser& parser, ParseNode* stmtList, - ScopedJSDeletePtr* module, unsigned* time, - SlowFunctionVector* slowFuncs) +CheckModule(ExclusiveContext* cx, AsmJSParser& parser, ParseNode* stmtList, HandleAsmJSModule obj, + unsigned* time, SlowFunctionVector* slowFuncs) { int64_t before = PRMJ_Now(); ModuleValidator m(cx, parser); - if (!m.init()) + if (!m.init(obj)) return false; if (PropertyName* moduleFunctionName = FunctionName(m.moduleFunctionNode())) { @@ -6873,17 +6893,15 @@ CheckModule(ExclusiveContext* cx, AsmJSParser& parser, ParseNode* stmtList, if (!CheckModuleGlobals(m)) return false; - m.startFunctionBodies(); - -#if !defined(ENABLE_SHARED_ARRAY_BUFFER) - if (m.usesSharedMemory()) - return m.failOffset(m.parser().tokenStream.currentToken().pos.begin, - "shared memory and atomics not supported by this build"); -#endif + if (!m.startFunctionBodies()) + return false; if (!CheckFunctions(m)) return false; + if (!m.finishFunctionBodies()) + return false; + if (!CheckFuncPtrTables(m)) return false; @@ -6893,36 +6911,35 @@ CheckModule(ExclusiveContext* cx, AsmJSParser& parser, ParseNode* stmtList, if (!CheckModuleEnd(m)) return false; - if (!m.finish(module, slowFuncs)) + if (!m.finish(slowFuncs)) return false; *time = (PRMJ_Now() - before) / PRMJ_USEC_PER_MSEC; return true; } -static bool -BuildConsoleMessage(ExclusiveContext* cx, AsmJSModule& module, - unsigned time, const SlowFunctionVector& slowFuncs, - JS::AsmJSCacheResult cacheResult, ScopedJSFreePtr* out) +static UniqueChars +BuildConsoleMessage(ExclusiveContext* cx, AsmJSModule& module, unsigned time, + const SlowFunctionVector& slowFuncs, JS::AsmJSCacheResult cacheResult) { #ifndef JS_MORE_DETERMINISTIC - ScopedJSFreePtr slowText; + UniqueChars slowText; if (!slowFuncs.empty()) { slowText.reset(JS_smprintf("; %d functions compiled slowly: ", slowFuncs.length())); if (!slowText) - return true; + return nullptr; for (unsigned i = 0; i < slowFuncs.length(); i++) { const SlowFunction& func = slowFuncs[i]; JSAutoByteString name; if (!AtomToPrintableString(cx, func.name, &name)) - return false; + return nullptr; slowText.reset(JS_smprintf("%s%s:%u:%u (%ums)%s", slowText.get(), name.ptr(), func.line, func.column, func.ms, i+1 < slowFuncs.length() ? ", " : "")); if (!slowText) - return true; + return nullptr; } } @@ -6961,11 +6978,11 @@ BuildConsoleMessage(ExclusiveContext* cx, AsmJSModule& module, break; } - out->reset(JS_smprintf("total compilation time %dms; %s%s", - time, cacheString, slowText ? slowText.get() : "")); + return UniqueChars(JS_smprintf("total compilation time %dms; %s%s", + time, cacheString, slowText ? 
slowText.get() : "")); +#else + return make_string_copy(""); #endif - - return true; } static bool @@ -7029,42 +7046,40 @@ js::ValidateAsmJS(ExclusiveContext* cx, AsmJSParser& parser, ParseNode* stmtList if (!EstablishPreconditions(cx, parser)) return NoExceptionPending(cx); - ScopedJSDeletePtr module; - ScopedJSFreePtr message; + Rooted moduleObj(cx, AsmJSModuleObject::create(cx)); + if (!moduleObj) + return false; // Before spending any time parsing the module, try to look it up in the // embedding's cache using the chars about to be parsed as the key. - if (!LookupAsmJSModuleInCache(cx, parser, &module, &message)) + bool loadedFromCache; + UniqueChars message; + if (!LookupAsmJSModuleInCache(cx, parser, moduleObj, &loadedFromCache, &message)) return false; // If not present in the cache, parse, validate and generate code in a // single linear pass over the chars of the asm.js module. - if (!module) { + if (!loadedFromCache) { // "Checking" parses, validates and compiles, producing a fully compiled - // AsmJSModule as result. + // AsmJSModuleObject as result. unsigned time; SlowFunctionVector slowFuncs(cx); - if (!CheckModule(cx, parser, stmtList, &module, &time, &slowFuncs)) + if (!CheckModule(cx, parser, stmtList, moduleObj, &time, &slowFuncs)) return NoExceptionPending(cx); // Try to store the AsmJSModule in the embedding's cache. The // AsmJSModule must be stored before static linking since static linking // specializes the AsmJSModule to the current process's address space // and therefore must be executed after a cache hit. - JS::AsmJSCacheResult cacheResult = StoreAsmJSModuleInCache(parser, *module, cx); - module->staticallyLink(cx); - - if (!BuildConsoleMessage(cx, *module, time, slowFuncs, cacheResult, &message)) + AsmJSModule& module = moduleObj->module(); + JS::AsmJSCacheResult cacheResult = StoreAsmJSModuleInCache(parser, module, cx); + if (!module.staticallyLink(cx)) return false; - } - // The AsmJSModuleObject isn't directly referenced by user code; it is only - // referenced (and kept alive by) an internal slot of the asm.js module - // function generated below and asm.js export functions generated when the - // asm.js module function is called. - RootedObject moduleObj(cx, AsmJSModuleObject::create(cx, &module)); - if (!moduleObj) - return false; + message = BuildConsoleMessage(cx, module, time, slowFuncs, cacheResult); + if (!message) + return NoExceptionPending(cx); + } // The module function dynamically links the AsmJSModule when called and // generates a set of functions wrapping all the exports. diff --git a/js/src/asmjs/AsmJSValidate.h b/js/src/asmjs/AsmJSValidate.h index 6b3829f78ae..5d65f5bd726 100644 --- a/js/src/asmjs/AsmJSValidate.h +++ b/js/src/asmjs/AsmJSValidate.h @@ -27,7 +27,6 @@ #include "jit/Registers.h" #include "js/TypeDecls.h" -#include "vm/NativeObject.h" namespace js { diff --git a/js/src/asmjs/WasmCompileArgs.h b/js/src/asmjs/WasmCompileArgs.h deleted file mode 100644 index 5b084ffd201..00000000000 --- a/js/src/asmjs/WasmCompileArgs.h +++ /dev/null @@ -1,42 +0,0 @@ -/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*- - * vim: set ts=8 sts=4 et sw=4 tw=99: - * - * Copyright 2015 Mozilla Foundation - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#ifndef asmjs_wasm_compile_args_h -#define asmjs_wasm_compile_args_h - -struct JSRuntime; - -namespace js { -namespace wasm { - -struct CompileArgs -{ - JSRuntime* runtime; - bool usesSignalHandlersForOOB; - - CompileArgs(JSRuntime* runtime, - bool usesSignalHandlersForOOB) - : runtime(runtime), - usesSignalHandlersForOOB(usesSignalHandlersForOOB) - {} -}; - -} // namespace wasm -} // namespace js - -#endif // asmjs_wasm_compile_args_h diff --git a/js/src/asmjs/AsmJSFrameIterator.cpp b/js/src/asmjs/WasmFrameIterator.cpp similarity index 58% rename from js/src/asmjs/AsmJSFrameIterator.cpp rename to js/src/asmjs/WasmFrameIterator.cpp index 1d3201bbdc1..8882f6b8993 100644 --- a/js/src/asmjs/AsmJSFrameIterator.cpp +++ b/js/src/asmjs/WasmFrameIterator.cpp @@ -16,7 +16,9 @@ * limitations under the License. */ -#include "asmjs/AsmJSFrameIterator.h" +#include "asmjs/WasmFrameIterator.h" + +#include "jsatom.h" #include "asmjs/AsmJSModule.h" #include "jit/MacroAssembler-inl.h" @@ -26,9 +28,10 @@ using namespace js::jit; using namespace js::wasm; using mozilla::DebugOnly; +using mozilla::Swap; /*****************************************************************************/ -// AsmJSFrameIterator implementation +// FrameIterator implementation static void* ReturnAddressFromFP(void* fp) @@ -42,17 +45,29 @@ CallerFPFromFP(void* fp) return reinterpret_cast(fp)->callerFP; } -AsmJSFrameIterator::AsmJSFrameIterator(const AsmJSActivation& activation) - : module_(&activation.module()), +FrameIterator::FrameIterator() + : cx_(nullptr), + module_(nullptr), + callsite_(nullptr), + codeRange_(nullptr), + fp_(nullptr) +{ + MOZ_ASSERT(done()); +} + +FrameIterator::FrameIterator(const AsmJSActivation& activation) + : cx_(activation.cx()), + module_(&activation.module().wasm()), + callsite_(nullptr), + codeRange_(nullptr), fp_(activation.fp()) { - if (!fp_) - return; - settle(); + if (fp_) + settle(); } void -AsmJSFrameIterator::operator++() +FrameIterator::operator++() { MOZ_ASSERT(!done()); DebugOnly oldfp = fp_; @@ -62,41 +77,57 @@ AsmJSFrameIterator::operator++() } void -AsmJSFrameIterator::settle() +FrameIterator::settle() { void* returnAddress = ReturnAddressFromFP(fp_); - const AsmJSModule::CodeRange* codeRange = module_->lookupCodeRange(returnAddress); + const CodeRange* codeRange = module_->lookupCodeRange(returnAddress); MOZ_ASSERT(codeRange); codeRange_ = codeRange; switch (codeRange->kind()) { - case AsmJSModule::CodeRange::Function: + case CodeRange::Function: callsite_ = module_->lookupCallSite(returnAddress); MOZ_ASSERT(callsite_); break; - case AsmJSModule::CodeRange::Entry: + case CodeRange::Entry: fp_ = nullptr; MOZ_ASSERT(done()); break; - case AsmJSModule::CodeRange::JitFFI: - case AsmJSModule::CodeRange::SlowFFI: - case AsmJSModule::CodeRange::Interrupt: - case AsmJSModule::CodeRange::Inline: - case AsmJSModule::CodeRange::Thunk: + case CodeRange::ImportJitExit: + case CodeRange::ImportInterpExit: + case CodeRange::Interrupt: + case CodeRange::Inline: MOZ_CRASH("Should not encounter an exit during iteration"); } } JSAtom* -AsmJSFrameIterator::functionDisplayAtom() const 
+FrameIterator::functionDisplayAtom() const { MOZ_ASSERT(!done()); - return reinterpret_cast(codeRange_)->functionName(*module_); + + const char* chars = module_->functionName(codeRange_->funcNameIndex()); + UTF8Chars utf8(chars, strlen(chars)); + + size_t twoByteLength; + UniquePtr twoByte(JS::UTF8CharsToNewTwoByteCharsZ(cx_, utf8, &twoByteLength).get()); + if (!twoByte) { + cx_->clearPendingException(); + return cx_->names().empty; + } + + JSAtom* atom = AtomizeChars(cx_, twoByte.get(), twoByteLength); + if (!atom) { + cx_->clearPendingException(); + return cx_->names().empty; + } + + return atom; } unsigned -AsmJSFrameIterator::computeLine(uint32_t* column) const +FrameIterator::computeLine(uint32_t* column) const { MOZ_ASSERT(!done()); if (column) @@ -163,11 +194,11 @@ PushRetAddr(MacroAssembler& masm) } // Generate a prologue that maintains AsmJSActivation::fp as the virtual frame -// pointer so that AsmJSProfilingFrameIterator can walk the stack at any pc in +// pointer so that ProfilingFrameIterator can walk the stack at any pc in // generated code. static void GenerateProfilingPrologue(MacroAssembler& masm, unsigned framePushed, ExitReason reason, - AsmJSProfilingOffsets* offsets, Label* maybeEntry = nullptr) + ProfilingOffsets* offsets, Label* maybeEntry = nullptr) { #if !defined (JS_CODEGEN_ARM) Register scratch = ABIArgGenerator::NonArg_VolatileReg; @@ -179,7 +210,7 @@ GenerateProfilingPrologue(MacroAssembler& masm, unsigned framePushed, ExitReason masm.setSecondScratchReg(InvalidReg); #endif - // AsmJSProfilingFrameIterator needs to know the offsets of several key + // ProfilingFrameIterator needs to know the offsets of several key // instructions from entry. To save space, we make these offsets static // constants and assert that they match the actual codegen below. On ARM, // this requires AutoForbidPools to prevent a constant pool from being @@ -204,9 +235,9 @@ GenerateProfilingPrologue(MacroAssembler& masm, unsigned framePushed, ExitReason MOZ_ASSERT_IF(!masm.oom(), StoredFP == masm.currentOffset() - offsets->begin); } - if (reason.kind() != ExitReason::None) { - masm.store32_NoSecondScratch(Imm32(reason.pack()), - Address(scratch, AsmJSActivation::offsetOfPackedExitReason())); + if (reason != ExitReason::None) { + masm.store32_NoSecondScratch(Imm32(int32_t(reason)), + Address(scratch, AsmJSActivation::offsetOfExitReason())); } #if defined(JS_CODEGEN_ARM) @@ -220,7 +251,7 @@ GenerateProfilingPrologue(MacroAssembler& masm, unsigned framePushed, ExitReason // Generate the inverse of GenerateProfilingPrologue. 
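Note: both iterators walk frames through the saved caller-fp chain, and the entry trampoline's null callerFP terminates the walk. The shape of that walk, with the frame reduced to two words (field order illustrative, not the real AsmJSFrame layout):

    struct Frame { void* returnAddress; void* callerFP; };

    unsigned CountFrames(void* fp) {
        unsigned depth = 0;
        while (fp) {                                  // null => entry trampoline
            depth++;
            fp = static_cast<Frame*>(fp)->callerFP;   // cf. CallerFPFromFP
        }
        return depth;
    }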
static void GenerateProfilingEpilogue(MacroAssembler& masm, unsigned framePushed, ExitReason reason, - AsmJSProfilingOffsets* offsets) + ProfilingOffsets* offsets) { Register scratch = ABIArgGenerator::NonReturn_VolatileReg0; #if defined(JS_CODEGEN_ARM) || defined(JS_CODEGEN_ARM64) || \ @@ -233,12 +264,12 @@ GenerateProfilingEpilogue(MacroAssembler& masm, unsigned framePushed, ExitReason masm.loadAsmJSActivation(scratch); - if (reason.kind() != ExitReason::None) { - masm.store32(Imm32(ExitReason::None), - Address(scratch, AsmJSActivation::offsetOfPackedExitReason())); + if (reason != ExitReason::None) { + masm.store32(Imm32(int32_t(ExitReason::None)), + Address(scratch, AsmJSActivation::offsetOfExitReason())); } - // AsmJSProfilingFrameIterator assumes fixed offsets of the last few + // ProfilingFrameIterator assumes fixed offsets of the last few // instructions from profilingReturn, so AutoForbidPools to ensure that // unintended instructions are not automatically inserted. { @@ -272,11 +303,10 @@ GenerateProfilingEpilogue(MacroAssembler& masm, unsigned framePushed, ExitReason // to call out to C++ so, as an optimization, we don't update fp. To avoid // recompilation when the profiling mode is toggled, we generate both prologues // a priori and switch between prologues when the profiling mode is toggled. -// Specifically, AsmJSModule::setProfilingEnabled patches all callsites to +// Specifically, Module::setProfilingEnabled patches all callsites to // either call the profiling or non-profiling entry point. void -js::GenerateAsmJSFunctionPrologue(MacroAssembler& masm, unsigned framePushed, - AsmJSFunctionOffsets* offsets) +wasm::GenerateFunctionPrologue(MacroAssembler& masm, unsigned framePushed, FuncOffsets* offsets) { #if defined(JS_CODEGEN_ARM) // Flush pending pools so they do not get dumped between the 'begin' and @@ -301,14 +331,13 @@ js::GenerateAsmJSFunctionPrologue(MacroAssembler& masm, unsigned framePushed, masm.setFramePushed(framePushed); } -// Similar to GenerateAsmJSFunctionPrologue (see comment), we generate both a +// Similar to GenerateFunctionPrologue (see comment), we generate both a // profiling and non-profiling epilogue a priori. When the profiling mode is -// toggled, AsmJSModule::setProfilingEnabled patches the 'profiling jump' to +// toggled, Module::setProfilingEnabled patches the 'profiling jump' to // either be a nop (falling through to the normal prologue) or a jump (jumping // to the profiling epilogue). void -js::GenerateAsmJSFunctionEpilogue(MacroAssembler& masm, unsigned framePushed, - AsmJSFunctionOffsets* offsets) +wasm::GenerateFunctionEpilogue(MacroAssembler& masm, unsigned framePushed, FuncOffsets* offsets) { MOZ_ASSERT(masm.framePushed() == framePushed); @@ -329,7 +358,7 @@ js::GenerateAsmJSFunctionEpilogue(MacroAssembler& masm, unsigned framePushed, #endif // The exact form of this instruction must be kept consistent with the - // patching in AsmJSModule::setProfilingEnabled. + // patching in Module::setProfilingEnabled. 
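Note: toggling profiling rewrites each function's 'profiling jump' between a two-byte nop (fall through to the normal epilogue) and a short jump to the profiling epilogue, so no recompilation is needed. An illustrative x86 byte-level sketch; the real patching goes through the assembler and must also handle icache flushing:

    #include <cstdint>

    void SetProfilingJump(uint8_t* site, int8_t rel8, bool enabled) {
        if (enabled) {
            site[0] = 0xEB;              // jmp rel8 -> profiling epilogue
            site[1] = uint8_t(rel8);
        } else {
            site[0] = 0x66;              // 66 90: two-byte nop, fall through
            site[1] = 0x90;
        }
    }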
offsets->profilingJump = masm.currentOffset(); #if defined(JS_CODEGEN_X86) || defined(JS_CODEGEN_X64) masm.twoByteNop(); @@ -361,8 +390,8 @@ js::GenerateAsmJSFunctionEpilogue(MacroAssembler& masm, unsigned framePushed, } void -js::GenerateAsmJSExitPrologue(MacroAssembler& masm, unsigned framePushed, ExitReason reason, - AsmJSProfilingOffsets* offsets, Label* maybeEntry) +wasm::GenerateExitPrologue(MacroAssembler& masm, unsigned framePushed, ExitReason reason, + ProfilingOffsets* offsets, Label* maybeEntry) { masm.haltingAlign(CodeAlignment); GenerateProfilingPrologue(masm, framePushed, reason, offsets, maybeEntry); @@ -370,25 +399,36 @@ js::GenerateAsmJSExitPrologue(MacroAssembler& masm, unsigned framePushed, ExitRe } void -js::GenerateAsmJSExitEpilogue(MacroAssembler& masm, unsigned framePushed, ExitReason reason, - AsmJSProfilingOffsets* offsets) +wasm::GenerateExitEpilogue(MacroAssembler& masm, unsigned framePushed, ExitReason reason, + ProfilingOffsets* offsets) { - // Inverse of GenerateAsmJSExitPrologue: + // Inverse of GenerateExitPrologue: MOZ_ASSERT(masm.framePushed() == framePushed); GenerateProfilingEpilogue(masm, framePushed, reason, offsets); masm.setFramePushed(0); } /*****************************************************************************/ -// AsmJSProfilingFrameIterator +// ProfilingFrameIterator -AsmJSProfilingFrameIterator::AsmJSProfilingFrameIterator(const AsmJSActivation& activation) - : module_(&activation.module()), +ProfilingFrameIterator::ProfilingFrameIterator() + : module_(nullptr), + codeRange_(nullptr), callerFP_(nullptr), callerPC_(nullptr), stackAddress_(nullptr), - exitReason_(ExitReason::None), - codeRange_(nullptr) + exitReason_(ExitReason::None) +{ + MOZ_ASSERT(done()); +} + +ProfilingFrameIterator::ProfilingFrameIterator(const AsmJSActivation& activation) + : module_(&activation.module().wasm()), + codeRange_(nullptr), + callerFP_(nullptr), + callerPC_(nullptr), + stackAddress_(nullptr), + exitReason_(ExitReason::None) { // If profiling hasn't been enabled for this module, then CallerFPFromFP // will be trash, so ignore the entire activation. 
In practice, this only @@ -404,30 +444,24 @@ AsmJSProfilingFrameIterator::AsmJSProfilingFrameIterator(const AsmJSActivation& } static inline void -AssertMatchesCallSite(const AsmJSModule& module, const AsmJSModule::CodeRange* calleeCodeRange, - void* callerPC, void* callerFP, void* fp) +AssertMatchesCallSite(const Module& module, void* callerPC, void* callerFP, void* fp) { #ifdef DEBUG - const AsmJSModule::CodeRange* callerCodeRange = module.lookupCodeRange(callerPC); + const CodeRange* callerCodeRange = module.lookupCodeRange(callerPC); MOZ_ASSERT(callerCodeRange); - if (callerCodeRange->isEntry()) { + if (callerCodeRange->kind() == CodeRange::Entry) { MOZ_ASSERT(callerFP == nullptr); return; } const CallSite* callsite = module.lookupCallSite(callerPC); - if (calleeCodeRange->isThunk()) { - MOZ_ASSERT(!callsite); - MOZ_ASSERT(callerCodeRange->isFunction()); - } else { - MOZ_ASSERT(callsite); - MOZ_ASSERT(callerFP == (uint8_t*)fp + callsite->stackDepth()); - } + MOZ_ASSERT(callsite); + MOZ_ASSERT(callerFP == (uint8_t*)fp + callsite->stackDepth()); #endif } void -AsmJSProfilingFrameIterator::initFromFP(const AsmJSActivation& activation) +ProfilingFrameIterator::initFromFP(const AsmJSActivation& activation) { uint8_t* fp = activation.fp(); @@ -441,59 +475,57 @@ AsmJSProfilingFrameIterator::initFromFP(const AsmJSActivation& activation) // Since we don't have the pc for fp, start unwinding at the caller of fp // (ReturnAddressFromFP(fp)). This means that the innermost frame is // skipped. This is fine because: - // - for FFI calls, the innermost frame is a thunk, so the first frame that - // shows up is the function calling the FFI; - // - for Math and other builtin calls, when profiling is activated, we - // patch all call sites to instead call through a thunk; and - // - for interrupts, we just accept that we'll lose the innermost frame. + // - for import exit calls, the innermost frame is a thunk, so the first + // frame that shows up is the function calling the import; + // - for Math and other builtin calls as well as interrupts, we note the absence + // of an exit reason and inject a fake "builtin" frame; and + // - for async interrupts, we just accept that we'll lose the innermost frame. void* pc = ReturnAddressFromFP(fp); - const AsmJSModule::CodeRange* codeRange = module_->lookupCodeRange(pc); + const CodeRange* codeRange = module_->lookupCodeRange(pc); MOZ_ASSERT(codeRange); codeRange_ = codeRange; stackAddress_ = fp; switch (codeRange->kind()) { - case AsmJSModule::CodeRange::Entry: + case CodeRange::Entry: callerPC_ = nullptr; callerFP_ = nullptr; break; - case AsmJSModule::CodeRange::Function: + case CodeRange::Function: fp = CallerFPFromFP(fp); callerPC_ = ReturnAddressFromFP(fp); callerFP_ = CallerFPFromFP(fp); - AssertMatchesCallSite(*module_, codeRange, callerPC_, callerFP_, fp); + AssertMatchesCallSite(*module_, callerPC_, callerFP_, fp); break; - case AsmJSModule::CodeRange::JitFFI: - case AsmJSModule::CodeRange::SlowFFI: - case AsmJSModule::CodeRange::Interrupt: - case AsmJSModule::CodeRange::Inline: - case AsmJSModule::CodeRange::Thunk: + case CodeRange::ImportJitExit: + case CodeRange::ImportInterpExit: + case CodeRange::Interrupt: + case CodeRange::Inline: MOZ_CRASH("Unexpected CodeRange kind"); } - // Despite the above reasoning for skipping a frame, we do actually want FFI - // trampolines and interrupts to show up in the profile (so they can - // accumulate self time and explain performance faults). 
To do this, an
- "exit reason" is stored on all the paths leaving asm.js and this iterator
- treats this exit reason as its own frame. If we have exited asm.js code
- without setting an exit reason, the reason will be None and this means
- the code was asynchronously interrupted.
+ // The iterator inserts a pretend innermost frame for non-None ExitReasons.
+ // This allows the variety of exit reasons to show up in the callstack.
exitReason_ = activation.exitReason();
- if (exitReason_.kind() == ExitReason::None)
- exitReason_ = ExitReason::Interrupt;
+
+ // In the case of calls to builtins or asynchronous interrupts, no exit path
+ // is taken so the exitReason is None. Coerce these to the Native exit
+ // reason so that self-time is accounted for.
+ if (exitReason_ == ExitReason::None)
+ exitReason_ = ExitReason::Native;
MOZ_ASSERT(!done());
}
typedef JS::ProfilingFrameIterator::RegisterState RegisterState;
-AsmJSProfilingFrameIterator::AsmJSProfilingFrameIterator(const AsmJSActivation& activation,
- const RegisterState& state)
- : module_(&activation.module()),
+ProfilingFrameIterator::ProfilingFrameIterator(const AsmJSActivation& activation,
+ const RegisterState& state)
+ : module_(&activation.module().wasm()),
+ codeRange_(nullptr),
callerFP_(nullptr),
callerPC_(nullptr),
- exitReason_(ExitReason::None),
- codeRange_(nullptr)
+ exitReason_(ExitReason::None)
{
// If profiling hasn't been enabled for this module, then CallerFPFromFP
// will be trash, so ignore the entire activation. In practice, this only
@@ -515,13 +547,12 @@ AsmJSProfilingFrameIterator::AsmJSProfilingFrameIterator(const AsmJSActivation&
// Note: fp may be null while entering and leaving the activation.
uint8_t* fp = activation.fp();
- const AsmJSModule::CodeRange* codeRange = module_->lookupCodeRange(state.pc);
+ const CodeRange* codeRange = module_->lookupCodeRange(state.pc);
switch (codeRange->kind()) {
- case AsmJSModule::CodeRange::Function:
- case AsmJSModule::CodeRange::JitFFI:
- case AsmJSModule::CodeRange::SlowFFI:
- case AsmJSModule::CodeRange::Interrupt:
- case AsmJSModule::CodeRange::Thunk: {
+ case CodeRange::Function:
+ case CodeRange::ImportJitExit:
+ case CodeRange::ImportInterpExit:
+ case CodeRange::Interrupt: {
// When the pc is inside the prologue/epilogue, the innermost
// call's AsmJSFrame is not complete and thus fp points to the
// second-to-innermost call's AsmJSFrame. Since fp can only tell you
@@ -529,8 +560,8 @@ AsmJSProfilingFrameIterator::AsmJSProfilingFrameIterator(const AsmJSActivation&
// while pc is in the prologue/epilogue would skip the second-to-
// innermost call. To avoid this problem, we use the static structure of
// the code in the prologue and epilogue to do the Right Thing.
- uint32_t offsetInModule = (uint8_t*)state.pc - module_->codeBase();
- MOZ_ASSERT(offsetInModule < module_->codeBytes());
+ MOZ_ASSERT(module_->containsCodePC(state.pc));
+ uint32_t offsetInModule = (uint8_t*)state.pc - module_->code();
MOZ_ASSERT(offsetInModule >= codeRange->begin());
MOZ_ASSERT(offsetInModule < codeRange->end());
uint32_t offsetInCodeRange = offsetInModule - codeRange->begin();
@@ -541,13 +572,13 @@ AsmJSProfilingFrameIterator::AsmJSProfilingFrameIterator(const AsmJSActivation&
// still in lr and fp still holds the caller's fp.
callerPC_ = state.lr;
callerFP_ = fp;
- AssertMatchesCallSite(*module_, codeRange, callerPC_, callerFP_, sp - 2);
+ AssertMatchesCallSite(*module_, callerPC_, callerFP_, sp - 2);
} else if (offsetInModule == codeRange->profilingReturn() - PostStorePrePopFP) {
// Second-to-last instruction of the ARM/MIPS function; fp points to
// the caller's fp; have not yet popped AsmJSFrame.
callerPC_ = ReturnAddressFromFP(sp);
callerFP_ = CallerFPFromFP(sp);
- AssertMatchesCallSite(*module_, codeRange, callerPC_, callerFP_, sp);
+ AssertMatchesCallSite(*module_, callerPC_, callerFP_, sp);
} else
#endif
if (offsetInCodeRange < PushedFP || offsetInModule == codeRange->profilingReturn()) {
@@ -555,32 +586,32 @@ AsmJSProfilingFrameIterator::AsmJSProfilingFrameIterator(const AsmJSActivation&
// The return address has been pushed on the stack but not fp; fp
// still points to the caller's fp.
callerPC_ = *sp;
callerFP_ = fp;
- AssertMatchesCallSite(*module_, codeRange, callerPC_, callerFP_, sp - 1);
+ AssertMatchesCallSite(*module_, callerPC_, callerFP_, sp - 1);
} else if (offsetInCodeRange < StoredFP) {
// The full AsmJSFrame has been pushed; fp still points to the
// caller's frame.
MOZ_ASSERT(fp == CallerFPFromFP(sp));
callerPC_ = ReturnAddressFromFP(sp);
callerFP_ = CallerFPFromFP(sp);
- AssertMatchesCallSite(*module_, codeRange, callerPC_, callerFP_, sp);
+ AssertMatchesCallSite(*module_, callerPC_, callerFP_, sp);
} else {
// Not in the prologue/epilogue.
callerPC_ = ReturnAddressFromFP(fp);
callerFP_ = CallerFPFromFP(fp);
- AssertMatchesCallSite(*module_, codeRange, callerPC_, callerFP_, fp);
+ AssertMatchesCallSite(*module_, callerPC_, callerFP_, fp);
}
break;
}
- case AsmJSModule::CodeRange::Entry: {
+ case CodeRange::Entry: {
// The entry trampoline is the final frame in an AsmJSActivation. The entry
- // trampoline also doesn't GenerateAsmJSPrologue/Epilogue so we can't use
+ // trampoline also doesn't GeneratePrologue/Epilogue so we can't use
// the general unwinding logic above.
MOZ_ASSERT(!fp);
callerPC_ = nullptr;
callerFP_ = nullptr;
break;
}
- case AsmJSModule::CodeRange::Inline: {
+ case CodeRange::Inline: {
// The throw stub clears AsmJSActivation::fp on its way out.
if (!fp) {
MOZ_ASSERT(done());
@@ -594,7 +625,7 @@ AsmJSProfilingFrameIterator::AsmJSProfilingFrameIterator(const AsmJSActivation&
// skipped frames. Thus, we simply unwind based on fp.
callerPC_ = ReturnAddressFromFP(fp); callerFP_ = CallerFPFromFP(fp); - AssertMatchesCallSite(*module_, codeRange, callerPC_, callerFP_, fp); + AssertMatchesCallSite(*module_, callerPC_, callerFP_, fp); break; } } @@ -605,9 +636,9 @@ AsmJSProfilingFrameIterator::AsmJSProfilingFrameIterator(const AsmJSActivation& } void -AsmJSProfilingFrameIterator::operator++() +ProfilingFrameIterator::operator++() { - if (exitReason_.kind() != ExitReason::None) { + if (exitReason_ != ExitReason::None) { MOZ_ASSERT(codeRange_); exitReason_ = ExitReason::None; MOZ_ASSERT(!done()); @@ -621,25 +652,23 @@ AsmJSProfilingFrameIterator::operator++() return; } - MOZ_ASSERT(callerPC_); - const AsmJSModule::CodeRange* codeRange = module_->lookupCodeRange(callerPC_); + const CodeRange* codeRange = module_->lookupCodeRange(callerPC_); MOZ_ASSERT(codeRange); codeRange_ = codeRange; switch (codeRange->kind()) { - case AsmJSModule::CodeRange::Entry: + case CodeRange::Entry: MOZ_ASSERT(callerFP_ == nullptr); callerPC_ = nullptr; break; - case AsmJSModule::CodeRange::Function: - case AsmJSModule::CodeRange::JitFFI: - case AsmJSModule::CodeRange::SlowFFI: - case AsmJSModule::CodeRange::Interrupt: - case AsmJSModule::CodeRange::Inline: - case AsmJSModule::CodeRange::Thunk: + case CodeRange::Function: + case CodeRange::ImportJitExit: + case CodeRange::ImportInterpExit: + case CodeRange::Interrupt: + case CodeRange::Inline: stackAddress_ = callerFP_; callerPC_ = ReturnAddressFromFP(callerFP_); - AssertMatchesCallSite(*module_, codeRange, callerPC_, CallerFPFromFP(callerFP_), callerFP_); + AssertMatchesCallSite(*module_, callerPC_, CallerFPFromFP(callerFP_), callerFP_); callerFP_ = CallerFPFromFP(callerFP_); break; } @@ -647,82 +676,175 @@ AsmJSProfilingFrameIterator::operator++() MOZ_ASSERT(!done()); } -static const char* -BuiltinToName(Builtin builtin) -{ - // Note: this label is regexp-matched by - // devtools/client/profiler/cleopatra/js/parserWorker.js. 
- - switch (builtin) { - case Builtin::ToInt32: return "ToInt32 (in asm.js)"; -#if defined(JS_CODEGEN_ARM) - case Builtin::aeabi_idivmod: return "software idivmod (in asm.js)"; - case Builtin::aeabi_uidivmod: return "software uidivmod (in asm.js)"; - case Builtin::AtomicCmpXchg: return "Atomics.compareExchange (in asm.js)"; - case Builtin::AtomicXchg: return "Atomics.exchange (in asm.js)"; - case Builtin::AtomicFetchAdd: return "Atomics.add (in asm.js)"; - case Builtin::AtomicFetchSub: return "Atomics.sub (in asm.js)"; - case Builtin::AtomicFetchAnd: return "Atomics.and (in asm.js)"; - case Builtin::AtomicFetchOr: return "Atomics.or (in asm.js)"; - case Builtin::AtomicFetchXor: return "Atomics.xor (in asm.js)"; -#endif - case Builtin::ModD: return "fmod (in asm.js)"; - case Builtin::SinD: return "Math.sin (in asm.js)"; - case Builtin::CosD: return "Math.cos (in asm.js)"; - case Builtin::TanD: return "Math.tan (in asm.js)"; - case Builtin::ASinD: return "Math.asin (in asm.js)"; - case Builtin::ACosD: return "Math.acos (in asm.js)"; - case Builtin::ATanD: return "Math.atan (in asm.js)"; - case Builtin::CeilD: - case Builtin::CeilF: return "Math.ceil (in asm.js)"; - case Builtin::FloorD: - case Builtin::FloorF: return "Math.floor (in asm.js)"; - case Builtin::ExpD: return "Math.exp (in asm.js)"; - case Builtin::LogD: return "Math.log (in asm.js)"; - case Builtin::PowD: return "Math.pow (in asm.js)"; - case Builtin::ATan2D: return "Math.atan2 (in asm.js)"; - case Builtin::Limit: break; - } - MOZ_CRASH("symbolic immediate not a builtin"); -} - const char* -AsmJSProfilingFrameIterator::label() const +ProfilingFrameIterator::label() const { MOZ_ASSERT(!done()); // Use the same string for both time inside and under so that the two // entries will be coalesced by the profiler. // - // NB: these labels are regexp-matched by - // devtools/client/profiler/cleopatra/js/parserWorker.js. 
- const char* jitFFIDescription = "fast FFI trampoline (in asm.js)"; - const char* slowFFIDescription = "slow FFI trampoline (in asm.js)"; - const char* interruptDescription = "interrupt due to out-of-bounds or long execution (in asm.js)"; + // NB: these labels are parsed for location by + // devtools/client/performance/modules/logic/frame-utils.js + const char* importJitDescription = "fast FFI trampoline (in asm.js)"; + const char* importInterpDescription = "slow FFI trampoline (in asm.js)"; + const char* nativeDescription = "native call (in asm.js)"; - switch (exitReason_.kind()) { + switch (exitReason_) { case ExitReason::None: break; - case ExitReason::Jit: - return jitFFIDescription; - case ExitReason::Slow: - return slowFFIDescription; - case ExitReason::Interrupt: - return interruptDescription; - case ExitReason::Builtin: - return BuiltinToName(exitReason_.builtin()); + case ExitReason::ImportJit: + return importJitDescription; + case ExitReason::ImportInterp: + return importInterpDescription; + case ExitReason::Native: + return nativeDescription; } - auto codeRange = reinterpret_cast(codeRange_); - switch (codeRange->kind()) { - case AsmJSModule::CodeRange::Function: return codeRange->functionProfilingLabel(*module_); - case AsmJSModule::CodeRange::Entry: return "entry trampoline (in asm.js)"; - case AsmJSModule::CodeRange::JitFFI: return jitFFIDescription; - case AsmJSModule::CodeRange::SlowFFI: return slowFFIDescription; - case AsmJSModule::CodeRange::Interrupt: return interruptDescription; - case AsmJSModule::CodeRange::Inline: return "inline stub (in asm.js)"; - case AsmJSModule::CodeRange::Thunk: return BuiltinToName(codeRange->thunkTarget()); + switch (codeRange_->kind()) { + case CodeRange::Function: return module_->profilingLabel(codeRange_->funcNameIndex()); + case CodeRange::Entry: return "entry trampoline (in asm.js)"; + case CodeRange::ImportJitExit: return importJitDescription; + case CodeRange::ImportInterpExit: return importInterpDescription; + case CodeRange::Interrupt: return nativeDescription; + case CodeRange::Inline: return "inline stub (in asm.js)"; } MOZ_CRASH("bad code range kind"); } + +/*****************************************************************************/ +// Runtime patching to enable/disable profiling + +// Patch all internal (asm.js->asm.js) callsites to call the profiling +// prologues: +void +wasm::EnableProfilingPrologue(const Module& module, const CallSite& callSite, bool enabled) +{ + if (callSite.kind() != CallSite::Relative) + return; + + uint8_t* callerRetAddr = module.code() + callSite.returnAddressOffset(); + +#if defined(JS_CODEGEN_X86) || defined(JS_CODEGEN_X64) + void* callee = X86Encoding::GetRel32Target(callerRetAddr); +#elif defined(JS_CODEGEN_ARM) + uint8_t* caller = callerRetAddr - 4; + Instruction* callerInsn = reinterpret_cast(caller); + BOffImm calleeOffset; + callerInsn->as()->extractImm(&calleeOffset); + void* callee = calleeOffset.getDest(callerInsn); +#elif defined(JS_CODEGEN_ARM64) + MOZ_CRASH(); + void* callee = nullptr; + (void)callerRetAddr; +#elif defined(JS_CODEGEN_MIPS32) + Instruction* instr = (Instruction*)(callerRetAddr - 4 * sizeof(uint32_t)); + void* callee = (void*)Assembler::ExtractLuiOriValue(instr, instr->next()); +#elif defined(JS_CODEGEN_MIPS64) + Instruction* instr = (Instruction*)(callerRetAddr - 6 * sizeof(uint32_t)); + void* callee = (void*)Assembler::ExtractLoad64Value(instr); +#elif defined(JS_CODEGEN_NONE) + MOZ_CRASH(); + void* callee = nullptr; +#else +# error "Missing architecture" +#endif + + 
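The #ifdef chain above recovers the current callee from the encoded call instruction, and the chain further below writes the retargeted entry back. On x86/x64 both directions reduce to rel32 arithmetic; a hedged sketch with illustrative helpers (not the tree's X86Encoding API):

// Hedged stand-ins for what GetRel32Target/SetRel32 must do, assuming an
// E8-style near call whose last four bytes are a little-endian rel32 that
// is relative to the call's return address.
#include <cstdint>
#include <cstring>

static uint8_t*
GetRel32TargetSketch(uint8_t* callerRetAddr)
{
    int32_t rel32;
    std::memcpy(&rel32, callerRetAddr - 4, sizeof(rel32));  // call's immediate
    return callerRetAddr + rel32;
}

static void
SetRel32Sketch(uint8_t* callerRetAddr, uint8_t* newTarget)
{
    int32_t rel32 = int32_t(newTarget - callerRetAddr);     // assumes target within +/-2GiB
    std::memcpy(callerRetAddr - 4, &rel32, sizeof(rel32));  // retarget the call
}

The rel32 is relative to the call's return address, which is why the patcher is handed callerRetAddr rather than the address of the call itself.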
const CodeRange* codeRange = module.lookupCodeRange(callee);
+ if (!codeRange->isFunction())
+ return;
+
+ uint8_t* from = module.code() + codeRange->funcNonProfilingEntry();
+ uint8_t* to = module.code() + codeRange->funcProfilingEntry();
+ if (!enabled)
+ Swap(from, to);
+
+ MOZ_ASSERT(callee == from);
+
+#if defined(JS_CODEGEN_X86) || defined(JS_CODEGEN_X64)
+ X86Encoding::SetRel32(callerRetAddr, to);
+#elif defined(JS_CODEGEN_ARM)
+ new (caller) InstBLImm(BOffImm(to - caller), Assembler::Always);
+#elif defined(JS_CODEGEN_ARM64)
+ (void)to;
+ MOZ_CRASH();
+#elif defined(JS_CODEGEN_MIPS32)
+ Assembler::WriteLuiOriInstructions(instr, instr->next(),
+ ScratchRegister, (uint32_t)to);
+ instr[2] = InstReg(op_special, ScratchRegister, zero, ra, ff_jalr);
+#elif defined(JS_CODEGEN_MIPS64)
+ Assembler::WriteLoad64Instructions(instr, ScratchRegister, (uint64_t)to);
+ instr[4] = InstReg(op_special, ScratchRegister, zero, ra, ff_jalr);
+#elif defined(JS_CODEGEN_NONE)
+ MOZ_CRASH();
+#else
+# error "Missing architecture"
+#endif
+}
+
+// Replace all the nops in all the epilogues of asm.js functions with jumps
+// to the profiling epilogues.
+void
+wasm::EnableProfilingEpilogue(const Module& module, const CodeRange& codeRange, bool enabled)
+{
+ if (!codeRange.isFunction())
+ return;
+
+ uint8_t* jump = module.code() + codeRange.functionProfilingJump();
+ uint8_t* profilingEpilogue = module.code() + codeRange.funcProfilingEpilogue();
+
+#if defined(JS_CODEGEN_X86) || defined(JS_CODEGEN_X64)
+ // An unconditional jump with a 1 byte offset immediate has the opcode
+ // 0xEB. The offset is relative to the address of the instruction after
+ // the jump. 0x66 0x90 is the canonical two-byte nop.
+ ptrdiff_t jumpImmediate = profilingEpilogue - jump - 2;
+ MOZ_ASSERT(jumpImmediate > 0 && jumpImmediate <= 127);
+ if (enabled) {
+ MOZ_ASSERT(jump[0] == 0x66);
+ MOZ_ASSERT(jump[1] == 0x90);
+ jump[0] = 0xeb;
+ jump[1] = jumpImmediate;
+ } else {
+ MOZ_ASSERT(jump[0] == 0xeb);
+ MOZ_ASSERT(jump[1] == jumpImmediate);
+ jump[0] = 0x66;
+ jump[1] = 0x90;
+ }
+#elif defined(JS_CODEGEN_ARM)
+ if (enabled) {
+ MOZ_ASSERT(reinterpret_cast(jump)->is());
+ new (jump) InstBImm(BOffImm(profilingEpilogue - jump), Assembler::Always);
+ } else {
+ MOZ_ASSERT(reinterpret_cast(jump)->is());
+ new (jump) InstNOP();
+ }
+#elif defined(JS_CODEGEN_ARM64)
+ (void)jump;
+ (void)profilingEpilogue;
+ MOZ_CRASH();
+#elif defined(JS_CODEGEN_MIPS32)
+ Instruction* instr = (Instruction*)jump;
+ if (enabled) {
+ Assembler::WriteLuiOriInstructions(instr, instr->next(),
+ ScratchRegister, (uint32_t)profilingEpilogue);
+ instr[2] = InstReg(op_special, ScratchRegister, zero, zero, ff_jr);
+ } else {
+ for (unsigned i = 0; i < 3; i++)
+ instr[i].makeNop();
+ }
+#elif defined(JS_CODEGEN_MIPS64)
+ Instruction* instr = (Instruction*)jump;
+ if (enabled) {
+ Assembler::WriteLoad64Instructions(instr, ScratchRegister, (uint64_t)profilingEpilogue);
+ instr[4] = InstReg(op_special, ScratchRegister, zero, zero, ff_jr);
+ } else {
+ for (unsigned i = 0; i < 5; i++)
+ instr[i].makeNop();
+ }
+#elif defined(JS_CODEGEN_NONE)
+ MOZ_CRASH();
+#else
+# error "Missing architecture"
+#endif
+}
diff --git a/js/src/asmjs/WasmFrameIterator.h b/js/src/asmjs/WasmFrameIterator.h
new file mode 100644
index 00000000000..0fd3eb42712
--- /dev/null
+++ b/js/src/asmjs/WasmFrameIterator.h
@@ -0,0 +1,121 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sts=4 et sw=4 tw=99:
+ *
+ * Copyright 2014 Mozilla Foundation
+ *
+ *
Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef wasm_frame_iterator_h
+#define wasm_frame_iterator_h
+
+#include "js/ProfilingFrameIterator.h"
+
+class JSAtom;
+
+namespace js {
+
+class AsmJSActivation;
+namespace jit { class MacroAssembler; class Label; }
+
+namespace wasm {
+
+class CallSite;
+class CodeRange;
+class Module;
+struct FuncOffsets;
+struct ProfilingOffsets;
+
+// Iterates over the frames of a single AsmJSActivation, called synchronously
+// from C++ in the thread of the asm.js. The one exception is that this iterator
+// may be called from the interrupt callback which may be called asynchronously
+// from asm.js code; in this case, the backtrace may not be correct.
+class FrameIterator
+{
+ JSContext* cx_;
+ const Module* module_;
+ const CallSite* callsite_;
+ const CodeRange* codeRange_;
+ uint8_t* fp_;
+
+ void settle();
+
+ public:
+ explicit FrameIterator();
+ explicit FrameIterator(const AsmJSActivation& activation);
+ void operator++();
+ bool done() const { return !fp_; }
+ JSAtom* functionDisplayAtom() const;
+ unsigned computeLine(uint32_t* column) const;
+};
+
+// An ExitReason describes the possible reasons for leaving compiled wasm code
+// or the state of not having left compiled wasm code (ExitReason::None).
+enum class ExitReason : uint32_t
+{
+ None, // default state, the pc is in wasm code
+ ImportJit, // fast-path call directly into JIT code
+ ImportInterp, // slow-path call into C++ Invoke()
+ Native // call to native C++ code (e.g., Math.sin, ToInt32(), interrupt)
+};
+
+// Iterates over the frames of a single AsmJSActivation, given an
+// asynchronously-interrupted thread's state. If the activation's
+// module is not in profiling mode, the activation is skipped.
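Before the class itself, a usage sketch of the iterator protocol this header declares: construct, test done(), read label(), advance. MockProfilingFrameIterator is a stand-in so the sketch is self-contained; the real iterator is driven by the JS::ProfilingFrameIterator machinery:

// Hedged mock of the protocol; labels are illustrative sample values.
#include <cstddef>
#include <cstdio>
#include <vector>

struct MockProfilingFrameIterator
{
    std::vector<const char*> labels{"fn (in asm.js)", "entry trampoline (in asm.js)"};
    size_t i = 0;

    bool done() const { return i == labels.size(); }
    void operator++() { ++i; }
    const char* label() const { return labels[i]; }
};

int
main()
{
    for (MockProfilingFrameIterator it; !it.done(); ++it)
        std::printf("%s\n", it.label());    // innermost frame first
}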
+class ProfilingFrameIterator
+{
+ const Module* module_;
+ const CodeRange* codeRange_;
+ uint8_t* callerFP_;
+ void* callerPC_;
+ void* stackAddress_;
+ ExitReason exitReason_;
+
+ void initFromFP(const AsmJSActivation& activation);
+
+ public:
+ ProfilingFrameIterator();
+ explicit ProfilingFrameIterator(const AsmJSActivation& activation);
+ ProfilingFrameIterator(const AsmJSActivation& activation,
+ const JS::ProfilingFrameIterator::RegisterState& state);
+ void operator++();
+ bool done() const { return !codeRange_; }
+
+ void* stackAddress() const { MOZ_ASSERT(!done()); return stackAddress_; }
+ const char* label() const;
+};
+
+// Prologue/epilogue code generation
+void
+GenerateExitPrologue(jit::MacroAssembler& masm, unsigned framePushed, ExitReason reason,
+ ProfilingOffsets* offsets, jit::Label* maybeEntry = nullptr);
+void
+GenerateExitEpilogue(jit::MacroAssembler& masm, unsigned framePushed, ExitReason reason,
+ ProfilingOffsets* offsets);
+void
+GenerateFunctionPrologue(jit::MacroAssembler& masm, unsigned framePushed, FuncOffsets* offsets);
+void
+GenerateFunctionEpilogue(jit::MacroAssembler& masm, unsigned framePushed, FuncOffsets* offsets);
+
+// Runtime patching to enable/disable profiling
+
+void
+EnableProfilingPrologue(const Module& module, const CallSite& callSite, bool enabled);
+
+void
+EnableProfilingEpilogue(const Module& module, const CodeRange& codeRange, bool enabled);
+
+} // namespace wasm
+} // namespace js
+
+#endif // wasm_frame_iterator_h
diff --git a/js/src/asmjs/WasmGenerator.cpp b/js/src/asmjs/WasmGenerator.cpp
index 50e4abc7e5c..6b2bcf94f80 100644
--- a/js/src/asmjs/WasmGenerator.cpp
+++ b/js/src/asmjs/WasmGenerator.cpp
@@ -18,31 +18,15 @@
 #include "asmjs/WasmGenerator.h"
-#include "asmjs/AsmJSModule.h"
+#include "asmjs/AsmJSValidate.h"
 #include "asmjs/WasmStubs.h"
-#ifdef MOZ_VTUNE
-# include "vtune/VTuneWrapper.h"
-#endif
+
+#include "jit/MacroAssembler-inl.h"
 using namespace js;
 using namespace js::jit;
 using namespace js::wasm;
-static bool
-ParallelCompilationEnabled(ExclusiveContext* cx)
-{
- // Since there are a fixed number of helper threads and one is already being
- // consumed by this parsing task, ensure that there is another free thread to
- // avoid deadlock. (Note: there is at most one thread used for parsing so we
- // don't have to worry about general dining philosophers.)
- if (HelperThreadState().threadCount <= 1 || !CanUseExtraThreads())
- return false;
-
- // If 'cx' isn't a JSContext, then we are already off the main thread so
- // off-thread compilation must be enabled.
- return !cx->isJSContext() || cx->asJSContext()->runtime()->canUseOffthreadIonCompilation();
-}
-
// ****************************************************************************
// ModuleGenerator
@@ -51,19 +35,25 @@ static const unsigned COMPILATION_LIFO_DEFAULT_CHUNK_SIZE = 64 * 1024;
 ModuleGenerator::ModuleGenerator(ExclusiveContext* cx)
 : cx_(cx),
+ args_(cx),
+ globalBytes_(InitialGlobalDataBytes),
+ slowFuncs_(cx),
 lifo_(GENERATOR_LIFO_DEFAULT_CHUNK_SIZE),
+ jcx_(CompileRuntime::get(cx->compartment()->runtimeFromAnyThread())),
 alloc_(&lifo_),
- masm_(MacroAssembler::AsmJSToken(), &alloc_),
+ masm_(MacroAssembler::AsmJSToken(), alloc_),
 sigs_(cx),
 parallel_(false),
 outstanding_(0),
 tasks_(cx),
 freeTasks_(cx),
+ funcBytes_(0),
 funcEntryOffsets_(cx),
- funcPtrTables_(cx),
- slowFuncs_(cx),
- active_(nullptr)
-{}
+ activeFunc_(nullptr),
+ finishedFuncs_(false)
+{
+ MOZ_ASSERT(IsCompilingAsmJS());
+}
 ModuleGenerator::~ModuleGenerator()
 {
@@ -72,12 +62,12 @@ ModuleGenerator::~ModuleGenerator()
 if (outstanding_) {
 AutoLockHelperThreadState lock;
 while (true) {
- CompileTaskVector& worklist = HelperThreadState().wasmWorklist();
+ IonCompileTaskVector& worklist = HelperThreadState().wasmWorklist();
 MOZ_ASSERT(outstanding_ >= worklist.length());
 outstanding_ -= worklist.length();
 worklist.clear();
- CompileTaskVector& finished = HelperThreadState().wasmFinishedList();
+ IonCompileTaskVector& finished = HelperThreadState().wasmFinishedList();
 MOZ_ASSERT(outstanding_ >= finished.length());
 outstanding_ -= finished.length();
 finished.clear();
@@ -100,14 +90,29 @@
 }
 }
-bool
-ModuleGenerator::init(ScriptSource* ss, uint32_t srcStart, uint32_t srcBodyStart, bool strict)
+static bool
+ParallelCompilationEnabled(ExclusiveContext* cx)
 {
- if (!sigs_.init())
+ // Since there are a fixed number of helper threads and one is already being
+ // consumed by this parsing task, ensure that there is another free thread to
+ // avoid deadlock. (Note: there is at most one thread used for parsing so we
+ // don't have to worry about general dining philosophers.)
+ if (HelperThreadState().threadCount <= 1 || !CanUseExtraThreads())
 return false;
- module_ = cx_->new_(ss, srcStart, srcBodyStart, strict, cx_->canUseSignalHandlers());
- if (!module_)
+ // If 'cx' isn't a JSContext, then we are already off the main thread so
+ // off-thread compilation must be enabled.
+ return !cx->isJSContext() || cx->asJSContext()->runtime()->canUseOffthreadIonCompilation(); +} + +bool +ModuleGenerator::init() +{ + staticLinkData_ = cx_->make_unique(); + if (!staticLinkData_) + return false; + + if (!sigs_.init()) return false; uint32_t numTasks; @@ -131,8 +136,9 @@ ModuleGenerator::init(ScriptSource* ss, uint32_t srcStart, uint32_t srcBodyStart if (!tasks_.initCapacity(numTasks)) return false; + JSRuntime* runtime = cx_->compartment()->runtimeFromAnyThread(); for (size_t i = 0; i < numTasks; i++) - tasks_.infallibleEmplaceBack(COMPILATION_LIFO_DEFAULT_CHUNK_SIZE, args()); + tasks_.infallibleEmplaceBack(runtime, args_, COMPILATION_LIFO_DEFAULT_CHUNK_SIZE); if (!freeTasks_.reserve(numTasks)) return false; @@ -143,101 +149,15 @@ ModuleGenerator::init(ScriptSource* ss, uint32_t srcStart, uint32_t srcBodyStart } bool -ModuleGenerator::startFunc(PropertyName* name, unsigned line, unsigned column, - FunctionGenerator* fg) +ModuleGenerator::allocateGlobalBytes(uint32_t bytes, uint32_t align, uint32_t* globalDataOffset) { - MOZ_ASSERT(!active_); - - if (freeTasks_.empty() && !finishOutstandingTask()) + uint32_t pad = ComputeByteAlignment(globalBytes_, align); + if (UINT32_MAX - globalBytes_ < pad + bytes) return false; - CompileTask* task = freeTasks_.popCopy(); - FuncIR* func = task->lifo().new_(task->lifo(), name, line, column); - if (!func) - return false; - - task->init(*func); - fg->m_ = this; - fg->task_ = task; - fg->func_ = func; - active_ = fg; - return true; -} - -bool -ModuleGenerator::finishFunc(uint32_t funcIndex, const LifoSig& sig, unsigned generateTime, - FunctionGenerator* fg) -{ - MOZ_ASSERT(active_ == fg); - - fg->func_->finish(funcIndex, sig, generateTime); - - if (parallel_) { - if (!StartOffThreadWasmCompile(cx_, fg->task_)) - return false; - outstanding_++; - } else { - if (!CompileFunction(fg->task_)) - return false; - if (!finishTask(fg->task_)) - return false; - } - - fg->m_ = nullptr; - fg->task_ = nullptr; - fg->func_ = nullptr; - active_ = nullptr; - return true; -} - -bool -ModuleGenerator::finish(frontend::TokenStream& ts, ScopedJSDeletePtr* module, - SlowFunctionVector* slowFuncs) -{ - MOZ_ASSERT(!active_); - - while (outstanding_ > 0) { - if (!finishOutstandingTask()) - return false; - } - - module_->setFunctionBytes(masm_.size()); - - JitContext jitContext(CompileRuntime::get(args().runtime)); - - // Now that all function definitions have been compiled and their function- - // entry offsets are all known, patch inter-function calls and fill in the - // function-pointer table offsets. 
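The removed finish() above, like its replacement finishFuncs() later in this file, patches each internal call site once every callee's entry offset is known. A hedged sketch of that linking step; PatchRel32At is a hypothetical stand-in for masm.patchCall():

// Hedged sketch: internal calls name their target by function index during
// codegen and are rewritten to real offsets once layout is final.
#include <cstdint>
#include <cstring>
#include <vector>

struct InternalCallSite
{
    uint32_t returnAddressOffset;  // code offset just past the call's rel32
    uint32_t targetFuncIndex;      // callee named by function index
};

static void
PatchRel32At(uint8_t* code, uint32_t callerRetOffset, uint32_t calleeOffset)
{
    int32_t rel32 = int32_t(calleeOffset) - int32_t(callerRetOffset);
    std::memcpy(code + callerRetOffset - 4, &rel32, sizeof(rel32));
}

static void
PatchInternalCalls(uint8_t* code, const std::vector<InternalCallSite>& callSites,
                   const std::vector<uint32_t>& funcEntryOffsets)
{
    // Every internal call recorded during codegen is rewritten to the final
    // entry offset of its target function.
    for (const InternalCallSite& cs : callSites)
        PatchRel32At(code, cs.returnAddressOffset, funcEntryOffsets[cs.targetFuncIndex]);
}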
- - if (!GenerateStubs(masm_, *module_, funcEntryOffsets_)) - return false; - - for (auto& cs : masm_.callSites()) { - if (!cs.isInternal()) - continue; - MOZ_ASSERT(cs.kind() == CallSiteDesc::Relative); - uint32_t callerOffset = cs.returnAddressOffset(); - uint32_t calleeOffset = funcEntryOffsets_[cs.targetIndex()]; - masm_.patchCall(callerOffset, calleeOffset); - } - - for (unsigned tableIndex = 0; tableIndex < funcPtrTables_.length(); tableIndex++) { - FuncPtrTable& table = funcPtrTables_[tableIndex]; - AsmJSModule::OffsetVector entryOffsets; - for (uint32_t funcIndex : table.elems) - entryOffsets.append(funcEntryOffsets_[funcIndex]); - module_->funcPtrTable(tableIndex).define(Move(entryOffsets)); - } - - masm_.finish(); - if (masm_.oom()) - return false; - - if (!module_->finish(cx_, ts, masm_)) - return false; - - *module = module_.forget(); - *slowFuncs = Move(slowFuncs_); + globalBytes_ += pad; + *globalDataOffset = globalBytes_; + globalBytes_ += bytes; return true; } @@ -246,7 +166,7 @@ ModuleGenerator::finishOutstandingTask() { MOZ_ASSERT(parallel_); - CompileTask* task = nullptr; + IonCompileTask* task = nullptr; { AutoLockHelperThreadState lock; while (true) { @@ -269,55 +189,51 @@ ModuleGenerator::finishOutstandingTask() } bool -ModuleGenerator::finishTask(CompileTask* task) +ModuleGenerator::finishTask(IonCompileTask* task) { const FuncIR& func = task->func(); - FunctionCompileResults& results = task->results(); + FuncCompileResults& results = task->results(); - // Merge the compiled results into the whole-module masm. - size_t offset = masm_.size(); - if (!masm_.asmMergeWith(results.masm())) - return false; + // Offset the recorded FuncOffsets by the offset of the function in the + // whole module's code segment. + uint32_t offsetInWhole = masm_.size(); + results.offsets().offsetBy(offsetInWhole); - // Create the code range now that we know offset of results in whole masm. - AsmJSModule::CodeRange codeRange(func.line(), results.offsets()); - codeRange.functionOffsetBy(offset); - if (!module_->addFunctionCodeRange(func.name(), codeRange)) - return false; - - // Compilation may complete out of order, so cannot simply append(). + // Record the non-profiling entry for whole-module linking later. if (func.index() >= funcEntryOffsets_.length()) { if (!funcEntryOffsets_.resize(func.index() + 1)) return false; } - funcEntryOffsets_[func.index()] = codeRange.entry(); + funcEntryOffsets_[func.index()] = results.offsets().nonProfilingEntry; + + // Merge the compiled results into the whole-module masm. + DebugOnly sizeBefore = masm_.size(); + if (!masm_.asmMergeWith(results.masm())) + return false; + MOZ_ASSERT(masm_.size() == offsetInWhole + results.masm().size()); + + // Add the CodeRange for this function. + CacheableChars funcName = StringToNewUTF8CharsZ(cx_, *func.name()); + if (!funcName) + return false; + uint32_t nameIndex = funcNames_.length(); + if (!funcNames_.emplaceBack(Move(funcName))) + return false; + if (!codeRanges_.emplaceBack(nameIndex, func.line(), results.offsets())) + return false; // Keep a record of slow functions for printing in the final console message. 
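Stepping back to the allocateGlobalBytes definition completed at the top of this hunk: it claims global-data space with an align-and-bump scheme. A minimal sketch of that logic, assuming only that globalBytes_ tracks the section's high-water mark:

// Hedged sketch mirroring allocateGlobalBytes: pad up to 'align', check for
// uint32 overflow, then claim 'bytes' and report where they start.
#include <cstdint>

static bool
AllocateGlobalBytesSketch(uint32_t* globalBytes, uint32_t bytes, uint32_t align,
                          uint32_t* globalDataOffset)
{
    uint32_t pad = (align - (*globalBytes % align)) % align;  // ComputeByteAlignment
    if (UINT32_MAX - *globalBytes < pad + bytes)              // would overflow
        return false;
    *globalBytes += pad;
    *globalDataOffset = *globalBytes;                         // caller's data starts here
    *globalBytes += bytes;
    return true;
}

Global variables, import exits, and function-pointer tables all draw from this one section, which is why the alignment is passed per request.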
unsigned totalTime = func.generateTime() + results.compileTime(); if (totalTime >= SlowFunction::msThreshold) { - if (!slowFuncs_.append(SlowFunction(func.name(), totalTime, func.line(), func.column()))) + if (!slowFuncs_.emplaceBack(func.name(), totalTime, func.line(), func.column())) return false; } -#if defined(MOZ_VTUNE) || defined(JS_ION_PERF) - AsmJSModule::ProfiledFunction pf(func.name(), codeRange.entry(), codeRange.end(), - func.line(), func.column()); - if (!module().addProfiledFunction(pf)) - return false; -#endif - task->reset(); freeTasks_.infallibleAppend(task); return true; } -CompileArgs -ModuleGenerator::args() const -{ - return CompileArgs(cx_->compartment()->runtimeFromAnyThread(), - module().usesSignalHandlersForOOB()); -} - const LifoSig* ModuleGenerator::newLifoSig(const MallocSig& sig) { @@ -333,7 +249,182 @@ ModuleGenerator::newLifoSig(const MallocSig& sig) } bool -ModuleGenerator::declareFuncPtrTable(uint32_t numElems, uint32_t* funcPtrTableIndex) +ModuleGenerator::allocateGlobalVar(ValType type, uint32_t* globalDataOffset) +{ + unsigned width = 0; + switch (type) { + case wasm::ValType::I32: + case wasm::ValType::F32: + width = 4; + break; + case wasm::ValType::I64: + case wasm::ValType::F64: + width = 8; + break; + case wasm::ValType::I32x4: + case wasm::ValType::F32x4: + case wasm::ValType::B32x4: + width = 16; + break; + } + return allocateGlobalBytes(width, width, globalDataOffset); +} + +bool +ModuleGenerator::declareImport(MallocSig&& sig, unsigned* index) +{ + static_assert(Module::SizeOfImportExit % sizeof(void*) == 0, "word aligned"); + + uint32_t globalDataOffset; + if (!allocateGlobalBytes(Module::SizeOfImportExit, sizeof(void*), &globalDataOffset)) + return false; + + *index = unsigned(imports_.length()); + return imports_.emplaceBack(Move(sig), globalDataOffset); +} + +uint32_t +ModuleGenerator::numDeclaredImports() const +{ + return imports_.length(); +} + +uint32_t +ModuleGenerator::importExitGlobalDataOffset(uint32_t index) const +{ + return imports_[index].exitGlobalDataOffset(); +} + +const MallocSig& +ModuleGenerator::importSig(uint32_t index) const +{ + return imports_[index].sig(); +} + +bool +ModuleGenerator::defineImport(uint32_t index, ProfilingOffsets interpExit, ProfilingOffsets jitExit) +{ + Import& import = imports_[index]; + import.initInterpExitOffset(interpExit.begin); + import.initJitExitOffset(jitExit.begin); + return codeRanges_.emplaceBack(CodeRange::ImportInterpExit, interpExit) && + codeRanges_.emplaceBack(CodeRange::ImportJitExit, jitExit); +} + +bool +ModuleGenerator::declareExport(MallocSig&& sig, uint32_t funcIndex, uint32_t* index) +{ + *index = exports_.length(); + return exports_.emplaceBack(Move(sig), funcIndex); +} + +uint32_t +ModuleGenerator::exportFuncIndex(uint32_t index) const +{ + return exports_[index].funcIndex(); +} + +const MallocSig& +ModuleGenerator::exportSig(uint32_t index) const +{ + return exports_[index].sig(); +} + +uint32_t +ModuleGenerator::numDeclaredExports() const +{ + return exports_.length(); +} + +bool +ModuleGenerator::defineExport(uint32_t index, Offsets offsets) +{ + exports_[index].initStubOffset(offsets.begin); + return codeRanges_.emplaceBack(CodeRange::Entry, offsets); +} + +bool +ModuleGenerator::startFunc(PropertyName* name, unsigned line, unsigned column, + FunctionGenerator* fg) +{ + MOZ_ASSERT(!activeFunc_); + MOZ_ASSERT(!finishedFuncs_); + + if (freeTasks_.empty() && !finishOutstandingTask()) + return false; + + IonCompileTask* task = freeTasks_.popCopy(); + FuncIR* func = 
task->lifo().new_(task->lifo(), name, line, column); + if (!func) + return false; + + task->init(*func); + fg->m_ = this; + fg->task_ = task; + fg->func_ = func; + activeFunc_ = fg; + return true; +} + +bool +ModuleGenerator::finishFunc(uint32_t funcIndex, const LifoSig& sig, unsigned generateTime, + FunctionGenerator* fg) +{ + MOZ_ASSERT(activeFunc_ == fg); + + fg->func_->finish(funcIndex, sig, generateTime); + + if (parallel_) { + if (!StartOffThreadWasmCompile(cx_, fg->task_)) + return false; + outstanding_++; + } else { + if (!IonCompileFunction(fg->task_)) + return false; + if (!finishTask(fg->task_)) + return false; + } + + fg->m_ = nullptr; + fg->task_ = nullptr; + fg->func_ = nullptr; + activeFunc_ = nullptr; + return true; +} + +bool +ModuleGenerator::finishFuncs() +{ + MOZ_ASSERT(!activeFunc_); + MOZ_ASSERT(!finishedFuncs_); + + while (outstanding_ > 0) { + if (!finishOutstandingTask()) + return false; + } + + // During codegen, all wasm->wasm (internal) calls use AsmJSInternalCallee + // as the call target, which contains the function-index of the target. + // These get recorded in a CallSiteAndTargetVector in the MacroAssembler + // so that we can patch them now that all the function entry offsets are + // known. + + for (CallSiteAndTarget& cs : masm_.callSites()) { + if (!cs.isInternal()) + continue; + MOZ_ASSERT(cs.kind() == CallSiteDesc::Relative); + uint32_t callerOffset = cs.returnAddressOffset(); + uint32_t calleeOffset = funcEntryOffsets_[cs.targetIndex()]; + masm_.patchCall(callerOffset, calleeOffset); + } + + funcBytes_ = masm_.size(); + finishedFuncs_ = true; + return true; +} + +bool +ModuleGenerator::declareFuncPtrTable(uint32_t numElems, uint32_t* index) { // Here just add an uninitialized FuncPtrTable and claim space in the global // data section. Later, 'defineFuncPtrTable' will be called with function @@ -343,25 +434,194 @@ ModuleGenerator::declareFuncPtrTable(uint32_t numElems, uint32_t* funcPtrTableIn if (numElems > 1024 * 1024) return false; - if (!module_->declareFuncPtrTable(numElems, funcPtrTableIndex)) + uint32_t globalDataOffset; + if (!allocateGlobalBytes(numElems * sizeof(void*), sizeof(void*), &globalDataOffset)) return false; - MOZ_ASSERT(*funcPtrTableIndex == funcPtrTables_.length()); - return funcPtrTables_.emplaceBack(numElems); -} + StaticLinkData::FuncPtrTableVector& tables = staticLinkData_->funcPtrTables; -bool -ModuleGenerator::defineFuncPtrTable(uint32_t funcPtrTableIndex, FuncIndexVector&& elems) -{ - // The AsmJSModule needs to know the offsets in the code section which won't - // be known until 'finish'. So just remember the function indices for now - // and wait until 'finish' to hand over the offsets to the AsmJSModule. 
-
- FuncPtrTable& table = funcPtrTables_[funcPtrTableIndex];
- if (table.numDeclared != elems.length() || !table.elems.empty())
+ *index = tables.length();
+ if (!tables.emplaceBack(globalDataOffset))
+ return false;
+
+ if (!tables.back().elemOffsets.resize(numElems))
 return false;
- table.elems = Move(elems);
 return true;
}
+uint32_t
+ModuleGenerator::funcPtrTableGlobalDataOffset(uint32_t index) const
+{
+ return staticLinkData_->funcPtrTables[index].globalDataOffset;
+}
+
+void
+ModuleGenerator::defineFuncPtrTable(uint32_t index, const Vector& elemFuncIndices)
+{
+ MOZ_ASSERT(finishedFuncs_);
+
+ StaticLinkData::FuncPtrTable& table = staticLinkData_->funcPtrTables[index];
+ MOZ_ASSERT(table.elemOffsets.length() == elemFuncIndices.length());
+
+ for (size_t i = 0; i < elemFuncIndices.length(); i++)
+ table.elemOffsets[i] = funcEntryOffsets_[elemFuncIndices[i]];
+}
+
+bool
+ModuleGenerator::defineInlineStub(Offsets offsets)
+{
+ MOZ_ASSERT(finishedFuncs_);
+ return codeRanges_.emplaceBack(CodeRange::Inline, offsets);
+}
+
+bool
+ModuleGenerator::defineSyncInterruptStub(ProfilingOffsets offsets)
+{
+ MOZ_ASSERT(finishedFuncs_);
+ return codeRanges_.emplaceBack(CodeRange::Interrupt, offsets);
+}
+
+bool
+ModuleGenerator::defineAsyncInterruptStub(Offsets offsets)
+{
+ MOZ_ASSERT(finishedFuncs_);
+ staticLinkData_->pod.interruptOffset = offsets.begin;
+ return codeRanges_.emplaceBack(CodeRange::Inline, offsets);
+}
+
+bool
+ModuleGenerator::defineOutOfBoundsStub(Offsets offsets)
+{
+ MOZ_ASSERT(finishedFuncs_);
+ staticLinkData_->pod.outOfBoundsOffset = offsets.begin;
+ return codeRanges_.emplaceBack(CodeRange::Inline, offsets);
+}
+
+Module*
+ModuleGenerator::finish(Module::HeapBool usesHeap,
+ Module::SharedBool sharedHeap,
+ UniqueChars filename,
+ UniqueStaticLinkData* staticLinkData,
+ SlowFunctionVector* slowFuncs)
+{
+ MOZ_ASSERT(!activeFunc_);
+ MOZ_ASSERT(finishedFuncs_);
+
+ if (!GenerateStubs(*this, usesHeap))
+ return nullptr;
+
+ masm_.finish();
+ if (masm_.oom())
+ return nullptr;
+
+ // Start global data on a new page so JIT code may be given independent
+ // protection flags. Note assumption that global data starts right after
+ // code below.
+ uint32_t codeBytes = AlignBytes(masm_.bytesNeeded(), AsmJSPageSize);
+
+ // Inflate the global bytes up to page size so that the total bytes are a
+ // multiple of the page size (as required by the allocator functions).
+ globalBytes_ = AlignBytes(globalBytes_, AsmJSPageSize);
+ uint32_t totalBytes = codeBytes + globalBytes_;
+
+ // Allocate the code (guarded by a UniquePtr until it is given to the Module).
+ UniqueCodePtr code = AllocateCode(cx_, totalBytes);
+ if (!code)
+ return nullptr;
+
+ // Delay flushing until Module::dynamicallyLink. The flush-inhibited range
+ // is set by executableCopy.
+ AutoFlushICache afc("ModuleGenerator::finish", /* inhibit = */ true);
+ masm_.executableCopy(code.get());
+
+ // c.f. JitCode::copyFrom
+ MOZ_ASSERT(masm_.jumpRelocationTableBytes() == 0);
+ MOZ_ASSERT(masm_.dataRelocationTableBytes() == 0);
+ MOZ_ASSERT(masm_.preBarrierTableBytes() == 0);
+ MOZ_ASSERT(!masm_.hasSelfReference());
+
+ // Convert the CallSiteAndTargetVector (needed during generation) to a
+ // CallSiteVector (what is stored in the Module).
+ CallSiteVector callSites;
+ if (!callSites.appendAll(masm_.callSites()))
+ return nullptr;
+
+ // Add links to absolute addresses identified symbolically.
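The hunk that follows records, for every symbolic address the code uses, the offsets that must later be patched with the real pointer. A hedged sketch of the scheme; the enum, the resolver, and the raw-pointer write are illustrative (MIPS patches instruction immediates instead):

// Hedged sketch of symbolic linking: codegen records only a symbol id plus a
// patch offset; a later pass resolves each id and writes the real address.
#include <array>
#include <cstddef>
#include <cstdint>
#include <cstring>
#include <vector>

enum class Sym : size_t { SinD, CosD, Limit };

using SymbolicLinks = std::array<std::vector<uint32_t>, size_t(Sym::Limit)>;

static void
PatchSymbolicLinks(uint8_t* code, const SymbolicLinks& links, void* (*resolve)(Sym))
{
    for (size_t s = 0; s < links.size(); s++) {
        void* addr = resolve(Sym(s));           // real pointer for this symbol
        for (uint32_t offset : links[s])
            std::memcpy(code + offset, &addr, sizeof(addr));
    }
}

Keeping the links symbolic rather than absolute is what makes the compiled module serializable and relocatable.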
+ StaticLinkData::SymbolicLinkArray& symbolicLinks = staticLinkData_->symbolicLinks;
+ for (size_t i = 0; i < masm_.numAsmJSAbsoluteAddresses(); i++) {
+ AsmJSAbsoluteAddress src = masm_.asmJSAbsoluteAddress(i);
+ if (!symbolicLinks[src.target].append(src.patchAt.offset()))
+ return nullptr;
+ }
+
+ // Relative link metadata: absolute addresses that refer to another point within
+ // the asm.js module.
+
+ // CodeLabels are used for switch cases and loads from floating-point /
+ // SIMD values in the constant pool.
+ for (size_t i = 0; i < masm_.numCodeLabels(); i++) {
+ CodeLabel cl = masm_.codeLabel(i);
+ StaticLinkData::InternalLink link(StaticLinkData::InternalLink::CodeLabel);
+ link.patchAtOffset = masm_.labelToPatchOffset(*cl.patchAt());
+ link.targetOffset = cl.target()->offset();
+ if (!staticLinkData_->internalLinks.append(link))
+ return nullptr;
+ }
+
+#if defined(JS_CODEGEN_X86)
+ // Global data accesses in x86 need to be patched with the absolute
+ // address of the global. Globals are allocated sequentially after the
+ // code section so we can just use an InternalLink.
+ for (size_t i = 0; i < masm_.numAsmJSGlobalAccesses(); i++) {
+ AsmJSGlobalAccess a = masm_.asmJSGlobalAccess(i);
+ StaticLinkData::InternalLink link(StaticLinkData::InternalLink::RawPointer);
+ link.patchAtOffset = masm_.labelToPatchOffset(a.patchAt);
+ link.targetOffset = codeBytes + a.globalDataOffset;
+ if (!staticLinkData_->internalLinks.append(link))
+ return nullptr;
+ }
+#endif
+
+#if defined(JS_CODEGEN_MIPS32) || defined(JS_CODEGEN_MIPS64)
+ // On MIPS we need to update all the long jumps because they contain an
+ // absolute address. The values are correctly patched for the current address
+ // space, but not after serialization or profiling-mode toggling.
+ for (size_t i = 0; i < masm_.numLongJumps(); i++) {
+ size_t off = masm_.longJump(i);
+ StaticLinkData::InternalLink link(StaticLinkData::InternalLink::InstructionImmediate);
+ link.patchAtOffset = off;
+ link.targetOffset = Assembler::ExtractInstructionImmediate(code.get() + off) -
+ uintptr_t(code.get());
+ if (!staticLinkData_->internalLinks.append(link))
+ return nullptr;
+ }
+#endif
+
+#if defined(JS_CODEGEN_X64)
+ // Global data accesses on x64 use rip-relative addressing and thus do
+ // not need patching after deserialization.
+ uint8_t* globalData = code.get() + codeBytes;
+ for (size_t i = 0; i < masm_.numAsmJSGlobalAccesses(); i++) {
+ AsmJSGlobalAccess a = masm_.asmJSGlobalAccess(i);
+ masm_.patchAsmJSGlobalAccess(a.patchAt, code.get(), globalData, a.globalDataOffset);
+ }
+#endif
+
+ *staticLinkData = Move(staticLinkData_);
+ *slowFuncs = Move(slowFuncs_);
+ return cx_->new_(args_,
+ funcBytes_,
+ codeBytes,
+ globalBytes_,
+ usesHeap,
+ sharedHeap,
+ Move(code),
+ Move(imports_),
+ Move(exports_),
+ masm_.extractHeapAccesses(),
+ Move(codeRanges_),
+ Move(callSites),
+ Move(funcNames_),
+ Move(filename));
+}
diff --git a/js/src/asmjs/WasmGenerator.h b/js/src/asmjs/WasmGenerator.h
index 5e140428aa7..49b48992cd3 100644
--- a/js/src/asmjs/WasmGenerator.h
+++ b/js/src/asmjs/WasmGenerator.h
@@ -16,22 +16,21 @@
 * limitations under the License.
*/ -#ifndef asmjs_wasm_generator_h -#define asmjs_wasm_generator_h +#ifndef wasm_generator_h +#define wasm_generator_h #include "asmjs/WasmIonCompile.h" -#include "asmjs/WasmStubs.h" +#include "asmjs/WasmIR.h" +#include "asmjs/WasmModule.h" #include "jit/MacroAssembler.h" namespace js { - -class AsmJSModule; -namespace fronted { class TokenStream; } - namespace wasm { class FunctionGenerator; +// A slow function describes a function that took longer than msThreshold to +// validate and compile. struct SlowFunction { SlowFunction(PropertyName* name, unsigned ms, unsigned line, unsigned column) @@ -45,7 +44,6 @@ struct SlowFunction unsigned line; unsigned column; }; - typedef Vector SlowFunctionVector; // A ModuleGenerator encapsulates the creation of a wasm module. During the @@ -55,19 +53,7 @@ typedef Vector SlowFunctionVector; // compilation and extract the resulting wasm module. class MOZ_STACK_CLASS ModuleGenerator { - public: - typedef Vector FuncIndexVector; - - private: - struct FuncPtrTable - { - uint32_t numDeclared; - FuncIndexVector elems; - - explicit FuncPtrTable(uint32_t numDeclared) : numDeclared(numDeclared) {} - FuncPtrTable(FuncPtrTable&& rhs) : numDeclared(rhs.numDeclared), elems(Move(rhs.elems)) {} - }; - typedef Vector FuncPtrTableVector; + typedef Vector FuncOffsetVector; struct SigHashPolicy { @@ -77,45 +63,95 @@ class MOZ_STACK_CLASS ModuleGenerator }; typedef HashSet SigSet; - ExclusiveContext* cx_; - ScopedJSDeletePtr module_; + ExclusiveContext* cx_; + CompileArgs args_; - LifoAlloc lifo_; - jit::TempAllocator alloc_; - jit::MacroAssembler masm_; - SigSet sigs_; + // Data handed over to the Module in finish() + uint32_t globalBytes_; + ImportVector imports_; + ExportVector exports_; + CodeRangeVector codeRanges_; + CacheableCharsVector funcNames_; - bool parallel_; - uint32_t outstanding_; - Vector tasks_; - Vector freeTasks_; + // Data handed back to the caller in finish() + UniqueStaticLinkData staticLinkData_; + SlowFunctionVector slowFuncs_; - FuncOffsetVector funcEntryOffsets_; - FuncPtrTableVector funcPtrTables_; + // Data scoped to the ModuleGenerator's lifetime + LifoAlloc lifo_; + jit::JitContext jcx_; + jit::TempAllocator alloc_; + jit::MacroAssembler masm_; + SigSet sigs_; - SlowFunctionVector slowFuncs_; - mozilla::DebugOnly active_; + // Parallel compilation + bool parallel_; + uint32_t outstanding_; + Vector tasks_; + Vector freeTasks_; + // Function compilation + uint32_t funcBytes_; + FuncOffsetVector funcEntryOffsets_; + DebugOnly activeFunc_; + DebugOnly finishedFuncs_; + + bool allocateGlobalBytes(uint32_t bytes, uint32_t align, uint32_t* globalDataOffset); bool finishOutstandingTask(); - bool finishTask(CompileTask* task); - CompileArgs args() const; + bool finishTask(IonCompileTask* task); public: explicit ModuleGenerator(ExclusiveContext* cx); ~ModuleGenerator(); - bool init(ScriptSource* ss, uint32_t srcStart, uint32_t srcBodyStart, bool strict); - AsmJSModule& module() const { return *module_; } + bool init(); + + CompileArgs args() const { return args_; } + jit::MacroAssembler& masm() { return masm_; } + const FuncOffsetVector& funcEntryOffsets() const { return funcEntryOffsets_; } const LifoSig* newLifoSig(const MallocSig& sig); - bool declareFuncPtrTable(uint32_t numElems, uint32_t* funcPtrTableIndex); - bool defineFuncPtrTable(uint32_t funcPtrTableIndex, FuncIndexVector&& elems); + // Global data: + bool allocateGlobalVar(ValType type, uint32_t* globalDataOffset); + + // Imports: + bool declareImport(MallocSig&& sig, uint32_t* index); + 
uint32_t numDeclaredImports() const; + uint32_t importExitGlobalDataOffset(uint32_t index) const; + const MallocSig& importSig(uint32_t index) const; + bool defineImport(uint32_t index, ProfilingOffsets interpExit, ProfilingOffsets jitExit); + + // Exports: + bool declareExport(MallocSig&& sig, uint32_t funcIndex, uint32_t* index); + uint32_t numDeclaredExports() const; + uint32_t exportFuncIndex(uint32_t index) const; + const MallocSig& exportSig(uint32_t index) const; + bool defineExport(uint32_t index, Offsets offsets); + + // Functions: bool startFunc(PropertyName* name, unsigned line, unsigned column, FunctionGenerator* fg); bool finishFunc(uint32_t funcIndex, const LifoSig& sig, unsigned generateTime, FunctionGenerator* fg); + bool finishFuncs(); - bool finish(frontend::TokenStream& ts, ScopedJSDeletePtr* module, - SlowFunctionVector* slowFuncs); + // Function-pointer tables: + bool declareFuncPtrTable(uint32_t numElems, uint32_t* index); + uint32_t funcPtrTableGlobalDataOffset(uint32_t index) const; + void defineFuncPtrTable(uint32_t index, const Vector& elemFuncIndices); + + // Stubs: + bool defineInlineStub(Offsets offsets); + bool defineSyncInterruptStub(ProfilingOffsets offsets); + bool defineAsyncInterruptStub(Offsets offsets); + bool defineOutOfBoundsStub(Offsets offsets); + + // Null return indicates failure. The caller must immediately root a + // non-null return value. + Module* finish(Module::HeapBool usesHeap, + Module::SharedBool sharedHeap, + UniqueChars filename, + UniqueStaticLinkData* staticLinkData, + SlowFunctionVector* slowFuncs); }; // A FunctionGenerator encapsulates the generation of a single function body. @@ -128,7 +164,7 @@ class MOZ_STACK_CLASS FunctionGenerator friend class ModuleGenerator; ModuleGenerator* m_; - CompileTask* task_; + IonCompileTask* task_; FuncIR* func_; public: @@ -139,4 +175,4 @@ class MOZ_STACK_CLASS FunctionGenerator } // namespace wasm } // namespace js -#endif // asmjs_wasm_generator_h +#endif // wasm_generator_h diff --git a/js/src/asmjs/WasmIR.h b/js/src/asmjs/WasmIR.h index 14d9b0e2d81..7cf72f171e7 100644 --- a/js/src/asmjs/WasmIR.h +++ b/js/src/asmjs/WasmIR.h @@ -16,10 +16,10 @@ * limitations under the License. */ -#ifndef asmjs_wasm_ir_h -#define asmjs_wasm_ir_h +#ifndef wasm_ir_h +#define wasm_ir_h -#include "asmjs/Wasm.h" +#include "asmjs/WasmTypes.h" namespace js { @@ -434,8 +434,7 @@ class FuncIR typedef Vector> Bytecode; // Note: this unrooted field assumes AutoKeepAtoms via TokenStream via - // asm.js compilation. Wasm compilation will require an alternative way to - // name CodeRanges (index). + // asm.js compilation. 
PropertyName* name_; unsigned line_; unsigned column_; @@ -572,4 +571,4 @@ class FuncIR } // namespace wasm } // namespace js -#endif // asmjs_wasm_ir_h +#endif // wasm_ir_h diff --git a/js/src/asmjs/WasmIonCompile.cpp b/js/src/asmjs/WasmIonCompile.cpp index 09b4d2373f5..b8ab1095437 100644 --- a/js/src/asmjs/WasmIonCompile.cpp +++ b/js/src/asmjs/WasmIonCompile.cpp @@ -40,33 +40,30 @@ class FunctionCompiler typedef Vector PositionStack; typedef Vector LocalTypes; - CompileArgs args_; - const FuncIR& func_; - size_t pc_; + const FuncIR& func_; + size_t pc_; - TempAllocator& alloc_; - MIRGraph& graph_; - const CompileInfo& info_; - MIRGenerator& mirGen_; + TempAllocator& alloc_; + MIRGraph& graph_; + const CompileInfo& info_; + MIRGenerator& mirGen_; - MBasicBlock* curBlock_; + MBasicBlock* curBlock_; - PositionStack loopStack_; - PositionStack breakableStack_; - UnlabeledBlockMap unlabeledBreaks_; - UnlabeledBlockMap unlabeledContinues_; - LabeledBlockMap labeledBreaks_; - LabeledBlockMap labeledContinues_; + PositionStack loopStack_; + PositionStack breakableStack_; + UnlabeledBlockMap unlabeledBreaks_; + UnlabeledBlockMap unlabeledContinues_; + LabeledBlockMap labeledBreaks_; + LabeledBlockMap labeledContinues_; - LocalTypes localTypes_; + LocalTypes localTypes_; - FunctionCompileResults& compileResults_; + FuncCompileResults& compileResults_; public: - FunctionCompiler(CompileArgs args, const FuncIR& func, MIRGenerator& mirGen, - FunctionCompileResults& compileResults) - : args_(args), - func_(func), + FunctionCompiler(const FuncIR& func, MIRGenerator& mirGen, FuncCompileResults& compileResults) + : func_(func), pc_(0), alloc_(mirGen.alloc()), graph_(mirGen.graph()), @@ -770,7 +767,7 @@ class FunctionCompiler return callPrivate(MAsmJSCall::Callee(ptrFun), call, ret, def); } - bool builtinCall(Builtin builtin, const Call& call, ValType type, MDefinition** def) + bool builtinCall(SymbolicAddress builtin, const Call& call, ValType type, MDefinition** def) { return callPrivate(MAsmJSCall::Callee(builtin), call, ToExprType(type), def); } @@ -1648,7 +1645,7 @@ EmitMathBuiltinCall(FunctionCompiler& f, F32 f32, MDefinition** def) f.finishCallArgs(&call); - Builtin callee = f32 == F32::Ceil ? Builtin::CeilF : Builtin::FloorF; + SymbolicAddress callee = f32 == F32::Ceil ? 
SymbolicAddress::CeilF : SymbolicAddress::FloorF; return f.builtinCall(callee, call, ValType::F32, def); } @@ -1671,20 +1668,20 @@ EmitMathBuiltinCall(FunctionCompiler& f, F64 f64, MDefinition** def) return false; } - Builtin callee; + SymbolicAddress callee; switch (f64) { - case F64::Ceil: callee = Builtin::CeilD; break; - case F64::Floor: callee = Builtin::FloorD; break; - case F64::Sin: callee = Builtin::SinD; break; - case F64::Cos: callee = Builtin::CosD; break; - case F64::Tan: callee = Builtin::TanD; break; - case F64::Asin: callee = Builtin::ASinD; break; - case F64::Acos: callee = Builtin::ACosD; break; - case F64::Atan: callee = Builtin::ATanD; break; - case F64::Exp: callee = Builtin::ExpD; break; - case F64::Log: callee = Builtin::LogD; break; - case F64::Pow: callee = Builtin::PowD; break; - case F64::Atan2: callee = Builtin::ATan2D; break; + case F64::Ceil: callee = SymbolicAddress::CeilD; break; + case F64::Floor: callee = SymbolicAddress::FloorD; break; + case F64::Sin: callee = SymbolicAddress::SinD; break; + case F64::Cos: callee = SymbolicAddress::CosD; break; + case F64::Tan: callee = SymbolicAddress::TanD; break; + case F64::Asin: callee = SymbolicAddress::ASinD; break; + case F64::Acos: callee = SymbolicAddress::ACosD; break; + case F64::Atan: callee = SymbolicAddress::ATanD; break; + case F64::Exp: callee = SymbolicAddress::ExpD; break; + case F64::Log: callee = SymbolicAddress::LogD; break; + case F64::Pow: callee = SymbolicAddress::PowD; break; + case F64::Atan2: callee = SymbolicAddress::ATan2D; break; default: MOZ_CRASH("unexpected double math builtin callee"); } @@ -3046,26 +3043,25 @@ EmitB32X4Expr(FunctionCompiler& f, MDefinition** def) } bool -wasm::CompileFunction(CompileTask* task) +wasm::IonCompileFunction(IonCompileTask* task) { int64_t before = PRMJ_Now(); - CompileArgs args = task->args(); const FuncIR& func = task->func(); - FunctionCompileResults& results = task->results(); + FuncCompileResults& results = task->results(); - JitContext jitContext(CompileRuntime::get(args.runtime), &results.alloc()); + JitContext jitContext(CompileRuntime::get(task->runtime()), &results.alloc()); const JitCompileOptions options; MIRGraph graph(&results.alloc()); CompileInfo compileInfo(func.numLocals()); MIRGenerator mir(nullptr, options, &results.alloc(), &graph, &compileInfo, IonOptimizations.get(OptimizationLevel::AsmJS), - args.usesSignalHandlersForOOB); + task->args().useSignalHandlersForOOB); // Build MIR graph { - FunctionCompiler f(args, func, mir, results); + FunctionCompiler f(func, mir, results); if (!f.init()) return false; diff --git a/js/src/asmjs/WasmIonCompile.h b/js/src/asmjs/WasmIonCompile.h index 1331774ca4b..fba424c003e 100644 --- a/js/src/asmjs/WasmIonCompile.h +++ b/js/src/asmjs/WasmIonCompile.h @@ -16,60 +16,69 @@ * limitations under the License. */ -#ifndef asmjs_wasm_ion_compile_h -#define asmjs_wasm_ion_compile_h +#ifndef wasm_ion_compile_h +#define wasm_ion_compile_h -#include "asmjs/AsmJSFrameIterator.h" -#include "asmjs/WasmCompileArgs.h" #include "asmjs/WasmIR.h" #include "jit/MacroAssembler.h" namespace js { namespace wasm { -class FunctionCompileResults +// The FuncCompileResults contains the results of compiling a single function +// body, ready to be merged into the whole-module MacroAssembler. 
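A hedged sketch of that merge, mirroring the offsetBy() call in ModuleGenerator::finishTask(): each function body is compiled against offset 0 of its own masm, then every recorded offset is shifted by where the body landed in the whole module. Field names here are illustrative, not the tree's actual FuncOffsets definition:

#include <cstdint>

struct FuncOffsetsSketch
{
    uint32_t begin = 0;
    uint32_t nonProfilingEntry = 0;
    uint32_t profilingReturn = 0;
    uint32_t end = 0;

    void offsetBy(uint32_t offsetInWhole) {
        // All offsets were recorded relative to the function's own masm, so
        // rebase them onto the module's code segment.
        begin += offsetInWhole;
        nonProfilingEntry += offsetInWhole;
        profilingReturn += offsetInWhole;
        end += offsetInWhole;
    }
};

Because results carry only offsets until this point, functions can finish compiling on helper threads in any order and still be merged deterministically.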
+class FuncCompileResults { jit::TempAllocator alloc_; jit::MacroAssembler masm_; - AsmJSFunctionOffsets offsets_; + FuncOffsets offsets_; unsigned compileTime_; - FunctionCompileResults(const FunctionCompileResults&) = delete; - FunctionCompileResults& operator=(const FunctionCompileResults&) = delete; + FuncCompileResults(const FuncCompileResults&) = delete; + FuncCompileResults& operator=(const FuncCompileResults&) = delete; public: - explicit FunctionCompileResults(LifoAlloc& lifo) + explicit FuncCompileResults(LifoAlloc& lifo) : alloc_(&lifo), - masm_(jit::MacroAssembler::AsmJSToken(), &alloc_), + masm_(jit::MacroAssembler::AsmJSToken(), alloc_), compileTime_(0) {} jit::TempAllocator& alloc() { return alloc_; } jit::MacroAssembler& masm() { return masm_; } - - AsmJSFunctionOffsets& offsets() { return offsets_; } - const AsmJSFunctionOffsets& offsets() const { return offsets_; } + FuncOffsets& offsets() { return offsets_; } void setCompileTime(unsigned t) { MOZ_ASSERT(!compileTime_); compileTime_ = t; } unsigned compileTime() const { return compileTime_; } }; -class CompileTask +// An IonCompileTask represents the task of compiling a single function body. An +// IonCompileTask is filled with the wasm code to be compiled on the main +// validation thread, sent off to an Ion compilation helper thread which creates +// the FuncCompileResults, and finally sent back to the validation thread. To +// save time allocating and freeing memory, IonCompileTasks are reset() and +// reused. +class IonCompileTask { - LifoAlloc lifo_; + JSRuntime* const runtime_; const CompileArgs args_; + LifoAlloc lifo_; const FuncIR* func_; - mozilla::Maybe results_; + mozilla::Maybe results_; - CompileTask(const CompileTask&) = delete; - CompileTask& operator=(const CompileTask&) = delete; + IonCompileTask(const IonCompileTask&) = delete; + IonCompileTask& operator=(const IonCompileTask&) = delete; public: - CompileTask(size_t defaultChunkSize, CompileArgs args) - : lifo_(defaultChunkSize), + IonCompileTask(JSRuntime* runtime, CompileArgs args, size_t defaultChunkSize) + : runtime_(runtime), args_(args), + lifo_(defaultChunkSize), func_(nullptr) {} + JSRuntime* runtime() const { + return runtime_; + } LifoAlloc& lifo() { return lifo_; } @@ -84,7 +93,7 @@ class CompileTask MOZ_ASSERT(func_); return *func_; } - FunctionCompileResults& results() { + FuncCompileResults& results() { return *results_; } void reset() { @@ -95,9 +104,9 @@ class CompileTask }; bool -CompileFunction(CompileTask* task); +IonCompileFunction(IonCompileTask* task); } // namespace wasm } // namespace js -#endif // asmjs_wasm_ion_compile_h +#endif // wasm_ion_compile_h diff --git a/js/src/asmjs/WasmModule.cpp b/js/src/asmjs/WasmModule.cpp new file mode 100644 index 00000000000..0f9281c1bd9 --- /dev/null +++ b/js/src/asmjs/WasmModule.cpp @@ -0,0 +1,1368 @@ +/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*- + * vim: set ts=8 sts=4 et sw=4 tw=99: + * + * Copyright 2015 Mozilla Foundation + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "asmjs/WasmModule.h" + +#include "mozilla/BinarySearch.h" +#include "mozilla/EnumeratedRange.h" +#include "mozilla/PodOperations.h" + +#include "jsprf.h" + +#include "asmjs/AsmJSValidate.h" +#include "asmjs/WasmSerialize.h" +#include "builtin/AtomicsObject.h" +#ifdef JS_ION_PERF +# include "jit/PerfSpewer.h" +#endif +#include "jit/BaselineJIT.h" +#include "jit/ExecutableAllocator.h" +#include "js/MemoryMetrics.h" +#ifdef MOZ_VTUNE +# include "vtune/VTuneWrapper.h" +#endif + +#include "jit/MacroAssembler-inl.h" +#include "vm/ArrayBufferObject-inl.h" +#include "vm/TypeInference-inl.h" + +using namespace js; +using namespace js::jit; +using namespace js::wasm; +using mozilla::BinarySearch; +using mozilla::MakeEnumeratedRange; +using mozilla::PodZero; +using mozilla::Swap; +using JS::GenericNaN; + +UniqueCodePtr +wasm::AllocateCode(ExclusiveContext* cx, size_t bytes) +{ + // On most platforms, this will allocate RWX memory. On iOS, or when + // --non-writable-jitcode is used, this will allocate RW memory. In this + // case, DynamicallyLinkModule will reprotect the code as RX. + unsigned permissions = + ExecutableAllocator::initialProtectionFlags(ExecutableAllocator::Writable); + + void* p = AllocateExecutableMemory(nullptr, bytes, permissions, "asm-js-code", AsmJSPageSize); + if (!p) + ReportOutOfMemory(cx); + + MOZ_ASSERT(uintptr_t(p) % AsmJSPageSize == 0); + + return UniqueCodePtr((uint8_t*)p, CodeDeleter(bytes)); +} + +void +CodeDeleter::operator()(uint8_t* p) +{ + DeallocateExecutableMemory(p, bytes_, AsmJSPageSize); +} + +#if defined(JS_CODEGEN_MIPS32) || defined(JS_CODEGEN_MIPS64) +// On MIPS, CodeLabels are instruction immediates so InternalLinks only +// patch instruction immediates. +StaticLinkData::InternalLink::InternalLink(Kind kind) +{ + MOZ_ASSERT(kind == CodeLabel || kind == InstructionImmediate); +} + +bool +StaticLinkData::InternalLink::isRawPointerPatch() +{ + return false; +} +#else +// On the rest, CodeLabels are raw pointers so InternalLinks only patch +// raw pointers. 
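+// Either way, the patch itself is applied in Module::staticallyLink: raw
+// pointer patches store the target address directly at patchAtOffset, while
+// instruction-immediate patches go through Assembler::PatchInstructionImmediate
+// (see isRawPointerPatch()).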
+StaticLinkData::InternalLink::InternalLink(Kind kind) +{ + MOZ_ASSERT(kind == CodeLabel || kind == RawPointer); +} + +bool +StaticLinkData::InternalLink::isRawPointerPatch() +{ + return true; +} +#endif + +size_t +StaticLinkData::SymbolicLinkArray::serializedSize() const +{ + size_t size = 0; + for (const OffsetVector& offsets : *this) + size += SerializedPodVectorSize(offsets); + return size; +} + +uint8_t* +StaticLinkData::SymbolicLinkArray::serialize(uint8_t* cursor) const +{ + for (const OffsetVector& offsets : *this) + cursor = SerializePodVector(cursor, offsets); + return cursor; +} + +const uint8_t* +StaticLinkData::SymbolicLinkArray::deserialize(ExclusiveContext* cx, const uint8_t* cursor) +{ + for (OffsetVector& offsets : *this) { + cursor = DeserializePodVector(cx, cursor, &offsets); + if (!cursor) + return nullptr; + } + return cursor; +} + +bool +StaticLinkData::SymbolicLinkArray::clone(JSContext* cx, SymbolicLinkArray* out) const +{ + for (auto imm : MakeEnumeratedRange(SymbolicAddress::Limit)) { + if (!ClonePodVector(cx, (*this)[imm], &(*out)[imm])) + return false; + } + return true; +} + +size_t +StaticLinkData::SymbolicLinkArray::sizeOfExcludingThis(MallocSizeOf mallocSizeOf) const +{ + size_t size = 0; + for (const OffsetVector& offsets : *this) + size += offsets.sizeOfExcludingThis(mallocSizeOf); + return size; +} + +size_t +StaticLinkData::FuncPtrTable::serializedSize() const +{ + return sizeof(globalDataOffset) + + SerializedPodVectorSize(elemOffsets); +} + +uint8_t* +StaticLinkData::FuncPtrTable::serialize(uint8_t* cursor) const +{ + cursor = WriteBytes(cursor, &globalDataOffset, sizeof(globalDataOffset)); + cursor = SerializePodVector(cursor, elemOffsets); + return cursor; +} + +const uint8_t* +StaticLinkData::FuncPtrTable::deserialize(ExclusiveContext* cx, const uint8_t* cursor) +{ + (cursor = ReadBytes(cursor, &globalDataOffset, sizeof(globalDataOffset))) && + (cursor = DeserializePodVector(cx, cursor, &elemOffsets)); + return cursor; +} + +bool +StaticLinkData::FuncPtrTable::clone(JSContext* cx, FuncPtrTable* out) const +{ + out->globalDataOffset = globalDataOffset; + return ClonePodVector(cx, elemOffsets, &out->elemOffsets); +} + +size_t +StaticLinkData::FuncPtrTable::sizeOfExcludingThis(MallocSizeOf mallocSizeOf) const +{ + return elemOffsets.sizeOfExcludingThis(mallocSizeOf); +} + +size_t +StaticLinkData::serializedSize() const +{ + return sizeof(pod) + + SerializedPodVectorSize(internalLinks) + + symbolicLinks.serializedSize() + + SerializedVectorSize(funcPtrTables); +} + +uint8_t* +StaticLinkData::serialize(uint8_t* cursor) const +{ + cursor = WriteBytes(cursor, &pod, sizeof(pod)); + cursor = SerializePodVector(cursor, internalLinks); + cursor = symbolicLinks.serialize(cursor); + cursor = SerializeVector(cursor, funcPtrTables); + return cursor; +} + +const uint8_t* +StaticLinkData::deserialize(ExclusiveContext* cx, const uint8_t* cursor) +{ + (cursor = ReadBytes(cursor, &pod, sizeof(pod))) && + (cursor = DeserializePodVector(cx, cursor, &internalLinks)) && + (cursor = symbolicLinks.deserialize(cx, cursor)) && + (cursor = DeserializeVector(cx, cursor, &funcPtrTables)); + return cursor; +} + +bool +StaticLinkData::clone(JSContext* cx, StaticLinkData* out) const +{ + out->pod = pod; + return ClonePodVector(cx, internalLinks, &out->internalLinks) && + symbolicLinks.clone(cx, &out->symbolicLinks) && + CloneVector(cx, funcPtrTables, &out->funcPtrTables); +} + +size_t +StaticLinkData::sizeOfExcludingThis(MallocSizeOf mallocSizeOf) const +{ + size_t size = 
internalLinks.sizeOfExcludingThis(mallocSizeOf) + + symbolicLinks.sizeOfExcludingThis(mallocSizeOf) + + SizeOfVectorExcludingThis(funcPtrTables, mallocSizeOf); + + for (const OffsetVector& offsets : symbolicLinks) + size += offsets.sizeOfExcludingThis(mallocSizeOf); + + return size; +} + +static size_t +SerializedSigSize(const MallocSig& sig) +{ + return sizeof(ExprType) + + SerializedPodVectorSize(sig.args()); +} + +static uint8_t* +SerializeSig(uint8_t* cursor, const MallocSig& sig) +{ + cursor = WriteScalar(cursor, sig.ret()); + cursor = SerializePodVector(cursor, sig.args()); + return cursor; +} + +static const uint8_t* +DeserializeSig(ExclusiveContext* cx, const uint8_t* cursor, MallocSig* sig) +{ + ExprType ret; + cursor = ReadScalar(cursor, &ret); + + MallocSig::ArgVector args; + cursor = DeserializePodVector(cx, cursor, &args); + if (!cursor) + return nullptr; + + sig->init(Move(args), ret); + return cursor; +} + +static bool +CloneSig(JSContext* cx, const MallocSig& sig, MallocSig* out) +{ + MallocSig::ArgVector args; + if (!ClonePodVector(cx, sig.args(), &args)) + return false; + + out->init(Move(args), sig.ret()); + return true; +} + +static size_t +SizeOfSigExcludingThis(const MallocSig& sig, MallocSizeOf mallocSizeOf) +{ + return sig.args().sizeOfExcludingThis(mallocSizeOf); +} + +size_t +Export::serializedSize() const +{ + return SerializedSigSize(sig_) + + sizeof(pod); +} + +uint8_t* +Export::serialize(uint8_t* cursor) const +{ + cursor = SerializeSig(cursor, sig_); + cursor = WriteBytes(cursor, &pod, sizeof(pod)); + return cursor; +} + +const uint8_t* +Export::deserialize(ExclusiveContext* cx, const uint8_t* cursor) +{ + (cursor = DeserializeSig(cx, cursor, &sig_)) && + (cursor = ReadBytes(cursor, &pod, sizeof(pod))); + return cursor; +} + +bool +Export::clone(JSContext* cx, Export* out) const +{ + out->pod = pod; + return CloneSig(cx, sig_, &out->sig_); +} + +size_t +Export::sizeOfExcludingThis(MallocSizeOf mallocSizeOf) const +{ + return SizeOfSigExcludingThis(sig_, mallocSizeOf); +} + +size_t +Import::serializedSize() const +{ + return SerializedSigSize(sig_) + + sizeof(pod); +} + +uint8_t* +Import::serialize(uint8_t* cursor) const +{ + cursor = SerializeSig(cursor, sig_); + cursor = WriteBytes(cursor, &pod, sizeof(pod)); + return cursor; +} + +const uint8_t* +Import::deserialize(ExclusiveContext* cx, const uint8_t* cursor) +{ + (cursor = DeserializeSig(cx, cursor, &sig_)) && + (cursor = ReadBytes(cursor, &pod, sizeof(pod))); + return cursor; +} + +bool +Import::clone(JSContext* cx, Import* out) const +{ + out->pod = pod; + return CloneSig(cx, sig_, &out->sig_); +} + +size_t +Import::sizeOfExcludingThis(MallocSizeOf mallocSizeOf) const +{ + return SizeOfSigExcludingThis(sig_, mallocSizeOf); +} + +CodeRange::CodeRange(Kind kind, Offsets offsets) + : nameIndex_(0), + lineNumber_(0), + begin_(offsets.begin), + profilingReturn_(0), + end_(offsets.end) +{ + PodZero(&u); // zero padding for Valgrind + u.kind_ = kind; + + MOZ_ASSERT(begin_ <= end_); + MOZ_ASSERT(u.kind_ == Entry || u.kind_ == Inline); +} + +CodeRange::CodeRange(Kind kind, ProfilingOffsets offsets) + : nameIndex_(0), + lineNumber_(0), + begin_(offsets.begin), + profilingReturn_(offsets.profilingReturn), + end_(offsets.end) +{ + PodZero(&u); // zero padding for Valgrind + u.kind_ = kind; + + MOZ_ASSERT(begin_ < profilingReturn_); + MOZ_ASSERT(profilingReturn_ < end_); + MOZ_ASSERT(u.kind_ == ImportJitExit || u.kind_ == ImportInterpExit || u.kind_ == Interrupt); +} + +CodeRange::CodeRange(uint32_t nameIndex, 
uint32_t lineNumber, FuncOffsets offsets) + : nameIndex_(nameIndex), + lineNumber_(lineNumber) +{ + PodZero(&u); // zero padding for Valgrind + u.kind_ = Function; + + MOZ_ASSERT(offsets.nonProfilingEntry - offsets.begin <= UINT8_MAX); + begin_ = offsets.begin; + u.func.beginToEntry_ = offsets.nonProfilingEntry - begin_; + + MOZ_ASSERT(offsets.nonProfilingEntry < offsets.profilingReturn); + MOZ_ASSERT(offsets.profilingReturn - offsets.profilingJump <= UINT8_MAX); + MOZ_ASSERT(offsets.profilingReturn - offsets.profilingEpilogue <= UINT8_MAX); + profilingReturn_ = offsets.profilingReturn; + u.func.profilingJumpToProfilingReturn_ = profilingReturn_ - offsets.profilingJump; + u.func.profilingEpilogueToProfilingReturn_ = profilingReturn_ - offsets.profilingEpilogue; + + MOZ_ASSERT(offsets.nonProfilingEntry < offsets.end); + end_ = offsets.end; +} + +size_t +CacheableChars::serializedSize() const +{ + return sizeof(uint32_t) + strlen(get()); +} + +uint8_t* +CacheableChars::serialize(uint8_t* cursor) const +{ + uint32_t length = strlen(get()); + cursor = WriteBytes(cursor, &length, sizeof(uint32_t)); + cursor = WriteBytes(cursor, get(), length); + return cursor; +} + +const uint8_t* +CacheableChars::deserialize(ExclusiveContext* cx, const uint8_t* cursor) +{ + uint32_t length; + cursor = ReadBytes(cursor, &length, sizeof(uint32_t)); + + reset(js_pod_calloc(length + 1)); + if (!get()) + return nullptr; + + cursor = ReadBytes(cursor, get(), length); + return cursor; +} + +bool +CacheableChars::clone(JSContext* cx, CacheableChars* out) const +{ + *out = make_string_copy(get()); + return !!*out; +} + +class Module::AutoMutateCode +{ + AutoWritableJitCode awjc_; + AutoFlushICache afc_; + + public: + AutoMutateCode(JSContext* cx, Module& module, const char* name) + : awjc_(cx->runtime(), module.code(), module.pod.codeBytes_), + afc_(name) + { + AutoFlushICache::setRange(uintptr_t(module.code()), module.pod.codeBytes_); + } +}; + +uint32_t +Module::totalBytes() const +{ + return pod.codeBytes_ + pod.globalBytes_; +} + +uint8_t* +Module::rawHeapPtr() const +{ + return const_cast(this)->rawHeapPtr(); +} + +uint8_t*& +Module::rawHeapPtr() +{ + return *(uint8_t**)(globalData() + HeapGlobalDataOffset); +} + +void +Module::specializeToHeap(ArrayBufferObjectMaybeShared* heap) +{ + MOZ_ASSERT_IF(heap->is(), heap->as().isAsmJS()); + MOZ_ASSERT(!maybeHeap_); + MOZ_ASSERT(!rawHeapPtr()); + + uint8_t* ptrBase = heap->dataPointerEither().unwrap(/*safe - protected by Module methods*/); + uint32_t heapLength = heap->byteLength(); +#if defined(JS_CODEGEN_X86) + // An access is out-of-bounds iff + // ptr + offset + data-type-byte-size > heapLength + // i.e. ptr > heapLength - data-type-byte-size - offset. data-type-byte-size + // and offset are already included in the addend so we + // just have to add the heap length here. + for (const HeapAccess& access : heapAccesses_) { + if (access.hasLengthCheck()) + X86Encoding::AddInt32(access.patchLengthAt(code()), heapLength); + void* addr = access.patchHeapPtrImmAt(code()); + uint32_t disp = reinterpret_cast(X86Encoding::GetPointer(addr)); + MOZ_ASSERT(disp <= INT32_MAX); + X86Encoding::SetPointer(addr, (void*)(ptrBase + disp)); + } +#elif defined(JS_CODEGEN_X64) + // Even with signal handling being used for most bounds checks, there may be + // atomic operations that depend on explicit checks. + // + // If we have any explicit bounds checks, we need to patch the heap length + // checks at the right places. 
All accesses that have been recorded are the + // only ones that need bound checks (see also + // CodeGeneratorX64::visitAsmJS{Load,Store,CompareExchange,Exchange,AtomicBinop}Heap) + for (const HeapAccess& access : heapAccesses_) { + // See comment above for x86 codegen. + if (access.hasLengthCheck()) + X86Encoding::AddInt32(access.patchLengthAt(code()), heapLength); + } +#elif defined(JS_CODEGEN_ARM) || defined(JS_CODEGEN_ARM64) || \ + defined(JS_CODEGEN_MIPS32) || defined(JS_CODEGEN_MIPS64) + for (const HeapAccess& access : heapAccesses_) + Assembler::UpdateBoundsCheck(heapLength, (Instruction*)(access.insnOffset() + code())); +#endif + + maybeHeap_ = heap; + rawHeapPtr() = ptrBase; +} + +void +Module::despecializeFromHeap(ArrayBufferObjectMaybeShared* heap) +{ + MOZ_ASSERT_IF(maybeHeap_, maybeHeap_ == heap); + MOZ_ASSERT_IF(rawHeapPtr(), rawHeapPtr() == heap->dataPointerEither().unwrap()); + +#if defined(JS_CODEGEN_X86) + uint32_t heapLength = heap->byteLength(); + uint8_t* ptrBase = heap->dataPointerEither().unwrap(/*safe - used for value*/); + for (unsigned i = 0; i < heapAccesses_.length(); i++) { + const HeapAccess& access = heapAccesses_[i]; + if (access.hasLengthCheck()) + X86Encoding::AddInt32(access.patchLengthAt(code()), -heapLength); + void* addr = access.patchHeapPtrImmAt(code()); + uint8_t* ptr = reinterpret_cast(X86Encoding::GetPointer(addr)); + MOZ_ASSERT(ptr >= ptrBase); + X86Encoding::SetPointer(addr, reinterpret_cast(ptr - ptrBase)); + } +#elif defined(JS_CODEGEN_X64) + uint32_t heapLength = heap->byteLength(); + for (unsigned i = 0; i < heapAccesses_.length(); i++) { + const HeapAccess& access = heapAccesses_[i]; + if (access.hasLengthCheck()) + X86Encoding::AddInt32(access.patchLengthAt(code()), -heapLength); + } +#endif + + maybeHeap_ = nullptr; + rawHeapPtr() = nullptr; +} + +void +Module::sendCodeRangesToProfiler(JSContext* cx) +{ +#ifdef JS_ION_PERF + if (PerfFuncEnabled()) { + for (const CodeRange& codeRange : codeRanges_) { + if (!codeRange.isFunction()) + continue; + + uintptr_t start = uintptr_t(code() + codeRange.begin()); + uintptr_t end = uintptr_t(code() + codeRange.end()); + uintptr_t size = end - start; + const char* file = filename_.get(); + unsigned line = codeRange.funcLineNumber(); + unsigned column = 0; + const char* name = funcNames_[codeRange.funcNameIndex()].get(); + + writePerfSpewerAsmJSFunctionMap(start, size, file, line, column, name); + } + } +#endif +#ifdef MOZ_VTUNE + if (IsVTuneProfilingActive()) { + for (const CodeRange& codeRange : codeRanges_) { + if (!codeRange.isFunction()) + continue; + + uintptr_t start = uintptr_t(code() + codeRange.begin()); + uintptr_t end = uintptr_t(code() + codeRange.end()); + uintptr_t size = end - start; + const char* name = funcNames_[codeRange.funcNameIndex()].get(); + + unsigned method_id = iJIT_GetNewMethodID(); + if (method_id == 0) + return; + iJIT_Method_Load method; + method.method_id = method_id; + method.method_name = const_cast(name); + method.method_load_address = (void*)start; + method.method_size = size; + method.line_number_size = 0; + method.line_number_table = nullptr; + method.class_id = 0; + method.class_file_name = nullptr; + method.source_file_name = nullptr; + iJIT_NotifyEvent(iJVM_EVENT_TYPE_METHOD_LOAD_FINISHED, (void*)&method); + } + } +#endif +} + +Module::ImportExit& +Module::importToExit(const Import& import) +{ + return *reinterpret_cast(globalData() + import.exitGlobalDataOffset()); +} + +/* static */ Module::CacheablePod +Module::zeroPod() +{ + CacheablePod pod = {0, 0, 0, 
false, false, false, false}; + return pod; +} + +void +Module::init() +{ + staticallyLinked_ = false; + interrupt_ = nullptr; + outOfBounds_ = nullptr; + dynamicallyLinked_ = false; + prev_ = nullptr; + next_ = nullptr; + interrupted_ = false; + + *(double*)(globalData() + NaN64GlobalDataOffset) = GenericNaN(); + *(float*)(globalData() + NaN32GlobalDataOffset) = GenericNaN(); +} + +// Private constructor used for deserialization and cloning. +Module::Module(const CacheablePod& pod, + UniqueCodePtr code, + ImportVector&& imports, + ExportVector&& exports, + HeapAccessVector&& heapAccesses, + CodeRangeVector&& codeRanges, + CallSiteVector&& callSites, + CacheableCharsVector&& funcNames, + CacheableChars filename, + CacheBool loadedFromCache, + ProfilingBool profilingEnabled, + FuncLabelVector&& funcLabels) + : pod(pod), + code_(Move(code)), + imports_(Move(imports)), + exports_(Move(exports)), + heapAccesses_(Move(heapAccesses)), + codeRanges_(Move(codeRanges)), + callSites_(Move(callSites)), + funcNames_(Move(funcNames)), + filename_(Move(filename)), + loadedFromCache_(loadedFromCache), + profilingEnabled_(profilingEnabled), + funcLabels_(Move(funcLabels)) +{ + MOZ_ASSERT_IF(!profilingEnabled, funcLabels_.empty()); + MOZ_ASSERT_IF(profilingEnabled, funcNames_.length() == funcLabels_.length()); + init(); +} + +// Public constructor for compilation. +Module::Module(CompileArgs args, + uint32_t functionBytes, + uint32_t codeBytes, + uint32_t globalBytes, + HeapBool usesHeap, + SharedBool sharedHeap, + UniqueCodePtr code, + ImportVector&& imports, + ExportVector&& exports, + HeapAccessVector&& heapAccesses, + CodeRangeVector&& codeRanges, + CallSiteVector&& callSites, + CacheableCharsVector&& funcNames, + CacheableChars filename) + : pod(zeroPod()), + code_(Move(code)), + imports_(Move(imports)), + exports_(Move(exports)), + heapAccesses_(Move(heapAccesses)), + codeRanges_(Move(codeRanges)), + callSites_(Move(callSites)), + funcNames_(Move(funcNames)), + filename_(Move(filename)), + loadedFromCache_(false), + profilingEnabled_(false) +{ + // Work around MSVC 2013 bug around {} member initialization. 
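+    // The CacheablePod members are declared const so they can never change
+    // after construction; MSVC 2013 cannot brace-initialize them from the
+    // member-init list, so they are written below through const_cast.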
+    const_cast<uint32_t&>(pod.functionBytes_) = functionBytes;
+    const_cast<uint32_t&>(pod.codeBytes_) = codeBytes;
+    const_cast<uint32_t&>(pod.globalBytes_) = globalBytes;
+    const_cast<bool&>(pod.usesHeap_) = bool(usesHeap);
+    const_cast<bool&>(pod.sharedHeap_) = bool(sharedHeap);
+    const_cast<bool&>(pod.usesSignalHandlersForOOB_) = args.useSignalHandlersForOOB;
+    const_cast<bool&>(pod.usesSignalHandlersForInterrupt_) = args.useSignalHandlersForInterrupt;
+
+    MOZ_ASSERT_IF(sharedHeap, usesHeap);
+    init();
+}
+
+Module::~Module()
+{
+    MOZ_ASSERT(!interrupted_);
+
+    if (code_) {
+        for (unsigned i = 0; i < imports_.length(); i++) {
+            ImportExit& exit = importToExit(imports_[i]);
+            if (exit.baselineScript)
+                exit.baselineScript->removeDependentWasmModule(*this, i);
+        }
+    }
+
+    if (prev_)
+        *prev_ = next_;
+    if (next_)
+        next_->prev_ = prev_;
+}
+
+void
+Module::trace(JSTracer* trc)
+{
+    for (const Import& import : imports_) {
+        if (importToExit(import).fun)
+            TraceEdge(trc, &importToExit(import).fun, "wasm function import");
+    }
+
+    if (maybeHeap_)
+        TraceEdge(trc, &maybeHeap_, "wasm buffer");
+}
+
+CompileArgs
+Module::compileArgs() const
+{
+    CompileArgs args;
+    args.useSignalHandlersForOOB = pod.usesSignalHandlersForOOB_;
+    args.useSignalHandlersForInterrupt = pod.usesSignalHandlersForInterrupt_;
+    return args;
+}
+
+bool
+Module::containsFunctionPC(void* pc) const
+{
+    return pc >= code() && pc < (code() + pod.functionBytes_);
+}
+
+bool
+Module::containsCodePC(void* pc) const
+{
+    return pc >= code() && pc < (code() + pod.codeBytes_);
+}
+
+struct CallSiteRetAddrOffset
+{
+    const CallSiteVector& callSites;
+    explicit CallSiteRetAddrOffset(const CallSiteVector& callSites) : callSites(callSites) {}
+    uint32_t operator[](size_t index) const {
+        return callSites[index].returnAddressOffset();
+    }
+};
+
+const CallSite*
+Module::lookupCallSite(void* returnAddress) const
+{
+    uint32_t target = ((uint8_t*)returnAddress) - code();
+    size_t lowerBound = 0;
+    size_t upperBound = callSites_.length();
+
+    size_t match;
+    if (!BinarySearch(CallSiteRetAddrOffset(callSites_), lowerBound, upperBound, target, &match))
+        return nullptr;
+
+    return &callSites_[match];
+}
+
+const CodeRange*
+Module::lookupCodeRange(void* pc) const
+{
+    CodeRange::PC target((uint8_t*)pc - code());
+    size_t lowerBound = 0;
+    size_t upperBound = codeRanges_.length();
+
+    size_t match;
+    if (!BinarySearch(codeRanges_, lowerBound, upperBound, target, &match))
+        return nullptr;
+
+    return &codeRanges_[match];
+}
+
+struct HeapAccessOffset
+{
+    const HeapAccessVector& accesses;
+    explicit HeapAccessOffset(const HeapAccessVector& accesses) : accesses(accesses) {}
+    uintptr_t operator[](size_t index) const {
+        return accesses[index].insnOffset();
+    }
+};
+
+const HeapAccess*
+Module::lookupHeapAccess(void* pc) const
+{
+    MOZ_ASSERT(containsFunctionPC(pc));
+
+    uint32_t target = ((uint8_t*)pc) - code();
+    size_t lowerBound = 0;
+    size_t upperBound = heapAccesses_.length();
+
+    size_t match;
+    if (!BinarySearch(HeapAccessOffset(heapAccesses_), lowerBound, upperBound, target, &match))
+        return nullptr;
+
+    return &heapAccesses_[match];
+}
+
+bool
+Module::staticallyLink(ExclusiveContext* cx, const StaticLinkData& linkData)
+{
+    MOZ_ASSERT(!dynamicallyLinked_);
+    MOZ_ASSERT(!staticallyLinked_);
+    staticallyLinked_ = true;
+
+    // Push a JitContext for benefit of IsCompilingAsmJS and delay flushing
+    // until Module::dynamicallyLink.
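+    // (The AutoFlushICache below is constructed inhibited: patches made while
+    // statically linking are only flushed when dynamicallyLink later creates
+    // its own non-inhibited AutoFlushICache over the same code range.)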
+ JitContext jcx(CompileRuntime::get(cx->compartment()->runtimeFromAnyThread())); + MOZ_ASSERT(IsCompilingAsmJS()); + AutoFlushICache afc("Module::staticallyLink", /* inhibit = */ true); + AutoFlushICache::setRange(uintptr_t(code()), pod.codeBytes_); + + interrupt_ = code() + linkData.pod.interruptOffset; + outOfBounds_ = code() + linkData.pod.outOfBoundsOffset; + + for (StaticLinkData::InternalLink link : linkData.internalLinks) { + uint8_t* patchAt = code() + link.patchAtOffset; + void* target = code() + link.targetOffset; + if (profilingEnabled_) { + const CodeRange* codeRange = lookupCodeRange(target); + if (codeRange && codeRange->isFunction()) + target = code() + codeRange->funcProfilingEntry(); + } + if (link.isRawPointerPatch()) + *(void**)(patchAt) = target; + else + Assembler::PatchInstructionImmediate(patchAt, PatchedImmPtr(target)); + } + + for (auto imm : MakeEnumeratedRange(SymbolicAddress::Limit)) { + const StaticLinkData::OffsetVector& offsets = linkData.symbolicLinks[imm]; + for (size_t i = 0; i < offsets.length(); i++) { + uint8_t* patchAt = code() + offsets[i]; + void* target = AddressOf(imm, cx); + Assembler::PatchDataWithValueCheck(CodeLocationLabel(patchAt), + PatchedImmPtr(target), + PatchedImmPtr((void*)-1)); + } + } + + for (const StaticLinkData::FuncPtrTable& table : linkData.funcPtrTables) { + auto array = reinterpret_cast(globalData() + table.globalDataOffset); + for (size_t i = 0; i < table.elemOffsets.length(); i++) { + uint8_t* elem = code() + table.elemOffsets[i]; + if (profilingEnabled_) + elem = code() + lookupCodeRange(elem)->funcProfilingEntry(); + array[i] = elem; + } + } + + // CodeRangeVector, CallSiteVector and the code technically have all the + // necessary info to do all the updates necessary in setProfilingEnabled. + // However, to simplify the finding of function-pointer table sizes and + // global-data offsets, save just that information here. + + if (!funcPtrTables_.appendAll(linkData.funcPtrTables)) { + ReportOutOfMemory(cx); + return false; + } + + return true; +} + +bool +Module::dynamicallyLink(JSContext* cx, Handle heap, + const AutoVectorRooter& imports) +{ + MOZ_ASSERT(staticallyLinked_); + MOZ_ASSERT(!dynamicallyLinked_); + dynamicallyLinked_ = true; + + // Add this module to the JSRuntime-wide list of dynamically-linked modules. + next_ = cx->runtime()->linkedWasmModules; + prev_ = &cx->runtime()->linkedWasmModules; + cx->runtime()->linkedWasmModules = this; + if (next_) + next_->prev_ = &next_; + + // Push a JitContext for benefit of IsCompilingAsmJS and flush the ICache. + // We've been inhibiting flushing up to this point so flush it all now. + JitContext jcx(CompileRuntime::get(cx->compartment()->runtimeFromAnyThread())); + MOZ_ASSERT(IsCompilingAsmJS()); + AutoFlushICache afc("Module::dynamicallyLink"); + AutoFlushICache::setRange(uintptr_t(code()), pod.codeBytes_); + + // Initialize imports with actual imported values. + MOZ_ASSERT(imports.length() == imports_.length()); + for (size_t i = 0; i < imports_.length(); i++) { + const Import& import = imports_[i]; + ImportExit& exit = importToExit(import); + exit.code = code() + import.interpExitCodeOffset(); + exit.fun = imports[i]; + exit.baselineScript = nullptr; + } + + // Specialize code to the actual heap. + if (heap) + specializeToHeap(heap); + + // See AllocateCode comment above. 
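+    // On platforms where AllocateCode returned RW memory (e.g. with
+    // non-writable jitcode), this reprotects the code as RX now that all
+    // patching is complete; elsewhere it is effectively a no-op.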
+ ExecutableAllocator::makeExecutable(code(), pod.codeBytes_); + + sendCodeRangesToProfiler(cx); + return true; +} + +Module* +Module::nextLinked() const +{ + MOZ_ASSERT(dynamicallyLinked_); + return next_; +} + +ArrayBufferObjectMaybeShared* +Module::maybeBuffer() const +{ + MOZ_ASSERT(dynamicallyLinked_); + return maybeHeap_; +} + +SharedMem +Module::maybeHeap() const +{ + MOZ_ASSERT(dynamicallyLinked_); + MOZ_ASSERT_IF(!pod.usesHeap_, rawHeapPtr() == nullptr); + return pod.sharedHeap_ + ? SharedMem::shared(rawHeapPtr()) + : SharedMem::unshared(rawHeapPtr()); +} + +size_t +Module::heapLength() const +{ + MOZ_ASSERT(dynamicallyLinked_); + return maybeHeap_ ? maybeHeap_->byteLength() : 0; +} + +void +Module::deoptimizeImportExit(uint32_t importIndex) +{ + MOZ_ASSERT(dynamicallyLinked_); + const Import& import = imports_[importIndex]; + ImportExit& exit = importToExit(import); + exit.code = code() + import.interpExitCodeOffset(); + exit.baselineScript = nullptr; +} + +bool +Module::hasDetachedHeap() const +{ + MOZ_ASSERT(dynamicallyLinked_); + return pod.usesHeap_ && !maybeHeap_; +} + +bool +Module::changeHeap(Handle newHeap, JSContext* cx) +{ + MOZ_ASSERT(dynamicallyLinked_); + MOZ_ASSERT(pod.usesHeap_); + + // Content JS should not be able to run (and change heap) from within an + // interrupt callback, but in case it does, fail to change heap. Otherwise, + // the heap can change at every single instruction which would prevent + // future optimizations like heap-base hoisting. + if (interrupted_) + return false; + + AutoMutateCode amc(cx, *this, "Module::changeHeap"); + if (maybeHeap_) + despecializeFromHeap(maybeHeap_); + specializeToHeap(newHeap); + return true; +} + +bool +Module::detachHeap(JSContext* cx) +{ + MOZ_ASSERT(dynamicallyLinked_); + MOZ_ASSERT(pod.usesHeap_); + + // Content JS should not be able to run (and detach heap) from within an + // interrupt callback, but in case it does, fail. Otherwise, the heap can + // change at an arbitrary instruction and break the assumption below. + if (interrupted_) { + JS_ReportError(cx, "attempt to detach from inside interrupt handler"); + return false; + } + + // Even if this->active(), to reach here, the activation must have called + // out via an import exit stub. FFI stubs check if heapDatum() is null on + // reentry and throw an exception if so. + MOZ_ASSERT_IF(active(), activation()->exitReason() == ExitReason::ImportJit || + activation()->exitReason() == ExitReason::ImportInterp); + + AutoMutateCode amc(cx, *this, "Module::detachHeap"); + despecializeFromHeap(maybeHeap_); + return true; +} + +void +Module::setInterrupted(bool interrupted) +{ + MOZ_ASSERT(dynamicallyLinked_); + interrupted_ = interrupted; +} + +AsmJSActivation*& +Module::activation() +{ + MOZ_ASSERT(dynamicallyLinked_); + return *reinterpret_cast(globalData() + ActivationGlobalDataOffset); +} + +Module::EntryFuncPtr +Module::entryTrampoline(const Export& func) const +{ + MOZ_ASSERT(dynamicallyLinked_); + return JS_DATA_TO_FUNC_PTR(EntryFuncPtr, code() + func.stubOffset()); +} + +bool +Module::callImport(JSContext* cx, uint32_t importIndex, unsigned argc, const Value* argv, + MutableHandleValue rval) +{ + MOZ_ASSERT(dynamicallyLinked_); + + const Import& import = imports_[importIndex]; + + RootedValue fval(cx, ObjectValue(*importToExit(import).fun)); + if (!Invoke(cx, UndefinedValue(), fval, argc, argv, rval)) + return false; + + ImportExit& exit = importToExit(import); + + // The exit may already have become optimized. 
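+    // That is, exit.code either still points at the generic interpreter exit
+    // stub or has already been patched to the jit exit thunk; in the latter
+    // case there is nothing left to do.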
+    void* jitExitCode = code() + import.jitExitCodeOffset();
+    if (exit.code == jitExitCode)
+        return true;
+
+    // Test if the function is JIT compiled.
+    if (!exit.fun->hasScript())
+        return true;
+    JSScript* script = exit.fun->nonLazyScript();
+    if (!script->hasBaselineScript()) {
+        MOZ_ASSERT(!script->hasIonScript());
+        return true;
+    }
+
+    // Don't enable jit entry when we have a pending ion builder.
+    // Take the interpreter path which will link it and enable
+    // the fast path on the next call.
+    if (script->baselineScript()->hasPendingIonBuilder())
+        return true;
+
+    // Currently we can't rectify arguments. Therefore disable if argc is too low.
+    if (exit.fun->nargs() > import.sig().args().length())
+        return true;
+
+    // Ensure the argument types are included in the argument TypeSets stored in
+    // the TypeScript. This is necessary for Ion, because the import exit will
+    // use the skip-arg-checks entry point.
+    //
+    // Note that the TypeScript is never discarded while the script has a
+    // BaselineScript, so if those checks hold now they must hold at least until
+    // the BaselineScript is discarded and when that happens the import exit is
+    // patched back.
+    if (!TypeScript::ThisTypes(script)->hasType(TypeSet::UndefinedType()))
+        return true;
+    for (uint32_t i = 0; i < exit.fun->nargs(); i++) {
+        TypeSet::Type type = TypeSet::UnknownType();
+        switch (import.sig().args()[i]) {
+          case ValType::I32:   type = TypeSet::Int32Type(); break;
+          case ValType::I64:   MOZ_CRASH("NYI");
+          case ValType::F32:   type = TypeSet::DoubleType(); break;
+          case ValType::F64:   type = TypeSet::DoubleType(); break;
+          case ValType::I32x4: MOZ_CRASH("NYI");
+          case ValType::F32x4: MOZ_CRASH("NYI");
+          case ValType::B32x4: MOZ_CRASH("NYI");
+        }
+        if (!TypeScript::ArgTypes(script, i)->hasType(type))
+            return true;
+    }
+
+    // Let's optimize it!
+    if (!script->baselineScript()->addDependentWasmModule(cx, *this, importIndex))
+        return false;
+
+    exit.code = jitExitCode;
+    exit.baselineScript = script->baselineScript();
+    return true;
+}
+
+void
+Module::setProfilingEnabled(bool enabled, JSContext* cx)
+{
+    MOZ_ASSERT(dynamicallyLinked_);
+    MOZ_ASSERT(!active());
+
+    if (profilingEnabled_ == enabled)
+        return;
+
+    // When enabled, generate profiling labels for every name in funcNames_
+    // that is the name of some Function CodeRange. This involves malloc() so
+    // do it now since, once we start sampling, we'll be in a signal-handling
+    // context where we cannot malloc.
+    if (enabled) {
+        funcLabels_.resize(funcNames_.length());
+        for (const CodeRange& codeRange : codeRanges_) {
+            if (!codeRange.isFunction())
+                continue;
+            unsigned lineno = codeRange.funcLineNumber();
+            const char* name = funcNames_[codeRange.funcNameIndex()].get();
+            funcLabels_[codeRange.funcNameIndex()] =
+                UniqueChars(JS_smprintf("%s (%s:%u)", name, filename_.get(), lineno));
+        }
+    } else {
+        funcLabels_.clear();
+    }
+
+    // Patch callsites and returns to execute profiling prologues/epilogues.
+    {
+        AutoMutateCode amc(cx, *this, "Module::setProfilingEnabled");
+
+        for (const CallSite& callSite : callSites_)
+            EnableProfilingPrologue(*this, callSite, enabled);
+
+        for (const CodeRange& codeRange : codeRanges_)
+            EnableProfilingEpilogue(*this, codeRange, enabled);
+    }
+
+    // Update the function-pointer tables to point to profiling prologues.
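+    // Each table element is a raw code pointer into this module, so the
+    // toggle rewrites every element from its non-profiling entry to its
+    // profiling entry (or back, by swapping 'from' and 'to' below).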
+ for (FuncPtrTable& funcPtrTable : funcPtrTables_) { + auto array = reinterpret_cast(globalData() + funcPtrTable.globalDataOffset); + for (size_t i = 0; i < funcPtrTable.numElems; i++) { + const CodeRange* codeRange = lookupCodeRange(array[i]); + void* from = code() + codeRange->funcNonProfilingEntry(); + void* to = code() + codeRange->funcProfilingEntry(); + if (!enabled) + Swap(from, to); + MOZ_ASSERT(array[i] == from); + array[i] = to; + } + } + + profilingEnabled_ = enabled; +} + +const char* +Module::profilingLabel(uint32_t funcIndex) const +{ + MOZ_ASSERT(dynamicallyLinked_); + MOZ_ASSERT(profilingEnabled_); + return funcLabels_[funcIndex].get(); +} + +size_t +Module::serializedSize() const +{ + return sizeof(pod) + + pod.codeBytes_ + + SerializedVectorSize(imports_) + + SerializedVectorSize(exports_) + + SerializedPodVectorSize(heapAccesses_) + + SerializedPodVectorSize(codeRanges_) + + SerializedPodVectorSize(callSites_) + + SerializedVectorSize(funcNames_) + + filename_.serializedSize(); +} + +uint8_t* +Module::serialize(uint8_t* cursor) const +{ + MOZ_ASSERT(!profilingEnabled_, "assumed by Module::deserialize"); + + cursor = WriteBytes(cursor, &pod, sizeof(pod)); + cursor = WriteBytes(cursor, code(), pod.codeBytes_); + cursor = SerializeVector(cursor, imports_); + cursor = SerializeVector(cursor, exports_); + cursor = SerializePodVector(cursor, heapAccesses_); + cursor = SerializePodVector(cursor, codeRanges_); + cursor = SerializePodVector(cursor, callSites_); + cursor = SerializeVector(cursor, funcNames_); + cursor = filename_.serialize(cursor); + return cursor; +} + +/* static */ const uint8_t* +Module::deserialize(ExclusiveContext* cx, const uint8_t* cursor, UniqueModule* out) +{ + CacheablePod pod = zeroPod(); + cursor = ReadBytes(cursor, &pod, sizeof(pod)); + if (!cursor) + return nullptr; + + UniqueCodePtr code = AllocateCode(cx, pod.codeBytes_ + pod.globalBytes_); + if (!code) + return nullptr; + + cursor = ReadBytes(cursor, code.get(), pod.codeBytes_); + + ImportVector imports; + cursor = DeserializeVector(cx, cursor, &imports); + if (!cursor) + return nullptr; + + ExportVector exports; + cursor = DeserializeVector(cx, cursor, &exports); + if (!cursor) + return nullptr; + + HeapAccessVector heapAccesses; + cursor = DeserializePodVector(cx, cursor, &heapAccesses); + if (!cursor) + return nullptr; + + CodeRangeVector codeRanges; + cursor = DeserializePodVector(cx, cursor, &codeRanges); + if (!cursor) + return nullptr; + + CallSiteVector callSites; + cursor = DeserializePodVector(cx, cursor, &callSites); + if (!cursor) + return nullptr; + + CacheableCharsVector funcNames; + cursor = DeserializeVector(cx, cursor, &funcNames); + if (!cursor) + return nullptr; + + CacheableChars filename; + cursor = filename.deserialize(cx, cursor); + if (!cursor) + return nullptr; + + *out = cx->make_unique(pod, + Move(code), + Move(imports), + Move(exports), + Move(heapAccesses), + Move(codeRanges), + Move(callSites), + Move(funcNames), + Move(filename), + Module::LoadedFromCache, + Module::ProfilingDisabled, + FuncLabelVector()); + + return cursor; +} + +Module::UniqueModule +Module::clone(JSContext* cx, const StaticLinkData& linkData) const +{ + MOZ_ASSERT(dynamicallyLinked_); + + UniqueCodePtr code = AllocateCode(cx, totalBytes()); + if (!code) + return nullptr; + + memcpy(code.get(), this->code(), pod.codeBytes_); + +#ifdef DEBUG + // Put the symbolic links back to -1 so PatchDataWithValueCheck assertions + // in Module::staticallyLink are valid. 
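+    // (Module::staticallyLink patches each symbolic-link site with
+    // PatchDataWithValueCheck expecting the prior value (void*)-1, which is
+    // what serialized code stores, so the clone undoes the patching here.)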
+ for (auto imm : MakeEnumeratedRange(SymbolicAddress::Limit)) { + void* callee = AddressOf(imm, cx); + const StaticLinkData::OffsetVector& offsets = linkData.symbolicLinks[imm]; + for (uint32_t offset : offsets) { + jit::Assembler::PatchDataWithValueCheck(jit::CodeLocationLabel(code.get() + offset), + jit::PatchedImmPtr((void*)-1), + jit::PatchedImmPtr(callee)); + } + } +#endif + + ImportVector imports; + if (!CloneVector(cx, imports_, &imports)) + return nullptr; + + ExportVector exports; + if (!CloneVector(cx, exports_, &exports)) + return nullptr; + + HeapAccessVector heapAccesses; + if (!ClonePodVector(cx, heapAccesses_, &heapAccesses)) + return nullptr; + + CodeRangeVector codeRanges; + if (!ClonePodVector(cx, codeRanges_, &codeRanges)) + return nullptr; + + CallSiteVector callSites; + if (!ClonePodVector(cx, callSites_, &callSites)) + return nullptr; + + CacheableCharsVector funcNames; + if (!CloneVector(cx, funcNames_, &funcNames)) + return nullptr; + + CacheableChars filename; + if (!filename_.clone(cx, &filename)) + return nullptr; + + FuncLabelVector funcLabels; + if (!CloneVector(cx, funcLabels_, &funcLabels)) + return nullptr; + + // Must not GC between Module allocation and (successful) return. + auto out = cx->make_unique(pod, + Move(code), + Move(imports), + Move(exports), + Move(heapAccesses), + Move(codeRanges), + Move(callSites), + Move(funcNames), + Move(filename), + CacheBool::NotLoadedFromCache, + ProfilingBool(profilingEnabled_), + Move(funcLabels)); + if (!out) + return nullptr; + + // If the copied machine code has been specialized to the heap, it must be + // unspecialized in the copy. + if (maybeHeap_) + out->despecializeFromHeap(maybeHeap_); + + if (!out->staticallyLink(cx, linkData)) + return nullptr; + + return Move(out); +} + +void +Module::addSizeOfMisc(MallocSizeOf mallocSizeOf, size_t* asmJSModuleCode, size_t* asmJSModuleData) +{ + *asmJSModuleCode += pod.codeBytes_; + *asmJSModuleData += mallocSizeOf(this) + + pod.globalBytes_ + + SizeOfVectorExcludingThis(imports_, mallocSizeOf) + + SizeOfVectorExcludingThis(exports_, mallocSizeOf) + + heapAccesses_.sizeOfExcludingThis(mallocSizeOf) + + codeRanges_.sizeOfExcludingThis(mallocSizeOf) + + callSites_.sizeOfExcludingThis(mallocSizeOf) + + funcNames_.sizeOfExcludingThis(mallocSizeOf) + + funcPtrTables_.sizeOfExcludingThis(mallocSizeOf); +} + diff --git a/js/src/asmjs/WasmModule.h b/js/src/asmjs/WasmModule.h new file mode 100644 index 00000000000..f6025b832f4 --- /dev/null +++ b/js/src/asmjs/WasmModule.h @@ -0,0 +1,569 @@ +/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*- + * vim: set ts=8 sts=4 et sw=4 tw=99: + * + * Copyright 2015 Mozilla Foundation + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */
+
+#ifndef wasm_module_h
+#define wasm_module_h
+
+#include "asmjs/WasmTypes.h"
+#include "gc/Barrier.h"
+#include "vm/MallocProvider.h"
+
+namespace js {
+
+class AsmJSActivation;
+namespace jit { struct BaselineScript; }
+
+namespace wasm {
+
+// A wasm Module and everything it contains must support serialization,
+// deserialization and cloning. Some data can be simply copied as raw bytes and,
+// as a convention, is stored in an inline CacheablePod struct. Everything else
+// should implement the below methods which are called recursively by the
+// containing Module. The implementations of all these methods are grouped
+// together in WasmSerialize.cpp.
+
+#define WASM_DECLARE_SERIALIZABLE(Type)                                        \
+    size_t serializedSize() const;                                             \
+    uint8_t* serialize(uint8_t* cursor) const;                                 \
+    const uint8_t* deserialize(ExclusiveContext* cx, const uint8_t* cursor);   \
+    bool clone(JSContext* cx, Type* out) const;                                \
+    size_t sizeOfExcludingThis(mozilla::MallocSizeOf mallocSizeOf) const;
+
+// The StaticLinkData contains all the metadata necessary to perform
+// Module::staticallyLink but is not necessary afterwards.
+
+struct StaticLinkData
+{
+    struct InternalLink {
+        enum Kind {
+            RawPointer,
+            CodeLabel,
+            InstructionImmediate
+        };
+        uint32_t patchAtOffset;
+        uint32_t targetOffset;
+
+        InternalLink() = default;
+        explicit InternalLink(Kind kind);
+        bool isRawPointerPatch();
+    };
+    typedef Vector<InternalLink, 0, SystemAllocPolicy> InternalLinkVector;
+
+    typedef Vector<uint32_t, 0, SystemAllocPolicy> OffsetVector;
+    struct SymbolicLinkArray
+      : mozilla::EnumeratedArray<SymbolicAddress, SymbolicAddress::Limit, OffsetVector>
+    {
+        WASM_DECLARE_SERIALIZABLE(SymbolicLinkArray)
+    };
+
+    struct FuncPtrTable {
+        uint32_t globalDataOffset;
+        OffsetVector elemOffsets;
+        explicit FuncPtrTable(uint32_t globalDataOffset) : globalDataOffset(globalDataOffset) {}
+        FuncPtrTable() = default;
+        FuncPtrTable(FuncPtrTable&& rhs)
+          : globalDataOffset(rhs.globalDataOffset), elemOffsets(Move(rhs.elemOffsets))
+        {}
+        WASM_DECLARE_SERIALIZABLE(FuncPtrTable)
+    };
+    typedef Vector<FuncPtrTable, 0, SystemAllocPolicy> FuncPtrTableVector;
+
+    struct CacheablePod {
+        uint32_t interruptOffset;
+        uint32_t outOfBoundsOffset;
+    } pod;
+    InternalLinkVector internalLinks;
+    SymbolicLinkArray symbolicLinks;
+    FuncPtrTableVector funcPtrTables;
+
+    WASM_DECLARE_SERIALIZABLE(StaticLinkData)
+};
+
+typedef UniquePtr<StaticLinkData, JS::DeletePolicy<StaticLinkData>> UniqueStaticLinkData;
+
+// An Export describes an export from a wasm module. Currently only functions
+// can be exported.
+
+class Export
+{
+    MallocSig sig_;
+    struct CacheablePod {
+        uint32_t funcIndex_;
+        uint32_t stubOffset_;
+    } pod;
+
+  public:
+    Export() = default;
+    Export(MallocSig&& sig, uint32_t funcIndex)
+      : sig_(Move(sig))
+    {
+        pod.funcIndex_ = funcIndex;
+        pod.stubOffset_ = UINT32_MAX;
+    }
+    Export(Export&& rhs)
+      : sig_(Move(rhs.sig_)),
+        pod(rhs.pod)
+    {}
+
+    void initStubOffset(uint32_t stubOffset) {
+        MOZ_ASSERT(pod.stubOffset_ == UINT32_MAX);
+        pod.stubOffset_ = stubOffset;
+    }
+
+    uint32_t funcIndex() const {
+        return pod.funcIndex_;
+    }
+    uint32_t stubOffset() const {
+        return pod.stubOffset_;
+    }
+    const MallocSig& sig() const {
+        return sig_;
+    }
+
+    WASM_DECLARE_SERIALIZABLE(Export)
+};
+
+typedef Vector<Export, 0, SystemAllocPolicy> ExportVector;
+
+// An Import describes a wasm module import. Currently, only functions can be
+// imported in wasm and a function import also includes the signature used
+// within the module to call that import. An import is slightly different from
+// an asm.js FFI function: a single asm.js FFI function can be called with many
+// different signatures. When compiled to wasm, each unique pairing of FFI
+// function and signature generates a wasm import.
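+//
+// For example (an illustration, not taken from the sources): if asm.js code
+// calls a single FFI function f both as "f(i)|0" and as "+f(d)", the wasm
+// encoding gets two Imports of f, one with signature (i32) -> i32 and one
+// with (f64) -> f64.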
+
+class Import
+{
+    MallocSig sig_;
+    struct CacheablePod {
+        uint32_t exitGlobalDataOffset_;
+        uint32_t interpExitCodeOffset_;
+        uint32_t jitExitCodeOffset_;
+    } pod;
+
+  public:
+    Import() {}
+    Import(Import&& rhs) : sig_(Move(rhs.sig_)), pod(rhs.pod) {}
+    Import(MallocSig&& sig, uint32_t exitGlobalDataOffset)
+      : sig_(Move(sig))
+    {
+        pod.exitGlobalDataOffset_ = exitGlobalDataOffset;
+        pod.interpExitCodeOffset_ = 0;
+        pod.jitExitCodeOffset_ = 0;
+    }
+
+    void initInterpExitOffset(uint32_t off) {
+        MOZ_ASSERT(!pod.interpExitCodeOffset_);
+        pod.interpExitCodeOffset_ = off;
+    }
+    void initJitExitOffset(uint32_t off) {
+        MOZ_ASSERT(!pod.jitExitCodeOffset_);
+        pod.jitExitCodeOffset_ = off;
+    }
+
+    const MallocSig& sig() const {
+        return sig_;
+    }
+    uint32_t exitGlobalDataOffset() const {
+        return pod.exitGlobalDataOffset_;
+    }
+    uint32_t interpExitCodeOffset() const {
+        MOZ_ASSERT(pod.interpExitCodeOffset_);
+        return pod.interpExitCodeOffset_;
+    }
+    uint32_t jitExitCodeOffset() const {
+        MOZ_ASSERT(pod.jitExitCodeOffset_);
+        return pod.jitExitCodeOffset_;
+    }
+
+    WASM_DECLARE_SERIALIZABLE(Import)
+};
+
+typedef Vector<Import, 0, SystemAllocPolicy> ImportVector;
+
+// A CodeRange describes a single contiguous range of code within a wasm
+// module's code segment. A CodeRange describes what the code does and, for
+// function bodies, the name and source coordinates of the function.
+
+class CodeRange
+{
+    uint32_t nameIndex_;
+    uint32_t lineNumber_;
+    uint32_t begin_;
+    uint32_t profilingReturn_;
+    uint32_t end_;
+    union {
+        struct {
+            uint8_t kind_;
+            uint8_t beginToEntry_;
+            uint8_t profilingJumpToProfilingReturn_;
+            uint8_t profilingEpilogueToProfilingReturn_;
+        } func;
+        uint8_t kind_;
+    } u;
+
+    void assertValid();
+
+  public:
+    enum Kind { Function, Entry, ImportJitExit, ImportInterpExit, Interrupt, Inline };
+
+    CodeRange() = default;
+    CodeRange(Kind kind, Offsets offsets);
+    CodeRange(Kind kind, ProfilingOffsets offsets);
+    CodeRange(uint32_t nameIndex, uint32_t lineNumber, FuncOffsets offsets);
+
+    // All CodeRanges have a begin and end.
+
+    uint32_t begin() const {
+        return begin_;
+    }
+    uint32_t end() const {
+        return end_;
+    }
+
+    // Other fields are only available for certain CodeRange::Kinds.
+
+    Kind kind() const { return Kind(u.kind_); }
+
+    // Every CodeRange except entry and inline stubs has a profiling return
+    // which is used for asynchronous profiling to determine the frame pointer.
+
+    uint32_t profilingReturn() const {
+        MOZ_ASSERT(kind() != Entry && kind() != Inline);
+        return profilingReturn_;
+    }
+
+    // Functions have offsets which allow patching to selectively execute
+    // profiling prologues/epilogues.
+
+    bool isFunction() const {
+        return kind() == Function;
+    }
+    uint32_t funcProfilingEntry() const {
+        MOZ_ASSERT(isFunction());
+        return begin();
+    }
+    uint32_t funcNonProfilingEntry() const {
+        MOZ_ASSERT(isFunction());
+        return begin_ + u.func.beginToEntry_;
+    }
+    uint32_t funcProfilingJump() const {
+        MOZ_ASSERT(isFunction());
+        return profilingReturn_ - u.func.profilingJumpToProfilingReturn_;
+    }
+    uint32_t funcProfilingEpilogue() const {
+        MOZ_ASSERT(isFunction());
+        return profilingReturn_ - u.func.profilingEpilogueToProfilingReturn_;
+    }
+    uint32_t funcNameIndex() const {
+        MOZ_ASSERT(isFunction());
+        return nameIndex_;
+    }
+    uint32_t funcLineNumber() const {
+        MOZ_ASSERT(isFunction());
+        return lineNumber_;
+    }
+
+    // A sorted array of CodeRanges can be looked up via BinarySearch and PC.
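+    // A sketch of that lookup, as done by Module::lookupCodeRange:
+    //
+    //   size_t match;
+    //   if (BinarySearch(codeRanges, 0, codeRanges.length(), CodeRange::PC(offset), &match))
+    //       found = &codeRanges[match];  // the unique range containing 'offset'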
+
+    struct PC {
+        size_t offset;
+        explicit PC(size_t offset) : offset(offset) {}
+        bool operator==(const CodeRange& rhs) const {
+            return offset >= rhs.begin() && offset < rhs.end();
+        }
+        bool operator<(const CodeRange& rhs) const {
+            return offset < rhs.begin();
+        }
+    };
+};
+
+typedef Vector<CodeRange, 0, SystemAllocPolicy> CodeRangeVector;
+
+// A CacheableChars is used to cacheably store UniqueChars in Module.
+
+struct CacheableChars : public UniqueChars
+{
+    explicit CacheableChars(char* ptr) : UniqueChars(ptr) {}
+    MOZ_IMPLICIT CacheableChars(UniqueChars&& rhs) : UniqueChars(Move(rhs)) {}
+    CacheableChars() = default;
+    CacheableChars(CacheableChars&& rhs) : UniqueChars(Move(rhs)) {}
+    void operator=(CacheableChars&& rhs) { UniqueChars& base = *this; base = Move(rhs); }
+    WASM_DECLARE_SERIALIZABLE(CacheableChars)
+};
+typedef Vector<CacheableChars, 0, SystemAllocPolicy> CacheableCharsVector;
+
+// A UniqueCodePtr owns allocated executable code. Code passed to the Module
+// constructor must be allocated via AllocateCode.
+
+class CodeDeleter
+{
+    uint32_t bytes_;
+  public:
+    explicit CodeDeleter(uint32_t bytes) : bytes_(bytes) {}
+    void operator()(uint8_t* p);
+};
+typedef JS::UniquePtr<uint8_t, CodeDeleter> UniqueCodePtr;
+
+UniqueCodePtr
+AllocateCode(ExclusiveContext* cx, size_t bytes);
+
+// Module represents a compiled WebAssembly module which lives until the last
+// reference to any exported functions is dropped. Modules must be wrapped by a
+// rooted JSObject immediately after creation so that Module::trace() is called
+// during GC. Modules are created after compilation completes and start in a
+// fully unlinked state. After creation, a module must be first statically
+// linked and then dynamically linked:
+//
+//  - Static linking patches code or global data that relies on absolute
+//    addresses. Static linking should happen after a module is serialized into
+//    a cache file so that the cached code is stored unlinked and ready to be
+//    statically linked after deserialization.
+//
+//  - Dynamic linking patches code or global data that relies on the address of
+//    the heap and imports of a module. A module may only be dynamically linked
+//    once. However, a dynamically-linked module may be cloned so that the clone
+//    can be independently dynamically linked.
+//
+// Once fully dynamically linked, a module can have its exports invoked (via
+// entryTrampoline). While executing, profiling may be enabled/disabled (when
+// the Module is not active()) via setProfilingEnabled(). When profiling is
+// enabled, a module's frames will be visible to wasm::ProfilingFrameIterator.
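+//
+// A typical (hypothetical) embedding sequence, assuming a compiled 'module',
+// its 'linkData', a 'heap' buffer and resolved 'imports':
+//
+//   if (!module->staticallyLink(cx, linkData)) return false;
+//   if (!module->dynamicallyLink(cx, heap, imports)) return false;
+//   Module::EntryFuncPtr entry = module->entryTrampoline(exp);
+//   int32_t rc = entry(args, module->globalData());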
+ +class Module +{ + struct ImportExit { + void* code; + jit::BaselineScript* baselineScript; + HeapPtrFunction fun; + static_assert(sizeof(HeapPtrFunction) == sizeof(void*), "for JIT access"); + }; + struct FuncPtrTable { + uint32_t globalDataOffset; + uint32_t numElems; + explicit FuncPtrTable(const StaticLinkData::FuncPtrTable& table) + : globalDataOffset(table.globalDataOffset), + numElems(table.elemOffsets.length()) + {} + }; + typedef Vector FuncPtrTableVector; + typedef Vector FuncLabelVector; + typedef RelocatablePtrArrayBufferObjectMaybeShared BufferPtr; + + // Initialized when constructed: + struct CacheablePod { + const uint32_t functionBytes_; + const uint32_t codeBytes_; + const uint32_t globalBytes_; + const bool usesHeap_; + const bool sharedHeap_; + const bool usesSignalHandlersForOOB_; + const bool usesSignalHandlersForInterrupt_; + } pod; + const UniqueCodePtr code_; + const ImportVector imports_; + const ExportVector exports_; + const HeapAccessVector heapAccesses_; + const CodeRangeVector codeRanges_; + const CallSiteVector callSites_; + const CacheableCharsVector funcNames_; + const CacheableChars filename_; + const bool loadedFromCache_; + + // Initialized during staticallyLink: + bool staticallyLinked_; + uint8_t* interrupt_; + uint8_t* outOfBounds_; + FuncPtrTableVector funcPtrTables_; + + // Initialized during dynamicallyLink: + bool dynamicallyLinked_; + BufferPtr maybeHeap_; + Module** prev_; + Module* next_; + + // Mutated after dynamicallyLink: + bool profilingEnabled_; + FuncLabelVector funcLabels_; + bool interrupted_; + + class AutoMutateCode; + + uint32_t totalBytes() const; + uint8_t* rawHeapPtr() const; + uint8_t*& rawHeapPtr(); + void specializeToHeap(ArrayBufferObjectMaybeShared* heap); + void despecializeFromHeap(ArrayBufferObjectMaybeShared* heap); + void sendCodeRangesToProfiler(JSContext* cx); + ImportExit& importToExit(const Import& import); + + enum CacheBool { NotLoadedFromCache = false, LoadedFromCache = true }; + enum ProfilingBool { ProfilingDisabled = false, ProfilingEnabled = true }; + + static CacheablePod zeroPod(); + void init(); + Module(const CacheablePod& pod, + UniqueCodePtr code, + ImportVector&& imports, + ExportVector&& exports, + HeapAccessVector&& heapAccesses, + CodeRangeVector&& codeRanges, + CallSiteVector&& callSites, + CacheableCharsVector&& funcNames, + CacheableChars filename, + CacheBool loadedFromCache, + ProfilingBool profilingEnabled, + FuncLabelVector&& funcLabels); + + template friend struct js::MallocProvider; + + public: + static const unsigned SizeOfImportExit = sizeof(ImportExit); + static const unsigned OffsetOfImportExitFun = offsetof(ImportExit, fun); + + enum HeapBool { DoesntUseHeap = false, UsesHeap = true }; + enum SharedBool { UnsharedHeap = false, SharedHeap = true }; + + Module(CompileArgs args, + uint32_t functionBytes, + uint32_t codeBytes, + uint32_t globalBytes, + HeapBool usesHeap, + SharedBool sharedHeap, + UniqueCodePtr code, + ImportVector&& imports, + ExportVector&& exports, + HeapAccessVector&& heapAccesses, + CodeRangeVector&& codeRanges, + CallSiteVector&& callSites, + CacheableCharsVector&& funcNames, + CacheableChars filename); + ~Module(); + void trace(JSTracer* trc); + + uint8_t* code() const { return code_.get(); } + uint8_t* globalData() const { return code() + pod.codeBytes_; } + uint32_t globalBytes() const { return pod.globalBytes_; } + bool usesHeap() const { return pod.usesHeap_; } + bool sharedHeap() const { return pod.sharedHeap_; } + CompileArgs compileArgs() const; + const 
ImportVector& imports() const { return imports_; } + const ExportVector& exports() const { return exports_; } + const char* functionName(uint32_t i) const { return funcNames_[i].get(); } + const char* filename() const { return filename_.get(); } + bool loadedFromCache() const { return loadedFromCache_; } + bool staticallyLinked() const { return staticallyLinked_; } + bool dynamicallyLinked() const { return dynamicallyLinked_; } + bool profilingEnabled() const { return profilingEnabled_; } + + // The range [0, functionBytes) is a subrange of [0, codeBytes) that + // contains only function body code, not the stub code. This distinction is + // used by the async interrupt handler to only interrupt when the pc is in + // function code which, in turn, simplifies reasoning about how stubs + // enter/exit. + + bool containsFunctionPC(void* pc) const; + bool containsCodePC(void* pc) const; + const CallSite* lookupCallSite(void* returnAddress) const; + const CodeRange* lookupCodeRange(void* pc) const; + const HeapAccess* lookupHeapAccess(void* pc) const; + + // This function transitions the module from an unlinked state to a + // statically-linked state. The given StaticLinkData must have come from the + // compilation of this module. + + bool staticallyLink(ExclusiveContext* cx, const StaticLinkData& linkData); + + // This function transitions the module from a statically-linked state to a + // dynamically-linked state. If this module usesHeap(), a non-null heap + // buffer must be given. The given import vector must match the module's + // ImportVector. + + bool dynamicallyLink(JSContext* cx, Handle heap, + const AutoVectorRooter& imports); + Module* nextLinked() const; + + // The wasm heap, established by dynamicallyLink. + + ArrayBufferObjectMaybeShared* maybeBuffer() const; + SharedMem maybeHeap() const; + size_t heapLength() const; + + // asm.js may detach and change the heap at any time. As an internal detail, + // the heap may not be changed while the module has been asynchronously + // interrupted. + + bool hasDetachedHeap() const; + bool changeHeap(Handle newBuffer, JSContext* cx); + bool detachHeap(JSContext* cx); + void setInterrupted(bool interrupted); + + // The exports of a wasm module are called by preparing an array of + // arguments (coerced to the corresponding types of the Export signature) + // and calling the export's entry trampoline. All such calls must be + // associated with a containing AsmJSActivation. The innermost + // AsmJSActivation must be maintained in the Module::activation field. + + struct EntryArg { + uint64_t lo; + uint64_t hi; + }; + typedef int32_t (*EntryFuncPtr)(EntryArg* args, uint8_t* global); + EntryFuncPtr entryTrampoline(const Export& func) const; + AsmJSActivation*& activation(); + + // Initially, calls to imports in wasm code call out through the generic + // callImport method. If the imported callee gets JIT compiled and the types + // match up, callImport will patch the code to instead call through a thunk + // directly into the JIT code. If the JIT code is released, the Module must + // be notified so it can go back to the generic callImport. + + bool callImport(JSContext* cx, uint32_t importIndex, unsigned argc, const Value* argv, + MutableHandleValue rval); + void deoptimizeImportExit(uint32_t importIndex); + + // At runtime, when $pc is in wasm function code (containsFunctionPC($pc)), + // $pc may be moved abruptly to interrupt() or outOfBounds() by a signal + // handler or SetContext() from another thread. 
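+    // Both stubs below were located via the StaticLinkData's interruptOffset
+    // and outOfBoundsOffset when this module was statically linked.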
+
+    uint8_t* interrupt() const { MOZ_ASSERT(staticallyLinked_); return interrupt_; }
+    uint8_t* outOfBounds() const { MOZ_ASSERT(staticallyLinked_); return outOfBounds_; }
+
+    // When a module is inactive (no live activations), the profiling mode
+    // can be toggled. WebAssembly frames only show up in the
+    // ProfilingFrameIterator when profiling is enabled.
+
+    bool active() { return !!activation(); }
+    void setProfilingEnabled(bool enabled, JSContext* cx);
+    const char* profilingLabel(uint32_t funcIndex) const;
+
+    // See WASM_DECLARE_SERIALIZABLE.
+    size_t serializedSize() const;
+    uint8_t* serialize(uint8_t* cursor) const;
+    typedef UniquePtr<Module, JS::DeletePolicy<Module>> UniqueModule;
+    static const uint8_t* deserialize(ExclusiveContext* cx, const uint8_t* cursor,
+                                      UniqueModule* out);
+    UniqueModule clone(JSContext* cx, const StaticLinkData& linkData) const;
+    void addSizeOfMisc(MallocSizeOf mallocSizeOf, size_t* asmJSModuleCode,
+                       size_t* asmJSModuleData);
+};
+
+} // namespace wasm
+} // namespace js
+
+#endif // wasm_module_h
diff --git a/js/src/asmjs/WasmSerialize.h b/js/src/asmjs/WasmSerialize.h
new file mode 100644
index 00000000000..6efccb83555
--- /dev/null
+++ b/js/src/asmjs/WasmSerialize.h
@@ -0,0 +1,350 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sts=4 et sw=4 tw=99:
+ *
+ * Copyright 2015 Mozilla Foundation
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef wasm_serialize_h
+#define wasm_serialize_h
+
+#include "jit/MacroAssembler.h"
+
+namespace js {
+namespace wasm {
+
+// Factor out common serialization, cloning and about:memory size-computation
+// functions for reuse when serializing wasm and asm.js modules.
+
+static inline uint8_t*
+WriteBytes(uint8_t* dst, const void* src, size_t nbytes)
+{
+    memcpy(dst, src, nbytes);
+    return dst + nbytes;
+}
+
+static inline const uint8_t*
+ReadBytes(const uint8_t* src, void* dst, size_t nbytes)
+{
+    memcpy(dst, src, nbytes);
+    return src + nbytes;
+}
+
+template <class T>
+static inline uint8_t*
+WriteScalar(uint8_t* dst, T t)
+{
+    memcpy(dst, &t, sizeof(t));
+    return dst + sizeof(t);
+}
+
+template <class T>
+static inline const uint8_t*
+ReadScalar(const uint8_t* src, T* dst)
+{
+    memcpy(dst, src, sizeof(*dst));
+    return src + sizeof(*dst);
+}
+
+static inline size_t
+SerializedNameSize(PropertyName* name)
+{
+    size_t s = sizeof(uint32_t);
+    if (name)
+        s += name->length() * (name->hasLatin1Chars() ? sizeof(Latin1Char) : sizeof(char16_t));
+    return s;
+}
+
+static inline uint8_t*
+SerializeName(uint8_t* cursor, PropertyName* name)
+{
+    MOZ_ASSERT_IF(name, !name->empty());
+    if (name) {
+        static_assert(JSString::MAX_LENGTH <= INT32_MAX, "String length must fit in 31 bits");
+        uint32_t length = name->length();
+        uint32_t lengthAndEncoding = (length << 1) | uint32_t(name->hasLatin1Chars());
+        cursor = WriteScalar<uint32_t>(cursor, lengthAndEncoding);
+        JS::AutoCheckCannotGC nogc;
+        if (name->hasLatin1Chars())
+            cursor = WriteBytes(cursor, name->latin1Chars(nogc), length * sizeof(Latin1Char));
+        else
+            cursor = WriteBytes(cursor, name->twoByteChars(nogc), length * sizeof(char16_t));
+    } else {
+        cursor = WriteScalar<uint32_t>(cursor, 0);
+    }
+    return cursor;
+}
+
+template <class CharT>
+static inline const uint8_t*
+DeserializeChars(ExclusiveContext* cx, const uint8_t* cursor, size_t length, PropertyName** name)
+{
+    Vector<CharT> tmp(cx);
+    CharT* src;
+    if ((size_t(cursor) & (sizeof(CharT) - 1)) != 0) {
+        // Align 'src' for AtomizeChars.
+        if (!tmp.resize(length))
+            return nullptr;
+        memcpy(tmp.begin(), cursor, length * sizeof(CharT));
+        src = tmp.begin();
+    } else {
+        src = (CharT*)cursor;
+    }
+
+    JSAtom* atom = AtomizeChars(cx, src, length);
+    if (!atom)
+        return nullptr;
+
+    *name = atom->asPropertyName();
+    return cursor + length * sizeof(CharT);
+}
+
+static inline const uint8_t*
+DeserializeName(ExclusiveContext* cx, const uint8_t* cursor, PropertyName** name)
+{
+    uint32_t lengthAndEncoding;
+    cursor = ReadScalar(cursor, &lengthAndEncoding);
+
+    uint32_t length = lengthAndEncoding >> 1;
+    if (length == 0) {
+        *name = nullptr;
+        return cursor;
+    }
+
+    bool latin1 = lengthAndEncoding & 0x1;
+    return latin1
+           ? DeserializeChars<Latin1Char>(cx, cursor, length, name)
+           : DeserializeChars<char16_t>(cx, cursor, length, name);
+}
+
+template <class T, size_t N>
+static inline size_t
+SerializedVectorSize(const mozilla::Vector<T, N, SystemAllocPolicy>& vec)
+{
+    size_t size = sizeof(uint32_t);
+    for (size_t i = 0; i < vec.length(); i++)
+        size += vec[i].serializedSize();
+    return size;
+}
+
+template <class T, size_t N>
+static inline uint8_t*
+SerializeVector(uint8_t* cursor, const mozilla::Vector<T, N, SystemAllocPolicy>& vec)
+{
+    cursor = WriteScalar<uint32_t>(cursor, vec.length());
+    for (size_t i = 0; i < vec.length(); i++)
+        cursor = vec[i].serialize(cursor);
+    return cursor;
+}
+
+template <class T, size_t N>
+static inline const uint8_t*
+DeserializeVector(ExclusiveContext* cx, const uint8_t* cursor,
+                  mozilla::Vector<T, N, SystemAllocPolicy>* vec)
+{
+    uint32_t length;
+    cursor = ReadScalar(cursor, &length);
+    if (!vec->resize(length))
+        return nullptr;
+    for (size_t i = 0; i < vec->length(); i++) {
+        if (!(cursor = (*vec)[i].deserialize(cx, cursor)))
+            return nullptr;
+    }
+    return cursor;
+}
+
+template <class T, size_t N>
+static inline bool
+CloneVector(JSContext* cx, const mozilla::Vector<T, N, SystemAllocPolicy>& in,
+            mozilla::Vector<T, N, SystemAllocPolicy>* out)
+{
+    if (!out->resize(in.length()))
+        return false;
+    for (size_t i = 0; i < in.length(); i++) {
+        if (!in[i].clone(cx, &(*out)[i]))
+            return false;
+    }
+    return true;
+}
+
+template <class T, size_t N>
+static inline size_t
+SizeOfVectorExcludingThis(const mozilla::Vector<T, N, SystemAllocPolicy>& vec,
+                          MallocSizeOf mallocSizeOf)
+{
+    size_t size = vec.sizeOfExcludingThis(mallocSizeOf);
+    for (const T& t : vec)
+        size += t.sizeOfExcludingThis(mallocSizeOf);
+    return size;
+}
+
+template <class T, size_t N>
+static inline size_t
+SerializedPodVectorSize(const mozilla::Vector<T, N, SystemAllocPolicy>& vec)
+{
+    return sizeof(uint32_t) +
+           vec.length() * sizeof(T);
+}
+
+template <class T, size_t N>
+static inline uint8_t*
+SerializePodVector(uint8_t* cursor, const mozilla::Vector<T, N, SystemAllocPolicy>& vec)
+{
+    cursor = WriteScalar<uint32_t>(cursor, vec.length());
+    cursor = WriteBytes(cursor, vec.begin(), vec.length() * sizeof(T));
+    return cursor;
+}
+
+template <class T, size_t N>
+static inline const uint8_t*
+DeserializePodVector(ExclusiveContext* cx, const uint8_t* cursor,
+                     mozilla::Vector<T, N, SystemAllocPolicy>* vec)
+{
+    uint32_t length;
+    cursor = ReadScalar(cursor, &length);
+    if (!vec->resize(length))
+        return nullptr;
+    cursor = ReadBytes(cursor, vec->begin(), length * sizeof(T));
+    return cursor;
+}
+
+template <class T, size_t N>
+static inline bool
+ClonePodVector(JSContext* cx, const mozilla::Vector<T, N, SystemAllocPolicy>& in,
+               mozilla::Vector<T, N, SystemAllocPolicy>* out)
+{
+    if (!out->resize(in.length()))
+        return false;
+    mozilla::PodCopy(out->begin(), in.begin(), in.length());
+    return true;
+}
+
+static inline bool
+GetCPUID(uint32_t* cpuId)
+{
+    enum Arch {
+        X86 = 0x1,
+        X64 = 0x2,
+        ARM = 0x3,
+        MIPS = 0x4,
+        MIPS64 = 0x5,
+        ARCH_BITS = 3
+    };
+
+#if defined(JS_CODEGEN_X86)
+    MOZ_ASSERT(uint32_t(jit::CPUInfo::GetSSEVersion()) <= (UINT32_MAX >> ARCH_BITS));
+    *cpuId = X86 | (uint32_t(jit::CPUInfo::GetSSEVersion()) << ARCH_BITS);
+    return true;
+#elif defined(JS_CODEGEN_X64)
+    MOZ_ASSERT(uint32_t(jit::CPUInfo::GetSSEVersion()) <= (UINT32_MAX >> ARCH_BITS));
+    *cpuId = X64 | (uint32_t(jit::CPUInfo::GetSSEVersion()) << ARCH_BITS);
+    return true;
+#elif defined(JS_CODEGEN_ARM)
+    MOZ_ASSERT(jit::GetARMFlags() <= (UINT32_MAX >> ARCH_BITS));
+    *cpuId = ARM | (jit::GetARMFlags() << ARCH_BITS);
+    return true;
+#elif defined(JS_CODEGEN_MIPS32)
+    MOZ_ASSERT(jit::GetMIPSFlags() <= (UINT32_MAX >> ARCH_BITS));
+    *cpuId = MIPS | (jit::GetMIPSFlags() << ARCH_BITS);
+    return true;
+#elif defined(JS_CODEGEN_MIPS64)
+    MOZ_ASSERT(jit::GetMIPSFlags() <= (UINT32_MAX >> ARCH_BITS));
+    *cpuId = MIPS64 | (jit::GetMIPSFlags() << ARCH_BITS);
+    return true;
+#else
+    return false;
+#endif
+}
+
+class MachineId
+{
+    uint32_t cpuId_;
+    JS::BuildIdCharVector buildId_;
+
+  public:
+    bool extractCurrentState(ExclusiveContext* cx) {
+        if (!cx->asmJSCacheOps().buildId)
+            return false;
+        if (!cx->asmJSCacheOps().buildId(&buildId_))
+            return false;
+        if (!GetCPUID(&cpuId_))
+            return false;
+        return true;
+    }
+
+    size_t serializedSize() const {
+        return sizeof(uint32_t) +
+               SerializedPodVectorSize(buildId_);
+    }
+
+    uint8_t* serialize(uint8_t* cursor) const {
+        cursor = WriteScalar<uint32_t>(cursor, cpuId_);
+        cursor = SerializePodVector(cursor, buildId_);
+        return cursor;
+    }
+
+    const uint8_t* deserialize(ExclusiveContext* cx, const uint8_t* cursor) {
+        (cursor = ReadScalar(cursor, &cpuId_)) &&
+        (cursor = DeserializePodVector(cx, cursor, &buildId_));
+        return cursor;
+    }
+
+    bool operator==(const MachineId& rhs) const {
+        return cpuId_ == rhs.cpuId_ &&
+               buildId_.length() == rhs.buildId_.length() &&
+               mozilla::PodEqual(buildId_.begin(), rhs.buildId_.begin(), buildId_.length());
+    }
+    bool operator!=(const MachineId& rhs) const {
+        return !(*this == rhs);
+    }
+};
+
+struct ScopedCacheEntryOpenedForWrite
+{
+    ExclusiveContext* cx;
+    const size_t serializedSize;
+    uint8_t* memory;
+    intptr_t handle;
+
+    ScopedCacheEntryOpenedForWrite(ExclusiveContext* cx, size_t serializedSize)
+      : cx(cx), serializedSize(serializedSize), memory(nullptr), handle(-1)
+    {}
+
+    ~ScopedCacheEntryOpenedForWrite() {
+        if (memory)
+            cx->asmJSCacheOps().closeEntryForWrite(serializedSize, memory, handle);
+    }
+};
+
+struct ScopedCacheEntryOpenedForRead
+{
+    ExclusiveContext* cx;
+    size_t serializedSize;
+    const uint8_t* memory;
+    intptr_t handle;
+
+    explicit ScopedCacheEntryOpenedForRead(ExclusiveContext* cx)
+      : cx(cx), serializedSize(0), memory(nullptr), handle(0)
+    {}
+
+    ~ScopedCacheEntryOpenedForRead() {
+        if (memory)
cx->asmJSCacheOps().closeEntryForRead(serializedSize, memory, handle); + } +}; + +} // namespace wasm +} // namespace js + +#endif // wasm_serialize_h diff --git a/js/src/asmjs/AsmJSSignalHandlers.cpp b/js/src/asmjs/WasmSignalHandlers.cpp similarity index 96% rename from js/src/asmjs/AsmJSSignalHandlers.cpp rename to js/src/asmjs/WasmSignalHandlers.cpp index 0bf27a0ed16..a9999c364c1 100644 --- a/js/src/asmjs/AsmJSSignalHandlers.cpp +++ b/js/src/asmjs/WasmSignalHandlers.cpp @@ -16,18 +16,20 @@ * limitations under the License. */ -#include "asmjs/AsmJSSignalHandlers.h" +#include "asmjs/WasmSignalHandlers.h" #include "mozilla/DebugOnly.h" #include "mozilla/PodOperations.h" #include "asmjs/AsmJSModule.h" +#include "asmjs/AsmJSValidate.h" #include "jit/AtomicOperations.h" #include "jit/Disassembler.h" #include "vm/Runtime.h" using namespace js; using namespace js::jit; +using namespace js::wasm; using JS::GenericNaN; using mozilla::DebugOnly; @@ -600,12 +602,12 @@ ComputeAccessAddress(EMULATOR_CONTEXT* context, const Disassembler::ComplexAddre MOZ_COLD static uint8_t* EmulateHeapAccess(EMULATOR_CONTEXT* context, uint8_t* pc, uint8_t* faultingAddress, - const HeapAccess* heapAccess, const AsmJSModule& module) + const HeapAccess* heapAccess, const Module& module) { MOZ_RELEASE_ASSERT(module.containsFunctionPC(pc)); - MOZ_RELEASE_ASSERT(module.usesSignalHandlersForOOB()); + MOZ_RELEASE_ASSERT(module.compileArgs().useSignalHandlersForOOB); MOZ_RELEASE_ASSERT(!heapAccess->hasLengthCheck()); - MOZ_RELEASE_ASSERT(heapAccess->insnOffset() == (pc - module.codeBase())); + MOZ_RELEASE_ASSERT(heapAccess->insnOffset() == (pc - module.code())); // Disassemble the instruction which caused the trap so that we can extract // information about it and decide what to do. @@ -704,7 +706,7 @@ EmulateHeapAccess(EMULATOR_CONTEXT* context, uint8_t* pc, uint8_t* faultingAddre // load/store that we should handle. if (heapAccess->throwOnOOB()) - return module.outOfBoundsExit(); + return module.outOfBounds(); switch (access.kind()) { case Disassembler::HeapAccess::Load: @@ -755,7 +757,7 @@ HandleFault(PEXCEPTION_POINTERS exception) if (!activation) return false; - const AsmJSModule& module = activation->module(); + const Module& module = activation->module().wasm(); // These checks aren't necessary, but, since we can, check anyway to make // sure we aren't covering up a real bug. @@ -772,18 +774,14 @@ HandleFault(PEXCEPTION_POINTERS exception) // between a faulting heap access and the handling of the fault due // to InterruptRunningCode's use of SuspendThread. When this happens, // after ResumeThread, the exception handler is called with pc equal to - // module.interruptExit, which is logically wrong. The Right Thing would + // module.interrupt, which is logically wrong. The Right Thing would // be for the OS to make fault-handling atomic (so that CONTEXT.pc was // always the logically-faulting pc). Fortunately, we can detect this // case and silence the exception ourselves (the exception will // retrigger after the interrupt jumps back to resumePC). 
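Stepping back to WasmSerialize.h above: the helpers compose into a simple cursor protocol in which serializedSize() sizes the buffer, serialize() threads a cursor over the fields in a fixed order, and deserialize() reads them back in the same order. A minimal sketch for a hypothetical two-field struct (the struct, its names, and the exact mozilla::Vector parameters are assumptions for illustration):

    struct Example {
        uint32_t flags;
        mozilla::Vector<uint8_t, 0, js::SystemAllocPolicy> bytes;

        size_t serializedSize() const {
            return sizeof(uint32_t) + SerializedPodVectorSize(bytes);
        }
        uint8_t* serialize(uint8_t* cursor) const {
            cursor = WriteScalar<uint32_t>(cursor, flags);
            cursor = SerializePodVector(cursor, bytes);
            return cursor;
        }
        const uint8_t* deserialize(ExclusiveContext* cx, const uint8_t* cursor) {
            cursor = ReadScalar<uint32_t>(cursor, &flags);
            return DeserializePodVector(cx, cursor, &bytes);  // null on OOM
        }
    };

MachineId above follows exactly this shape and is what ties a cache entry to one build and one CPU: any difference in cpuId_ or buildId_ makes MachineId::operator== fail, so a stale entry is rejected rather than deserialized.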
- if (pc == module.interruptExit() && - module.containsFunctionPC(activation->resumePC()) && - module.lookupHeapAccess(activation->resumePC())) - { - return true; - } - return false; + return pc == module.interrupt() && + module.containsFunctionPC(activation->resumePC()) && + module.lookupHeapAccess(activation->resumePC()); } const HeapAccess* heapAccess = module.lookupHeapAccess(pc); @@ -902,7 +900,7 @@ HandleMachException(JSRuntime* rt, const ExceptionRequest& request) if (!activation) return false; - const AsmJSModule& module = activation->module(); + const Module& module = activation->module().wasm(); if (!module.containsFunctionPC(pc)) return false; @@ -939,11 +937,11 @@ static const mach_msg_id_t sExceptionId = 2405; // The choice of id here is arbitrary, the only constraint is that sQuitId != sExceptionId. static const mach_msg_id_t sQuitId = 42; -void -AsmJSMachExceptionHandlerThread(void* threadArg) +static void +MachExceptionHandlerThread(void* threadArg) { JSRuntime* rt = reinterpret_cast(threadArg); - mach_port_t port = rt->asmJSMachExceptionHandler.port(); + mach_port_t port = rt->wasmMachExceptionHandler.port(); kern_return_t kret; while(true) { @@ -954,7 +952,7 @@ AsmJSMachExceptionHandlerThread(void* threadArg) // If we fail even receiving the message, we can't even send a reply! // Rather than hanging the faulting thread (hanging the browser), crash. if (kret != KERN_SUCCESS) { - fprintf(stderr, "AsmJSMachExceptionHandlerThread: mach_msg failed with %d\n", (int)kret); + fprintf(stderr, "MachExceptionHandlerThread: mach_msg failed with %d\n", (int)kret); MOZ_CRASH(); } @@ -992,14 +990,14 @@ AsmJSMachExceptionHandlerThread(void* threadArg) } } -AsmJSMachExceptionHandler::AsmJSMachExceptionHandler() +MachExceptionHandler::MachExceptionHandler() : installed_(false), thread_(nullptr), port_(MACH_PORT_NULL) {} void -AsmJSMachExceptionHandler::uninstall() +MachExceptionHandler::uninstall() { if (installed_) { thread_port_t thread = mach_thread_self(); @@ -1025,7 +1023,7 @@ AsmJSMachExceptionHandler::uninstall() kern_return_t kret = mach_msg(&msg, MACH_SEND_MSG, sizeof(msg), 0, MACH_PORT_NULL, MACH_MSG_TIMEOUT_NONE, MACH_PORT_NULL); if (kret != KERN_SUCCESS) { - fprintf(stderr, "AsmJSMachExceptionHandler: failed to send quit message: %d\n", (int)kret); + fprintf(stderr, "MachExceptionHandler: failed to send quit message: %d\n", (int)kret); MOZ_CRASH(); } @@ -1041,7 +1039,7 @@ AsmJSMachExceptionHandler::uninstall() } bool -AsmJSMachExceptionHandler::install(JSRuntime* rt) +MachExceptionHandler::install(JSRuntime* rt) { MOZ_ASSERT(!installed()); kern_return_t kret; @@ -1056,7 +1054,7 @@ AsmJSMachExceptionHandler::install(JSRuntime* rt) goto error; // Create a thread to block on reading port_. 
- thread_ = PR_CreateThread(PR_USER_THREAD, AsmJSMachExceptionHandlerThread, rt, + thread_ = PR_CreateThread(PR_USER_THREAD, MachExceptionHandlerThread, rt, PR_PRIORITY_NORMAL, PR_GLOBAL_THREAD, PR_JOINABLE_THREAD, 0); if (!thread_) goto error; @@ -1112,7 +1110,7 @@ HandleFault(int signum, siginfo_t* info, void* ctx) if (!activation) return false; - const AsmJSModule& module = activation->module(); + const Module& module = activation->module().wasm(); if (!module.containsFunctionPC(pc)) return false; @@ -1185,18 +1183,18 @@ RedirectJitCodeToInterruptCheck(JSRuntime* rt, CONTEXT* context) RedirectIonBackedgesToInterruptCheck(rt); if (AsmJSActivation* activation = rt->asmJSActivationStack()) { - const AsmJSModule& module = activation->module(); + const Module& module = activation->module().wasm(); #ifdef JS_SIMULATOR if (module.containsFunctionPC(rt->simulator()->get_pc_as())) - rt->simulator()->set_resume_pc(module.interruptExit()); + rt->simulator()->set_resume_pc(module.interrupt()); #endif uint8_t** ppc = ContextToPC(context); uint8_t* pc = *ppc; if (module.containsFunctionPC(pc)) { activation->setResumePC(pc); - *ppc = module.interruptExit(); + *ppc = module.interrupt(); return true; } } @@ -1223,11 +1221,11 @@ JitInterruptHandler(int signum, siginfo_t* info, void* context) #endif bool -js::EnsureSignalHandlersInstalled(JSRuntime* rt) +wasm::EnsureSignalHandlersInstalled(JSRuntime* rt) { #if defined(XP_DARWIN) && defined(ASMJS_MAY_USE_SIGNAL_HANDLERS_FOR_OOB) // On OSX, each JSRuntime gets its own handler thread. - if (!rt->asmJSMachExceptionHandler.installed() && !rt->asmJSMachExceptionHandler.install(rt)) + if (!rt->wasmMachExceptionHandler.installed() && !rt->wasmMachExceptionHandler.install(rt)) return false; #endif diff --git a/js/src/asmjs/AsmJSSignalHandlers.h b/js/src/asmjs/WasmSignalHandlers.h similarity index 86% rename from js/src/asmjs/AsmJSSignalHandlers.h rename to js/src/asmjs/WasmSignalHandlers.h index dc29ca3bd1f..f6ec0720be8 100644 --- a/js/src/asmjs/AsmJSSignalHandlers.h +++ b/js/src/asmjs/WasmSignalHandlers.h @@ -16,8 +16,8 @@ * limitations under the License. */ -#ifndef asmjs_AsmJSSignalHandlers_h -#define asmjs_AsmJSSignalHandlers_h +#ifndef wasm_signal_handlers_h +#define wasm_signal_handlers_h #if defined(XP_DARWIN) && defined(ASMJS_MAY_USE_SIGNAL_HANDLERS_FOR_OOB) # include @@ -28,6 +28,12 @@ struct JSRuntime; namespace js { +// Force any currently-executing asm.js/ion code to call HandleExecutionInterrupt. +extern void +InterruptRunningJitCode(JSRuntime* rt); + +namespace wasm { + // Set up any signal/exception handlers needed to execute code in the given // runtime. Return whether runtime can: // - rely on fault handler support for avoiding asm.js heap bounds checks @@ -35,10 +41,6 @@ namespace js { bool EnsureSignalHandlersInstalled(JSRuntime* rt); -// Force any currently-executing asm.js code to call HandleExecutionInterrupt. -extern void -InterruptRunningJitCode(JSRuntime* rt); - #if defined(XP_DARWIN) && defined(ASMJS_MAY_USE_SIGNAL_HANDLERS_FOR_OOB) // On OSX we are forced to use the lower-level Mach exception mechanism instead // of Unix signals. Mach exceptions are not handled on the victim's stack but @@ -46,7 +48,7 @@ InterruptRunningJitCode(JSRuntime* rt); // per JSRuntime (upon the first use of asm.js in the JSRuntime). This thread // and related resources are owned by AsmJSMachExceptionHandler which is owned // by JSRuntime. 
-class AsmJSMachExceptionHandler +class MachExceptionHandler { bool installed_; PRThread* thread_; @@ -55,14 +57,15 @@ class AsmJSMachExceptionHandler void uninstall(); public: - AsmJSMachExceptionHandler(); - ~AsmJSMachExceptionHandler() { uninstall(); } + MachExceptionHandler(); + ~MachExceptionHandler() { uninstall(); } mach_port_t port() const { return port_; } bool installed() const { return installed_; } bool install(JSRuntime* rt); }; #endif +} // namespace wasm } // namespace js -#endif // asmjs_AsmJSSignalHandlers_h +#endif // wasm_signal_handlers_h diff --git a/js/src/asmjs/WasmStubs.cpp b/js/src/asmjs/WasmStubs.cpp index 8247caf3829..336c60d4078 100644 --- a/js/src/asmjs/WasmStubs.cpp +++ b/js/src/asmjs/WasmStubs.cpp @@ -21,8 +21,6 @@ #include "mozilla/ArrayUtils.h" #include "mozilla/EnumeratedRange.h" -#include "asmjs/AsmJSModule.h" - #include "jit/MacroAssembler-inl.h" using namespace js; @@ -97,20 +95,18 @@ static const unsigned FramePushedAfterSave = NonVolatileRegs.gprs().size() * siz static const unsigned FramePushedForEntrySP = FramePushedAfterSave + sizeof(void*); // Generate a stub that enters wasm from a C++ caller via the native ABI. -// The signature of the entry point is AsmJSModule::CodePtr. The exported wasm +// The signature of the entry point is Module::CodePtr. The exported wasm // function has an ABI derived from its specific signature, so this function // must map from the ABI of CodePtr to the export's signature's ABI. static bool -GenerateEntry(MacroAssembler& masm, AsmJSModule& module, unsigned exportIndex, - const FuncOffsetVector& funcOffsets) +GenerateEntry(ModuleGenerator& mg, unsigned exportIndex, Module::HeapBool usesHeap) { - AsmJSModule::ExportedFunction& exp = module.exportedFunction(exportIndex); - if (exp.isChangeHeap()) - return true; + MacroAssembler& masm = mg.masm(); + const MallocSig& sig = mg.exportSig(exportIndex); masm.haltingAlign(CodeAlignment); - AsmJSOffsets offsets; + Offsets offsets; offsets.begin = masm.currentOffset(); // Save the return address if it wasn't already saved by the call insn. @@ -139,7 +135,8 @@ GenerateEntry(MacroAssembler& masm, AsmJSModule& module, unsigned exportIndex, // ARM, MIPS/MIPS64 and x64 have a globally-pinned HeapReg (x86 uses immediates in // effective addresses). Loading the heap register depends on the global // register already having been loaded. - masm.loadAsmJSHeapRegisterFromGlobalData(); + if (usesHeap) + masm.loadAsmJSHeapRegisterFromGlobalData(); // Put the 'argv' argument into a non-argument/return register so that we // can use 'argv' while we fill in the arguments for the asm.js callee. @@ -168,12 +165,12 @@ GenerateEntry(MacroAssembler& masm, AsmJSModule& module, unsigned exportIndex, masm.andToStackPtr(Imm32(~(AsmJSStackAlignment - 1))); // Bump the stack for the call. - masm.reserveStack(AlignBytes(StackArgBytes(exp.sig().args()), AsmJSStackAlignment)); + masm.reserveStack(AlignBytes(StackArgBytes(sig.args()), AsmJSStackAlignment)); // Copy parameters out of argv and into the registers/stack-slots specified by // the system ABI. 
- for (ABIArgValTypeIter iter(exp.sig().args()); !iter.done(); iter++) { - unsigned argOffset = iter.index() * sizeof(AsmJSModule::EntryArg); + for (ABIArgValTypeIter iter(sig.args()); !iter.done(); iter++) { + unsigned argOffset = iter.index() * sizeof(Module::EntryArg); Address src(argv, argOffset); MIRType type = iter.mirType(); switch (iter->kind()) { @@ -186,7 +183,7 @@ GenerateEntry(MacroAssembler& masm, AsmJSModule& module, unsigned exportIndex, break; #endif case ABIArg::FPU: { - static_assert(sizeof(AsmJSModule::EntryArg) >= jit::Simd128DataSize, + static_assert(sizeof(Module::EntryArg) >= jit::Simd128DataSize, "EntryArg must be big enough to store SIMD values"); switch (type) { case MIRType_Int32x4: @@ -243,7 +240,7 @@ GenerateEntry(MacroAssembler& masm, AsmJSModule& module, unsigned exportIndex, // Call into the real function. masm.assertStackAlignment(AsmJSStackAlignment); Label target; - target.bind(funcOffsets[exp.funcIndex()]); + target.bind(mg.funcEntryOffsets()[mg.exportFuncIndex(exportIndex)]); masm.call(CallSiteDesc(CallSiteDesc::Relative), &target); // Recover the stack pointer value before dynamic alignment. @@ -255,7 +252,7 @@ GenerateEntry(MacroAssembler& masm, AsmJSModule& module, unsigned exportIndex, masm.Pop(argv); // Store the return value in argv[0] - switch (exp.sig().ret()) { + switch (sig.ret()) { case ExprType::Void: break; case ExprType::I32: @@ -291,117 +288,8 @@ GenerateEntry(MacroAssembler& masm, AsmJSModule& module, unsigned exportIndex, if (masm.oom()) return false; - exp.initCodeOffset(offsets.begin); offsets.end = masm.currentOffset(); - return module.addCodeRange(AsmJSModule::CodeRange::Entry, offsets); -} - -// Generate a thunk that updates fp before calling the given builtin so that -// both the builtin and the calling function show up in profiler stacks. (This -// thunk is dynamically patched in when profiling is enabled.) Since the thunk -// pushes an AsmJSFrame on the stack, that means we must rebuild the stack -// frame. Fortunately, these are low arity functions and everything is passed in -// regs on everything but x86 anyhow. -// -// NB: Since this thunk is being injected at system ABI callsites, it must -// preserve the argument registers (going in) and the return register -// (coming out) and preserve non-volatile registers. 
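Stepping back to GenerateEntry above: seen from C++, every export has the same uniform shape. The caller coerces each argument into a Module::EntryArg slot, calls the trampoline returned by entryTrampoline(), and, on success, reads the result back from argv[0], which is where the "Store the return value in argv[0]" block put it. A hedged sketch for an (i32, f64) -> i32 export, with module, exp, and global as stand-ins for real values:

    int32_t i32Arg = 1;
    double f64Arg = 2.5;
    Module::EntryArg args[2];
    memcpy(&args[0], &i32Arg, sizeof(i32Arg));   // coerced argument 0
    memcpy(&args[1], &f64Arg, sizeof(f64Arg));   // coerced argument 1
    Module::EntryFuncPtr entry = module.entryTrampoline(exp);
    if (entry(args, global)) {                   // false: an exception was thrown
        int32_t result;
        memcpy(&result, &args[0], sizeof(result));  // stub stored it in argv[0]
    }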
-static bool -GenerateBuiltinThunk(MacroAssembler& masm, AsmJSModule& module, Builtin builtin) -{ - MIRTypeVector args; - switch (builtin) { - case Builtin::ToInt32: - MOZ_ALWAYS_TRUE(args.append(MIRType_Int32)); - break; -#if defined(JS_CODEGEN_ARM) - case Builtin::aeabi_idivmod: - case Builtin::aeabi_uidivmod: - MOZ_ALWAYS_TRUE(args.append(MIRType_Int32)); - MOZ_ALWAYS_TRUE(args.append(MIRType_Int32)); - break; - case Builtin::AtomicCmpXchg: - MOZ_ALWAYS_TRUE(args.append(MIRType_Int32)); - MOZ_ALWAYS_TRUE(args.append(MIRType_Int32)); - MOZ_ALWAYS_TRUE(args.append(MIRType_Int32)); - MOZ_ALWAYS_TRUE(args.append(MIRType_Int32)); - break; - case Builtin::AtomicXchg: - case Builtin::AtomicFetchAdd: - case Builtin::AtomicFetchSub: - case Builtin::AtomicFetchAnd: - case Builtin::AtomicFetchOr: - case Builtin::AtomicFetchXor: - MOZ_ALWAYS_TRUE(args.append(MIRType_Int32)); - MOZ_ALWAYS_TRUE(args.append(MIRType_Int32)); - MOZ_ALWAYS_TRUE(args.append(MIRType_Int32)); - break; -#endif - case Builtin::SinD: - case Builtin::CosD: - case Builtin::TanD: - case Builtin::ASinD: - case Builtin::ACosD: - case Builtin::ATanD: - case Builtin::CeilD: - case Builtin::FloorD: - case Builtin::ExpD: - case Builtin::LogD: - MOZ_ALWAYS_TRUE(args.append(MIRType_Double)); - break; - case Builtin::ModD: - case Builtin::PowD: - case Builtin::ATan2D: - MOZ_ALWAYS_TRUE(args.append(MIRType_Double)); - MOZ_ALWAYS_TRUE(args.append(MIRType_Double)); - break; - case Builtin::CeilF: - case Builtin::FloorF: - MOZ_ALWAYS_TRUE(args.append(MIRType_Float32)); - break; - case Builtin::Limit: - MOZ_CRASH("Bad builtin"); - } - - MOZ_ASSERT(args.length() <= 4); - static_assert(MIRTypeVector::InlineLength >= 4, "infallibility of append"); - - MOZ_ASSERT(masm.framePushed() == 0); - uint32_t framePushed = StackDecrementForCall(masm, ABIStackAlignment, args); - - AsmJSProfilingOffsets offsets; - GenerateAsmJSExitPrologue(masm, framePushed, ExitReason(builtin), &offsets); - - for (ABIArgMIRTypeIter i(args); !i.done(); i++) { - if (i->kind() != ABIArg::Stack) - continue; -#if !defined(JS_CODEGEN_ARM) - unsigned offsetToCallerStackArgs = sizeof(AsmJSFrame) + masm.framePushed(); - Address srcAddr(masm.getStackPointer(), offsetToCallerStackArgs + i->offsetFromArgBase()); - Address dstAddr(masm.getStackPointer(), i->offsetFromArgBase()); - if (i.mirType() == MIRType_Int32 || i.mirType() == MIRType_Float32) { - masm.load32(srcAddr, ABIArgGenerator::NonArg_VolatileReg); - masm.store32(ABIArgGenerator::NonArg_VolatileReg, dstAddr); - } else { - MOZ_ASSERT(i.mirType() == MIRType_Double); - masm.loadDouble(srcAddr, ScratchDoubleReg); - masm.storeDouble(ScratchDoubleReg, dstAddr); - } -#else - MOZ_CRASH("Architecture should have enough registers for all builtin calls"); -#endif - } - - AssertStackAlignment(masm, ABIStackAlignment); - masm.call(BuiltinToImmediate(builtin)); - - GenerateAsmJSExitEpilogue(masm, framePushed, ExitReason(builtin), &offsets); - - if (masm.oom()) - return false; - - offsets.end = masm.currentOffset(); - return module.addBuiltinThunkCodeRange(builtin, offsets); + return mg.defineExport(exportIndex, offsets); } static void @@ -444,17 +332,13 @@ FillArgumentArray(MacroAssembler& masm, const MallocSig::ArgVector& args, unsign } } -// If an FFI detaches its heap (viz., via ArrayBuffer.transfer), it must +// If an import call detaches its heap (viz., via ArrayBuffer.transfer), it must // call change-heap to another heap (viz., the new heap returned by transfer) // before returning to asm.js code. 
If the application fails to do this (if the // heap pointer is null), jump to a stub. static void -CheckForHeapDetachment(MacroAssembler& masm, const AsmJSModule& module, Register scratch, - Label* onDetached) +CheckForHeapDetachment(MacroAssembler& masm, Register scratch, Label* onDetached) { - if (!module.hasArrayView()) - return; - MOZ_ASSERT(int(masm.framePushed()) >= int(ShadowStackSpace)); AssertStackAlignment(masm, ABIStackAlignment); #if defined(JS_CODEGEN_X86) @@ -467,18 +351,19 @@ CheckForHeapDetachment(MacroAssembler& masm, const AsmJSModule& module, Register } // Generate a stub that is called via the internal ABI derived from the -// signature of the exit and calls into an appropriate InvokeFromAsmJS_* C++ +// signature of the import and calls into an appropriate InvokeImport C++ // function, having boxed all the ABI arguments into a homogeneous Value array. static bool -GenerateInterpExit(MacroAssembler& masm, AsmJSModule& module, unsigned exitIndex, - Label* throwLabel, Label* onDetached) +GenerateInterpExitStub(ModuleGenerator& mg, unsigned importIndex, Module::HeapBool usesHeap, + Label* throwLabel, Label* onDetached, ProfilingOffsets* offsets) { - AsmJSModule::Exit& exit = module.exit(exitIndex); + MacroAssembler& masm = mg.masm(); + const MallocSig& sig = mg.importSig(importIndex); masm.setFramePushed(0); - // Argument types for InvokeFromAsmJS_*: - static const MIRType typeArray[] = { MIRType_Pointer, // exitDatum + // Argument types for InvokeImport_*: + static const MIRType typeArray[] = { MIRType_Pointer, // ImportExit MIRType_Int32, // argc MIRType_Pointer }; // argv MIRTypeVector invokeArgTypes; @@ -489,29 +374,28 @@ GenerateInterpExit(MacroAssembler& masm, AsmJSModule& module, unsigned exitIndex // The padding between stack args and argv ensures that argv is aligned. The // padding between argv and retaddr ensures that sp is aligned. unsigned argOffset = AlignBytes(StackArgBytes(invokeArgTypes), sizeof(double)); - unsigned argBytes = Max(1, exit.sig().args().length()) * sizeof(Value); + unsigned argBytes = Max(1, sig.args().length()) * sizeof(Value); unsigned framePushed = StackDecrementForCall(masm, ABIStackAlignment, argOffset + argBytes); - AsmJSProfilingOffsets offsets; - GenerateAsmJSExitPrologue(masm, framePushed, ExitReason::Slow, &offsets); + GenerateExitPrologue(masm, framePushed, ExitReason::ImportInterp, offsets); // Fill the argument array. unsigned offsetToCallerStackArgs = sizeof(AsmJSFrame) + masm.framePushed(); Register scratch = ABIArgGenerator::NonArgReturnReg0; - FillArgumentArray(masm, exit.sig().args(), argOffset, offsetToCallerStackArgs, scratch); + FillArgumentArray(masm, sig.args(), argOffset, offsetToCallerStackArgs, scratch); - // Prepare the arguments for the call to InvokeFromAsmJS_*. + // Prepare the arguments for the call to InvokeImport_*. 
ABIArgMIRTypeIter i(invokeArgTypes); - // argument 0: exitIndex + // argument 0: importIndex if (i->kind() == ABIArg::GPR) - masm.mov(ImmWord(exitIndex), i->gpr()); + masm.mov(ImmWord(importIndex), i->gpr()); else - masm.store32(Imm32(exitIndex), Address(masm.getStackPointer(), i->offsetFromArgBase())); + masm.store32(Imm32(importIndex), Address(masm.getStackPointer(), i->offsetFromArgBase())); i++; // argument 1: argc - unsigned argc = exit.sig().args().length(); + unsigned argc = sig.args().length(); if (i->kind() == ABIArg::GPR) masm.mov(ImmWord(argc), i->gpr()); else @@ -531,13 +415,13 @@ GenerateInterpExit(MacroAssembler& masm, AsmJSModule& module, unsigned exitIndex // Make the call, test whether it succeeded, and extract the return value. AssertStackAlignment(masm, ABIStackAlignment); - switch (exit.sig().ret()) { + switch (sig.ret()) { case ExprType::Void: - masm.call(SymbolicAddress::InvokeFromAsmJS_Ignore); + masm.call(SymbolicAddress::InvokeImport_Void); masm.branchTest32(Assembler::Zero, ReturnReg, ReturnReg, throwLabel); break; case ExprType::I32: - masm.call(SymbolicAddress::InvokeFromAsmJS_ToInt32); + masm.call(SymbolicAddress::InvokeImport_I32); masm.branchTest32(Assembler::Zero, ReturnReg, ReturnReg, throwLabel); masm.unboxInt32(argv, ReturnReg); break; @@ -546,7 +430,7 @@ GenerateInterpExit(MacroAssembler& masm, AsmJSModule& module, unsigned exitIndex case ExprType::F32: MOZ_CRASH("Float32 shouldn't be returned from a FFI"); case ExprType::F64: - masm.call(SymbolicAddress::InvokeFromAsmJS_ToNumber); + masm.call(SymbolicAddress::InvokeImport_F64); masm.branchTest32(Assembler::Zero, ReturnReg, ReturnReg, throwLabel); masm.loadDouble(argv, ReturnDoubleReg); break; @@ -558,17 +442,18 @@ GenerateInterpExit(MacroAssembler& masm, AsmJSModule& module, unsigned exitIndex // The heap pointer may have changed during the FFI, so reload it and test // for detachment. - masm.loadAsmJSHeapRegisterFromGlobalData(); - CheckForHeapDetachment(masm, module, ABIArgGenerator::NonReturn_VolatileReg0, onDetached); + if (usesHeap) { + masm.loadAsmJSHeapRegisterFromGlobalData(); + CheckForHeapDetachment(masm, ABIArgGenerator::NonReturn_VolatileReg0, onDetached); + } - GenerateAsmJSExitEpilogue(masm, framePushed, ExitReason::Slow, &offsets); + GenerateExitEpilogue(masm, framePushed, ExitReason::ImportInterp, offsets); if (masm.oom()) return false; - offsets.end = masm.currentOffset(); - exit.initInterpOffset(offsets.begin); - return module.addCodeRange(AsmJSModule::CodeRange::SlowFFI, offsets); + offsets->end = masm.currentOffset(); + return true; } #if defined(JS_CODEGEN_ARM) || defined(JS_CODEGEN_MIPS32) || defined(JS_CODEGEN_MIPS64) @@ -578,34 +463,35 @@ static const unsigned MaybeSavedGlobalReg = 0; #endif // Generate a stub that is called via the internal ABI derived from the -// signature of the exit and calls into a compatible Ion-compiled JIT function, -// having boxed all the ABI arguments into the Ion stack frame layout. +// signature of the import and calls into a compatible JIT function, +// having boxed all the ABI arguments into the JIT stack frame layout. 
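One worked instance of the frame arithmetic that follows, using illustrative x64 sizes (sizeof(void*) == sizeof(Value) == 8, JitStackAlignment == 16):

    // | descriptor | callee | argc | this | arg1 | arg2 |   for a 2-arg import:
    unsigned jitFrameBytes = 3 * 8 + (1 + 2) * 8;            // == 48
    unsigned totalJitFrameBytes = 8 /* retaddr */ + 48 + 0;  // MaybeSavedGlobalReg == 0
    // StackDecrementForCall rounds 56 up to the next 16-byte boundary (64),
    // then sizeOfRetAddr is subtracted, so jitFramePushed == 56 and
    // (sp + sizeof(void*)) is JitStackAlignment-aligned after the prologue.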
static bool -GenerateIonExit(MacroAssembler& masm, AsmJSModule& module, unsigned exitIndex, - Label* throwLabel, Label* onDetached) +GenerateJitExitStub(ModuleGenerator& mg, unsigned importIndex, Module::HeapBool usesHeap, + Label* throwLabel, Label* onDetached, ProfilingOffsets* offsets) { - AsmJSModule::Exit& exit = module.exit(exitIndex); + MacroAssembler& masm = mg.masm(); + const MallocSig& sig = mg.importSig(importIndex); masm.setFramePushed(0); - // Ion calls use the following stack layout (sp grows to the left): + // JIT calls use the following stack layout (sp grows to the left): // | retaddr | descriptor | callee | argc | this | arg1..N | - // After the Ion frame, the global register (if present) is saved since Ion - // does not preserve non-volatile regs. Also, unlike most ABIs, Ion requires - // that sp be JitStackAlignment-aligned *after* pushing the return address. + // After the JIT frame, the global register (if present) is saved since the + // JIT's ABI does not preserve non-volatile regs. Also, unlike most ABIs, + // the JIT ABI requires that sp be JitStackAlignment-aligned *after* pushing + // the return address. static_assert(AsmJSStackAlignment >= JitStackAlignment, "subsumes"); unsigned sizeOfRetAddr = sizeof(void*); - unsigned ionFrameBytes = 3 * sizeof(void*) + (1 + exit.sig().args().length()) * sizeof(Value); - unsigned totalIonBytes = sizeOfRetAddr + ionFrameBytes + MaybeSavedGlobalReg; - unsigned ionFramePushed = StackDecrementForCall(masm, JitStackAlignment, totalIonBytes) - + unsigned jitFrameBytes = 3 * sizeof(void*) + (1 + sig.args().length()) * sizeof(Value); + unsigned totalJitFrameBytes = sizeOfRetAddr + jitFrameBytes + MaybeSavedGlobalReg; + unsigned jitFramePushed = StackDecrementForCall(masm, JitStackAlignment, totalJitFrameBytes) - sizeOfRetAddr; - AsmJSProfilingOffsets offsets; - GenerateAsmJSExitPrologue(masm, ionFramePushed, ExitReason::Jit, &offsets); + GenerateExitPrologue(masm, jitFramePushed, ExitReason::ImportJit, offsets); // 1. Descriptor size_t argOffset = 0; - uint32_t descriptor = MakeFrameDescriptor(ionFramePushed, JitFrame_Entry); + uint32_t descriptor = MakeFrameDescriptor(jitFramePushed, JitFrame_Entry); masm.storePtr(ImmWord(uintptr_t(descriptor)), Address(masm.getStackPointer(), argOffset)); argOffset += sizeof(size_t); @@ -614,17 +500,18 @@ GenerateIonExit(MacroAssembler& masm, AsmJSModule& module, unsigned exitIndex, Register scratch = ABIArgGenerator::NonArgReturnReg1; // repeatedly clobbered // 2.1. Get ExitDatum - unsigned globalDataOffset = module.exit(exitIndex).globalDataOffset(); + unsigned globalDataOffset = mg.importExitGlobalDataOffset(importIndex); #if defined(JS_CODEGEN_X64) masm.append(AsmJSGlobalAccess(masm.leaRipRelative(callee), globalDataOffset)); #elif defined(JS_CODEGEN_X86) masm.append(AsmJSGlobalAccess(masm.movlWithPatch(Imm32(0), callee), globalDataOffset)); -#elif defined(JS_CODEGEN_ARM) || defined(JS_CODEGEN_MIPS32) || defined(JS_CODEGEN_MIPS64) +#elif defined(JS_CODEGEN_ARM) || defined(JS_CODEGEN_ARM64) || \ + defined(JS_CODEGEN_MIPS32) || defined(JS_CODEGEN_MIPS64) masm.computeEffectiveAddress(Address(GlobalReg, globalDataOffset - AsmJSGlobalRegBias), callee); #endif // 2.2. Get callee - masm.loadPtr(Address(callee, offsetof(AsmJSModule::ExitDatum, fun)), callee); + masm.loadPtr(Address(callee, Module::OffsetOfImportExitFun), callee); // 2.3. 
Save callee masm.storePtr(callee, Address(masm.getStackPointer(), argOffset)); @@ -635,7 +522,7 @@ GenerateIonExit(MacroAssembler& masm, AsmJSModule& module, unsigned exitIndex, masm.loadBaselineOrIonNoArgCheck(callee, callee, nullptr); // 3. Argc - unsigned argc = exit.sig().args().length(); + unsigned argc = sig.args().length(); masm.storePtr(ImmWord(uintptr_t(argc)), Address(masm.getStackPointer(), argOffset)); argOffset += sizeof(size_t); @@ -644,10 +531,10 @@ GenerateIonExit(MacroAssembler& masm, AsmJSModule& module, unsigned exitIndex, argOffset += sizeof(Value); // 5. Fill the arguments - unsigned offsetToCallerStackArgs = ionFramePushed + sizeof(AsmJSFrame); - FillArgumentArray(masm, exit.sig().args(), argOffset, offsetToCallerStackArgs, scratch); - argOffset += exit.sig().args().length() * sizeof(Value); - MOZ_ASSERT(argOffset == ionFrameBytes); + unsigned offsetToCallerStackArgs = jitFramePushed + sizeof(AsmJSFrame); + FillArgumentArray(masm, sig.args(), argOffset, offsetToCallerStackArgs, scratch); + argOffset += sig.args().length() * sizeof(Value); + MOZ_ASSERT(argOffset == jitFrameBytes); // 6. Jit code will clobber all registers, even non-volatiles. GlobalReg and // HeapReg are removed from the general register set for asm.js code, so @@ -657,7 +544,7 @@ GenerateIonExit(MacroAssembler& masm, AsmJSModule& module, unsigned exitIndex, // heap may change during the FFI call. #if defined(JS_CODEGEN_ARM) || defined(JS_CODEGEN_MIPS32) || defined(JS_CODEGEN_MIPS64) static_assert(MaybeSavedGlobalReg == sizeof(void*), "stack frame accounting"); - masm.storePtr(GlobalReg, Address(masm.getStackPointer(), ionFrameBytes)); + masm.storePtr(GlobalReg, Address(masm.getStackPointer(), jitFrameBytes)); #endif { @@ -770,13 +657,13 @@ GenerateIonExit(MacroAssembler& masm, AsmJSModule& module, unsigned exitIndex, masm.storePtr(reg2, Address(reg0, offsetOfJitActivation)); } - // Reload the global register since Ion code can clobber any register. + // Reload the global register since JIT code can clobber any register. #if defined(JS_CODEGEN_ARM) || defined(JS_CODEGEN_MIPS32) || defined(JS_CODEGEN_MIPS64) static_assert(MaybeSavedGlobalReg == sizeof(void*), "stack frame accounting"); - masm.loadPtr(Address(masm.getStackPointer(), ionFrameBytes), GlobalReg); + masm.loadPtr(Address(masm.getStackPointer(), jitFrameBytes), GlobalReg); #endif - // As explained above, the frame was aligned for Ion such that + // As explained above, the frame was aligned for the JIT ABI such that // (sp + sizeof(void*)) % JitStackAlignment == 0 // But now we possibly want to call one of several different C++ functions, // so subtract the sizeof(void*) so that sp is aligned for an ABI call. 
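Spelling out that alignment step with the same illustrative x64 values (JitStackAlignment == ABIStackAlignment == 16):

    // JIT invariant at this point:         (sp + 8) % 16 == 0, so sp % 16 == 8
    // after subtracting sizeOfRetAddr (8):  sp % 16 == 0
    // which is the alignment AssertStackAlignment(masm, ABIStackAlignment)
    // demands before calling a CoerceInPlace_* helper in the oolConvert path.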
@@ -788,7 +675,7 @@ GenerateIonExit(MacroAssembler& masm, AsmJSModule& module, unsigned exitIndex, masm.branchTestMagic(Assembler::Equal, JSReturnOperand, throwLabel); Label oolConvert; - switch (exit.sig().ret()) { + switch (sig.ret()) { case ExprType::Void: break; case ExprType::I32: @@ -798,25 +685,27 @@ GenerateIonExit(MacroAssembler& masm, AsmJSModule& module, unsigned exitIndex, case ExprType::I64: MOZ_CRASH("no int64 in asm.js"); case ExprType::F32: - MOZ_CRASH("Float shouldn't be returned from a FFI"); + MOZ_CRASH("Float shouldn't be returned from an import"); case ExprType::F64: masm.convertValueToDouble(JSReturnOperand, ReturnDoubleReg, &oolConvert); break; case ExprType::I32x4: case ExprType::F32x4: case ExprType::B32x4: - MOZ_CRASH("SIMD types shouldn't be returned from a FFI"); + MOZ_CRASH("SIMD types shouldn't be returned from an import"); } Label done; masm.bind(&done); - // The heap pointer has to be reloaded anyway since Ion could have clobbered - // it. Additionally, the FFI may have detached the heap buffer. - masm.loadAsmJSHeapRegisterFromGlobalData(); - CheckForHeapDetachment(masm, module, ABIArgGenerator::NonReturn_VolatileReg0, onDetached); + // The heap pointer has to be reloaded anyway since JIT code could have + // clobbered it. Additionally, the import may have detached the heap buffer. + if (usesHeap) { + masm.loadAsmJSHeapRegisterFromGlobalData(); + CheckForHeapDetachment(masm, ABIArgGenerator::NonReturn_VolatileReg0, onDetached); + } - GenerateAsmJSExitEpilogue(masm, masm.framePushed(), ExitReason::Jit, &offsets); + GenerateExitEpilogue(masm, masm.framePushed(), ExitReason::ImportJit, offsets); if (oolConvert.used()) { masm.bind(&oolConvert); @@ -847,7 +736,7 @@ GenerateIonExit(MacroAssembler& masm, AsmJSModule& module, unsigned exitIndex, // Call coercion function AssertStackAlignment(masm, ABIStackAlignment); - switch (exit.sig().ret()) { + switch (sig.ret()) { case ExprType::I32: masm.call(SymbolicAddress::CoerceInPlace_ToInt32); masm.branchTest32(Assembler::Zero, ReturnReg, ReturnReg, throwLabel); @@ -871,9 +760,8 @@ GenerateIonExit(MacroAssembler& masm, AsmJSModule& module, unsigned exitIndex, if (masm.oom()) return false; - offsets.end = masm.currentOffset(); - exit.initJitOffset(offsets.begin); - return module.addCodeRange(AsmJSModule::CodeRange::JitFFI, offsets); + offsets->end = masm.currentOffset(); + return true; } // Generate a stub that is called when returning from an exit where the module's @@ -881,11 +769,12 @@ GenerateIonExit(MacroAssembler& masm, AsmJSModule& module, unsigned exitIndex, // exception and then jumps to the generic throw stub to pop everything off the // stack. static bool -GenerateOnDetachedExit(MacroAssembler& masm, AsmJSModule& module, Label* onDetached, - Label* throwLabel) +GenerateOnDetachedStub(ModuleGenerator& mg, Label* onDetached, Label* throwLabel) { + MacroAssembler& masm = mg.masm(); + masm.haltingAlign(CodeAlignment); - AsmJSOffsets offsets; + Offsets offsets; offsets.begin = masm.currentOffset(); masm.bind(onDetached); @@ -898,17 +787,19 @@ GenerateOnDetachedExit(MacroAssembler& masm, AsmJSModule& module, Label* onDetac return false; offsets.end = masm.currentOffset(); - return module.addCodeRange(AsmJSModule::CodeRange::Inline, offsets); + return mg.defineInlineStub(offsets); } // Generate a stub that is called immediately after the prologue when there is a // stack overflow. This stub calls a C++ function to report the error and then // jumps to the throw stub to pop the activation. 
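The inline stubs from here on share a single skeleton; only the label being bound, the C++ function being called, and the registration call vary. Schematically (a composite of the stubs below, not a verbatim quote of any one of them):

    MacroAssembler& masm = mg.masm();
    masm.haltingAlign(CodeAlignment);
    Offsets offsets;
    offsets.begin = masm.currentOffset();
    masm.bind(label);                 // e.g. masm.asmOnOutOfBoundsLabel()
    // ... realign sp, call the reporting C++ function, jump to throwLabel ...
    if (masm.oom())
        return false;
    offsets.end = masm.currentOffset();
    return mg.defineInlineStub(offsets);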
static bool -GenerateStackOverflowExit(MacroAssembler& masm, AsmJSModule& module, Label* throwLabel) +GenerateStackOverflowStub(ModuleGenerator& mg, Label* throwLabel) { + MacroAssembler& masm = mg.masm(); + masm.haltingAlign(CodeAlignment); - AsmJSOffsets offsets; + Offsets offsets; offsets.begin = masm.currentOffset(); masm.bind(masm.asmStackOverflowLabel()); @@ -935,43 +826,47 @@ GenerateStackOverflowExit(MacroAssembler& masm, AsmJSModule& module, Label* thro return false; offsets.end = masm.currentOffset(); - return module.addCodeRange(AsmJSModule::CodeRange::Inline, offsets); + return mg.defineInlineStub(offsets); } // Generate a stub that is called from the synchronous, inline interrupt checks // when the interrupt flag is set. This stub calls the C++ function to handle // the interrupt which returns whether execution has been interrupted. static bool -GenerateSyncInterruptExit(MacroAssembler& masm, AsmJSModule& module, Label* throwLabel) +GenerateSyncInterruptStub(ModuleGenerator& mg, Label* throwLabel) { + MacroAssembler& masm = mg.masm(); + masm.setFramePushed(0); unsigned framePushed = StackDecrementForCall(masm, ABIStackAlignment, ShadowStackSpace); - AsmJSProfilingOffsets offsets; - GenerateAsmJSExitPrologue(masm, framePushed, ExitReason::Interrupt, &offsets, - masm.asmSyncInterruptLabel()); + ProfilingOffsets offsets; + GenerateExitPrologue(masm, framePushed, ExitReason::Native, &offsets, + masm.asmSyncInterruptLabel()); AssertStackAlignment(masm, ABIStackAlignment); masm.call(SymbolicAddress::HandleExecutionInterrupt); masm.branchIfFalseBool(ReturnReg, throwLabel); - GenerateAsmJSExitEpilogue(masm, framePushed, ExitReason::Interrupt, &offsets); + GenerateExitEpilogue(masm, framePushed, ExitReason::Native, &offsets); if (masm.oom()) return false; offsets.end = masm.currentOffset(); - return module.addCodeRange(AsmJSModule::CodeRange::Interrupt, offsets); + return mg.defineSyncInterruptStub(offsets); } // Generate a stub that is jumped to when conversion of a return value to the // expected type fails (an imprecise conversion). This stub calls a C++ function // to report the error and then jumps to the throw stub to pop the activation. static bool -GenerateConversionErrorExit(MacroAssembler& masm, AsmJSModule& module, Label* throwLabel) +GenerateConversionErrorStub(ModuleGenerator& mg, Label* throwLabel) { + MacroAssembler& masm = mg.masm(); + masm.haltingAlign(CodeAlignment); - AsmJSOffsets offsets; + Offsets offsets; offsets.begin = masm.currentOffset(); masm.bind(masm.asmOnConversionErrorLabel()); @@ -979,7 +874,7 @@ GenerateConversionErrorExit(MacroAssembler& masm, AsmJSModule& module, Label* th // into C++. We unconditionally jump to throw so don't worry about restoring sp. masm.andToStackPtr(Imm32(~(ABIStackAlignment - 1))); - // OnOutOfBounds always throws. + // OnImpreciseConversion always throws. masm.assertStackAlignment(ABIStackAlignment); masm.call(SymbolicAddress::OnImpreciseConversion); masm.jump(throwLabel); @@ -988,18 +883,19 @@ GenerateConversionErrorExit(MacroAssembler& masm, AsmJSModule& module, Label* th return false; offsets.end = masm.currentOffset(); - module.setOnOutOfBoundsExitOffset(offsets.begin); - return module.addCodeRange(AsmJSModule::CodeRange::Inline, offsets); + return mg.defineInlineStub(offsets); } // Generate a stub that is jumped to from an out-of-bounds heap access when // there are throwing semantics. This stub calls a C++ function to report an // error and then jumps to the throw stub to pop the activation.
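How execution reaches the out-of-bounds stub depends on the compile flags: with CompileArgs::useSignalHandlersForOOB set, no inline bounds check is emitted and the fault handler in WasmSignalHandlers.cpp redirects a faulting pc to outOfBounds(); otherwise the emitted bounds check branches to the stub's label directly. Schematically (illustrative pseudocode, not the emitter's actual output):

    // signal-handler mode: the heap access itself faults on OOB, and
    // HandleFault() resumes at module.outOfBounds() when throwOnOOB() is set
    //
    // explicit-check mode, in effect:
    //   if (index >= heapLength)
    //       goto asmOnOutOfBoundsLabel;
    //   /* perform the heap access */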
static bool -GenerateOutOfBoundsExit(MacroAssembler& masm, AsmJSModule& module, Label* throwLabel) +GenerateOutOfBoundsStub(ModuleGenerator& mg, Label* throwLabel) { + MacroAssembler& masm = mg.masm(); + masm.haltingAlign(CodeAlignment); - AsmJSOffsets offsets; + Offsets offsets; offsets.begin = masm.currentOffset(); masm.bind(masm.asmOnOutOfBoundsLabel()); @@ -1016,8 +912,7 @@ GenerateOutOfBoundsExit(MacroAssembler& masm, AsmJSModule& module, Label* throwL return false; offsets.end = masm.currentOffset(); - module.setOnOutOfBoundsExitOffset(offsets.begin); - return module.addCodeRange(AsmJSModule::CodeRange::Inline, offsets); + return mg.defineOutOfBoundsStub(offsets); } static const LiveRegisterSet AllRegsExceptSP( @@ -1034,10 +929,12 @@ static const LiveRegisterSet AllRegsExceptSP( // after restoring all registers. To hack around this, push the resumePC on the // stack so that it can be popped directly into PC. static bool -GenerateAsyncInterruptExit(MacroAssembler& masm, AsmJSModule& module, Label* throwLabel) +GenerateAsyncInterruptStub(ModuleGenerator& mg, Module::HeapBool usesHeap, Label* throwLabel) { + MacroAssembler& masm = mg.masm(); + masm.haltingAlign(CodeAlignment); - AsmJSOffsets offsets; + Offsets offsets; offsets.begin = masm.currentOffset(); #if defined(JS_CODEGEN_X86) || defined(JS_CODEGEN_X64) @@ -1187,8 +1084,7 @@ GenerateAsyncInterruptExit(MacroAssembler& masm, AsmJSModule& module, Label* thr return false; offsets.end = masm.currentOffset(); - module.setAsyncInterruptOffset(offsets.begin); - return module.addCodeRange(AsmJSModule::CodeRange::Inline, offsets); + return mg.defineAsyncInterruptStub(offsets); } // If an exception is thrown, simply pop all frames (since asm.js does not @@ -1197,10 +1093,12 @@ GenerateAsyncInterruptExit(MacroAssembler& masm, AsmJSModule& module, Label* thr // 2. PopRegsInMask to restore the caller's non-volatile registers. // 3. Return (to CallAsmJS). 
static bool -GenerateThrowStub(MacroAssembler& masm, AsmJSModule& module, Label* throwLabel) +GenerateThrowStub(ModuleGenerator& mg, Label* throwLabel) { + MacroAssembler& masm = mg.masm(); + masm.haltingAlign(CodeAlignment); - AsmJSOffsets offsets; + Offsets offsets; offsets.begin = masm.currentOffset(); masm.bind(throwLabel); @@ -1224,19 +1122,14 @@ GenerateThrowStub(MacroAssembler& masm, AsmJSModule& module, Label* throwLabel) return false; offsets.end = masm.currentOffset(); - return module.addCodeRange(AsmJSModule::CodeRange::Inline, offsets); + return mg.defineInlineStub(offsets); } bool -wasm::GenerateStubs(MacroAssembler& masm, AsmJSModule& module, const FuncOffsetVector& funcOffsets) +wasm::GenerateStubs(ModuleGenerator& mg, Module::HeapBool usesHeap) { - for (unsigned i = 0; i < module.numExportedFunctions(); i++) { - if (!GenerateEntry(masm, module, i, funcOffsets)) - return false; - } - - for (auto builtin : MakeEnumeratedRange(Builtin::Limit)) { - if (!GenerateBuiltinThunk(masm, module, builtin)) + for (unsigned i = 0; i < mg.numDeclaredExports(); i++) { + if (!GenerateEntry(mg, i, usesHeap)) return false; } @@ -1245,45 +1138,51 @@ wasm::GenerateStubs(MacroAssembler& masm, AsmJSModule& module, const FuncOffsetV { Label onDetached; - for (size_t i = 0; i < module.numExits(); i++) { - if (!GenerateInterpExit(masm, module, i, &onThrow, &onDetached)) + for (size_t i = 0; i < mg.numDeclaredImports(); i++) { + ProfilingOffsets interp; + if (!GenerateInterpExitStub(mg, i, usesHeap, &onThrow, &onDetached, &interp)) return false; - if (!GenerateIonExit(masm, module, i, &onThrow, &onDetached)) + + ProfilingOffsets jit; + if (!GenerateJitExitStub(mg, i, usesHeap, &onThrow, &onDetached, &jit)) + return false; + + if (!mg.defineImport(i, interp, jit)) return false; } if (onDetached.used()) { - if (!GenerateOnDetachedExit(masm, module, &onDetached, &onThrow)) + if (!GenerateOnDetachedStub(mg, &onDetached, &onThrow)) return false; } } - if (masm.asmStackOverflowLabel()->used()) { - if (!GenerateStackOverflowExit(masm, module, &onThrow)) + if (mg.masm().asmStackOverflowLabel()->used()) { + if (!GenerateStackOverflowStub(mg, &onThrow)) return false; } - if (masm.asmSyncInterruptLabel()->used()) { - if (!GenerateSyncInterruptExit(masm, module, &onThrow)) + if (mg.masm().asmSyncInterruptLabel()->used()) { + if (!GenerateSyncInterruptStub(mg, &onThrow)) return false; } - if (masm.asmOnConversionErrorLabel()->used()) { - if (!GenerateConversionErrorExit(masm, module, &onThrow)) + if (mg.masm().asmOnConversionErrorLabel()->used()) { + if (!GenerateConversionErrorStub(mg, &onThrow)) return false; } // Generate unconditionally: the out-of-bounds exit may be used later even // if signal handling isn't used for out-of-bounds at the moment. - if (!GenerateOutOfBoundsExit(masm, module, &onThrow)) + if (!GenerateOutOfBoundsStub(mg, &onThrow)) return false; // Generate unconditionally: the async interrupt may be taken at any time. - if (!GenerateAsyncInterruptExit(masm, module, &onThrow)) + if (!GenerateAsyncInterruptStub(mg, usesHeap, &onThrow)) return false; if (onThrow.used()) { - if (!GenerateThrowStub(masm, module, &onThrow)) + if (!GenerateThrowStub(mg, &onThrow)) return false; } diff --git a/js/src/asmjs/WasmStubs.h b/js/src/asmjs/WasmStubs.h index 97b9a1c6509..ae947f49647 100644 --- a/js/src/asmjs/WasmStubs.h +++ b/js/src/asmjs/WasmStubs.h @@ -16,23 +16,18 @@ * limitations under the License. 
*/ -#ifndef asmjs_wasm_stubs_h -#define asmjs_wasm_stubs_h +#ifndef wasm_stubs_h +#define wasm_stubs_h -#include "asmjs/Wasm.h" +#include "asmjs/WasmGenerator.h" namespace js { - -class AsmJSModule; -namespace jit { class MacroAssembler; } - namespace wasm { -typedef Vector FuncOffsetVector; - bool -GenerateStubs(jit::MacroAssembler& masm, AsmJSModule& module, const FuncOffsetVector& funcOffsets); +GenerateStubs(ModuleGenerator& mg, Module::HeapBool usesHeap); } // namespace wasm } // namespace js -#endif // asmjs_wasm_stubs_h + +#endif // wasm_stubs_h diff --git a/js/src/asmjs/WasmTypes.cpp b/js/src/asmjs/WasmTypes.cpp new file mode 100644 index 00000000000..c8abd8ad902 --- /dev/null +++ b/js/src/asmjs/WasmTypes.cpp @@ -0,0 +1,292 @@ +/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*- + * vim: set ts=8 sts=4 et sw=4 tw=99: + * + * Copyright 2015 Mozilla Foundation + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "asmjs/WasmTypes.h" + +#include "jslibmath.h" +#include "jsmath.h" + +#include "asmjs/AsmJSModule.h" +#include "js/Conversions.h" +#include "vm/Interpreter.h" + +#include "vm/Stack-inl.h" + +using namespace js; +using namespace js::jit; +using namespace js::wasm; + +#if defined(JS_CODEGEN_ARM) +extern "C" { + +extern MOZ_EXPORT int64_t +__aeabi_idivmod(int, int); + +extern MOZ_EXPORT int64_t +__aeabi_uidivmod(int, int); + +} +#endif + +namespace js { +namespace wasm { + +void +ReportOverRecursed() +{ + JSContext* cx = JSRuntime::innermostAsmJSActivation()->cx(); + ReportOverRecursed(cx); +} + +bool +HandleExecutionInterrupt() +{ + AsmJSActivation* act = JSRuntime::innermostAsmJSActivation(); + act->module().wasm().setInterrupted(true); + bool ret = CheckForInterrupt(act->cx()); + act->module().wasm().setInterrupted(false); + return ret; +} + +} // namespace wasm +} // namespace js + +static void +OnDetached() +{ + // See hasDetachedHeap comment in LinkAsmJS. 
+ JSContext* cx = JSRuntime::innermostAsmJSActivation()->cx(); + JS_ReportErrorNumber(cx, GetErrorMessage, nullptr, JSMSG_OUT_OF_MEMORY); +} + +static void +OnOutOfBounds() +{ + JSContext* cx = JSRuntime::innermostAsmJSActivation()->cx(); + JS_ReportErrorNumber(cx, GetErrorMessage, nullptr, JSMSG_BAD_INDEX); +} + +static void +OnImpreciseConversion() +{ + JSContext* cx = JSRuntime::innermostAsmJSActivation()->cx(); + JS_ReportErrorNumber(cx, GetErrorMessage, nullptr, JSMSG_SIMD_FAILED_CONVERSION); +} + +static int32_t +CoerceInPlace_ToInt32(MutableHandleValue val) +{ + JSContext* cx = JSRuntime::innermostAsmJSActivation()->cx(); + + int32_t i32; + if (!ToInt32(cx, val, &i32)) + return false; + val.set(Int32Value(i32)); + + return true; +} + +static int32_t +CoerceInPlace_ToNumber(MutableHandleValue val) +{ + JSContext* cx = JSRuntime::innermostAsmJSActivation()->cx(); + + double dbl; + if (!ToNumber(cx, val, &dbl)) + return false; + val.set(DoubleValue(dbl)); + + return true; +} + +// Use an int32_t return type instead of bool since bool does not have a +// specified width and the caller is assuming a word-sized return. +static int32_t +InvokeImport_Void(int32_t importIndex, int32_t argc, Value* argv) +{ + AsmJSActivation* activation = JSRuntime::innermostAsmJSActivation(); + JSContext* cx = activation->cx(); + Module& module = activation->module().wasm(); + + RootedValue rval(cx); + return module.callImport(cx, importIndex, argc, argv, &rval); +} + +// Use an int32_t return type instead of bool since bool does not have a +// specified width and the caller is assuming a word-sized return. +static int32_t +InvokeImport_I32(int32_t importIndex, int32_t argc, Value* argv) +{ + AsmJSActivation* activation = JSRuntime::innermostAsmJSActivation(); + JSContext* cx = activation->cx(); + Module& module = activation->module().wasm(); + + RootedValue rval(cx); + if (!module.callImport(cx, importIndex, argc, argv, &rval)) + return false; + + int32_t i32; + if (!ToInt32(cx, rval, &i32)) + return false; + + argv[0] = Int32Value(i32); + return true; +} + +// Use an int32_t return type instead of bool since bool does not have a +// specified width and the caller is assuming a word-sized return. 
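The width concern is concrete: the exit stubs test the full 32-bit return register, as in

    masm.call(SymbolicAddress::InvokeImport_I32);
    masm.branchTest32(Assembler::Zero, ReturnReg, ReturnReg, throwLabel);

so if these functions returned a C++ bool (a single byte on common ABIs), bits 8..31 of ReturnReg would be unspecified and the Zero test could pass or fail on garbage; returning int32_t defines all 32 bits.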
+static int32_t +InvokeImport_F64(int32_t importIndex, int32_t argc, Value* argv) +{ + AsmJSActivation* activation = JSRuntime::innermostAsmJSActivation(); + JSContext* cx = activation->cx(); + Module& module = activation->module().wasm(); + + RootedValue rval(cx); + if (!module.callImport(cx, importIndex, argc, argv, &rval)) + return false; + + double dbl; + if (!ToNumber(cx, rval, &dbl)) + return false; + + argv[0] = DoubleValue(dbl); + return true; +} + +template +static inline void* +FuncCast(F* pf, ABIFunctionType type) +{ + void *pv = JS_FUNC_TO_DATA_PTR(void*, pf); +#ifdef JS_SIMULATOR + pv = Simulator::RedirectNativeFunction(pv, type); +#endif + return pv; +} + +void* +wasm::AddressOf(SymbolicAddress imm, ExclusiveContext* cx) +{ + switch (imm) { + case SymbolicAddress::Runtime: + return cx->runtimeAddressForJit(); + case SymbolicAddress::RuntimeInterruptUint32: + return cx->runtimeAddressOfInterruptUint32(); + case SymbolicAddress::StackLimit: + return cx->stackLimitAddressForJitCode(StackForUntrustedScript); + case SymbolicAddress::ReportOverRecursed: + return FuncCast(wasm::ReportOverRecursed, Args_General0); + case SymbolicAddress::OnDetached: + return FuncCast(OnDetached, Args_General0); + case SymbolicAddress::OnOutOfBounds: + return FuncCast(OnOutOfBounds, Args_General0); + case SymbolicAddress::OnImpreciseConversion: + return FuncCast(OnImpreciseConversion, Args_General0); + case SymbolicAddress::HandleExecutionInterrupt: + return FuncCast(wasm::HandleExecutionInterrupt, Args_General0); + case SymbolicAddress::InvokeImport_Void: + return FuncCast(InvokeImport_Void, Args_General3); + case SymbolicAddress::InvokeImport_I32: + return FuncCast(InvokeImport_I32, Args_General3); + case SymbolicAddress::InvokeImport_F64: + return FuncCast(InvokeImport_F64, Args_General3); + case SymbolicAddress::CoerceInPlace_ToInt32: + return FuncCast(CoerceInPlace_ToInt32, Args_General1); + case SymbolicAddress::CoerceInPlace_ToNumber: + return FuncCast(CoerceInPlace_ToNumber, Args_General1); + case SymbolicAddress::ToInt32: + return FuncCast(JS::ToInt32, Args_Int_Double); +#if defined(JS_CODEGEN_ARM) + case SymbolicAddress::aeabi_idivmod: + return FuncCast(__aeabi_idivmod, Args_General2); + case SymbolicAddress::aeabi_uidivmod: + return FuncCast(__aeabi_uidivmod, Args_General2); + case SymbolicAddress::AtomicCmpXchg: + return FuncCast(js::atomics_cmpxchg_asm_callout, Args_General4); + case SymbolicAddress::AtomicXchg: + return FuncCast(js::atomics_xchg_asm_callout, Args_General3); + case SymbolicAddress::AtomicFetchAdd: + return FuncCast(js::atomics_add_asm_callout, Args_General3); + case SymbolicAddress::AtomicFetchSub: + return FuncCast(js::atomics_sub_asm_callout, Args_General3); + case SymbolicAddress::AtomicFetchAnd: + return FuncCast(js::atomics_and_asm_callout, Args_General3); + case SymbolicAddress::AtomicFetchOr: + return FuncCast(js::atomics_or_asm_callout, Args_General3); + case SymbolicAddress::AtomicFetchXor: + return FuncCast(js::atomics_xor_asm_callout, Args_General3); +#endif + case SymbolicAddress::ModD: + return FuncCast(NumberMod, Args_Double_DoubleDouble); + case SymbolicAddress::SinD: +#ifdef _WIN64 + // Workaround a VS 2013 sin issue, see math_sin_uncached. 
+        return FuncCast(js::math_sin_uncached, Args_Double_Double);
+#else
+        return FuncCast(sin, Args_Double_Double);
+#endif
+      case SymbolicAddress::CosD:
+        return FuncCast(cos, Args_Double_Double);
+      case SymbolicAddress::TanD:
+        return FuncCast(tan, Args_Double_Double);
+      case SymbolicAddress::ASinD:
+        return FuncCast(asin, Args_Double_Double);
+      case SymbolicAddress::ACosD:
+        return FuncCast(acos, Args_Double_Double);
+      case SymbolicAddress::ATanD:
+        return FuncCast(atan, Args_Double_Double);
+      case SymbolicAddress::CeilD:
+        return FuncCast(ceil, Args_Double_Double);
+      case SymbolicAddress::CeilF:
+        return FuncCast(ceilf, Args_Float32_Float32);
+      case SymbolicAddress::FloorD:
+        return FuncCast(floor, Args_Double_Double);
+      case SymbolicAddress::FloorF:
+        return FuncCast(floorf, Args_Float32_Float32);
+      case SymbolicAddress::ExpD:
+        return FuncCast(exp, Args_Double_Double);
+      case SymbolicAddress::LogD:
+        return FuncCast(log, Args_Double_Double);
+      case SymbolicAddress::PowD:
+        return FuncCast(ecmaPow, Args_Double_DoubleDouble);
+      case SymbolicAddress::ATan2D:
+        return FuncCast(ecmaAtan2, Args_Double_DoubleDouble);
+      case SymbolicAddress::Limit:
+        break;
+    }
+
+    MOZ_CRASH("Bad SymbolicAddress");
+}
+
+CompileArgs::CompileArgs(ExclusiveContext* cx)
+  :
+#if defined(ASMJS_MAY_USE_SIGNAL_HANDLERS_FOR_OOB)
+    useSignalHandlersForOOB(cx->canUseSignalHandlers()),
+#else
+    useSignalHandlersForOOB(false),
+#endif
+    useSignalHandlersForInterrupt(cx->canUseSignalHandlers())
+{}
+
+bool
+CompileArgs::operator==(CompileArgs rhs) const
+{
+    return useSignalHandlersForOOB == rhs.useSignalHandlersForOOB &&
+           useSignalHandlersForInterrupt == rhs.useSignalHandlersForInterrupt;
+}
diff --git a/js/src/asmjs/Wasm.h b/js/src/asmjs/WasmTypes.h
similarity index 81%
rename from js/src/asmjs/Wasm.h
rename to js/src/asmjs/WasmTypes.h
index c47e1537e60..1bf56a33757 100644
--- a/js/src/asmjs/Wasm.h
+++ b/js/src/asmjs/WasmTypes.h
@@ -16,10 +16,13 @@
  * limitations under the License.
  */
 
-#ifndef asmjs_wasm_h
-#define asmjs_wasm_h
+#ifndef wasm_types_h
+#define wasm_types_h
 
+#include "mozilla/DebugOnly.h"
 #include "mozilla/HashFunctions.h"
+#include "mozilla/Move.h"
+#include "mozilla/UniquePtr.h"
 
 #include "ds/LifoAlloc.h"
 #include "jit/IonTypes.h"
@@ -27,9 +30,15 @@
 #include "js/Vector.h"
 
 namespace js {
+
+class PropertyName;
+
 namespace wasm {
 
 using mozilla::Move;
+using mozilla::DebugOnly;
+using mozilla::UniquePtr;
+using mozilla::MallocSizeOf;
 
 // The ValType enum represents the WebAssembly "value type", which are used to
 // specify the type of locals and parameters.
@@ -264,6 +273,76 @@ class LifoSig : public Sig<LifoAllocPolicy<Fallible>>
     }
 };
 
+// The (,Profiling,Func)Offsets classes are used to record the offsets of
+// different key points in a CodeRange during compilation.
+
+struct Offsets
+{
+    MOZ_IMPLICIT Offsets(uint32_t begin = 0, uint32_t end = 0)
+      : begin(begin), end(end)
+    {}
+
+    // These define a [begin, end) contiguous range of instructions compiled
+    // into a CodeRange.
+    uint32_t begin;
+    uint32_t end;
+
+    void offsetBy(uint32_t offset) {
+        begin += offset;
+        end += offset;
+    }
+};
+
+struct ProfilingOffsets : Offsets
+{
+    MOZ_IMPLICIT ProfilingOffsets(uint32_t profilingReturn = 0)
+      : Offsets(), profilingReturn(profilingReturn)
+    {}
+
+    // For CodeRanges with ProfilingOffsets, 'begin' is the offset of the
+    // profiling entry.
+    uint32_t profilingEntry() const { return begin; }
+
+    // The profiling return is the offset of the return instruction, which
+    // precedes the 'end' by a variable number of instructions due to
+    // out-of-line codegen.
+    uint32_t profilingReturn;
+
+    void offsetBy(uint32_t offset) {
+        Offsets::offsetBy(offset);
+        profilingReturn += offset;
+    }
+};
+
+struct FuncOffsets : ProfilingOffsets
+{
+    MOZ_IMPLICIT FuncOffsets(uint32_t nonProfilingEntry = 0,
+                             uint32_t profilingJump = 0,
+                             uint32_t profilingEpilogue = 0)
+      : ProfilingOffsets(),
+        nonProfilingEntry(nonProfilingEntry),
+        profilingJump(profilingJump),
+        profilingEpilogue(profilingEpilogue)
+    {}
+
+    // Function CodeRanges have an additional non-profiling entry that comes
+    // after the profiling entry and a non-profiling epilogue that comes before
+    // the profiling epilogue.
+    uint32_t nonProfilingEntry;
+
+    // When profiling is enabled, the 'nop' at offset 'profilingJump' is
+    // overwritten to be a jump to 'profilingEpilogue'.
+    uint32_t profilingJump;
+    uint32_t profilingEpilogue;
+
+    void offsetBy(uint32_t offset) {
+        ProfilingOffsets::offsetBy(offset);
+        nonProfilingEntry += offset;
+        profilingJump += offset;
+        profilingEpilogue += offset;
+    }
+};
+
 // While the frame-pointer chain allows the stack to be unwound without
 // metadata, Error.stack still needs to know the line/column of every call in
 // the chain. A CallSiteDesc describes a single callsite to which CallSite adds
@@ -438,10 +517,14 @@ class HeapAccess
 {
 
 typedef Vector<HeapAccess, 0, SystemAllocPolicy> HeapAccessVector;
 
-// A wasm::Builtin represents a function implemented by the engine that is
-// called directly from wasm code and should show up in the callstack.
+// A wasm::SymbolicAddress represents a pointer to a well-known function or
+// object that is embedded in wasm code. Since wasm code is serialized and
+// later deserialized into a different address space, symbolic addresses must be
+// used for *all* pointers into the address space. The MacroAssembler records a
+// list of all SymbolicAddresses and the offsets of their use in the code for
+// later patching during static linking.
-enum class Builtin : uint16_t
+enum class SymbolicAddress
 {
     ToInt32,
 #if defined(JS_CODEGEN_ARM)
@@ -470,45 +553,6 @@ enum class SymbolicAddress
     LogD,
     PowD,
     ATan2D,
-    Limit
-};
-
-// A wasm::SymbolicAddress represents a pointer to a well-known function or
-// object that is embedded in wasm code. Since wasm code is serialized and
-// later deserialized into a different address space, symbolic addresses must be
-// used for *all* pointers into the address space. The MacroAssembler records a
-// list of all SymbolicAddresses and the offsets of their use in the code for
-// later patching during static linking.
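An aside on the Offsets/ProfilingOffsets/FuncOffsets hierarchy added above: each offsetBy() hides, rather than virtually overrides, the base-class method, so rebasing must go through the most-derived static type. A minimal self-contained sketch of that usage (the structs are abbreviated here, values illustrative):

    #include <cassert>
    #include <cstdint>

    struct Offsets
    {
        uint32_t begin = 0;
        uint32_t end = 0;
        void offsetBy(uint32_t delta) { begin += delta; end += delta; }
    };

    struct ProfilingOffsets : Offsets
    {
        uint32_t profilingReturn = 0;
        // Hides (does not virtually override) Offsets::offsetBy.
        void offsetBy(uint32_t delta) {
            Offsets::offsetBy(delta);
            profilingReturn += delta;
        }
    };

    int main()
    {
        // Offsets recorded while compiling one function into a scratch buffer.
        ProfilingOffsets fn;
        fn.begin = 0;
        fn.profilingReturn = 36;
        fn.end = 40;

        // Rebase once the code is copied to its final module-image position.
        fn.offsetBy(0x1000);
        assert(fn.begin == 0x1000 && fn.profilingReturn == 0x1024 && fn.end == 0x1028);
        return 0;
    }
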
- -enum class SymbolicAddress -{ - ToInt32 = unsigned(Builtin::ToInt32), -#if defined(JS_CODEGEN_ARM) - aeabi_idivmod = unsigned(Builtin::aeabi_idivmod), - aeabi_uidivmod = unsigned(Builtin::aeabi_uidivmod), - AtomicCmpXchg = unsigned(Builtin::AtomicCmpXchg), - AtomicXchg = unsigned(Builtin::AtomicXchg), - AtomicFetchAdd = unsigned(Builtin::AtomicFetchAdd), - AtomicFetchSub = unsigned(Builtin::AtomicFetchSub), - AtomicFetchAnd = unsigned(Builtin::AtomicFetchAnd), - AtomicFetchOr = unsigned(Builtin::AtomicFetchOr), - AtomicFetchXor = unsigned(Builtin::AtomicFetchXor), -#endif - ModD = unsigned(Builtin::ModD), - SinD = unsigned(Builtin::SinD), - CosD = unsigned(Builtin::CosD), - TanD = unsigned(Builtin::TanD), - ASinD = unsigned(Builtin::ASinD), - ACosD = unsigned(Builtin::ACosD), - ATanD = unsigned(Builtin::ATanD), - CeilD = unsigned(Builtin::CeilD), - CeilF = unsigned(Builtin::CeilF), - FloorD = unsigned(Builtin::FloorD), - FloorF = unsigned(Builtin::FloorF), - ExpD = unsigned(Builtin::ExpD), - LogD = unsigned(Builtin::LogD), - PowD = unsigned(Builtin::PowD), - ATan2D = unsigned(Builtin::ATan2D), Runtime, RuntimeInterruptUint32, StackLimit, @@ -517,80 +561,41 @@ enum class SymbolicAddress OnOutOfBounds, OnImpreciseConversion, HandleExecutionInterrupt, - InvokeFromAsmJS_Ignore, - InvokeFromAsmJS_ToInt32, - InvokeFromAsmJS_ToNumber, + InvokeImport_Void, + InvokeImport_I32, + InvokeImport_F64, CoerceInPlace_ToInt32, CoerceInPlace_ToNumber, Limit }; -static inline SymbolicAddress -BuiltinToImmediate(Builtin b) +void* +AddressOf(SymbolicAddress imm, ExclusiveContext* cx); + +// The CompileArgs struct captures global parameters that affect all wasm code +// generation. It also currently is the single source of truth for whether or +// not to use signal handlers for different purposes. + +struct CompileArgs { - return SymbolicAddress(b); -} + bool useSignalHandlersForOOB; + bool useSignalHandlersForInterrupt; -static inline bool -ImmediateIsBuiltin(SymbolicAddress imm, Builtin* builtin) -{ - if (uint32_t(imm) < uint32_t(Builtin::Limit)) { - *builtin = Builtin(imm); - return true; - } - return false; -} - -// An ExitReason describes the possible reasons for leaving compiled wasm code -// or the state of not having left compiled wasm code (ExitReason::None). - -class ExitReason -{ - public: - // List of reasons for execution leaving compiled wasm code (or None, if - // control hasn't exited). 
-    enum Kind
-    {
-        None,       // default state, the pc is in wasm code
-        Jit,        // fast-path exit to JIT code
-        Slow,       // general case exit to C++ Invoke
-        Interrupt,  // executing an interrupt callback
-        Builtin     // calling into a builtin (native) function
-    };
-
-  private:
-    Kind kind_;
-    wasm::Builtin builtin_;
-
-  public:
-    ExitReason() = default;
-    MOZ_IMPLICIT ExitReason(Kind kind) : kind_(kind) { MOZ_ASSERT(kind != Builtin); }
-    MOZ_IMPLICIT ExitReason(wasm::Builtin builtin) : kind_(Builtin), builtin_(builtin) {}
-    Kind kind() const { return kind_; }
-    wasm::Builtin builtin() const { MOZ_ASSERT(kind_ == Builtin); return builtin_; }
-
-    uint32_t pack() const {
-        static_assert(sizeof(wasm::Builtin) == 2, "fits");
-        return uint16_t(kind_) | (uint16_t(builtin_) << 16);
-    }
-    static ExitReason unpack(uint32_t u32) {
-        static_assert(sizeof(wasm::Builtin) == 2, "fits");
-        ExitReason r;
-        r.kind_ = Kind(uint16_t(u32));
-        r.builtin_ = wasm::Builtin(uint16_t(u32 >> 16));
-        return r;
-    }
+    CompileArgs() = default;
+    explicit CompileArgs(ExclusiveContext* cx);
+    bool operator==(CompileArgs rhs) const;
+    bool operator!=(CompileArgs rhs) const { return !(*this == rhs); }
 };
 
-// A hoisting of constants that would otherwise require #including WasmModule.h
-// everywhere. Values are asserted in WasmModule.h.
+// Constants:
 
 static const unsigned ActivationGlobalDataOffset = 0;
-static const unsigned HeapGlobalDataOffset = sizeof(void*);
-static const unsigned NaN64GlobalDataOffset = 2 * sizeof(void*);
-static const unsigned NaN32GlobalDataOffset = 2 * sizeof(void*) + sizeof(double);
+static const unsigned HeapGlobalDataOffset = ActivationGlobalDataOffset + sizeof(void*);
+static const unsigned NaN64GlobalDataOffset = HeapGlobalDataOffset + sizeof(void*);
+static const unsigned NaN32GlobalDataOffset = NaN64GlobalDataOffset + sizeof(double);
+static const unsigned InitialGlobalDataBytes = NaN32GlobalDataOffset + sizeof(float);
 
 } // namespace wasm
 } // namespace js
 
-#endif // asmjs_wasm_h
+#endif // wasm_types_h
diff --git a/js/src/builtin/AtomicsObject.cpp b/js/src/builtin/AtomicsObject.cpp
index bd257d8d1c0..857a77cdc42 100644
--- a/js/src/builtin/AtomicsObject.cpp
+++ b/js/src/builtin/AtomicsObject.cpp
@@ -523,9 +523,9 @@ static void
 GetCurrentAsmJSHeap(SharedMem<void*>* heap, size_t* length)
 {
     JSRuntime* rt = js::TlsPerThreadData.get()->runtimeFromMainThread();
-    AsmJSModule& mod = rt->asmJSActivationStack()->module();
-    *heap = mod.maybeHeap().cast<void*>();
-    *length = mod.heapLength();
+    wasm::Module& module = rt->asmJSActivationStack()->module().wasm();
+    *heap = module.maybeHeap().cast<void*>();
+    *length = module.heapLength();
 }
 
 int32_t
diff --git a/js/src/builtin/WeakSetObject.cpp b/js/src/builtin/WeakSetObject.cpp
index 6ae866c236d..6a55de06788 100644
--- a/js/src/builtin/WeakSetObject.cpp
+++ b/js/src/builtin/WeakSetObject.cpp
@@ -11,6 +11,7 @@
 #include "jsiter.h"
 
 #include "builtin/SelfHostingDefines.h"
+#include "builtin/WeakMapObject.h"
 #include "vm/GlobalObject.h"
 #include "vm/SelfHosting.h"
 
diff --git a/js/src/frontend/ParseNode.h b/js/src/frontend/ParseNode.h
index ebbb1e69953..67cfee52055 100644
--- a/js/src/frontend/ParseNode.h
+++ b/js/src/frontend/ParseNode.h
@@ -9,6 +9,7 @@
 
 #include "mozilla/Attributes.h"
 
+#include "builtin/ModuleObject.h"
 #include "frontend/TokenStream.h"
 
 namespace js {
diff --git a/js/src/jit-test/tests/asm.js/testProfiling.js b/js/src/jit-test/tests/asm.js/testProfiling.js
index 664f5ef43be..8dc41080c4c 100644
--- a/js/src/jit-test/tests/asm.js/testProfiling.js
+++ 
b/js/src/jit-test/tests/asm.js/testProfiling.js @@ -108,11 +108,12 @@ function testBuiltinD2D(name) { enableSingleStepProfiling(); assertEq(f(.1), eval("Math." + name + "(.1)")); var stacks = disableSingleStepProfiling(); - assertStackContainsSeq(stacks, ">,f,>,Math." + name + ",f,>,f,>,>"); + assertStackContainsSeq(stacks, ">,f,>,native call,>,f,>,>"); } } for (name of ['sin', 'cos', 'tan', 'asin', 'acos', 'atan', 'ceil', 'floor', 'exp', 'log']) testBuiltinD2D(name); + function testBuiltinF2F(name) { var m = asmCompile('g', USE_ASM + "var tof=g.Math.fround; var fun=g.Math." + name + "; function f(d) { d=tof(d); return tof(fun(d)) } return f"); for (var i = 0; i < 3; i++) { @@ -120,11 +121,12 @@ function testBuiltinF2F(name) { enableSingleStepProfiling(); assertEq(f(.1), eval("Math.fround(Math." + name + "(Math.fround(.1)))")); var stacks = disableSingleStepProfiling(); - assertStackContainsSeq(stacks, ">,f,>,Math." + name + ",f,>,f,>,>"); + assertStackContainsSeq(stacks, ">,f,>,native call,>,f,>,>"); } } for (name of ['ceil', 'floor']) testBuiltinF2F(name); + function testBuiltinDD2D(name) { var m = asmCompile('g', USE_ASM + "var fun=g.Math." + name + "; function f(d, e) { d=+d; e=+e; return +fun(d,e) } return f"); for (var i = 0; i < 3; i++) { @@ -132,7 +134,7 @@ function testBuiltinDD2D(name) { enableSingleStepProfiling(); assertEq(f(.1, .2), eval("Math." + name + "(.1, .2)")); var stacks = disableSingleStepProfiling(); - assertStackContainsSeq(stacks, ">,f,>,Math." + name + ",f,>,f,>,>"); + assertStackContainsSeq(stacks, ">,f,>,native call,>,f,>,>"); } } for (name of ['atan2', 'pow']) diff --git a/js/src/jit/BaselineJIT.cpp b/js/src/jit/BaselineJIT.cpp index 6b2e90a9f27..b2fb0183adb 100644 --- a/js/src/jit/BaselineJIT.cpp +++ b/js/src/jit/BaselineJIT.cpp @@ -8,7 +8,7 @@ #include "mozilla/MemoryReporting.h" -#include "asmjs/AsmJSModule.h" +#include "asmjs/WasmModule.h" #include "jit/BaselineCompiler.h" #include "jit/BaselineIC.h" #include "jit/CompileInfo.h" @@ -51,7 +51,7 @@ BaselineScript::BaselineScript(uint32_t prologueOffset, uint32_t epilogueOffset, : method_(nullptr), templateScope_(nullptr), fallbackStubSpace_(), - dependentAsmJSModules_(nullptr), + dependentWasmModules_(nullptr), prologueOffset_(prologueOffset), epilogueOffset_(epilogueOffset), profilerEnterToggleOffset_(profilerEnterToggleOffset), @@ -485,60 +485,57 @@ BaselineScript::Destroy(FreeOp* fop, BaselineScript* script) MOZ_ASSERT(!script->hasPendingIonBuilder()); - script->unlinkDependentAsmJSModules(fop); + script->unlinkDependentWasmModules(fop); fop->delete_(script); } void -BaselineScript::clearDependentAsmJSModules() +BaselineScript::clearDependentWasmModules() { - // Remove any links from AsmJSModules that contain optimized FFI calls into + // Remove any links from wasm::Modules that contain optimized import calls into // this BaselineScript. 
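A compressed sketch of the bookkeeping that the clear/unlink/add/remove functions in this hunk implement: each record names a module and the import index whose optimized exit jumps into this script, and teardown walks the list restoring the generic path. ModuleStub and its members are illustrative stand-ins, not the real wasm::Module API:

    #include <cstdint>
    #include <vector>

    struct ModuleStub
    {
        std::vector<bool> importOptimized;
        void deoptimizeImportExit(uint32_t importIndex) {
            // Route this import back through the generic (slow) exit stub.
            importOptimized[importIndex] = false;
        }
    };

    struct Dependent
    {
        ModuleStub* module;
        uint32_t importIndex;
    };

    static void
    ClearDependents(std::vector<Dependent>& deps)
    {
        for (const Dependent& dep : deps)
            dep.module->deoptimizeImportExit(dep.importIndex);
        deps.clear();
    }

    int main()
    {
        ModuleStub m{std::vector<bool>(4, true)};
        std::vector<Dependent> deps{{&m, 2}};
        ClearDependents(deps);
        return m.importOptimized[2] ? 1 : 0;  // expect 0: import 2 deoptimized
    }
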
-    if (dependentAsmJSModules_) {
-        for (size_t i = 0; i < dependentAsmJSModules_->length(); i++) {
-            DependentAsmJSModuleExit exit = (*dependentAsmJSModules_)[i];
-            exit.module->exit(exit.exitIndex).deoptimize(*exit.module);
-        }
-
-        dependentAsmJSModules_->clear();
+    if (dependentWasmModules_) {
+        for (DependentWasmModuleImport dep : *dependentWasmModules_)
+            dep.module->deoptimizeImportExit(dep.importIndex);
+        dependentWasmModules_->clear();
     }
 }
 
 void
-BaselineScript::unlinkDependentAsmJSModules(FreeOp* fop)
+BaselineScript::unlinkDependentWasmModules(FreeOp* fop)
 {
-    // Remove any links from AsmJSModules that contain optimized FFI calls into
+    // Remove any links from wasm::Modules that contain optimized import calls into
     // this BaselineScript.
-    clearDependentAsmJSModules();
-    if (dependentAsmJSModules_) {
-        fop->delete_(dependentAsmJSModules_);
-        dependentAsmJSModules_ = nullptr;
+    clearDependentWasmModules();
+    if (dependentWasmModules_) {
+        fop->delete_(dependentWasmModules_);
+        dependentWasmModules_ = nullptr;
     }
 }
 
 bool
-BaselineScript::addDependentAsmJSModule(JSContext* cx, DependentAsmJSModuleExit exit)
+BaselineScript::addDependentWasmModule(JSContext* cx, wasm::Module& module, uint32_t importIndex)
 {
-    if (!dependentAsmJSModules_) {
-        dependentAsmJSModules_ = cx->new_<Vector<DependentAsmJSModuleExit> >(cx);
-        if (!dependentAsmJSModules_)
+    if (!dependentWasmModules_) {
+        dependentWasmModules_ = cx->new_<Vector<DependentWasmModuleImport> >(cx);
+        if (!dependentWasmModules_)
             return false;
     }
-    return dependentAsmJSModules_->append(exit);
+    return dependentWasmModules_->emplaceBack(&module, importIndex);
 }
 
 void
-BaselineScript::removeDependentAsmJSModule(DependentAsmJSModuleExit exit)
+BaselineScript::removeDependentWasmModule(wasm::Module& module, uint32_t importIndex)
 {
-    if (!dependentAsmJSModules_)
+    if (!dependentWasmModules_)
         return;
 
-    for (size_t i = 0; i < dependentAsmJSModules_->length(); i++) {
-        if ((*dependentAsmJSModules_)[i].module == exit.module &&
-            (*dependentAsmJSModules_)[i].exitIndex == exit.exitIndex)
+    for (size_t i = 0; i < dependentWasmModules_->length(); i++) {
+        if ((*dependentWasmModules_)[i].module == &module &&
+            (*dependentWasmModules_)[i].importIndex == importIndex)
         {
-            dependentAsmJSModules_->erase(dependentAsmJSModules_->begin() + i);
+            dependentWasmModules_->erase(dependentWasmModules_->begin() + i);
             break;
         }
     }
diff --git a/js/src/jit/BaselineJIT.h b/js/src/jit/BaselineJIT.h
index 08ff74ec1e1..cdcbad2bd47 100644
--- a/js/src/jit/BaselineJIT.h
+++ b/js/src/jit/BaselineJIT.h
@@ -94,16 +94,16 @@ struct PCMappingIndexEntry
     uint32_t bufferOffset;
 };
 
-// Describes a single AsmJSModule which jumps (via an FFI exit with the given
-// index) directly to a BaselineScript or IonScript.
-struct DependentAsmJSModuleExit
+// Describes a single wasm::Module::ImportExit which jumps (via an import with
+// the given index) directly to a BaselineScript or IonScript.
+struct DependentWasmModuleImport
 {
-    const AsmJSModule* module;
-    size_t exitIndex;
+    wasm::Module* module;
+    size_t importIndex;
 
-    DependentAsmJSModuleExit(const AsmJSModule* module, size_t exitIndex)
+    DependentWasmModuleImport(wasm::Module* module, size_t importIndex)
       : module(module),
-        exitIndex(exitIndex)
+        importIndex(importIndex)
     { }
 };
 
@@ -129,9 +129,9 @@ struct BaselineScript
     // Allocated space for fallback stubs.
     FallbackICStubSpace fallbackStubSpace_;
 
-    // If non-null, the list of AsmJSModules that contain an optimized call
+    // If non-null, the list of wasm::Modules that contain an optimized call
     // directly to this script.
-    Vector<DependentAsmJSModuleExit>* dependentAsmJSModules_;
+    Vector<DependentWasmModuleImport>* dependentWasmModules_;
 
     // Native code offset right before the scope chain is initialized.
     uint32_t prologueOffset_;
@@ -400,10 +400,10 @@ struct BaselineScript
     // the result may not be accurate.
     jsbytecode* approximatePcForNativeAddress(JSScript* script, uint8_t* nativeAddress);
 
-    bool addDependentAsmJSModule(JSContext* cx, DependentAsmJSModuleExit exit);
-    void unlinkDependentAsmJSModules(FreeOp* fop);
-    void clearDependentAsmJSModules();
-    void removeDependentAsmJSModule(DependentAsmJSModuleExit exit);
+    bool addDependentWasmModule(JSContext* cx, wasm::Module& module, uint32_t importIndex);
+    void unlinkDependentWasmModules(FreeOp* fop);
+    void clearDependentWasmModules();
+    void removeDependentWasmModule(wasm::Module& module, uint32_t importIndex);
 
     // Toggle debug traps (used for breakpoints and step mode) in the script.
     // If |pc| is nullptr, toggle traps for all ops in the script. Else, only
@@ -480,7 +480,7 @@ struct BaselineScript
         pendingBuilder_ = builder;
 
         // lazy linking cannot happen during asmjs to ion.
-        clearDependentAsmJSModules();
+        clearDependentWasmModules();
 
         script->updateBaselineOrIonRaw(maybecx);
     }
diff --git a/js/src/jit/CodeGenerator.cpp b/js/src/jit/CodeGenerator.cpp
index 9f089390732..5c708149b6f 100644
--- a/js/src/jit/CodeGenerator.cpp
+++ b/js/src/jit/CodeGenerator.cpp
@@ -161,7 +161,7 @@ CodeGenerator::CodeGenerator(MIRGenerator* gen, LIRGraph* graph, MacroAssembler*
 CodeGenerator::~CodeGenerator()
 {
-    MOZ_ASSERT_IF(!gen->compilingAsmJS(), masm.numAsmJSAbsoluteLinks() == 0);
+    MOZ_ASSERT_IF(!gen->compilingAsmJS(), masm.numAsmJSAbsoluteAddresses() == 0);
     js_delete(scriptCounts_);
 }
 
@@ -7873,11 +7873,11 @@ CodeGenerator::visitRest(LRest* lir)
 }
 
 bool
-CodeGenerator::generateAsmJS(AsmJSFunctionOffsets* offsets)
+CodeGenerator::generateAsmJS(wasm::FuncOffsets* offsets)
 {
     JitSpew(JitSpew_Codegen, "# Emitting asm.js code");
 
-    GenerateAsmJSFunctionPrologue(masm, frameSize(), offsets);
+    wasm::GenerateFunctionPrologue(masm, frameSize(), offsets);
 
     // Overflow checks are omitted by CodeGenerator in some cases (leaf
     // functions with small framePushed). Perform overflow-checking after
@@ -7897,7 +7897,7 @@ CodeGenerator::generateAsmJS(AsmJSFunctionOffsets* offsets)
         return false;
 
     masm.bind(&returnLabel_);
-    GenerateAsmJSFunctionEpilogue(masm, frameSize(), offsets);
+    wasm::GenerateFunctionEpilogue(masm, frameSize(), offsets);
 
     if (onOverflow.used()) {
         // The stack overflow stub assumes that only sizeof(AsmJSFrame) bytes have
diff --git a/js/src/jit/CodeGenerator.h b/js/src/jit/CodeGenerator.h
index d3b035a8ef4..154917a18f2 100644
--- a/js/src/jit/CodeGenerator.h
+++ b/js/src/jit/CodeGenerator.h
@@ -61,7 +61,7 @@ class CodeGenerator : public CodeGeneratorSpecific
 
   public:
     bool generate();
-    bool generateAsmJS(AsmJSFunctionOffsets *offsets);
+    bool generateAsmJS(wasm::FuncOffsets *offsets);
     bool link(JSContext* cx, CompilerConstraintList* constraints);
     bool linkSharedStubs(JSContext* cx);
 
diff --git a/js/src/jit/Ion.cpp b/js/src/jit/Ion.cpp
index 2bb99a18194..53d891b488e 100644
--- a/js/src/jit/Ion.cpp
+++ b/js/src/jit/Ion.cpp
@@ -3290,6 +3290,7 @@ AutoFlushICache::flush(uintptr_t start, size_t len)
     PerThreadData* pt = TlsPerThreadData.get();
     AutoFlushICache* afc = pt ? pt->PerThreadData::autoFlushICache() : nullptr;
     if (!afc) {
+        MOZ_ASSERT(!IsCompilingAsmJS(), "asm.js should always create an AutoFlushICache");
         JitSpewCont(JitSpew_CacheFlush, "#");
         ExecutableAllocator::cacheFlush((void*)start, len);
         MOZ_ASSERT(len <= 32);
@@ -3303,6 +3304,7 @@ AutoFlushICache::flush(uintptr_t start, size_t len)
         return;
     }
 
+    MOZ_ASSERT(!IsCompilingAsmJS(), "asm.js should always flush within the range");
     JitSpewCont(JitSpew_CacheFlush, afc->inhibit_ ? "x" : "*");
     ExecutableAllocator::cacheFlush((void*)start, len);
 #endif
diff --git a/js/src/jit/Linker.h b/js/src/jit/Linker.h
index 8764fe1b627..189c46a1528 100644
--- a/js/src/jit/Linker.h
+++ b/js/src/jit/Linker.h
@@ -38,7 +38,7 @@ class Linker
     template <AllowGC allowGC>
     JitCode* newCode(JSContext* cx, CodeKind kind, bool hasPatchableBackedges = false) {
-        MOZ_ASSERT(masm.numAsmJSAbsoluteLinks() == 0);
+        MOZ_ASSERT(masm.numAsmJSAbsoluteAddresses() == 0);
         MOZ_ASSERT_IF(hasPatchableBackedges, kind == ION_CODE);
 
         gc::AutoSuppressGC suppressGC(cx);
diff --git a/js/src/jit/MIR.h b/js/src/jit/MIR.h
index 732a999d00a..cb2049b429f 100644
--- a/js/src/jit/MIR.h
+++ b/js/src/jit/MIR.h
@@ -13916,17 +13916,17 @@ class MAsmJSCall final
         union {
             AsmJSInternalCallee internal_;
             MDefinition* dynamic_;
-            wasm::Builtin builtin_;
+            wasm::SymbolicAddress builtin_;
         } u;
 
       public:
         Callee() {}
         explicit Callee(AsmJSInternalCallee callee) : which_(Internal) { u.internal_ = callee; }
         explicit Callee(MDefinition* callee) : which_(Dynamic) { u.dynamic_ = callee; }
-        explicit Callee(wasm::Builtin callee) : which_(Builtin) { u.builtin_ = callee; }
+        explicit Callee(wasm::SymbolicAddress callee) : which_(Builtin) { u.builtin_ = callee; }
         Which which() const { return which_; }
         AsmJSInternalCallee internal() const { MOZ_ASSERT(which_ == Internal); return u.internal_; }
         MDefinition* dynamic() const { MOZ_ASSERT(which_ == Dynamic); return u.dynamic_; }
-        wasm::Builtin builtin() const { MOZ_ASSERT(which_ == Builtin); return u.builtin_; }
+        wasm::SymbolicAddress builtin() const { MOZ_ASSERT(which_ == Builtin); return u.builtin_; }
     };
 
   private:
diff --git a/js/src/jit/MIRGraph.cpp b/js/src/jit/MIRGraph.cpp
index d0d8c681d72..bf6bd23d812 100644
--- a/js/src/jit/MIRGraph.cpp
+++ b/js/src/jit/MIRGraph.cpp
@@ -6,7 +6,6 @@
 
 #include "jit/MIRGraph.h"
 
-#include "asmjs/AsmJSValidate.h"
 #include "jit/BytecodeAnalysis.h"
 #include "jit/Ion.h"
 #include "jit/JitSpewer.h"
diff --git a/js/src/jit/MacroAssembler.h b/js/src/jit/MacroAssembler.h
index 05b0aedf272..0d9a982b2ec 100644
--- a/js/src/jit/MacroAssembler.h
+++ b/js/src/jit/MacroAssembler.h
@@ -388,15 +388,14 @@ class MacroAssembler : public MacroAssemblerSpecific
     // asm.js compilation handles its own JitContext-pushing
     struct AsmJSToken {};
 
-    explicit MacroAssembler(AsmJSToken, TempAllocator *alloc)
+    explicit MacroAssembler(AsmJSToken, TempAllocator& alloc)
       : framePushed_(0),
 #ifdef DEBUG
         inCall_(false),
 #endif
         emitProfilingInstrumentation_(false)
     {
-        if (alloc)
-            moveResolver_.setAllocator(*alloc);
+        moveResolver_.setAllocator(alloc);
 
 #if defined(JS_CODEGEN_ARM)
         initWithAllocator();
diff --git a/js/src/jit/arm/Assembler-arm.cpp b/js/src/jit/arm/Assembler-arm.cpp
index 90fd555285d..8732f8ca65e 100644
--- a/js/src/jit/arm/Assembler-arm.cpp
+++ b/js/src/jit/arm/Assembler-arm.cpp
@@ -3334,7 +3334,7 @@ void Assembler::UpdateBoundsCheck(uint32_t heapSize, Instruction* inst)
     *inst = InstALU(InvalidReg, index, imm8, OpCmp, SetCC, Always);
 
     // NOTE: we don't update the Auto Flush Cache! this function is currently
-    // only called from within AsmJSModule::patchHeapAccesses, which does that
+    // only called from within ModuleGenerator::finish, which does that
     // for us. Don't call this!
 }
 
diff --git a/js/src/jit/arm/MacroAssembler-arm.cpp b/js/src/jit/arm/MacroAssembler-arm.cpp
index bfcafdc10d0..13a497d2734 100644
--- a/js/src/jit/arm/MacroAssembler-arm.cpp
+++ b/js/src/jit/arm/MacroAssembler-arm.cpp
@@ -1928,7 +1928,7 @@ MacroAssemblerARMCompat::movePtr(ImmPtr imm, Register dest)
 void
 MacroAssemblerARMCompat::movePtr(wasm::SymbolicAddress imm, Register dest)
 {
-    append(AsmJSAbsoluteLink(CodeOffset(currentOffset()), imm));
+    append(AsmJSAbsoluteAddress(CodeOffset(currentOffset()), imm));
     ma_movPatchable(Imm32(-1), dest, Always);
 }
 
diff --git a/js/src/jit/arm64/MacroAssembler-arm64.h b/js/src/jit/arm64/MacroAssembler-arm64.h
index 69e48e78ece..081cbfed128 100644
--- a/js/src/jit/arm64/MacroAssembler-arm64.h
+++ b/js/src/jit/arm64/MacroAssembler-arm64.h
@@ -778,7 +778,7 @@ class MacroAssemblerCompat : public vixl::MacroAssembler
     }
     void movePtr(wasm::SymbolicAddress imm, Register dest) {
         BufferOffset off = movePatchablePtr(ImmWord(0xffffffffffffffffULL), dest);
-        append(AsmJSAbsoluteLink(CodeOffset(off.getOffset()), imm));
+        append(AsmJSAbsoluteAddress(CodeOffset(off.getOffset()), imm));
     }
     void movePtr(ImmGCPtr imm, Register dest) {
         BufferOffset load = movePatchablePtr(ImmPtr(imm.value), dest);
diff --git a/js/src/jit/mips32/MacroAssembler-mips32.cpp b/js/src/jit/mips32/MacroAssembler-mips32.cpp
index 95593367002..dade4aadfd1 100644
--- a/js/src/jit/mips32/MacroAssembler-mips32.cpp
+++ b/js/src/jit/mips32/MacroAssembler-mips32.cpp
@@ -756,7 +756,7 @@ MacroAssemblerMIPSCompat::movePtr(ImmPtr imm, Register dest)
 void
 MacroAssemblerMIPSCompat::movePtr(wasm::SymbolicAddress imm, Register dest)
 {
-    append(AsmJSAbsoluteLink(CodeOffset(nextOffset().getOffset()), imm));
+    append(AsmJSAbsoluteAddress(CodeOffset(nextOffset().getOffset()), imm));
     ma_liPatchable(dest, ImmWord(-1));
 }
 
diff --git a/js/src/jit/mips32/Simulator-mips32.cpp b/js/src/jit/mips32/Simulator-mips32.cpp
index c6d0b36bd40..3de18f679ff 100644
--- a/js/src/jit/mips32/Simulator-mips32.cpp
+++ b/js/src/jit/mips32/Simulator-mips32.cpp
@@ -35,7 +35,6 @@
 
 #include 
 
-#include "asmjs/AsmJSValidate.h"
 #include "jit/mips32/Assembler-mips32.h"
 #include "vm/Runtime.h"
 
diff --git a/js/src/jit/mips64/MacroAssembler-mips64.cpp b/js/src/jit/mips64/MacroAssembler-mips64.cpp
index 3041b2b58ec..207b09dfee9 100644
--- a/js/src/jit/mips64/MacroAssembler-mips64.cpp
+++ b/js/src/jit/mips64/MacroAssembler-mips64.cpp
@@ -895,7 +895,7 @@ MacroAssemblerMIPS64Compat::movePtr(ImmPtr imm, Register dest)
 void
 MacroAssemblerMIPS64Compat::movePtr(wasm::SymbolicAddress imm, Register dest)
 {
-    append(AsmJSAbsoluteLink(CodeOffset(nextOffset().getOffset()), imm));
+    append(AsmJSAbsoluteAddress(CodeOffset(nextOffset().getOffset()), imm));
     ma_liPatchable(dest, ImmWord(-1));
 }
 
diff --git a/js/src/jit/mips64/Simulator-mips64.cpp b/js/src/jit/mips64/Simulator-mips64.cpp
index 2728af20fa6..d55fe843b2c 100644
--- a/js/src/jit/mips64/Simulator-mips64.cpp
+++ b/js/src/jit/mips64/Simulator-mips64.cpp
@@ -36,7 +36,6 @@
 
 #include 
 
-#include "asmjs/AsmJSValidate.h"
 #include "jit/mips64/Assembler-mips64.h"
 #include "vm/Runtime.h"
 
diff --git a/js/src/jit/shared/Assembler-shared.h b/js/src/jit/shared/Assembler-shared.h
index 123e23c7c9a..7dc0df2ae8c 100644
--- a/js/src/jit/shared/Assembler-shared.h
+++ b/js/src/jit/shared/Assembler-shared.h
@@ -11,7 +11,7 @@
 
 #include <limits.h>
 
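The AsmJSAbsoluteAddress records renamed in the Assembler-shared.h hunk just below are the data half of a record-then-patch scheme: emission stores only a code offset plus a symbolic target, and static linking writes the resolved pointer into the code image. A minimal sketch with illustrative names (PatchDatum, StaticLink, Resolve), not the actual MacroAssembler API:

    #include <cstdint>
    #include <cstring>
    #include <vector>

    enum class Symbol { StackLimit, HandleInterrupt };

    struct PatchDatum
    {
        uint32_t patchAt;  // byte offset of a pointer-sized immediate in the code
        Symbol target;     // resolved only at static-link time
    };

    static void
    StaticLink(uint8_t* code, const std::vector<PatchDatum>& patches,
               void* (*resolve)(Symbol))
    {
        for (const PatchDatum& p : patches) {
            void* addr = resolve(p.target);
            std::memcpy(code + p.patchAt, &addr, sizeof(addr));  // patch in place
        }
    }

    static uint32_t gStackLimit;

    static void*
    Resolve(Symbol s)
    {
        return s == Symbol::StackLimit ? &gStackLimit : nullptr;
    }

    int main()
    {
        uint8_t code[16] = {};
        std::vector<PatchDatum> patches{{8, Symbol::StackLimit}};
        StaticLink(code, patches, Resolve);
        void* patched;
        std::memcpy(&patched, code + 8, sizeof(patched));
        return patched == &gStackLimit ? 0 : 1;
    }
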
-#include "asmjs/AsmJSFrameIterator.h"
+#include "asmjs/WasmTypes.h"
 #include "jit/JitAllocPolicy.h"
 #include "jit/Label.h"
 #include "jit/Registers.h"
@@ -681,10 +681,10 @@ struct AsmJSGlobalAccess
 
 // Represents an instruction to be patched and the intended pointee. These
 // links are accumulated in the MacroAssembler, but patching is done outside
-// the MacroAssembler (in AsmJSModule::staticallyLink).
-struct AsmJSAbsoluteLink
+// the MacroAssembler (in Module::staticallyLink).
+struct AsmJSAbsoluteAddress
 {
-    AsmJSAbsoluteLink(CodeOffset patchAt, wasm::SymbolicAddress target)
+    AsmJSAbsoluteAddress(CodeOffset patchAt, wasm::SymbolicAddress target)
       : patchAt(patchAt), target(target)
     {}
 
     CodeOffset patchAt;
@@ -711,7 +711,7 @@ class AssemblerShared
     wasm::CallSiteAndTargetVector callsites_;
     wasm::HeapAccessVector heapAccesses_;
    Vector<AsmJSGlobalAccess, 0, SystemAllocPolicy> asmJSGlobalAccesses_;
-    Vector<AsmJSAbsoluteLink, 0, SystemAllocPolicy> asmJSAbsoluteLinks_;
+    Vector<AsmJSAbsoluteAddress, 0, SystemAllocPolicy> asmJSAbsoluteAddresses_;
 
   protected:
    Vector<CodeLabel, 0, SystemAllocPolicy> codeLabels_;
 
@@ -758,9 +758,9 @@ class AssemblerShared
     size_t numAsmJSGlobalAccesses() const { return asmJSGlobalAccesses_.length(); }
     AsmJSGlobalAccess asmJSGlobalAccess(size_t i) const { return asmJSGlobalAccesses_[i]; }
 
-    void append(AsmJSAbsoluteLink link) { enoughMemory_ &= asmJSAbsoluteLinks_.append(link); }
-    size_t numAsmJSAbsoluteLinks() const { return asmJSAbsoluteLinks_.length(); }
-    AsmJSAbsoluteLink asmJSAbsoluteLink(size_t i) const { return asmJSAbsoluteLinks_[i]; }
+    void append(AsmJSAbsoluteAddress link) { enoughMemory_ &= asmJSAbsoluteAddresses_.append(link); }
+    size_t numAsmJSAbsoluteAddresses() const { return asmJSAbsoluteAddresses_.length(); }
+    AsmJSAbsoluteAddress asmJSAbsoluteAddress(size_t i) const { return asmJSAbsoluteAddresses_[i]; }
 
     static bool canUseInSingleByteInstruction(Register reg) { return true; }
 
@@ -792,10 +792,10 @@ class AssemblerShared
         for (; i < asmJSGlobalAccesses_.length(); i++)
             asmJSGlobalAccesses_[i].patchAt.offsetBy(delta);
 
-        i = asmJSAbsoluteLinks_.length();
-        enoughMemory_ &= asmJSAbsoluteLinks_.appendAll(other.asmJSAbsoluteLinks_);
-        for (; i < asmJSAbsoluteLinks_.length(); i++)
-            asmJSAbsoluteLinks_[i].patchAt.offsetBy(delta);
+        i = asmJSAbsoluteAddresses_.length();
+        enoughMemory_ &= asmJSAbsoluteAddresses_.appendAll(other.asmJSAbsoluteAddresses_);
+        for (; i < asmJSAbsoluteAddresses_.length(); i++)
+            asmJSAbsoluteAddresses_[i].patchAt.offsetBy(delta);
 
         i = codeLabels_.length();
         enoughMemory_ &= codeLabels_.appendAll(other.codeLabels_);
diff --git a/js/src/jit/shared/CodeGenerator-shared.cpp b/js/src/jit/shared/CodeGenerator-shared.cpp
index 7a5bc176271..154c1795e29 100644
--- a/js/src/jit/shared/CodeGenerator-shared.cpp
+++ b/js/src/jit/shared/CodeGenerator-shared.cpp
@@ -1521,7 +1521,7 @@ CodeGeneratorShared::emitAsmJSCall(LAsmJSCall* ins)
             masm.call(mir->desc(), ToRegister(ins->getOperand(mir->dynamicCalleeOperandIndex())));
             break;
           case MAsmJSCall::Callee::Builtin:
-            masm.call(BuiltinToImmediate(callee.builtin()));
+            masm.call(callee.builtin());
             break;
     }
 
diff --git a/js/src/jit/x64/Assembler-x64.h b/js/src/jit/x64/Assembler-x64.h
index 2efaed15f1c..7d42ea4473a 100644
--- a/js/src/jit/x64/Assembler-x64.h
+++ b/js/src/jit/x64/Assembler-x64.h
@@ -601,7 +601,7 @@ class Assembler : public AssemblerX86Shared
     }
     void mov(wasm::SymbolicAddress imm, Register dest) {
         masm.movq_i64r(-1, dest.encoding());
-        append(AsmJSAbsoluteLink(CodeOffset(masm.currentOffset()), imm));
+        append(AsmJSAbsoluteAddress(CodeOffset(masm.currentOffset()), imm));
     }
     void mov(const Operand& src, Register dest) {
         movq(src, dest);
diff --git 
a/js/src/jit/x64/CodeGenerator-x64.cpp b/js/src/jit/x64/CodeGenerator-x64.cpp index de46357a4d4..59a03c2c633 100644 --- a/js/src/jit/x64/CodeGenerator-x64.cpp +++ b/js/src/jit/x64/CodeGenerator-x64.cpp @@ -239,16 +239,6 @@ void CodeGeneratorX64::visitAsmJSCall(LAsmJSCall* ins) { emitAsmJSCall(ins); - -#ifdef DEBUG - Register scratch = ABIArgGenerator::NonReturn_VolatileReg0; - masm.movePtr(HeapReg, scratch); - masm.loadAsmJSHeapRegisterFromGlobalData(); - Label ok; - masm.branchPtr(Assembler::Equal, HeapReg, scratch, &ok); - masm.breakpoint(); - masm.bind(&ok); -#endif } void diff --git a/js/src/jit/x86/Assembler-x86.h b/js/src/jit/x86/Assembler-x86.h index 498e133c7fa..f2c6fcbbf7a 100644 --- a/js/src/jit/x86/Assembler-x86.h +++ b/js/src/jit/x86/Assembler-x86.h @@ -288,7 +288,7 @@ class Assembler : public AssemblerX86Shared } void mov(wasm::SymbolicAddress imm, Register dest) { masm.movl_i32r(-1, dest.encoding()); - append(AsmJSAbsoluteLink(CodeOffset(masm.currentOffset()), imm)); + append(AsmJSAbsoluteAddress(CodeOffset(masm.currentOffset()), imm)); } void mov(const Operand& src, Register dest) { movl(src, dest); @@ -367,11 +367,11 @@ class Assembler : public AssemblerX86Shared } void cmpl(Register rhs, wasm::SymbolicAddress lhs) { masm.cmpl_rm_disp32(rhs.encoding(), (void*)-1); - append(AsmJSAbsoluteLink(CodeOffset(masm.currentOffset()), lhs)); + append(AsmJSAbsoluteAddress(CodeOffset(masm.currentOffset()), lhs)); } void cmpl(Imm32 rhs, wasm::SymbolicAddress lhs) { JmpSrc src = masm.cmpl_im_disp32(rhs.value, (void*)-1); - append(AsmJSAbsoluteLink(CodeOffset(src.offset()), lhs)); + append(AsmJSAbsoluteAddress(CodeOffset(src.offset()), lhs)); } void adcl(Imm32 imm, Register dest) { diff --git a/js/src/jsopcode.cpp b/js/src/jsopcode.cpp index f55cbf35324..3004532d765 100644 --- a/js/src/jsopcode.cpp +++ b/js/src/jsopcode.cpp @@ -33,7 +33,6 @@ #include "jstypes.h" #include "jsutil.h" -#include "asmjs/AsmJSModule.h" #include "frontend/BytecodeCompiler.h" #include "frontend/SourceNotes.h" #include "gc/GCInternals.h" diff --git a/js/src/jsscript.h b/js/src/jsscript.h index b4b90fd00c4..68a890ca710 100644 --- a/js/src/jsscript.h +++ b/js/src/jsscript.h @@ -825,6 +825,9 @@ class ScriptSourceHolder { ss->decref(); } + ScriptSource* get() const { + return ss; + } }; struct CompressedSourceHasher diff --git a/js/src/moz.build b/js/src/moz.build index 9e908b71b14..d67db7c81e2 100644 --- a/js/src/moz.build +++ b/js/src/moz.build @@ -142,14 +142,16 @@ EXPORTS.js += [ ] UNIFIED_SOURCES += [ - 'asmjs/AsmJSFrameIterator.cpp', 'asmjs/AsmJSLink.cpp', 'asmjs/AsmJSModule.cpp', - 'asmjs/AsmJSSignalHandlers.cpp', 'asmjs/AsmJSValidate.cpp', + 'asmjs/WasmFrameIterator.cpp', 'asmjs/WasmGenerator.cpp', 'asmjs/WasmIonCompile.cpp', + 'asmjs/WasmModule.cpp', + 'asmjs/WasmSignalHandlers.cpp', 'asmjs/WasmStubs.cpp', + 'asmjs/WasmTypes.cpp', 'builtin/AtomicsObject.cpp', 'builtin/Eval.cpp', 'builtin/Intl.cpp', diff --git a/js/src/vm/HelperThreads.cpp b/js/src/vm/HelperThreads.cpp index c9ae90fe2b3..e1b06308509 100644 --- a/js/src/vm/HelperThreads.cpp +++ b/js/src/vm/HelperThreads.cpp @@ -79,7 +79,7 @@ js::SetFakeCPUCount(size_t count) } bool -js::StartOffThreadWasmCompile(ExclusiveContext* cx, wasm::CompileTask* task) +js::StartOffThreadWasmCompile(ExclusiveContext* cx, wasm::IonCompileTask* task) { AutoLockHelperThreadState lock; @@ -737,7 +737,7 @@ GlobalHelperThreadState::canStartWasmCompile() // Honor the maximum allowed threads to compile wasm jobs at once, // to avoid oversaturating the machine. 
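The saturation policy described by the "Honor the maximum allowed threads" comment above can be stated in a few lines. This sketch counts helper threads already busy with a wasm job against a cap before another is allowed to start; Thread, CanStartWasmCompile, and maxWasmThreads are illustrative names, not the HelperThreadState API:

    #include <cstddef>
    #include <vector>

    struct Thread { bool busyWithWasm = false; };

    static bool
    CanStartWasmCompile(const std::vector<Thread>& threads, size_t maxWasmThreads)
    {
        size_t busy = 0;
        for (const Thread& t : threads) {
            if (t.busyWithWasm && ++busy >= maxWasmThreads)
                return false;  // saturated: leave the job queued on the worklist
        }
        return true;
    }

    int main()
    {
        std::vector<Thread> threads(4);
        threads[0].busyWithWasm = threads[1].busyWithWasm = true;
        return CanStartWasmCompile(threads, 2) ? 1 : 0;  // expect 0: cap reached
    }
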
-    if (!checkTaskThreadLimit<wasm::CompileTask*>(maxWasmCompilationThreads()))
+    if (!checkTaskThreadLimit<wasm::IonCompileTask*>(maxWasmCompilationThreads()))
         return false;
 
     return true;
@@ -1201,11 +1201,11 @@ HelperThread::handleWasmWorkload()
     currentTask.emplace(HelperThreadState().wasmWorklist().popCopy());
     bool success = false;
 
-    wasm::CompileTask* task = wasmTask();
+    wasm::IonCompileTask* task = wasmTask();
     {
         AutoUnlockHelperThreadState unlock;
-        PerThreadData::AutoEnterRuntime enter(threadData.ptr(), task->args().runtime);
-        success = wasm::CompileFunction(task);
+        PerThreadData::AutoEnterRuntime enter(threadData.ptr(), task->runtime());
+        success = wasm::IonCompileFunction(task);
     }
 
     // On success, try to move work to the finished list.
diff --git a/js/src/vm/HelperThreads.h b/js/src/vm/HelperThreads.h
index 0c84c2c49f5..10f1471fad4 100644
--- a/js/src/vm/HelperThreads.h
+++ b/js/src/vm/HelperThreads.h
@@ -20,7 +20,6 @@
 #include "jscntxt.h"
 #include "jslock.h"
 
-#include "asmjs/WasmCompileArgs.h"
 #include "frontend/TokenStream.h"
 #include "jit/Ion.h"
 
@@ -32,11 +31,10 @@
 namespace jit { class IonBuilder; } // namespace jit
 namespace wasm {
-    struct CompileArgs;
-    class CompileTask;
     class FuncIR;
     class FunctionCompileResults;
-    typedef Vector<CompileTask*, 0, SystemAllocPolicy> CompileTaskVector;
+    class IonCompileTask;
+    typedef Vector<IonCompileTask*, 0, SystemAllocPolicy> IonCompileTaskVector;
 } // namespace wasm
 
 // Per-process state for off thread work items.
@@ -70,7 +68,7 @@ class GlobalHelperThreadState
     IonBuilderList ionLazyLinkList_;
 
     // wasm worklist and finished jobs.
-    wasm::CompileTaskVector wasmWorklist_, wasmFinishedList_;
+    wasm::IonCompileTaskVector wasmWorklist_, wasmFinishedList_;
 
   public:
     // For now, only allow a single parallel asm.js compilation to happen at a
@@ -153,11 +151,11 @@ class GlobalHelperThreadState
         return ionLazyLinkList_;
     }
 
-    wasm::CompileTaskVector& wasmWorklist() {
+    wasm::IonCompileTaskVector& wasmWorklist() {
         MOZ_ASSERT(isLocked());
         return wasmWorklist_;
     }
-    wasm::CompileTaskVector& wasmFinishedList() {
+    wasm::IonCompileTaskVector& wasmFinishedList() {
         MOZ_ASSERT(isLocked());
         return wasmFinishedList_;
     }
@@ -296,7 +294,7 @@ struct HelperThread
     /* The current task being executed by this thread, if any. */
     mozilla::Maybe<HelperTaskUnion> currentTask;
 
-    wasm::CompileTask* wasmTask() {
-        return maybeCurrentTaskAs<wasm::CompileTask*>();
+    wasm::IonCompileTask* wasmTask() {
+        return maybeCurrentTaskAs<wasm::IonCompileTask*>();
     }
 
     /* Any source being parsed/emitted on this thread. */
@@ -383,7 +381,7 @@
 PauseCurrentHelperThread();
 
 /* Perform MIR optimization and LIR generation on a single function. 
*/ bool -StartOffThreadWasmCompile(ExclusiveContext* cx, wasm::CompileTask* task); +StartOffThreadWasmCompile(ExclusiveContext* cx, wasm::IonCompileTask* task); /* * Schedule an Ion compilation for a script, given a builder which has been diff --git a/js/src/vm/Runtime.cpp b/js/src/vm/Runtime.cpp index ba3fc488f46..8bef880960d 100644 --- a/js/src/vm/Runtime.cpp +++ b/js/src/vm/Runtime.cpp @@ -39,7 +39,7 @@ #include "jswin.h" #include "jswrapper.h" -#include "asmjs/AsmJSSignalHandlers.h" +#include "asmjs/WasmSignalHandlers.h" #include "jit/arm/Simulator-arm.h" #include "jit/arm64/vixl/Simulator-vixl.h" #include "jit/JitCompartment.h" @@ -202,7 +202,7 @@ JSRuntime::JSRuntime(JSRuntime* parentRuntime) destroyPrincipals(nullptr), readPrincipals(nullptr), errorReporter(nullptr), - linkedAsmJSModules(nullptr), + linkedWasmModules(nullptr), propertyRemovals(0), #if !EXPOSE_INTL_API thousandsSeparator(0), @@ -346,7 +346,7 @@ JSRuntime::init(uint32_t maxbytes, uint32_t maxNurseryBytes) jitSupportsFloatingPoint = js::jit::JitSupportsFloatingPoint(); jitSupportsSimd = js::jit::JitSupportsSimd(); - signalHandlersInstalled_ = EnsureSignalHandlersInstalled(this); + signalHandlersInstalled_ = wasm::EnsureSignalHandlersInstalled(this); canUseSignalHandlers_ = signalHandlersInstalled_ && !SignalBasedTriggersDisabled(); if (!spsProfiler.init()) diff --git a/js/src/vm/Runtime.h b/js/src/vm/Runtime.h index d06f955d8e5..3e6e9d5fa5c 100644 --- a/js/src/vm/Runtime.h +++ b/js/src/vm/Runtime.h @@ -24,7 +24,7 @@ #include "jsscript.h" #ifdef XP_DARWIN -# include "asmjs/AsmJSSignalHandlers.h" +# include "asmjs/WasmSignalHandlers.h" #endif #include "builtin/AtomicsObject.h" #include "ds/FixedSizeHash.h" @@ -89,7 +89,6 @@ ReportOverRecursed(ExclusiveContext* cx); class Activation; class ActivationIterator; class AsmJSActivation; -class AsmJSModule; class MathCache; namespace jit { @@ -106,6 +105,10 @@ class Simulator; #endif } // namespace jit +namespace wasm { +class Module; +} // namespace wasm + /* * GetSrcNote cache to avoid O(n^2) growth in finding a source note for a * given pc in a script. We use the script->code pointer to tag the cache, @@ -1146,7 +1149,7 @@ struct JSRuntime : public JS::shadow::Runtime, void* data; #if defined(XP_DARWIN) && defined(ASMJS_MAY_USE_SIGNAL_HANDLERS_FOR_OOB) - js::AsmJSMachExceptionHandler asmJSMachExceptionHandler; + js::wasm::MachExceptionHandler wasmMachExceptionHandler; #endif private: @@ -1187,8 +1190,8 @@ struct JSRuntime : public JS::shadow::Runtime, /* AsmJSCache callbacks are runtime-wide. */ JS::AsmJSCacheOps asmJSCacheOps; - /* Head of the linked list of linked asm.js modules. */ - js::AsmJSModule* linkedAsmJSModules; + /* Head of the linked list of linked wasm modules. 
*/ + js::wasm::Module* linkedWasmModules; /* * The propertyRemovals counter is incremented for every JSObject::clear, diff --git a/js/src/vm/SharedArrayObject.cpp b/js/src/vm/SharedArrayObject.cpp index 6c36c151a1e..c41cb95f5d4 100644 --- a/js/src/vm/SharedArrayObject.cpp +++ b/js/src/vm/SharedArrayObject.cpp @@ -28,6 +28,8 @@ #include "jsobjinlines.h" +#include "vm/NativeObject-inl.h" + using namespace js; static inline void* diff --git a/js/src/vm/Stack-inl.h b/js/src/vm/Stack-inl.h index 55d94bc4cef..285b85877d7 100644 --- a/js/src/vm/Stack-inl.h +++ b/js/src/vm/Stack-inl.h @@ -1023,12 +1023,6 @@ InterpreterActivation::resumeGeneratorFrame(HandleFunction callee, HandleValue n return true; } -inline JSContext* -AsmJSActivation::cx() -{ - return cx_->asJSContext(); -} - inline bool FrameIter::hasCachedSavedFrame() const { diff --git a/js/src/vm/Stack.cpp b/js/src/vm/Stack.cpp index 62860e05321..668c300ff2a 100644 --- a/js/src/vm/Stack.cpp +++ b/js/src/vm/Stack.cpp @@ -10,8 +10,8 @@ #include "jscntxt.h" -#include "asmjs/AsmJSFrameIterator.h" #include "asmjs/AsmJSModule.h" +#include "asmjs/WasmFrameIterator.h" #include "gc/Marking.h" #include "jit/BaselineFrame.h" #include "jit/JitcodeMap.h" @@ -608,7 +608,7 @@ FrameIter::settleOnActivation() } if (activation->isAsmJS()) { - data_.asmJSFrames_ = AsmJSFrameIterator(*data_.activations_->asAsmJS()); + data_.asmJSFrames_ = wasm::FrameIterator(*data_.activations_->asAsmJS()); if (data_.asmJSFrames_.done()) { ++data_.activations_; @@ -986,7 +986,7 @@ FrameIter::scriptFilename() const case JIT: return script()->filename(); case ASMJS: - return data_.activations_->asAsmJS()->module().scriptSource()->filename(); + return data_.activations_->asAsmJS()->module().wasm().filename(); } MOZ_CRASH("Unexpected state"); @@ -1744,12 +1744,12 @@ AsmJSActivation::AsmJSActivation(JSContext* cx, AsmJSModule& module) entrySP_(nullptr), resumePC_(nullptr), fp_(nullptr), - packedExitReason_(wasm::ExitReason(wasm::ExitReason::None).pack()) + exitReason_(wasm::ExitReason::None) { (void) entrySP_; // squelch GCC warning - prevAsmJSForModule_ = module.activation(); - module.activation() = this; + prevAsmJSForModule_ = module.wasm().activation(); + module.wasm().activation() = this; prevAsmJS_ = cx->runtime()->asmJSActivationStack_; cx->runtime()->asmJSActivationStack_ = this; @@ -1766,8 +1766,8 @@ AsmJSActivation::~AsmJSActivation() MOZ_ASSERT(fp_ == nullptr); - MOZ_ASSERT(module_.activation() == this); - module_.activation() = prevAsmJSForModule_; + MOZ_ASSERT(module_.wasm().activation() == this); + module_.wasm().activation() = prevAsmJSForModule_; JSContext* cx = cx_->asJSContext(); MOZ_ASSERT(cx->runtime()->asmJSActivationStack_ == this); @@ -1860,7 +1860,7 @@ JS::ProfilingFrameIterator::ProfilingFrameIterator(JSRuntime* rt, const Register MOZ_ASSERT(activation_->isProfiling()); - static_assert(sizeof(AsmJSProfilingFrameIterator) <= StorageSpace && + static_assert(sizeof(wasm::ProfilingFrameIterator) <= StorageSpace && sizeof(jit::JitProfilingFrameIterator) <= StorageSpace, "Need to increase storage"); @@ -1916,7 +1916,7 @@ JS::ProfilingFrameIterator::iteratorConstruct(const RegisterState& state) MOZ_ASSERT(activation_->isAsmJS() || activation_->isJit()); if (activation_->isAsmJS()) { - new (storage_.addr()) AsmJSProfilingFrameIterator(*activation_->asAsmJS(), state); + new (storage_.addr()) wasm::ProfilingFrameIterator(*activation_->asAsmJS(), state); // Set savedPrevJitTop_ to the actual jitTop_ from the runtime. 
savedPrevJitTop_ = activation_->cx()->runtime()->jitTop; return; @@ -1933,7 +1933,7 @@ JS::ProfilingFrameIterator::iteratorConstruct() MOZ_ASSERT(activation_->isAsmJS() || activation_->isJit()); if (activation_->isAsmJS()) { - new (storage_.addr()) AsmJSProfilingFrameIterator(*activation_->asAsmJS()); + new (storage_.addr()) wasm::ProfilingFrameIterator(*activation_->asAsmJS()); return; } @@ -1949,7 +1949,7 @@ JS::ProfilingFrameIterator::iteratorDestroy() MOZ_ASSERT(activation_->isAsmJS() || activation_->isJit()); if (activation_->isAsmJS()) { - asmJSIter().~AsmJSProfilingFrameIterator(); + asmJSIter().~ProfilingFrameIterator(); return; } diff --git a/js/src/vm/Stack.h b/js/src/vm/Stack.h index 3933621ff5c..5a41dd5a606 100644 --- a/js/src/vm/Stack.h +++ b/js/src/vm/Stack.h @@ -16,7 +16,7 @@ #include "jsscript.h" #include "jsutil.h" -#include "asmjs/AsmJSFrameIterator.h" +#include "asmjs/WasmFrameIterator.h" #include "gc/Rooting.h" #include "jit/JitFrameIterator.h" #ifdef CHECK_OSIPOINT_REGISTERS @@ -1804,13 +1804,12 @@ class AsmJSActivation : public Activation void* entrySP_; void* resumePC_; uint8_t* fp_; - uint32_t packedExitReason_; + wasm::ExitReason exitReason_; public: AsmJSActivation(JSContext* cx, AsmJSModule& module); ~AsmJSActivation(); - inline JSContext* cx(); AsmJSModule& module() const { return module_; } AsmJSActivation* prevAsmJS() const { return prevAsmJS_; } @@ -1823,7 +1822,7 @@ class AsmJSActivation : public Activation uint8_t* fp() const { return fp_; } // Returns the reason why asm.js code called out of asm.js code. - wasm::ExitReason exitReason() const { return wasm::ExitReason::unpack(packedExitReason_); } + wasm::ExitReason exitReason() const { return exitReason_; } // Read by JIT code: static unsigned offsetOfContext() { return offsetof(AsmJSActivation, cx_); } @@ -1832,7 +1831,7 @@ class AsmJSActivation : public Activation // Written by JIT code: static unsigned offsetOfEntrySP() { return offsetof(AsmJSActivation, entrySP_); } static unsigned offsetOfFP() { return offsetof(AsmJSActivation, fp_); } - static unsigned offsetOfPackedExitReason() { return offsetof(AsmJSActivation, packedExitReason_); } + static unsigned offsetOfExitReason() { return offsetof(AsmJSActivation, exitReason_); } // Read/written from SIGSEGV handler: void setResumePC(void* pc) { resumePC_ = pc; } @@ -1889,7 +1888,7 @@ class FrameIter jit::JitFrameIterator jitFrames_; unsigned ionInlineFrameNo_; - AsmJSFrameIterator asmJSFrames_; + wasm::FrameIterator asmJSFrames_; Data(JSContext* cx, SavedOption savedOption, ContextOption contextOption, DebuggerEvalOption debuggerEvalOption, JSPrincipals* principals);
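For reference, the exit-reason bookkeeping this patch simplifies is visible in the removed ExitReason::pack/unpack earlier in the diff: the kind lived in the low 16 bits and the builtin identity in the high 16, squeezed into the uint32_t packedExitReason_ field that JIT code wrote directly. After this patch the enum is stored unpacked and addressed via offsetOfExitReason(). A standalone restatement of the retired packing scheme, with the enums abbreviated:

    #include <cassert>
    #include <cstdint>

    enum class Kind : uint16_t { None, Jit, Slow, Interrupt, Builtin };
    enum class Builtin : uint16_t { ToInt32, ModD, SinD };

    static uint32_t
    Pack(Kind k, Builtin b)
    {
        // Kind in the low half-word, builtin identity in the high half-word.
        return uint32_t(uint16_t(k)) | (uint32_t(uint16_t(b)) << 16);
    }

    static void
    Unpack(uint32_t u32, Kind* k, Builtin* b)
    {
        *k = Kind(uint16_t(u32));
        *b = Builtin(uint16_t(u32 >> 16));
    }

    int main()
    {
        Kind k;
        Builtin b;
        Unpack(Pack(Kind::Builtin, Builtin::SinD), &k, &b);
        assert(k == Kind::Builtin && b == Builtin::SinD);
        return 0;
    }
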