Backed out 3 changesets (bug 992267) for OS X non-unified bustage

Backed out changeset fc9f83afab31 (bug 992267)
Backed out changeset 9afc72a12cb9 (bug 992267)
Backed out changeset 55fb5688e85c (bug 992267)
This commit is contained in:
Phil Ringnalda 2014-08-29 21:07:37 -07:00
parent 327b4f8fbf
commit 946cf3f06d
52 changed files with 379 additions and 2319 deletions

View File

@ -356,11 +356,11 @@ js::GenerateAsmJSStackOverflowExit(MacroAssembler &masm, Label *overflowExit, La
masm.storePtr(StackPointer, Address(activation, AsmJSActivation::offsetOfFP())); masm.storePtr(StackPointer, Address(activation, AsmJSActivation::offsetOfFP()));
// Prepare the stack for calling C++. // Prepare the stack for calling C++.
if (uint32_t d = StackDecrementForCall(ABIStackAlignment, sizeof(AsmJSFrame), ShadowStackSpace)) if (unsigned stackDec = StackDecrementForCall(sizeof(AsmJSFrame), ShadowStackSpace))
masm.subPtr(Imm32(d), StackPointer); masm.subPtr(Imm32(stackDec), StackPointer);
// No need to restore the stack; the throw stub pops everything. // No need to restore the stack; the throw stub pops everything.
masm.assertStackAlignment(ABIStackAlignment); masm.assertStackAlignment();
masm.call(AsmJSImmPtr(AsmJSImm_ReportOverRecursed)); masm.call(AsmJSImmPtr(AsmJSImm_ReportOverRecursed));
masm.jump(throwLabel); masm.jump(throwLabel);
} }

View File

@ -170,6 +170,7 @@ void
GenerateAsmJSExitEpilogue(jit::MacroAssembler &masm, unsigned framePushed, AsmJSExit::Reason reason, GenerateAsmJSExitEpilogue(jit::MacroAssembler &masm, unsigned framePushed, AsmJSExit::Reason reason,
jit::Label *profilingReturn); jit::Label *profilingReturn);
} // namespace js } // namespace js
#endif // asmjs_AsmJSFrameIterator_h #endif // asmjs_AsmJSFrameIterator_h

View File

@ -30,7 +30,6 @@
#include "jswrapper.h" #include "jswrapper.h"
#include "asmjs/AsmJSModule.h" #include "asmjs/AsmJSModule.h"
#include "builtin/SIMD.h"
#include "frontend/BytecodeCompiler.h" #include "frontend/BytecodeCompiler.h"
#include "jit/Ion.h" #include "jit/Ion.h"
#include "jit/JitCommon.h" #include "jit/JitCommon.h"
@ -97,75 +96,59 @@ GetDataProperty(JSContext *cx, HandleValue objVal, HandlePropertyName field, Mut
return true; return true;
} }
static bool
HasPureCoercion(JSContext *cx, HandleValue v)
{
if (IsVectorObject<Int32x4>(v) || IsVectorObject<Float32x4>(v))
return true;
// Ideally, we'd reject all non-SIMD non-primitives, but Emscripten has a
// bug that generates code that passes functions for some imports. To avoid
// breaking all the code that contains this bug, we make an exception for
// functions that don't have user-defined valueOf or toString, for their
// coercions are not observable and coercion via ToNumber/ToInt32
// definitely produces NaN/0. We should remove this special case later once
// most apps have been built with newer Emscripten.
jsid toString = NameToId(cx->names().toString);
if (v.toObject().is<JSFunction>() &&
HasObjectValueOf(&v.toObject(), cx) &&
ClassMethodIsNative(cx, &v.toObject(), &JSFunction::class_, toString, fun_toString))
{
return true;
}
return false;
}
static bool static bool
ValidateGlobalVariable(JSContext *cx, const AsmJSModule &module, AsmJSModule::Global &global, ValidateGlobalVariable(JSContext *cx, const AsmJSModule &module, AsmJSModule::Global &global,
HandleValue importVal) HandleValue importVal)
{ {
JS_ASSERT(global.which() == AsmJSModule::Global::Variable); JS_ASSERT(global.which() == AsmJSModule::Global::Variable);
void *datum = module.globalVarToGlobalDatum(global); void *datum = module.globalVarIndexToGlobalDatum(global.varIndex());
switch (global.varInitKind()) { switch (global.varInitKind()) {
case AsmJSModule::Global::InitConstant: { case AsmJSModule::Global::InitConstant: {
const AsmJSNumLit &lit = global.varInitNumLit(); const AsmJSNumLit &lit = global.varInitNumLit();
const Value &v = lit.value();
switch (lit.which()) { switch (lit.which()) {
case AsmJSNumLit::Fixnum: case AsmJSNumLit::Fixnum:
case AsmJSNumLit::NegativeInt: case AsmJSNumLit::NegativeInt:
case AsmJSNumLit::BigUnsigned: case AsmJSNumLit::BigUnsigned:
*(int32_t *)datum = lit.scalarValue().toInt32(); *(int32_t *)datum = v.toInt32();
break; break;
case AsmJSNumLit::Double: case AsmJSNumLit::Double:
*(double *)datum = lit.scalarValue().toDouble(); *(double *)datum = v.toDouble();
break; break;
case AsmJSNumLit::Float: case AsmJSNumLit::Float:
*(float *)datum = static_cast<float>(lit.scalarValue().toDouble()); *(float *)datum = static_cast<float>(v.toDouble());
break;
case AsmJSNumLit::Int32x4:
memcpy(datum, lit.simdValue().asInt32x4(), Simd128DataSize);
break;
case AsmJSNumLit::Float32x4:
memcpy(datum, lit.simdValue().asFloat32x4(), Simd128DataSize);
break; break;
case AsmJSNumLit::OutOfRangeInt: case AsmJSNumLit::OutOfRangeInt:
MOZ_MAKE_COMPILER_ASSUME_IS_UNREACHABLE("OutOfRangeInt isn't valid in the first place"); MOZ_MAKE_COMPILER_ASSUME_IS_UNREACHABLE("OutOfRangeInt isn't valid in the first place");
} }
break; break;
} }
case AsmJSModule::Global::InitImport: { case AsmJSModule::Global::InitImport: {
RootedPropertyName field(cx, global.varImportField()); RootedPropertyName field(cx, global.varImportField());
RootedValue v(cx); RootedValue v(cx);
if (!GetDataProperty(cx, importVal, field, &v)) if (!GetDataProperty(cx, importVal, field, &v))
return false; return false;
if (!v.isPrimitive() && !HasPureCoercion(cx, v)) if (!v.isPrimitive()) {
// Ideally, we'd reject all non-primitives, but Emscripten has a bug
// that generates code that passes functions for some imports. To
// avoid breaking all the code that contains this bug, we make an
// exception for functions that don't have user-defined valueOf or
// toString, for their coercions are not observable and coercion via
// ToNumber/ToInt32 definitely produces NaN/0. We should remove this
// special case later once most apps have been built with newer
// Emscripten.
jsid toString = NameToId(cx->names().toString);
if (!v.toObject().is<JSFunction>() ||
!HasObjectValueOf(&v.toObject(), cx) ||
!ClassMethodIsNative(cx, &v.toObject(), &JSFunction::class_, toString, fun_toString))
{
return LinkFail(cx, "Imported values must be primitives"); return LinkFail(cx, "Imported values must be primitives");
}
}
SimdConstant simdConstant;
switch (global.varInitCoercion()) { switch (global.varInitCoercion()) {
case AsmJS_ToInt32: case AsmJS_ToInt32:
if (!ToInt32(cx, v, (int32_t *)datum)) if (!ToInt32(cx, v, (int32_t *)datum))
@ -179,16 +162,6 @@ ValidateGlobalVariable(JSContext *cx, const AsmJSModule &module, AsmJSModule::Gl
if (!RoundFloat32(cx, v, (float *)datum)) if (!RoundFloat32(cx, v, (float *)datum))
return false; return false;
break; break;
case AsmJS_ToInt32x4:
if (!ToSimdConstant<Int32x4>(cx, v, &simdConstant))
return false;
memcpy(datum, simdConstant.asInt32x4(), Simd128DataSize);
break;
case AsmJS_ToFloat32x4:
if (!ToSimdConstant<Float32x4>(cx, v, &simdConstant))
return false;
memcpy(datum, simdConstant.asFloat32x4(), Simd128DataSize);
break;
} }
break; break;
} }
@ -268,103 +241,6 @@ ValidateMathBuiltinFunction(JSContext *cx, AsmJSModule::Global &global, HandleVa
return true; return true;
} }
static PropertyName *
SimdTypeToName(JSContext *cx, AsmJSSimdType type)
{
switch (type) {
case AsmJSSimdType_int32x4: return cx->names().int32x4;
case AsmJSSimdType_float32x4: return cx->names().float32x4;
}
MOZ_MAKE_COMPILER_ASSUME_IS_UNREACHABLE("unexpected SIMD type");
}
static X4TypeDescr::Type
AsmJSSimdTypeToTypeDescrType(AsmJSSimdType type)
{
switch (type) {
case AsmJSSimdType_int32x4: return Int32x4::type;
case AsmJSSimdType_float32x4: return Float32x4::type;
}
MOZ_MAKE_COMPILER_ASSUME_IS_UNREACHABLE("unexpected AsmJSSimdType");
}
static bool
ValidateSimdType(JSContext *cx, AsmJSModule::Global &global, HandleValue globalVal,
MutableHandleValue out)
{
RootedValue v(cx);
if (!GetDataProperty(cx, globalVal, cx->names().SIMD, &v))
return false;
AsmJSSimdType type;
if (global.which() == AsmJSModule::Global::SimdCtor)
type = global.simdCtorType();
else
type = global.simdOperationType();
RootedPropertyName simdTypeName(cx, SimdTypeToName(cx, type));
if (!GetDataProperty(cx, v, simdTypeName, &v))
return false;
if (!v.isObject())
return LinkFail(cx, "bad SIMD type");
RootedObject x4desc(cx, &v.toObject());
if (!x4desc->is<X4TypeDescr>())
return LinkFail(cx, "bad SIMD type");
if (AsmJSSimdTypeToTypeDescrType(type) != x4desc->as<X4TypeDescr>().type())
return LinkFail(cx, "bad SIMD type");
out.set(v);
return true;
}
static bool
ValidateSimdType(JSContext *cx, AsmJSModule::Global &global, HandleValue globalVal)
{
RootedValue _(cx);
return ValidateSimdType(cx, global, globalVal, &_);
}
static bool
ValidateSimdOperation(JSContext *cx, AsmJSModule::Global &global, HandleValue globalVal)
{
// SIMD operations are loaded from the SIMD type, so the type must have been
// validated before the operation.
RootedValue v(cx);
JS_ALWAYS_TRUE(ValidateSimdType(cx, global, globalVal, &v));
RootedPropertyName opName(cx, global.simdOperationName());
if (!GetDataProperty(cx, v, opName, &v))
return false;
Native native = nullptr;
switch (global.simdOperationType()) {
case AsmJSSimdType_int32x4:
switch (global.simdOperation()) {
case AsmJSSimdOperation_add: native = simd_int32x4_add; break;
case AsmJSSimdOperation_sub: native = simd_int32x4_sub; break;
case AsmJSSimdOperation_mul:
case AsmJSSimdOperation_div:
MOZ_MAKE_COMPILER_ASSUME_IS_UNREACHABLE("Mul and div shouldn't have been validated in "
"the first place");
}
break;
case AsmJSSimdType_float32x4:
switch (global.simdOperation()) {
case AsmJSSimdOperation_add: native = simd_float32x4_add; break;
case AsmJSSimdOperation_sub: native = simd_float32x4_sub; break;
case AsmJSSimdOperation_mul: native = simd_float32x4_mul; break;
case AsmJSSimdOperation_div: native = simd_float32x4_div; break;
}
break;
}
if (!native || !IsNativeFunction(v, native))
return LinkFail(cx, "bad SIMD.type.* operation");
return true;
}
static bool static bool
ValidateConstant(JSContext *cx, AsmJSModule::Global &global, HandleValue globalVal) ValidateConstant(JSContext *cx, AsmJSModule::Global &global, HandleValue globalVal)
{ {
@ -501,14 +377,6 @@ DynamicallyLinkModule(JSContext *cx, CallArgs args, AsmJSModule &module)
if (!ValidateConstant(cx, global, globalVal)) if (!ValidateConstant(cx, global, globalVal))
return false; return false;
break; break;
case AsmJSModule::Global::SimdCtor:
if (!ValidateSimdType(cx, global, globalVal))
return false;
break;
case AsmJSModule::Global::SimdOperation:
if (!ValidateSimdOperation(cx, global, globalVal))
return false;
break;
} }
} }
@ -569,14 +437,14 @@ CallAsmJS(JSContext *cx, unsigned argc, Value *vp)
const AsmJSModule::ExportedFunction &func = FunctionToExportedFunction(callee, module); const AsmJSModule::ExportedFunction &func = FunctionToExportedFunction(callee, module);
// The calling convention for an external call into asm.js is to pass an // The calling convention for an external call into asm.js is to pass an
// array of 16-byte values where each value contains either a coerced int32 // array of 8-byte values where each value contains either a coerced int32
// (in the low word), a double value (in the low dword) or a SIMD vector // (in the low word) or double value, with the coercions specified by the
// value, with the coercions specified by the asm.js signature. The // asm.js signature. The external entry point unpacks this array into the
// external entry point unpacks this array into the system-ABI-specified // system-ABI-specified registers and stack memory and then calls into the
// registers and stack memory and then calls into the internal entry point. // internal entry point. The return value is stored in the first element of
// The return value is stored in the first element of the array (which, // the array (which, therefore, must have length >= 1).
// therefore, must have length >= 1).
js::Vector<AsmJSModule::EntryArg, 8> coercedArgs(cx); js::Vector<uint64_t, 8> coercedArgs(cx);
if (!coercedArgs.resize(Max<size_t>(1, func.numArgs()))) if (!coercedArgs.resize(Max<size_t>(1, func.numArgs())))
return false; return false;
@ -596,20 +464,6 @@ CallAsmJS(JSContext *cx, unsigned argc, Value *vp)
if (!RoundFloat32(cx, v, (float *)&coercedArgs[i])) if (!RoundFloat32(cx, v, (float *)&coercedArgs[i]))
return false; return false;
break; break;
case AsmJS_ToInt32x4: {
SimdConstant simd;
if (!ToSimdConstant<Int32x4>(cx, v, &simd))
return false;
memcpy(&coercedArgs[i], simd.asInt32x4(), Simd128DataSize);
break;
}
case AsmJS_ToFloat32x4: {
SimdConstant simd;
if (!ToSimdConstant<Float32x4>(cx, v, &simd))
return false;
memcpy(&coercedArgs[i], simd.asFloat32x4(), Simd128DataSize);
break;
}
} }
} }
@ -647,7 +501,6 @@ CallAsmJS(JSContext *cx, unsigned argc, Value *vp)
return true; return true;
} }
JSObject *x4obj;
switch (func.returnType()) { switch (func.returnType()) {
case AsmJSModule::Return_Void: case AsmJSModule::Return_Void:
callArgs.rval().set(UndefinedValue()); callArgs.rval().set(UndefinedValue());
@ -658,18 +511,6 @@ CallAsmJS(JSContext *cx, unsigned argc, Value *vp)
case AsmJSModule::Return_Double: case AsmJSModule::Return_Double:
callArgs.rval().set(NumberValue(*(double*)&coercedArgs[0])); callArgs.rval().set(NumberValue(*(double*)&coercedArgs[0]));
break; break;
case AsmJSModule::Return_Int32x4:
x4obj = CreateSimd<Int32x4>(cx, (int32_t*)&coercedArgs[0]);
if (!x4obj)
return false;
callArgs.rval().set(ObjectValue(*x4obj));
break;
case AsmJSModule::Return_Float32x4:
x4obj = CreateSimd<Float32x4>(cx, (float*)&coercedArgs[0]);
if (!x4obj)
return false;
callArgs.rval().set(ObjectValue(*x4obj));
break;
} }
return true; return true;

View File

@ -303,8 +303,8 @@ AsmJSModule::finish(ExclusiveContext *cx, TokenStream &tokenStream, MacroAssembl
// The global data section sits immediately after the executable (and // The global data section sits immediately after the executable (and
// other) data allocated by the MacroAssembler, so ensure it is // other) data allocated by the MacroAssembler, so ensure it is
// SIMD-aligned. // double-aligned.
pod.codeBytes_ = AlignBytes(masm.bytesNeeded(), SimdStackAlignment); pod.codeBytes_ = AlignBytes(masm.bytesNeeded(), sizeof(double));
// The entire region is allocated via mmap/VirtualAlloc which requires // The entire region is allocated via mmap/VirtualAlloc which requires
// units of pages. // units of pages.
@ -518,7 +518,7 @@ TryEnablingIon(JSContext *cx, AsmJSModule &module, HandleFunction fun, uint32_t
if (fun->nargs() > size_t(argc)) if (fun->nargs() > size_t(argc))
return true; return true;
// Normally the types should correspond, since we just ran with those types, // Normally the types should correspond, since we just ran with those types,
// but there are reports this is asserting. Therefore doing it as a check, instead of DEBUG only. // but there are reports this is asserting. Therefore doing it as a check, instead of DEBUG only.
if (!types::TypeScript::ThisTypes(script)->hasType(types::Type::UndefinedType())) if (!types::TypeScript::ThisTypes(script)->hasType(types::Type::UndefinedType()))
return true; return true;

View File

@ -27,10 +27,8 @@
#include "asmjs/AsmJSFrameIterator.h" #include "asmjs/AsmJSFrameIterator.h"
#include "asmjs/AsmJSValidate.h" #include "asmjs/AsmJSValidate.h"
#include "builtin/SIMD.h"
#include "gc/Marking.h" #include "gc/Marking.h"
#include "jit/IonMacroAssembler.h" #include "jit/IonMacroAssembler.h"
#include "jit/IonTypes.h"
#ifdef JS_ION_PERF #ifdef JS_ION_PERF
# include "jit/PerfSpewer.h" # include "jit/PerfSpewer.h"
#endif #endif
@ -49,9 +47,7 @@ enum AsmJSCoercion
{ {
AsmJS_ToInt32, AsmJS_ToInt32,
AsmJS_ToNumber, AsmJS_ToNumber,
AsmJS_FRound, AsmJS_FRound
AsmJS_ToInt32x4,
AsmJS_ToFloat32x4
}; };
// The asm.js spec recognizes this set of builtin Math functions. // The asm.js spec recognizes this set of builtin Math functions.
@ -66,22 +62,6 @@ enum AsmJSMathBuiltinFunction
AsmJSMathBuiltin_clz32 AsmJSMathBuiltin_clz32
}; };
// Set of known global object SIMD's attributes, i.e. types
enum AsmJSSimdType
{
AsmJSSimdType_int32x4,
AsmJSSimdType_float32x4
};
// Set of known operations, for a given SIMD type (int32x4, float32x4,...)
enum AsmJSSimdOperation
{
AsmJSSimdOperation_add,
AsmJSSimdOperation_sub,
AsmJSSimdOperation_mul,
AsmJSSimdOperation_div
};
// These labels describe positions in the prologue/epilogue of functions while // These labels describe positions in the prologue/epilogue of functions while
// compiling an AsmJSModule. // compiling an AsmJSModule.
struct AsmJSFunctionLabels struct AsmJSFunctionLabels
@ -118,32 +98,18 @@ class AsmJSNumLit
BigUnsigned, BigUnsigned,
Double, Double,
Float, Float,
Int32x4,
Float32x4,
OutOfRangeInt = -1 OutOfRangeInt = -1
}; };
private: private:
Which which_; Which which_;
union { Value value_;
Value scalar_;
jit::SimdConstant simd_;
} value;
public: public:
static AsmJSNumLit Create(Which w, Value v) { static AsmJSNumLit Create(Which w, Value v) {
AsmJSNumLit lit; AsmJSNumLit lit;
lit.which_ = w; lit.which_ = w;
lit.value.scalar_ = v; lit.value_ = v;
JS_ASSERT(!lit.isSimd());
return lit;
}
static AsmJSNumLit Create(Which w, jit::SimdConstant c) {
AsmJSNumLit lit;
lit.which_ = w;
lit.value.simd_ = c;
JS_ASSERT(lit.isSimd());
return lit; return lit;
} }
@ -153,31 +119,22 @@ class AsmJSNumLit
int32_t toInt32() const { int32_t toInt32() const {
JS_ASSERT(which_ == Fixnum || which_ == NegativeInt || which_ == BigUnsigned); JS_ASSERT(which_ == Fixnum || which_ == NegativeInt || which_ == BigUnsigned);
return value.scalar_.toInt32(); return value_.toInt32();
} }
double toDouble() const { double toDouble() const {
JS_ASSERT(which_ == Double); JS_ASSERT(which_ == Double);
return value.scalar_.toDouble(); return value_.toDouble();
} }
float toFloat() const { float toFloat() const {
JS_ASSERT(which_ == Float); JS_ASSERT(which_ == Float);
return float(value.scalar_.toDouble()); return float(value_.toDouble());
} }
Value scalarValue() const { Value value() const {
JS_ASSERT(which_ != OutOfRangeInt); JS_ASSERT(which_ != OutOfRangeInt);
return value.scalar_; return value_;
}
bool isSimd() const {
return which_ == Int32x4 || which_ == Float32x4;
}
const jit::SimdConstant &simdValue() const {
JS_ASSERT(isSimd());
return value.simd_;
} }
bool hasType() const { bool hasType() const {
@ -201,8 +158,7 @@ class AsmJSModule
class Global class Global
{ {
public: public:
enum Which { Variable, FFI, ArrayView, MathBuiltinFunction, Constant, enum Which { Variable, FFI, ArrayView, MathBuiltinFunction, Constant };
SimdCtor, SimdOperation};
enum VarInitKind { InitConstant, InitImport }; enum VarInitKind { InitConstant, InitImport };
enum ConstantKind { GlobalConstant, MathConstant }; enum ConstantKind { GlobalConstant, MathConstant };
@ -221,11 +177,6 @@ class AsmJSModule
uint32_t ffiIndex_; uint32_t ffiIndex_;
Scalar::Type viewType_; Scalar::Type viewType_;
AsmJSMathBuiltinFunction mathBuiltinFunc_; AsmJSMathBuiltinFunction mathBuiltinFunc_;
AsmJSSimdType simdCtorType_;
struct {
AsmJSSimdType type_;
AsmJSSimdOperation which_;
} simdOp;
struct { struct {
ConstantKind kind_; ConstantKind kind_;
double value_; double value_;
@ -246,7 +197,7 @@ class AsmJSModule
if (name_) if (name_)
MarkStringUnbarriered(trc, &name_, "asm.js global name"); MarkStringUnbarriered(trc, &name_, "asm.js global name");
JS_ASSERT_IF(pod.which_ == Variable && pod.u.var.initKind_ == InitConstant, JS_ASSERT_IF(pod.which_ == Variable && pod.u.var.initKind_ == InitConstant,
!pod.u.var.u.numLit_.scalarValue().isMarkable()); !pod.u.var.u.numLit_.value().isMarkable());
} }
public: public:
@ -301,26 +252,6 @@ class AsmJSModule
JS_ASSERT(pod.which_ == MathBuiltinFunction); JS_ASSERT(pod.which_ == MathBuiltinFunction);
return pod.u.mathBuiltinFunc_; return pod.u.mathBuiltinFunc_;
} }
AsmJSSimdType simdCtorType() const {
JS_ASSERT(pod.which_ == SimdCtor);
return pod.u.simdCtorType_;
}
PropertyName *simdCtorName() const {
JS_ASSERT(pod.which_ == SimdCtor);
return name_;
}
PropertyName *simdOperationName() const {
JS_ASSERT(pod.which_ == SimdOperation);
return name_;
}
AsmJSSimdOperation simdOperation() const {
JS_ASSERT(pod.which_ == SimdOperation);
return pod.u.simdOp.which_;
}
AsmJSSimdType simdOperationType() const {
JS_ASSERT(pod.which_ == SimdOperation);
return pod.u.simdOp.type_;
}
PropertyName *constantName() const { PropertyName *constantName() const {
JS_ASSERT(pod.which_ == Constant); JS_ASSERT(pod.which_ == Constant);
return name_; return name_;
@ -379,13 +310,7 @@ class AsmJSModule
const uint8_t *deserialize(ExclusiveContext *cx, const uint8_t *cursor); const uint8_t *deserialize(ExclusiveContext *cx, const uint8_t *cursor);
bool clone(ExclusiveContext *cx, Exit *out) const; bool clone(ExclusiveContext *cx, Exit *out) const;
}; };
typedef int32_t (*CodePtr)(uint64_t *args, uint8_t *global);
struct EntryArg {
uint64_t lo;
uint64_t hi;
};
JS_STATIC_ASSERT(sizeof(EntryArg) >= jit::Simd128DataSize);
typedef int32_t (*CodePtr)(EntryArg *args, uint8_t *global);
// An Exit holds bookkeeping information about an exit; the ExitDatum // An Exit holds bookkeeping information about an exit; the ExitDatum
// struct overlays the actual runtime data stored in the global data // struct overlays the actual runtime data stored in the global data
@ -398,7 +323,7 @@ class AsmJSModule
typedef Vector<AsmJSCoercion, 0, SystemAllocPolicy> ArgCoercionVector; typedef Vector<AsmJSCoercion, 0, SystemAllocPolicy> ArgCoercionVector;
enum ReturnType { Return_Int32, Return_Double, Return_Int32x4, Return_Float32x4, Return_Void }; enum ReturnType { Return_Int32, Return_Double, Return_Void };
class ExportedFunction class ExportedFunction
{ {
@ -748,8 +673,7 @@ class AsmJSModule
size_t codeBytes_; // function bodies and stubs size_t codeBytes_; // function bodies and stubs
size_t totalBytes_; // function bodies, stubs, and global data size_t totalBytes_; // function bodies, stubs, and global data
uint32_t minHeapLength_; uint32_t minHeapLength_;
uint32_t numGlobalScalarVars_; uint32_t numGlobalVars_;
uint32_t numGlobalSimdVars_;
uint32_t numFFIs_; uint32_t numFFIs_;
uint32_t srcLength_; uint32_t srcLength_;
uint32_t srcLengthWithRightBrace_; uint32_t srcLengthWithRightBrace_;
@ -896,43 +820,20 @@ class AsmJSModule
} }
bool addGlobalVarInit(const AsmJSNumLit &lit, uint32_t *globalIndex) { bool addGlobalVarInit(const AsmJSNumLit &lit, uint32_t *globalIndex) {
JS_ASSERT(!isFinishedWithModulePrologue()); JS_ASSERT(!isFinishedWithModulePrologue());
if (pod.numGlobalVars_ == UINT32_MAX)
return false;
Global g(Global::Variable, nullptr); Global g(Global::Variable, nullptr);
g.pod.u.var.initKind_ = Global::InitConstant; g.pod.u.var.initKind_ = Global::InitConstant;
g.pod.u.var.u.numLit_ = lit; g.pod.u.var.u.numLit_ = lit;
g.pod.u.var.index_ = *globalIndex = pod.numGlobalVars_++;
if (lit.isSimd()) {
if (pod.numGlobalSimdVars_ == UINT32_MAX)
return false;
*globalIndex = pod.numGlobalSimdVars_++;
} else {
if (pod.numGlobalScalarVars_ == UINT32_MAX)
return false;
*globalIndex = pod.numGlobalScalarVars_++;
}
g.pod.u.var.index_ = *globalIndex;
return globals_.append(g); return globals_.append(g);
} }
static bool IsSimdCoercion(AsmJSCoercion c) {
switch (c) {
case AsmJS_ToInt32:
case AsmJS_ToNumber:
case AsmJS_FRound:
return false;
case AsmJS_ToInt32x4:
case AsmJS_ToFloat32x4:
return true;
}
MOZ_MAKE_COMPILER_ASSUME_IS_UNREACHABLE("unexpected AsmJSCoercion");
}
bool addGlobalVarImport(PropertyName *name, AsmJSCoercion coercion, uint32_t *globalIndex) { bool addGlobalVarImport(PropertyName *name, AsmJSCoercion coercion, uint32_t *globalIndex) {
JS_ASSERT(!isFinishedWithModulePrologue()); JS_ASSERT(!isFinishedWithModulePrologue());
Global g(Global::Variable, name); Global g(Global::Variable, name);
g.pod.u.var.initKind_ = Global::InitImport; g.pod.u.var.initKind_ = Global::InitImport;
g.pod.u.var.u.coercion_ = coercion; g.pod.u.var.u.coercion_ = coercion;
*globalIndex = IsSimdCoercion(coercion) ? pod.numGlobalSimdVars_++ g.pod.u.var.index_ = *globalIndex = pod.numGlobalVars_++;
: pod.numGlobalScalarVars_++;
g.pod.u.var.index_ = *globalIndex;
return globals_.append(g); return globals_.append(g);
} }
bool addFFI(PropertyName *field, uint32_t *ffiIndex) { bool addFFI(PropertyName *field, uint32_t *ffiIndex) {
@ -963,17 +864,6 @@ class AsmJSModule
g.pod.u.constant.kind_ = Global::MathConstant; g.pod.u.constant.kind_ = Global::MathConstant;
return globals_.append(g); return globals_.append(g);
} }
bool addSimdCtor(AsmJSSimdType type, PropertyName *field) {
Global g(Global::SimdCtor, field);
g.pod.u.simdCtorType_ = type;
return globals_.append(g);
}
bool addSimdOperation(AsmJSSimdType type, AsmJSSimdOperation op, PropertyName *field) {
Global g(Global::SimdOperation, field);
g.pod.u.simdOp.type_ = type;
g.pod.u.simdOp.which_ = op;
return globals_.append(g);
}
bool addGlobalConstant(double value, PropertyName *name) { bool addGlobalConstant(double value, PropertyName *name) {
JS_ASSERT(!isFinishedWithModulePrologue()); JS_ASSERT(!isFinishedWithModulePrologue());
Global g(Global::Constant, name); Global g(Global::Constant, name);
@ -1220,11 +1110,10 @@ class AsmJSModule
// are laid out in this order: // are laid out in this order:
// 0. a pointer to the current AsmJSActivation // 0. a pointer to the current AsmJSActivation
// 1. a pointer to the heap that was linked to the module // 1. a pointer to the heap that was linked to the module
// 2. the double float constant NaN // 2. the double float constant NaN.
// 3. the float32 constant NaN, padded to Simd128DataSize // 3. the float32 constant NaN, padded to sizeof(double).
// 4. global SIMD variable state (elements are Simd128DataSize) // 4. global variable state (elements are sizeof(uint64_t))
// 5. global variable state (elements are sizeof(uint64_t)) // 5. interleaved function-pointer tables and exits. These are allocated
// 6. interleaved function-pointer tables and exits. These are allocated
// while type checking function bodies (as exits and uses of // while type checking function bodies (as exits and uses of
// function-pointer tables are encountered). // function-pointer tables are encountered).
size_t offsetOfGlobalData() const { size_t offsetOfGlobalData() const {
@ -1235,18 +1124,13 @@ class AsmJSModule
JS_ASSERT(isFinished()); JS_ASSERT(isFinished());
return code_ + offsetOfGlobalData(); return code_ + offsetOfGlobalData();
} }
size_t globalSimdVarsOffset() const {
return AlignBytes(/* 0 */ sizeof(void*) +
/* 1 */ sizeof(void*) +
/* 2 */ sizeof(double) +
/* 3 */ sizeof(float),
jit::Simd128DataSize);
}
size_t globalDataBytes() const { size_t globalDataBytes() const {
return globalSimdVarsOffset() + return sizeof(void*) +
/* 4 */ pod.numGlobalSimdVars_ * jit::Simd128DataSize + sizeof(void*) +
/* 5 */ pod.numGlobalScalarVars_ * sizeof(uint64_t) + sizeof(double) +
/* 6 */ pod.funcPtrTableAndExitBytes_; sizeof(double) +
pod.numGlobalVars_ * sizeof(uint64_t) +
pod.funcPtrTableAndExitBytes_;
} }
static unsigned activationGlobalDataOffset() { static unsigned activationGlobalDataOffset() {
JS_STATIC_ASSERT(jit::AsmJSActivationGlobalDataOffset == 0); JS_STATIC_ASSERT(jit::AsmJSActivationGlobalDataOffset == 0);
@ -1281,39 +1165,20 @@ class AsmJSModule
*(double *)(globalData() + nan64GlobalDataOffset()) = GenericNaN(); *(double *)(globalData() + nan64GlobalDataOffset()) = GenericNaN();
*(float *)(globalData() + nan32GlobalDataOffset()) = GenericNaN(); *(float *)(globalData() + nan32GlobalDataOffset()) = GenericNaN();
} }
unsigned globalSimdVarIndexToGlobalDataOffset(unsigned i) const { unsigned globalVariableOffset() const {
JS_ASSERT(isFinishedWithModulePrologue()); static_assert((2 * sizeof(void*) + 2 * sizeof(double)) % sizeof(double) == 0,
JS_ASSERT(i < pod.numGlobalSimdVars_); "Global data should be aligned");
return globalSimdVarsOffset() + return 2 * sizeof(void*) + 2 * sizeof(double);
i * jit::Simd128DataSize;
} }
unsigned globalScalarVarIndexToGlobalDataOffset(unsigned i) const { unsigned globalVarIndexToGlobalDataOffset(unsigned i) const {
JS_ASSERT(isFinishedWithModulePrologue()); JS_ASSERT(isFinishedWithModulePrologue());
JS_ASSERT(i < pod.numGlobalScalarVars_); JS_ASSERT(i < pod.numGlobalVars_);
return globalSimdVarsOffset() + return globalVariableOffset() +
pod.numGlobalSimdVars_ * jit::Simd128DataSize +
i * sizeof(uint64_t); i * sizeof(uint64_t);
} }
void *globalScalarVarIndexToGlobalDatum(unsigned i) const { void *globalVarIndexToGlobalDatum(unsigned i) const {
JS_ASSERT(isFinished()); JS_ASSERT(isFinished());
return (void *)(globalData() + globalScalarVarIndexToGlobalDataOffset(i)); return (void *)(globalData() + globalVarIndexToGlobalDataOffset(i));
}
void *globalSimdVarIndexToGlobalDatum(unsigned i) const {
JS_ASSERT(isFinished());
return (void *)(globalData() + globalSimdVarIndexToGlobalDataOffset(i));
}
void *globalVarToGlobalDatum(const Global &g) const {
unsigned index = g.varIndex();
if (g.varInitKind() == Global::VarInitKind::InitConstant) {
return g.varInitNumLit().isSimd()
? globalSimdVarIndexToGlobalDatum(index)
: globalScalarVarIndexToGlobalDatum(index);
}
JS_ASSERT(g.varInitKind() == Global::VarInitKind::InitImport);
return IsSimdCoercion(g.varInitCoercion())
? globalSimdVarIndexToGlobalDatum(index)
: globalScalarVarIndexToGlobalDatum(index);
} }
uint8_t **globalDataOffsetToFuncPtrTable(unsigned globalDataOffset) const { uint8_t **globalDataOffsetToFuncPtrTable(unsigned globalDataOffset) const {
JS_ASSERT(isFinished()); JS_ASSERT(isFinished());

File diff suppressed because it is too large Load Diff

View File

@ -39,8 +39,8 @@ extern const JSFunctionSpec Int32x4Methods[];
static const char *laneNames[] = {"lane 0", "lane 1", "lane 2", "lane3"}; static const char *laneNames[] = {"lane 0", "lane 1", "lane 2", "lane3"};
template<typename V> template<typename V>
bool static bool
js::IsVectorObject(HandleValue v) IsVectorObject(HandleValue v)
{ {
if (!v.isObject()) if (!v.isObject())
return false; return false;
@ -56,24 +56,6 @@ js::IsVectorObject(HandleValue v)
return typeRepr.as<X4TypeDescr>().type() == V::type; return typeRepr.as<X4TypeDescr>().type() == V::type;
} }
template<typename V>
bool
js::ToSimdConstant(JSContext *cx, HandleValue v, jit::SimdConstant *out)
{
typedef typename V::Elem Elem;
if (!IsVectorObject<V>(v)) {
JS_ReportErrorNumber(cx, js_GetErrorMessage, nullptr, JSMSG_SIMD_NOT_A_VECTOR);
return false;
}
Elem *mem = reinterpret_cast<Elem *>(v.toObject().as<TypedObject>().typedMem());
*out = jit::SimdConstant::CreateX4(mem);
return true;
}
template bool js::ToSimdConstant<Int32x4>(JSContext *cx, HandleValue v, jit::SimdConstant *out);
template bool js::ToSimdConstant<Float32x4>(JSContext *cx, HandleValue v, jit::SimdConstant *out);
template<typename Elem> template<typename Elem>
static Elem static Elem
TypedObjectMemory(HandleValue v) TypedObjectMemory(HandleValue v)

View File

@ -166,12 +166,6 @@ struct Int32x4 {
template<typename V> template<typename V>
JSObject *CreateSimd(JSContext *cx, typename V::Elem *data); JSObject *CreateSimd(JSContext *cx, typename V::Elem *data);
template<typename V>
bool IsVectorObject(HandleValue v);
template<typename V>
bool ToSimdConstant(JSContext *cx, HandleValue v, jit::SimdConstant *out);
#define DECLARE_SIMD_FLOAT32X4_FUNCTION(Name, Func, Operands, Flags) \ #define DECLARE_SIMD_FLOAT32X4_FUNCTION(Name, Func, Operands, Flags) \
extern bool \ extern bool \
simd_float32x4_##Name(JSContext *cx, unsigned argc, Value *vp); simd_float32x4_##Name(JSContext *cx, unsigned argc, Value *vp);

View File

@ -1974,19 +1974,6 @@ EvalReturningScope(JSContext *cx, unsigned argc, jsval *vp)
return true; return true;
} }
static bool
IsSimdAvailable(JSContext *cx, unsigned argc, Value *vp)
{
CallArgs args = CallArgsFromVp(argc, vp);
#ifdef JS_CODEGEN_NONE
bool available = false;
#else
bool available = cx->jitSupportsSimd();
#endif
args.rval().set(BooleanValue(available));
return true;
}
static const JSFunctionSpecWithHelp TestingFunctions[] = { static const JSFunctionSpecWithHelp TestingFunctions[] = {
JS_FN_HELP("gc", ::GC, 0, 0, JS_FN_HELP("gc", ::GC, 0, 0,
"gc([obj] | 'compartment')", "gc([obj] | 'compartment')",
@ -2170,10 +2157,6 @@ static const JSFunctionSpecWithHelp TestingFunctions[] = {
" Returns whether asm.js compilation is currently available or whether it is disabled\n" " Returns whether asm.js compilation is currently available or whether it is disabled\n"
" (e.g., by the debugger)."), " (e.g., by the debugger)."),
JS_FN_HELP("isSimdAvailable", IsSimdAvailable, 0, 0,
"isSimdAvailable",
" Returns true if SIMD extensions are supported on this platform."),
JS_FN_HELP("getJitCompilerOptions", GetJitCompilerOptions, 0, 0, JS_FN_HELP("getJitCompilerOptions", GetJitCompilerOptions, 0, 0,
"getCompilerOptions()", "getCompilerOptions()",
"Return an object describing some of the JIT compiler options.\n"), "Return an object describing some of the JIT compiler options.\n"),

View File

@ -144,7 +144,7 @@ NativeRegExpMacroAssembler::GenerateCode(JSContext *cx)
#endif #endif
size_t frameSize = sizeof(FrameData) + num_registers_ * sizeof(void *); size_t frameSize = sizeof(FrameData) + num_registers_ * sizeof(void *);
frameSize = JS_ROUNDUP(frameSize + masm.framePushed(), ABIStackAlignment) - masm.framePushed(); frameSize = JS_ROUNDUP(frameSize + masm.framePushed(), StackAlignment) - masm.framePushed();
// Actually emit code to start a new stack frame. // Actually emit code to start a new stack frame.
masm.reserveStack(frameSize); masm.reserveStack(frameSize);

View File

@ -108,8 +108,8 @@ function assertAsmLinkFail(f)
assertEq(isAsmJSFunction(ret), false); assertEq(isAsmJSFunction(ret), false);
if (typeof ret === 'object') if (typeof ret === 'object')
for (var i in ret) for (f of ret)
assertEq(isAsmJSFunction(ret[i]), false); assertEq(isAsmJSFunction(f), false);
// Turn on warnings-as-errors // Turn on warnings-as-errors
var oldOpts = options("werror"); var oldOpts = options("werror");

View File

@ -1,657 +0,0 @@
// Load the shared asm.js test helpers (USE_ASM, asmCompile, asmLink,
// assertAsm* helpers).
load(libdir + "asm.js");

// Backing store handed to asm.js modules that take a heap argument.
var heap = new ArrayBuffer(4096);

// Set to true to see more JS debugging spew
const DEBUG = false;

// Bail out early (successfully) on builds/platforms without SIMD support.
if (!isSimdAvailable() || typeof SIMD === 'undefined') {
DEBUG && print("won't run tests as simd extensions aren't activated yet");
quit(0);
}

// Reusable asm.js source snippets: SIMD type imports and their operations.
const I32 = 'var i4 = glob.SIMD.int32x4;'
const I32A = 'var i4a = i4.add;'
const I32S = 'var i4s = i4.sub;'
const F32 = 'var f4 = glob.SIMD.float32x4;'
const F32A = 'var f4a = f4.add;'
const F32S = 'var f4s = f4.sub;'
const F32M = 'var f4m = f4.mul;'
const F32D = 'var f4d = f4.div;'
const FROUND = 'var f32=glob.Math.fround;'

const INT32_MAX = Math.pow(2, 31) - 1;
// |0 wraps INT32_MAX + 1 around to the most negative int32 value.
const INT32_MIN = INT32_MAX + 1 | 0;

// FFI object exposing the shell's assertEq to asm.js modules under test.
const assertEqFFI = {assertEq:assertEq};
// Assert that each lane (x, y, z, w) of the SIMD value |real| equals the
// corresponding element of the array |expected|.
function assertEqX4(real, expected) {
    var lanes = ['x', 'y', 'z', 'w'];
    for (var i = 0; i < lanes.length; i++)
        assertEq(real[lanes[i]], expected[i]);
}
// Compile and run one asm.js module per lane, built from |header| and |code|
// (which must define a local SIMD value named x), and check each int32x4 lane
// of x against |expected|.  Lane reads are coerced to int with |0.
function CheckI4(header, code, expected) {
    var fullHeader = USE_ASM + I32 + header;
    var lanes = ['x', 'y', 'z', 'w'];
    for (var idx = 0; idx < lanes.length; ++idx) {
        var moduleText = fullHeader + ';function f() {' + code + ';return x.' + lanes[idx] + '|0} return f';
        var run = asmLink(asmCompile('glob', moduleText), this);
        assertEq(run(), expected[idx]);
    }
}
// Float32x4 counterpart of CheckI4: compile one module per lane from |header|
// and |code| (which must define a local SIMD value named x).  Lane reads are
// coerced to double with unary +, and |expected| values are rounded to
// float32 precision before comparison.
function CheckF4(header, code, expected) {
    var fullHeader = USE_ASM + F32 + header;
    var lanes = ['x', 'y', 'z', 'w'];
    for (var idx = 0; idx < lanes.length; ++idx) {
        var moduleText = fullHeader + ';function f() {' + code + ';return +x.' + lanes[idx] + '} return f';
        var run = asmLink(asmCompile('glob', moduleText), this);
        assertEq(run(), Math.fround(expected[idx]));
    }
}
try {
// 1. Constructors
// 1.1 Compilation
assertAsmTypeFail('glob', USE_ASM + "var i4 = int32x4 ; return {}") ;
assertAsmTypeFail('glob', USE_ASM + "var i4 = glob.int32x4 ; return {}") ;
assertAsmTypeFail('glob', USE_ASM + "var i4 = glob.globglob.int32x4 ; return {}") ;
assertAsmTypeFail('glob', USE_ASM + "var i4 = glob.Math.int32x4 ; return {}") ;
assertAsmTypeFail('glob', USE_ASM + "var herd = glob.SIMD.ponyX4 ; return {}") ;
// 1.2 Linking
assertAsmLinkAlwaysFail(asmCompile('glob', USE_ASM + I32 + "return {}"), {});
assertAsmLinkFail(asmCompile('glob', USE_ASM + I32 + "return {}"), {SIMD: 42});
assertAsmLinkFail(asmCompile('glob', USE_ASM + I32 + "return {}"), {SIMD: Math.fround});
assertAsmLinkFail(asmCompile('glob', USE_ASM + I32 + "return {}"), {SIMD: {}});
assertAsmLinkFail(asmCompile('glob', USE_ASM + I32 + "return {}"), {SIMD: {int32x4: 42}});
assertAsmLinkFail(asmCompile('glob', USE_ASM + I32 + "return {}"), {SIMD: {int32x4: Math.fround}});
assertAsmLinkFail(asmCompile('glob', USE_ASM + I32 + "return {}"), {SIMD: {int32x4: new Array}});
assertAsmLinkFail(asmCompile('glob', USE_ASM + I32 + "return {}"), {SIMD: {int32x4: SIMD.float32x4}});
[Type, int32] = [TypedObject.StructType, TypedObject.int32];
var MyStruct = new Type({'x': int32, 'y': int32, 'z': int32, 'w': int32});
assertAsmLinkFail(asmCompile('glob', USE_ASM + I32 + "return {}"), {SIMD: {int32x4: MyStruct}});
assertAsmLinkFail(asmCompile('glob', USE_ASM + I32 + "return {}"), {SIMD: {int32x4: new MyStruct}});
assertEq(asmLink(asmCompile('glob', USE_ASM + I32 + "function f() {} return f"), {SIMD:{int32x4: SIMD.int32x4}})(), undefined);
assertAsmLinkFail(asmCompile('glob', USE_ASM + F32 + "return {}"), {SIMD: {float32x4: 42}});
assertAsmLinkFail(asmCompile('glob', USE_ASM + F32 + "return {}"), {SIMD: {float32x4: Math.fround}});
assertAsmLinkFail(asmCompile('glob', USE_ASM + F32 + "return {}"), {SIMD: {float32x4: new Array}});
assertEq(asmLink(asmCompile('glob', USE_ASM + F32 + "function f() {} return f"), {SIMD:{float32x4: SIMD.float32x4}})(), undefined);
// 1.3 Correctness
// 1.3.1 Local variables declarations
assertAsmTypeFail('glob', USE_ASM + "function f() {var x=Int32x4(1,2,3,4);} return f");
assertAsmTypeFail('glob', USE_ASM + I32 + "function f() {var x=i4;} return f");
assertAsmTypeFail('glob', USE_ASM + I32 + "function f() {var x=i4();} return f");
assertAsmTypeFail('glob', USE_ASM + I32 + "function f() {var x=i4(1);} return f");
assertAsmTypeFail('glob', USE_ASM + I32 + "function f() {var x=i4(1, 2);} return f");
assertAsmTypeFail('glob', USE_ASM + I32 + "function f() {var x=i4(1, 2, 3);} return f");
assertAsmTypeFail('glob', USE_ASM + I32 + "function f() {var x=i4(1, 2, 3, 4.0);} return f");
assertAsmTypeFail('glob', USE_ASM + I32 + "function f() {var x=i4(1, 2.0, 3, 4);} return f");
assertAsmTypeFail('glob', USE_ASM + I32 + I32A + "function f() {var x=i4a(1,2,3,4);} return f");
assertAsmTypeFail('glob', USE_ASM + I32 + "function f() {var x=i4(1,2,3,2+2|0);} return f");
assertAsmTypeFail('glob', USE_ASM + I32 + "function f() {var x=i4(1,2,3," + (INT32_MIN - 1) + ");} return f");
assertAsmTypeFail('glob', USE_ASM + I32 + "function f() {var x=i4(i4(1,2,3,4));} return f");
assertEq(asmLink(asmCompile('glob', USE_ASM + I32 + "function f() {var x=i4(1,2,3,4);} return f"), this)(), undefined);
assertEq(asmLink(asmCompile('glob', USE_ASM + I32 + "function f() {var x=i4(1,2,3," + (INT32_MAX + 1) + ");} return f"), this)(), undefined);
assertAsmTypeFail('glob', USE_ASM + F32 + "function f() {var x=f4;} return f");
assertAsmTypeFail('glob', USE_ASM + F32 + "function f() {var x=f4();} return f");
assertAsmTypeFail('glob', USE_ASM + F32 + "function f() {var x=f4(1,2,3);} return f");
assertAsmTypeFail('glob', USE_ASM + F32 + "function f() {var x=f4(1.,2.,3.);} return f");
assertAsmTypeFail('glob', USE_ASM + F32 + "function f() {var x=f4(1.,2.,f32(3.),4.);} return f");
assertEq(asmLink(asmCompile('glob', USE_ASM + F32 + "function f() {var x=f4(1.,2.,3.,4.);} return f"), this)(), undefined);
assertEq(asmLink(asmCompile('glob', USE_ASM + F32 + "function f() {var x=f4(1,2,3,4);} return f"), this)(), undefined);
assertEq(asmLink(asmCompile('glob', USE_ASM + F32 + "function f() {var x=f4(1,2,3," + (INT32_MIN - 1) + ");} return f"), this)(), undefined);
assertEq(asmLink(asmCompile('glob', USE_ASM + F32 + "function f() {var x=f4(1,2,3," + (INT32_MAX + 1) + ");} return f"), this)(), undefined);
// Places where NumLit can creep in
assertAsmTypeFail('glob', USE_ASM + I32 + "function f(i) {i=i|0; var z=0; switch(i|0) {case i4(1,2,3,4): z=1; break; default: z=2; break;}} return f");
assertAsmTypeFail('glob', USE_ASM + I32 + "function f(i) {i=i|0; var z=0; return i * i4(1,2,3,4) | 0;} return f");
assertAsmTypeFail('glob', USE_ASM + I32 + "function f(i) {var x=i4(1,2,3,i4(4,5,6,7))} return f");
assertAsmTypeFail('glob', USE_ASM + I32 + F32 + "function f(i) {var x=i4(1,2,3,f4(4,5,6,7))} return f");
assertAsmTypeFail('glob', USE_ASM + I32 + F32 + "function f(i) {var x=f4(1,2,3,i4(4,5,6,7))} return f");
assertEqX4(asmLink(asmCompile('glob', USE_ASM + I32 + "function f() {return i4(1,2,3,4);} return f"), this)(), [1, 2, 3, 4]);
assertEqX4(asmLink(asmCompile('glob', USE_ASM + I32 + "function f() {return i4(i4(1,2,3,4));} return f"), this)(), [1, 2, 3, 4]);
assertEqX4(asmLink(asmCompile('glob', USE_ASM + F32 + "function f() {return f4(1,2,3,4);} return f"), this)(), [1, 2, 3, 4]);
assertEqX4(asmLink(asmCompile('glob', USE_ASM + F32 + "function f() {return f4(f4(1,2,3,4));} return f"), this)(), [1, 2, 3, 4]);
// Int32x4 ctor should accept int?
assertEqX4(asmLink(asmCompile('glob', 'ffi', 'heap', USE_ASM + I32 + "var i32=new glob.Int32Array(heap); function f(i) {i=i|0; return i4(i4(i32[i>>2], 2, 3, 4))} return f"), this, {}, new ArrayBuffer(0x10000))(0x20000), [0, 2, 3, 4]);
// Float32x4 ctor should accept floatish, i.e. float || float? || floatish
assertEqX4(asmLink(asmCompile('glob', 'ffi', 'heap', USE_ASM + F32 + "var f32=new glob.Float32Array(heap); function f(i) {i=i|0; return f4(f4(f32[i>>2], 2, 3, 4))} return f"), this, {}, new ArrayBuffer(0x10000))(0x20000), [NaN, 2, 3, 4]);
assertEqX4(asmLink(asmCompile('glob', USE_ASM + F32 + "var f32=glob.Math.fround; function f(i) {i=i|0; return f4(f4(f32(1) + f32(2), 2, 3, 4))} return f"), this, {}, new ArrayBuffer(0x10000))(0x20000), [3, 2, 3, 4]);
// 1.3.2 Reading values out of lanes
assertAsmTypeFail('glob', USE_ASM + "function f() {var x=1; return x.y | 0;} return f");
assertAsmTypeFail('glob', USE_ASM + "function f() {var x=1; return (x + x).y | 0;} return f");
assertAsmTypeFail('glob', USE_ASM + "function f() {var x=1.; return x.y | 0;} return f");
assertAsmTypeFail('glob', USE_ASM + "var f32=glob.Math.fround;" + I32 + "function f() {var x=f32(1); return x.y | 0;} return f");
assertAsmTypeFail('glob', USE_ASM + I32 + "function f() {var x=i4(1,2,3,4); return x.length|0;} return f");
assertAsmTypeFail('glob', USE_ASM + I32 + "function f() {var x=i4(1,2,3,4).y; return x|0;} return f");
CheckI4('', 'var x=i4(0,0,0,0)', [0,0,0,0]);
CheckI4('', 'var x=i4(1,2,3,4)', [1,2,3,4]);
CheckI4('', 'var x=i4(' + INT32_MIN + ',2,3,' + INT32_MAX + ')', [INT32_MIN,2,3,INT32_MAX]);
CheckI4('', 'var x=i4(1,2,3,4); var y=i4(5,6,7,8)', [1,2,3,4]);
CheckI4('', 'var a=1; var b=i4(9,8,7,6); var c=13.37; var x=i4(1,2,3,4); var y=i4(5,6,7,8)', [1,2,3,4]);
CheckI4('', 'var y=i4(5,6,7,8); var x=i4(1,2,3,4)', [1,2,3,4]);
CheckF4('', 'var x=f4(' + INT32_MAX + ', 2, 3, ' + INT32_MIN + ')', [INT32_MAX, 2, 3, INT32_MIN]);
CheckF4('', 'var x=f4(' + (INT32_MAX + 1) + ', 2, 3, 4)', [INT32_MAX + 1, 2, 3, 4]);
CheckF4('', 'var x=f4(1.3, 2.4, 3.5, 98.76)', [1.3, 2.4, 3.5, 98.76]);
CheckF4('', 'var x=f4(13.37, 2., 3., -0)', [13.37, 2, 3, -0]);
// 1.3.3. Variable assignments
assertAsmTypeFail('glob', USE_ASM + I32 + I32A + "function f() {var x=i4(1,2,3,4); x=i4();} return f");
assertAsmTypeFail('glob', USE_ASM + I32 + I32A + "function f() {var x=i4(1,2,3,4); x=i4(1);} return f");
assertAsmTypeFail('glob', USE_ASM + I32 + I32A + "function f() {var x=i4(1,2,3,4); x=i4(1, 2);} return f");
assertAsmTypeFail('glob', USE_ASM + I32 + I32A + "function f() {var x=i4(1,2,3,4); x=i4(1, 2, 3);} return f");
assertAsmTypeFail('glob', USE_ASM + I32 + I32A + "function f() {var x=i4(1,2,3,4); x=i4(1.0, 2, 3, 4);} return f");
assertAsmTypeFail('glob', USE_ASM + I32 + I32A + "function f() {var x=i4(1,2,3,4); x=i4(1, 2.0, 3, 4);} return f");
assertAsmTypeFail('glob', USE_ASM + I32 + I32A + "function f() {var x=i4(1,2,3,4); x=i4(1, 2, 3.0, 4);} return f");
assertAsmTypeFail('glob', USE_ASM + I32 + I32A + "function f() {var x=i4(1,2,3,4); x=i4(1, 2, 3, 4.0);} return f");
assertAsmTypeFail('glob', USE_ASM + I32 + I32A + "function f() {var x=i4(1,2,3,4); x=i4(1, 2, 3, x);} return f");
assertAsmTypeFail('glob', USE_ASM + I32 + I32A + "function f() {var x=i4(1,2,3,4); var c=4.0; x=i4(1, 2, 3, +c);} return f");
assertAsmTypeFail('glob', 'ffi', 'heap', USE_ASM + I32 + "var i32=new glob.Int32Array(heap); function f() {var x=i4(1,2,3,4); i32[0] = x;} return f");
assertAsmTypeFail('glob', 'ffi', 'heap', USE_ASM + I32 + "var i32=new glob.Int32Array(heap); function f() {var x=i4(1,2,3,4); x = i32[0];} return f");
assertAsmTypeFail('glob', 'ffi', 'heap', USE_ASM + F32 + "var f32=new glob.Float32Array(heap); function f() {var x=f4(1,2,3,4); f32[0] = x;} return f");
assertAsmTypeFail('glob', 'ffi', 'heap', USE_ASM + F32 + "var f32=new glob.Int32Array(heap); function f() {var x=f4(1,2,3,4); x = f32[0];} return f");
CheckI4('', 'var x=i4(1,2,3,4); x=i4(5,6,7,8)', [5, 6, 7, 8]);
CheckI4('', 'var x=i4(1,2,3,4); var c=6; x=i4(5,c|0,7,8)', [5, 6, 7, 8]);
CheckI4('', 'var x=i4(8,7,6,5); x=i4(x.w|0,x.z|0,x.y|0,x.x|0)', [5, 6, 7, 8]);
CheckF4(FROUND, 'var x=f4(1,2,3,4); var y=f32(7.); x=f4(5,6,y,8)', [5, 6, 7, 8]);
CheckF4(FROUND, 'var x=f4(1,2,3,4); x=f4(f32(5.),6.,7.,8.)', [5, 6, 7, 8]);
CheckF4(FROUND, 'var x=f4(1,2,3,4); x=f4(f32(5),6,7,8)', [5, 6, 7, 8]);
CheckF4(FROUND, 'var x=f4(1,2,3,4); x=f4(f32(5.),f32(6.),f32(7.),f32(8.))', [5, 6, 7, 8]);
CheckF4('', 'var x=f4(1.,2.,3.,4.); x=f4(5.,6.,7.,8.)', [5, 6, 7, 8]);
CheckF4('', 'var x=f4(1.,2.,3.,4.); x=f4(1,2,3,4)', [1, 2, 3, 4]);
CheckF4(FROUND, 'var x=f4(1.,2.,3.,4.); var y=f32(7.); x=f4(9, 4, 2, 1)', [9, 4, 2, 1]);
CheckF4('', 'var x=f4(8.,7.,6.,5.); x=f4(x.w, x.z, x.y, x.x)', [5, 6, 7, 8]);
// 1.3.4 Return values
assertAsmTypeFail('glob', USE_ASM + I32 + "function f() {var x=1; return i4(x)} return f");
assertAsmTypeFail('glob', USE_ASM + I32 + "function f() {var x=1; return i4(x + x)} return f");
assertAsmTypeFail('glob', USE_ASM + I32 + "function f() {var x=1.; return i4(x)} return f");
assertAsmTypeFail('glob', USE_ASM + I32 + FROUND + "function f() {var x=f32(1.); return i4(x)} return f");
assertEqX4(asmLink(asmCompile('glob', USE_ASM + I32 + "function f() {var x=i4(1,2,3,4); return i4(x)} return f"), this)(), [1,2,3,4]);
assertEqX4(asmLink(asmCompile('glob', USE_ASM + F32 + "function f() {var x=f4(1,2,3,4); return f4(x)} return f"), this)(), [1,2,3,4]);
// 1.3.5 Coerce and pass arguments
assertAsmTypeFail('glob', USE_ASM + I32 + "function f(x) {x=i4();} return f");
assertAsmTypeFail('glob', USE_ASM + I32 + "function f(x) {x=i4(1,2,3,4);} return f");
assertAsmTypeFail('glob', USE_ASM + I32 + "function f(x,y) {x=i4(y);y=+y} return f");
var i32x4 = SIMD.int32x4(1, 3, 3, 7);
assertEq(asmLink(asmCompile('glob', USE_ASM + I32 + "function f(x) {x=i4(x)} return f"), this)(i32x4), undefined);
assertEqX4(asmLink(asmCompile('glob', USE_ASM + I32 + "function f(x) {x=i4(x); return i4(x);} return f"), this)(i32x4), [1,3,3,7]);
var f32x4 = SIMD.float32x4(13.37, 42.42, -0, NaN);
assertEq(asmLink(asmCompile('glob', USE_ASM + F32 + "function f(x) {x=f4(x)} return f"), this)(f32x4), undefined);
assertEqX4(asmLink(asmCompile('glob', USE_ASM + F32 + "function f(x) {x=f4(x); return f4(x);} return f"), this)(f32x4),
[Math.fround(13.37), Math.fround(42.42), -0, NaN]);
// Invoke |f| with the remaining arguments and assert that the call throws a
// TypeError.  Fails if |f| returns normally or throws anything else.
function assertCaught(f) {
    var threw = false;
    var extraArgs = Array.prototype.slice.call(arguments, 1);
    try {
        f.apply(null, extraArgs);
    } catch (e) {
        DEBUG && print('Assert caught: ', e, '\n', e.stack);
        assertEq(e instanceof TypeError, true);
        threw = true;
    }
    assertEq(threw, true);
}
var f = asmLink(asmCompile('glob', USE_ASM + F32 + "function f(x) {x=f4(x); return f4(x);} return f"), this);
assertCaught(f);
assertCaught(f, 1);
assertCaught(f, {});
assertCaught(f, "I sincerely am a SIMD typed object.");
assertCaught(f, SIMD.int32x4(1,2,3,4));
var f = asmLink(asmCompile('glob', USE_ASM + I32 + "function f(x) {x=i4(x); return i4(x);} return f"), this);
assertCaught(f);
assertCaught(f, 1);
assertCaught(f, {});
assertCaught(f, "I sincerely am a SIMD typed object.");
assertCaught(f, SIMD.float32x4(4,3,2,1));
// 1.3.6 Globals
// 1.3.6.1 Local globals
// Read
assertAsmTypeFail('glob', USE_ASM + I32 + "var g=i4(1,2,3,4); function f() {var x=4; x=g|0;} return f");
assertAsmTypeFail('glob', USE_ASM + I32 + "var g=i4(1,2,3,4); function f() {var x=4.; x=+g;} return f");
assertAsmTypeFail('glob', USE_ASM + I32 + "var g=i4(1,2,3,4); var f32=glob.Math.fround; function f() {var x=f32(4.); x=f32(g);} return f");
assertAsmTypeFail('glob', USE_ASM + F32 + "var g=f4(1., 2., 3., 4.); function f() {var x=4; x=g|0;} return f");
assertAsmTypeFail('glob', USE_ASM + F32 + "var g=f4(1., 2., 3., 4.); function f() {var x=4.; x=+g;} return f");
assertAsmTypeFail('glob', USE_ASM + F32 + "var g=f4(1., 2., 3., 4.); var f32=glob.Math.fround; function f() {var x=f32(4.); x=f32(g);} return f");
assertAsmTypeFail('glob', USE_ASM + F32 + I32 + "var g=f4(1., 2., 3., 4.); function f() {var x=i4(1,2,3,4); x=i4(g);} return f");
assertAsmTypeFail('glob', USE_ASM + I32 + F32 + "var g=i4(1,2,3,4); function f() {var x=f4(1.,2.,3.,4.); x=f4(g);} return f");
assertAsmTypeFail('glob', USE_ASM + I32 + "var g=0; function f() {var x=i4(1,2,3,4); x=g|0;} return f");
assertAsmTypeFail('glob', USE_ASM + I32 + "var g=0.; function f() {var x=i4(1,2,3,4); x=+g;} return f");
assertAsmTypeFail('glob', USE_ASM + I32 + "var f32=glob.Math.fround; var g=f32(0.); function f() {var x=i4(1,2,3,4); x=f32(g);} return f");
assertAsmTypeFail('glob', USE_ASM + F32 + "var g=0; function f() {var x=f4(0.,0.,0.,0.); x=g|0;} return f");
assertAsmTypeFail('glob', USE_ASM + F32 + "var g=0.; function f() {var x=f4(0.,0.,0.,0.); x=+g;} return f");
assertAsmTypeFail('glob', USE_ASM + F32 + "var f32=glob.Math.fround; var g=f32(0.); function f() {var x=f4(0.,0.,0.,0.); x=f32(g);} return f");
CheckI4('var x=i4(1,2,3,4)', '', [1, 2, 3, 4]);
CheckI4('var _=42; var h=i4(5,5,5,5); var __=13.37; var x=i4(4,7,9,2);', '', [4,7,9,2]);
CheckF4('var x=f4(1.,2.,3.,4.)', '', [1, 2, 3, 4]);
CheckF4('var _=42; var h=f4(5.,5.,5.,5.); var __=13.37; var x=f4(4.,13.37,9.,-0.);', '', [4, 13.37, 9, -0]);
CheckF4('var x=f4(1,2,3,4)', '', [1, 2, 3, 4]);
// Write
assertAsmTypeFail('glob', USE_ASM + I32 + "var g=i4(1,2,3,4); function f() {var x=4; g=x|0;} return f");
assertAsmTypeFail('glob', USE_ASM + I32 + "var g=i4(1,2,3,4); function f() {var x=4.; g=+x;} return f");
assertAsmTypeFail('glob', USE_ASM + I32 + "var g=i4(1,2,3,4); var f32=glob.Math.fround; function f() {var x=f32(4.); g=f32(x);} return f");
assertAsmTypeFail('glob', USE_ASM + F32 + "var g=f4(1., 2., 3., 4.); function f() {var x=4; g=x|0;} return f");
assertAsmTypeFail('glob', USE_ASM + F32 + "var g=f4(1., 2., 3., 4.); function f() {var x=4.; g=+x;} return f");
assertAsmTypeFail('glob', USE_ASM + F32 + "var g=f4(1., 2., 3., 4.); var f32=glob.Math.fround; function f() {var x=f32(4.); g=f32(x);} return f");
assertAsmTypeFail('glob', USE_ASM + F32 + I32 + "var g=f4(1., 2., 3., 4.); function f() {var x=i4(1,2,3,4); g=i4(x);} return f");
assertAsmTypeFail('glob', USE_ASM + F32 + I32 + "var g=f4(1., 2., 3., 4.); function f() {var x=i4(1,2,3,4); g=f4(x);} return f");
assertAsmTypeFail('glob', USE_ASM + I32 + F32 + "var g=i4(1,2,3,4); function f() {var x=f4(1.,2.,3.,4.); g=f4(x);} return f");
assertAsmTypeFail('glob', USE_ASM + I32 + F32 + "var g=i4(1,2,3,4); function f() {var x=f4(1.,2.,3.,4.); g=i4(x);} return f");
CheckI4('var x=i4(0,0,0,0);', 'x=i4(1,2,3,4)', [1,2,3,4]);
CheckF4('var x=f4(0.,0.,0.,0.);', 'x=f4(5.,3.,4.,2.)', [5,3,4,2]);
CheckI4('var x=i4(0,0,0,0); var y=42; var z=3.9; var w=13.37', 'x=i4(1,2,3,4); y=24; z=4.9; w=23.10;', [1,2,3,4]);
CheckF4('var x=f4(0,0,0,0); var y=42; var z=3.9; var w=13.37', 'x=f4(1,2,3,4); y=24; z=4.9; w=23.10;', [1,2,3,4]);
// 1.3.6.2 Imported globals
// Read
var int32x4 = asmLink(asmCompile('glob', 'ffi', USE_ASM + I32 + "var g=i4(ffi.g); function f() {return i4(g)} return f"), this, {g: SIMD.int32x4(1,2,3,4)})();
assertEq(int32x4.x, 1);
assertEq(int32x4.y, 2);
assertEq(int32x4.z, 3);
assertEq(int32x4.w, 4);
for (var v of [1, {}, "totally legit SIMD variable", SIMD.float32x4(1,2,3,4)])
assertCaught(asmCompile('glob', 'ffi', USE_ASM + I32 + "var g=i4(ffi.g); function f() {return i4(g)} return f"), this, {g: v});
var float32x4 = asmLink(asmCompile('glob', 'ffi', USE_ASM + F32 + "var g=f4(ffi.g); function f() {return f4(g)} return f"), this, {g: SIMD.float32x4(1,2,3,4)})();
assertEq(float32x4.x, 1);
assertEq(float32x4.y, 2);
assertEq(float32x4.z, 3);
assertEq(float32x4.w, 4);
for (var v of [1, {}, "totally legit SIMD variable", SIMD.int32x4(1,2,3,4)])
assertCaught(asmCompile('glob', 'ffi', USE_ASM + F32 + "var g=f4(ffi.g); function f() {return f4(g)} return f"), this, {g: v});
// Write
var int32x4 = asmLink(asmCompile('glob', 'ffi', USE_ASM + I32 + "var g=i4(ffi.g); function f() {g=i4(4,5,6,7); return i4(g)} return f"), this, {g: SIMD.int32x4(1,2,3,4)})();
assertEq(int32x4.x, 4);
assertEq(int32x4.y, 5);
assertEq(int32x4.z, 6);
assertEq(int32x4.w, 7);
var float32x4 = asmLink(asmCompile('glob', 'ffi', USE_ASM + F32 + "var g=f4(ffi.g); function f() {g=f4(4.,5.,6.,7.); return f4(g)} return f"), this, {g: SIMD.float32x4(1,2,3,4)})();
assertEq(float32x4.x, 4);
assertEq(float32x4.y, 5);
assertEq(float32x4.z, 6);
assertEq(float32x4.w, 7);
// 2. SIMD operations
// 2.1 Compilation
assertAsmTypeFail('glob', USE_ASM + "var add = int32x4.add; return {}");
assertAsmTypeFail('glob', USE_ASM + I32A + I32 + "return {}");
assertAsmTypeFail('glob', USE_ASM + "var g = 3; var add = g.add; return {}");
assertAsmTypeFail('glob', USE_ASM + I32 + "var func = i4.doTheHarlemShake; return {}");
assertAsmTypeFail('glob', USE_ASM + I32 + "var div = i4.div; return {}");
assertAsmTypeFail('glob', USE_ASM + "var f32 = glob.Math.fround; var i4a = f32.add; return {}");
// 2.2 Linking
assertAsmLinkAlwaysFail(asmCompile('glob', USE_ASM + I32 + I32A + "function f() {} return f"), {});
assertAsmLinkAlwaysFail(asmCompile('glob', USE_ASM + I32 + I32A + "function f() {} return f"), {SIMD: Math.fround});
var oldInt32x4Add = SIMD.int32x4.add;
var code = asmCompile('glob', USE_ASM + I32 + I32A + "return {}");
for (var v of [42, Math.fround, SIMD.float32x4.add, function(){}, SIMD.int32x4.mul]) {
SIMD.int32x4.add = v;
assertAsmLinkFail(code, {SIMD: {int32x4: SIMD.int32x4}});
}
SIMD.int32x4.add = oldInt32x4Add; // finally replace the add function with the original one
assertEq(asmLink(asmCompile('glob', USE_ASM + I32 + I32A + "function f() {} return f"), {SIMD: {int32x4: SIMD.int32x4}})(), undefined);
// 2.3. Binary arithmetic operations
// 2.3.1 Additions
assertAsmTypeFail('glob', USE_ASM + I32 + I32A + "function f() {var x=i4(1,2,3,4); x=i4a();} return f");
assertAsmTypeFail('glob', USE_ASM + I32 + I32A + "function f() {var x=i4(1,2,3,4); x=i4a(x);} return f");
assertAsmTypeFail('glob', USE_ASM + I32 + I32A + "function f() {var x=i4(1,2,3,4); x=i4a(x, x, x);} return f");
assertAsmTypeFail('glob', USE_ASM + I32 + I32A + "function f() {var x=i4(1,2,3,4); x=i4a(13, 37);} return f");
assertAsmTypeFail('glob', USE_ASM + I32 + I32A + "function f() {var x=i4(1,2,3,4); x=i4a(23.10, 19.89);} return f");
assertAsmTypeFail('glob', USE_ASM + I32 + I32A + "function f() {var x=i4(1,2,3,4); x=i4a(x, 42);} return f");
assertAsmTypeFail('glob', USE_ASM + I32 + I32A + "function f() {var x=i4(1,2,3,4); x=i4a(x, 13.37);} return f");
assertAsmTypeFail('glob', USE_ASM + I32 + I32A + "function f() {var x=i4(1,2,3,4); var y=4; x=i4a(x, y);} return f");
assertAsmTypeFail('glob', USE_ASM + I32 + I32A + "function f() {var x=i4(0,0,0,0); var y=4; x=i4a(y, y);} return f");
assertAsmTypeFail('glob', USE_ASM + I32 + I32A + "function f() {var x=i4(0,0,0,0); var y=4; y=i4a(x, x);} return f");
assertAsmTypeFail('glob', USE_ASM + I32 + F32 + I32A + "function f() {var x=i4(0,0,0,0); var y=f4(4,3,2,1); x=i4a(x, y);} return f");
assertAsmTypeFail('glob', USE_ASM + I32 + F32 + I32A + "function f() {var x=i4(0,0,0,0); var y=f4(4,3,2,1); y=i4a(x, y);} return f");
assertAsmTypeFail('glob', USE_ASM + I32 + F32 + I32A + "function f() {var x=i4(0,0,0,0); var y=f4(4,3,2,1); y=i4a(x, x);} return f");
assertAsmTypeFail('glob', USE_ASM + I32 + F32 + F32A + "function f() {var x=i4(0,0,0,0); var y=f4(4,3,2,1); y=f4a(x, x);} return f");
assertAsmTypeFail('glob', USE_ASM + I32 + F32 + F32A + "function f() {var x=i4(0,0,0,0); var y=f4(4,3,2,1); y=f4a(x, y);} return f");
assertAsmTypeFail('glob', USE_ASM + I32 + F32 + F32A + "function f() {var x=i4(0,0,0,0); var y=f4(4,3,2,1); x=f4a(y, y);} return f");
assertAsmTypeFail('glob', USE_ASM + I32 + I32A + 'function f() {var x=i4(1,2,3,4); var y=0; y=i4(x,x)|0} return f');
assertAsmTypeFail('glob', USE_ASM + I32 + I32A + 'function f() {var x=i4(1,2,3,4); var y=0.; y=+i4(x,x)} return f');
CheckI4(I32A, 'var z=i4(1,2,3,4); var y=i4(0,1,0,3); var x=i4(0,0,0,0); x=i4a(z,y)', [1,3,3,7]);
CheckI4(I32A, 'var x=i4(2,3,4,5); var y=i4(0,1,0,3); x=i4a(x,y)', [2,4,4,8]);
CheckI4(I32A, 'var x=i4(1,2,3,4); x=i4a(x,x)', [2,4,6,8]);
CheckI4(I32A, 'var x=i4(' + INT32_MAX + ',2,3,4); var y=i4(1,1,0,3); x=i4a(x,y)', [INT32_MIN,3,3,7]);
CheckI4(I32A, 'var x=i4(' + INT32_MAX + ',2,3,4); var y=i4(1,1,0,3); x=i4(i4a(x,y))', [INT32_MIN,3,3,7]);
CheckF4(F32A, 'var x=f4(1,2,3,4); x=f4a(x,x)', [2,4,6,8]);
CheckF4(F32A, 'var x=f4(1,2,3,4); var y=f4(4,3,5,2); x=f4a(x,y)', [5,5,8,6]);
CheckF4(F32A, 'var x=f4(13.37,2,3,4); var y=f4(4,3,5,2); x=f4a(x,y)', [Math.fround(13.37) + 4,5,8,6]);
CheckF4(F32A, 'var x=f4(13.37,2,3,4); var y=f4(4,3,5,2); x=f4(f4a(x,y))', [Math.fround(13.37) + 4,5,8,6]);
// 2.3.2. Subtracts
CheckI4(I32S, 'var x=i4(1,2,3,4); var y=i4(-1,1,0,2); x=i4s(x,y)', [2,1,3,2]);
CheckI4(I32S, 'var x=i4(5,4,3,2); var y=i4(1,2,3,4); x=i4s(x,y)', [4,2,0,-2]);
CheckI4(I32S, 'var x=i4(1,2,3,4); x=i4s(x,x)', [0,0,0,0]);
CheckI4(I32S, 'var x=i4(' + INT32_MIN + ',2,3,4); var y=i4(1,1,0,3); x=i4s(x,y)', [INT32_MAX,1,3,1]);
CheckI4(I32S, 'var x=i4(' + INT32_MIN + ',2,3,4); var y=i4(1,1,0,3); x=i4(i4s(x,y))', [INT32_MAX,1,3,1]);
CheckF4(F32S, 'var x=f4(1,2,3,4); x=f4s(x,x)', [0,0,0,0]);
CheckF4(F32S, 'var x=f4(1,2,3,4); var y=f4(4,3,5,2); x=f4s(x,y)', [-3,-1,-2,2]);
CheckF4(F32S, 'var x=f4(13.37,2,3,4); var y=f4(4,3,5,2); x=f4s(x,y)', [Math.fround(13.37) - 4,-1,-2,2]);
CheckF4(F32S, 'var x=f4(13.37,2,3,4); var y=f4(4,3,5,2); x=f4(f4s(x,y))', [Math.fround(13.37) - 4,-1,-2,2]);
// 2.3.3. Multiplications / Divisions
assertAsmTypeFail('glob', USE_ASM + I32 + "var f4m=i4.mul; function f() {} return f");
assertAsmTypeFail('glob', USE_ASM + I32 + "var f4d=i4.div; function f() {} return f");
CheckF4(F32M, 'var x=f4(1,2,3,4); x=f4m(x,x)', [1,4,9,16]);
CheckF4(F32M, 'var x=f4(1,2,3,4); var y=f4(4,3,5,2); x=f4m(x,y)', [4,6,15,8]);
CheckF4(F32M, 'var x=f4(13.37,2,3,4); var y=f4(4,3,5,2); x=f4m(x,y)', [Math.fround(13.37) * 4,6,15,8]);
CheckF4(F32M, 'var x=f4(13.37,2,3,4); var y=f4(4,3,5,2); x=f4(f4m(x,y))', [Math.fround(13.37) * 4,6,15,8]);
// Test NaN
var f32x4 = SIMD.float32x4(0, NaN, -0, NaN);
var another = SIMD.float32x4(NaN, -1, -0, NaN);
assertEqX4(asmLink(asmCompile('glob', USE_ASM + F32 + F32M + "function f(x, y) {x=f4(x); y=f4(y); x=f4m(x,y); return f4(x);} return f"), this)(f32x4, another), [NaN, NaN, 0, NaN]);
CheckF4(F32D, 'var x=f4(1,2,3,4); x=f4d(x,x)', [1,1,1,1]);
CheckF4(F32D, 'var x=f4(1,2,3,4); var y=f4(4,3,5,2); x=f4d(x,y)', [1/4,2/3,3/5,2]);
CheckF4(F32D, 'var x=f4(13.37,1,1,4); var y=f4(4,0,-0.,2); x=f4d(x,y)', [Math.fround(13.37) / 4,+Infinity,-Infinity,2]);
// Test NaN
var f32x4 = SIMD.float32x4(0, 0, -0, NaN);
var another = SIMD.float32x4(0, -0, 0, 0);
assertEqX4(asmLink(asmCompile('glob', USE_ASM + F32 + F32D + "function f(x,y) {x=f4(x); y=f4(y); x=f4d(x,y); return f4(x);} return f"), this)(f32x4, another), [NaN, NaN, NaN, NaN]);
// Dead code
assertEqX4(asmLink(asmCompile('glob', USE_ASM + I32 + 'function f(){var x=i4(1,2,3,4); return i4(x); x=i4(5,6,7,8); return i4(x);} return f'), this)(), [1, 2, 3, 4]);
assertEqX4(asmLink(asmCompile('glob', USE_ASM + I32 + 'function f(){var x=i4(1,2,3,4); var c=0; return i4(x); c=x.x|0; return i4(x);} return f'), this)(), [1, 2, 3, 4]);
assertEqX4(asmLink(asmCompile('glob', USE_ASM + I32 + I32A + 'function f(){var x=i4(1,2,3,4); var c=0; return i4(x); x=i4a(x,x); return i4(x);} return f'), this)(), [1, 2, 3, 4]);
assertEqX4(asmLink(asmCompile('glob', USE_ASM + I32 + I32S + 'function f(){var x=i4(1,2,3,4); var c=0; return i4(x); x=i4s(x,x); return i4(x);} return f'), this)(), [1, 2, 3, 4]);
// 3. Function calls
// 3.1. No math builtins
assertAsmTypeFail('glob', USE_ASM + I32 + "var fround=glob.Math.fround; function f() {var x=i4(1,2,3,4); return +fround(x);} return f");
assertAsmTypeFail('glob', USE_ASM + I32 + "var sin=glob.Math.sin; function f() {var x=i4(1,2,3,4); return +sin(x);} return f");
assertAsmTypeFail('glob', USE_ASM + I32 + "var ceil=glob.Math.ceil; function f() {var x=i4(1,2,3,4); return +ceil(x);} return f");
assertAsmTypeFail('glob', USE_ASM + I32 + "var pow=glob.Math.pow; function f() {var x=i4(1,2,3,4); return +pow(1.0, x);} return f");
assertAsmTypeFail('glob', USE_ASM + I32 + "var fround=glob.Math.fround; function f() {var x=i4(1,2,3,4); x=i4(fround(3));} return f");
assertAsmTypeFail('glob', USE_ASM + I32 + "var sin=glob.Math.sin; function f() {var x=i4(1,2,3,4); x=i4(sin(3.0));} return f");
assertAsmTypeFail('glob', USE_ASM + I32 + "var ceil=glob.Math.sin; function f() {var x=i4(1,2,3,4); x=i4(ceil(3.0));} return f");
assertAsmTypeFail('glob', USE_ASM + I32 + "var pow=glob.Math.pow; function f() {var x=i4(1,2,3,4); x=i4(pow(1.0, 2.0));} return f");
// 3.2. FFI calls
// Can't pass SIMD arguments to FFI
assertAsmTypeFail('glob', 'ffi', USE_ASM + I32 + "var func=ffi.func; function f() {var x=i4(1,2,3,4); func(x);} return f");
assertAsmTypeFail('glob', 'ffi', USE_ASM + F32 + "var func=ffi.func; function f() {var x=f4(1,2,3,4); func(x);} return f");
// Can't have FFI return SIMD values
assertAsmTypeFail('glob', 'ffi', USE_ASM + I32 + "var func=ffi.func; function f() {var x=i4(1,2,3,4); x=i4(func());} return f");
assertAsmTypeFail('glob', 'ffi', USE_ASM + F32 + "var func=ffi.func; function f() {var x=f4(1,2,3,4); x=f4(func());} return f");
// 3.3 Internal calls
// asm.js -> asm.js
// Retrieving values from asm.js
var code = USE_ASM + I32 + I32A + `
var check = ffi.check;
function g() {
var i = 0;
var y = i4(0,0,0,0);
var tmp = i4(0,0,0,0); var z = i4(1,1,1,1);
var w = i4(5,5,5,5);
for (; (i|0) < 30; i = i + 1 |0)
y = i4a(z, y);
y = i4a(w, y);
check(y.x | 0, y.y | 0, y.z | 0, y.w | 0);
return i4(y);
}
function f(x) {
x = i4(x);
var y = i4(0,0,0,0);
y = i4(g());
check(y.x | 0, y.y | 0, y.z | 0, y.w | 0);
return i4(x);
}
return f;
`;
var v4 = SIMD.int32x4(1,2,3,4);
// FFI callback: every lane value passed back from the asm.js module must be
// 35 (thirty additions of i4(1,1,1,1) plus one addition of i4(5,5,5,5)).
function check(x, y, z, w) {
    var lanes = [x, y, z, w];
    for (var i = 0; i < lanes.length; i++)
        assertEq(lanes[i], 35);
}
var ffi = {check};
assertEqX4(asmLink(asmCompile('glob', 'ffi', code), this, ffi)(v4), [1,2,3,4]);
// Passing arguments from asm.js to asm.js
// TODO make this code look better with templatized strings
var code = USE_ASM + I32 + I32A + `
var assertEq = ffi.assertEq;
function internal([args]) {
[coerc]
assertEq([last].x | 0, [i] | 0);
assertEq([last].y | 0, [i] + 1 |0);
assertEq([last].z | 0, [i] + 2 |0);
assertEq([last].w | 0, [i] + 3 |0);
}
function external() {
[decls]
internal([args]);
}
return external;
`;
var ffi = {assertEq};
var args = '';
var decls = '';
var coerc = '';
for (var i = 1; i < 10; ++i) {
var j = i;
args += ((i > 1) ? ', ':'') + 'x' + i;
decls += 'var x' + i + ' = i4(' + j++ + ', ' + j++ + ', ' + j++ + ', ' + j++ + ');\n';
coerc += 'x' + i + ' = i4(x' + i + ');\n';
last = 'x' + i;
var c = code.replace(/\[args\]/g, args)
.replace(/\[last\]/g, last)
.replace(/\[decls\]/i, decls)
.replace(/\[coerc\]/i, coerc)
.replace(/\[i\]/g, i);
asmLink(asmCompile('glob', 'ffi', c), this, ffi)();
}
// Stress-test for register spilling code and stack depth checks
// g() performs a SIMD add whose result lanes must be 3,5,7,9; the assertEq
// FFI calls between lane reads force values to be spilled and reloaded.
var code = `
"use asm";
var i4 = glob.SIMD.int32x4;
var i4a = i4.add;
var assertEq = ffi.assertEq;
function g() {
var x = i4(1,2,3,4);
var y = i4(2,3,4,5);
var z = i4(0,0,0,0);
z = i4a(x, y);
assertEq(z.x | 0, 3);
assertEq(z.y | 0, 5);
assertEq(z.z | 0, 7);
assertEq(z.w | 0, 9);
}
return g
`
// assertEqFFI (harness-provided) supplies the assertEq import.
asmLink(asmCompile('glob', 'ffi', code), this, assertEqFFI)();
// Calls with arguments passed on the stack while a SIMD value (z) and an
// int (w) are live across an FFI call (one) and an internal call (h): the
// SIMD lanes must survive both calls.
(function() {
var code = `
"use asm";
var i4 = glob.SIMD.int32x4;
var i4a = i4.add;
var assertEq = ffi.assertEq;
var one = ffi.one;
// Function call with arguments on the stack (1 on x64, 3 on x86)
function h(x1, x2, x3, x4, x5, x6, x7) {
x1=x1|0
x2=x2|0
x3=x3|0
x4=x4|0
x5=x5|0
x6=x6|0
x7=x7|0
return x1 + x2 |0
}
function g() {
var x = i4(1,2,3,4);
var y = i4(2,3,4,5);
var z = i4(0,0,0,0);
var w = 1;
z = i4a(x, y);
w = w + (one() | 0) | 0;
assertEq(z.x | 0, 3);
assertEq(z.y | 0, 5);
assertEq(z.z | 0, 7);
assertEq(z.w | 0, 9);
h(1, 2, 3, 4, 42, 42, 42)|0
return w | 0;
}
return g
`;
// `one` is a trivial FFI returning 1; g()'s return value is ignored here.
asmLink(asmCompile('glob', 'ffi', code), this, {assertEq: assertEq, one: () => 1})();
})();
// Function calls with mixed arguments on the stack (SIMD and scalar). In the
// worst case (x64), we have 6 int arg registers and 8 float registers.
(function() {
var code = `
"use asm";
var i4 = glob.SIMD.int32x4;
function h(
// In registers:
gpr1, gpr2, gpr3, gpr4, xmm1, xmm2, xmm3, xmm4, xmm5, xmm6, xmm7, xmm8,
// On the stack:
sint1, ssimd1, sdouble1, ssimd2, sint2, sint3, sint4, ssimd3, sdouble2
)
{
gpr1=gpr1|0;
gpr2=gpr2|0;
gpr3=gpr3|0;
gpr4=gpr4|0;
xmm1=+xmm1;
xmm2=+xmm2;
xmm3=+xmm3;
xmm4=+xmm4;
xmm5=+xmm5;
xmm6=+xmm6;
xmm7=+xmm7;
xmm8=+xmm8;
sint1=sint1|0;
ssimd1=i4(ssimd1);
sdouble1=+sdouble1;
ssimd2=i4(ssimd2);
sint2=sint2|0;
sint3=sint3|0;
sint4=sint4|0;
ssimd3=i4(ssimd3);
sdouble2=+sdouble2;
return (ssimd1.x|0) + (ssimd2.y|0) + (ssimd3.z|0) + sint2 + gpr3 | 0;
}
function g() {
var simd1 = i4(1,2,3,4);
var simd2 = i4(5,6,7,8);
var simd3 = i4(9,10,11,12);
return h(1, 2, 3, 4,
1., 2., 3., 4., 5., 6., 7., 8.,
5, simd1, 9., simd2, 6, 7, 8, simd3, 10.) | 0;
}
return g
`;
// Expected: simd1.x(1) + simd2.y(6) + simd3.z(11) + sint2(6) + gpr3(3).
assertEq(asmLink(asmCompile('glob', 'ffi', code), this)(), 1 + 6 + 11 + 6 + 3);
})();
// Check that the interrupt callback doesn't erase high components of simd
// registers:
// WARNING: must be the last test in this file
(function() {
var iters = 2000000;
// Long-running loop adding i4(1,1,1,1) `iters` times; every lane must end
// up exactly equal to iters even though the interrupt callback fires
// repeatedly while the loop runs.
var code = `
"use asm";
var i4 = glob.SIMD.int32x4;
var i4a = i4.add;
function _() {
var i = 0;
var n = i4(0,0,0,0);
var one = i4(1,1,1,1);
for (; (i>>>0) < ` + iters + `; i=(i+1)>>>0) {
n = i4a(n, one);
}
return i4(n);
}
return _;`;
// This test relies on the fact that setting the timeout will call the
// interrupt callback at fixed intervals, even before the timeout.
timeout(1000);
// Only a 'glob' formal here: this module imports no ffi.
var x4 = asmLink(asmCompile('glob', code), this)();
assertEq(x4.x, iters);
assertEq(x4.y, iters);
assertEq(x4.z, iters);
assertEq(x4.w, iters);
})();
} catch(e) {
print('Stack:', e.stack)
print('Error:', e)
throw e;
}

View File

@ -8769,15 +8769,12 @@ CodeGenerator::visitAsmJSCall(LAsmJSCall *ins)
if (mir->spIncrement()) if (mir->spIncrement())
masm.freeStack(mir->spIncrement()); masm.freeStack(mir->spIncrement());
JS_ASSERT((sizeof(AsmJSFrame) + masm.framePushed()) % AsmJSStackAlignment == 0); JS_ASSERT((sizeof(AsmJSFrame) + masm.framePushed()) % StackAlignment == 0);
#ifdef DEBUG #ifdef DEBUG
static_assert(AsmJSStackAlignment >= ABIStackAlignment,
"The asm.js stack alignment should subsume the ABI-required alignment");
static_assert(AsmJSStackAlignment % ABIStackAlignment == 0,
"The asm.js stack alignment should subsume the ABI-required alignment");
Label ok; Label ok;
masm.branchTestPtr(Assembler::Zero, StackPointer, Imm32(AsmJSStackAlignment - 1), &ok); JS_ASSERT(IsPowerOfTwo(StackAlignment));
masm.branchTestPtr(Assembler::Zero, StackPointer, Imm32(StackAlignment - 1), &ok);
masm.breakpoint(); masm.breakpoint();
masm.bind(&ok); masm.bind(&ok);
#endif #endif
@ -9016,7 +9013,7 @@ CodeGenerator::visitAsmJSInterruptCheck(LAsmJSInterruptCheck *lir)
masm.branch32(Assembler::Equal, scratch, Imm32(0), &rejoin); masm.branch32(Assembler::Equal, scratch, Imm32(0), &rejoin);
{ {
uint32_t stackFixup = ComputeByteAlignment(masm.framePushed() + sizeof(AsmJSFrame), uint32_t stackFixup = ComputeByteAlignment(masm.framePushed() + sizeof(AsmJSFrame),
ABIStackAlignment); StackAlignment);
masm.reserveStack(stackFixup); masm.reserveStack(stackFixup);
masm.call(lir->funcDesc(), lir->interruptExit()); masm.call(lir->funcDesc(), lir->interruptExit());
masm.freeStack(stackFixup); masm.freeStack(stackFixup);

View File

@ -3214,9 +3214,3 @@ jit::JitSupportsFloatingPoint()
{ {
return js::jit::MacroAssembler::SupportsFloatingPoint(); return js::jit::MacroAssembler::SupportsFloatingPoint();
} }
bool
jit::JitSupportsSimd()
{
return js::jit::MacroAssembler::SupportsSimd();
}

View File

@ -207,7 +207,6 @@ bool UpdateForDebugMode(JSContext *maybecx, JSCompartment *comp,
AutoDebugModeInvalidation &invalidate); AutoDebugModeInvalidation &invalidate);
bool JitSupportsFloatingPoint(); bool JitSupportsFloatingPoint();
bool JitSupportsSimd();
} // namespace jit } // namespace jit
} // namespace js } // namespace js

View File

@ -1075,7 +1075,7 @@ uint8_t *
alignDoubleSpillWithOffset(uint8_t *pointer, int32_t offset) alignDoubleSpillWithOffset(uint8_t *pointer, int32_t offset)
{ {
uint32_t address = reinterpret_cast<uint32_t>(pointer); uint32_t address = reinterpret_cast<uint32_t>(pointer);
address = (address - offset) & ~(ABIStackAlignment - 1); address = (address - offset) & ~(StackAlignment - 1);
return reinterpret_cast<uint8_t *>(address); return reinterpret_cast<uint8_t *>(address);
} }

View File

@ -1426,11 +1426,11 @@ class MacroAssembler : public MacroAssemblerSpecific
PopRegsInMask(liveRegs); PopRegsInMask(liveRegs);
} }
void assertStackAlignment(uint32_t alignment) { void assertStackAlignment() {
#ifdef DEBUG #ifdef DEBUG
Label ok; Label ok;
JS_ASSERT(IsPowerOfTwo(alignment)); JS_ASSERT(IsPowerOfTwo(StackAlignment));
branchTestPtr(Assembler::Zero, StackPointer, Imm32(alignment - 1), &ok); branchTestPtr(Assembler::Zero, StackPointer, Imm32(StackAlignment - 1), &ok);
breakpoint(); breakpoint();
bind(&ok); bind(&ok);
#endif #endif
@ -1508,10 +1508,10 @@ JSOpToCondition(JSOp op, bool isSigned)
} }
static inline size_t static inline size_t
StackDecrementForCall(uint32_t alignment, size_t bytesAlreadyPushed, size_t bytesToPush) StackDecrementForCall(size_t bytesAlreadyPushed, size_t bytesToPush)
{ {
return bytesToPush + return bytesToPush +
ComputeByteAlignment(bytesAlreadyPushed + bytesToPush, alignment); ComputeByteAlignment(bytesAlreadyPushed + bytesToPush, StackAlignment);
} }
} // namespace jit } // namespace jit

View File

@ -1586,10 +1586,10 @@ class LIRGraph
// platform stack alignment requirement, and so that it's a multiple of // platform stack alignment requirement, and so that it's a multiple of
// the number of slots per Value. // the number of slots per Value.
uint32_t paddedLocalSlotCount() const { uint32_t paddedLocalSlotCount() const {
// Round to ABIStackAlignment, but also round to at least sizeof(Value) // Round to StackAlignment, but also round to at least sizeof(Value) in
// in case that's greater, because StackOffsetOfPassedArg rounds // case that's greater, because StackOffsetOfPassedArg rounds argument
// argument slots to 8-byte boundaries. // slots to 8-byte boundaries.
size_t Alignment = Max(size_t(ABIStackAlignment), sizeof(Value)); size_t Alignment = Max(size_t(StackAlignment), sizeof(Value));
return AlignBytes(localSlotCount(), Alignment); return AlignBytes(localSlotCount(), Alignment);
} }
size_t paddedLocalSlotsSize() const { size_t paddedLocalSlotsSize() const {

View File

@ -3553,7 +3553,7 @@ LIRGenerator::visitAsmJSParameter(MAsmJSParameter *ins)
if (abi.argInRegister()) if (abi.argInRegister())
return defineFixed(new(alloc()) LAsmJSParameter, ins, LAllocation(abi.reg())); return defineFixed(new(alloc()) LAsmJSParameter, ins, LAllocation(abi.reg()));
JS_ASSERT(IsNumberType(ins->type()) || IsSimdType(ins->type())); JS_ASSERT(IsNumberType(ins->type()));
return defineFixed(new(alloc()) LAsmJSParameter, ins, LArgument(abi.offsetFromArgBase())); return defineFixed(new(alloc()) LAsmJSParameter, ins, LArgument(abi.offsetFromArgBase()));
} }
@ -3566,8 +3566,6 @@ LIRGenerator::visitAsmJSReturn(MAsmJSReturn *ins)
lir->setOperand(0, useFixed(rval, ReturnFloat32Reg)); lir->setOperand(0, useFixed(rval, ReturnFloat32Reg));
else if (rval->type() == MIRType_Double) else if (rval->type() == MIRType_Double)
lir->setOperand(0, useFixed(rval, ReturnDoubleReg)); lir->setOperand(0, useFixed(rval, ReturnDoubleReg));
else if (IsSimdType(rval->type()))
lir->setOperand(0, useFixed(rval, ReturnSimdReg));
else if (rval->type() == MIRType_Int32) else if (rval->type() == MIRType_Int32)
lir->setOperand(0, useFixed(rval, ReturnReg)); lir->setOperand(0, useFixed(rval, ReturnReg));
else else
@ -3584,7 +3582,7 @@ LIRGenerator::visitAsmJSVoidReturn(MAsmJSVoidReturn *ins)
bool bool
LIRGenerator::visitAsmJSPassStackArg(MAsmJSPassStackArg *ins) LIRGenerator::visitAsmJSPassStackArg(MAsmJSPassStackArg *ins)
{ {
if (IsFloatingPointType(ins->arg()->type()) || IsSimdType(ins->arg()->type())) { if (IsFloatingPointType(ins->arg()->type())) {
JS_ASSERT(!ins->arg()->isEmittedAtUses()); JS_ASSERT(!ins->arg()->isEmittedAtUses());
return add(new(alloc()) LAsmJSPassStackArg(useRegisterAtStart(ins->arg())), ins); return add(new(alloc()) LAsmJSPassStackArg(useRegisterAtStart(ins->arg())), ins);
} }

View File

@ -493,7 +493,6 @@ MConstant::New(TempAllocator &alloc, const Value &v, types::CompilerConstraintLi
MConstant * MConstant *
MConstant::NewAsmJS(TempAllocator &alloc, const Value &v, MIRType type) MConstant::NewAsmJS(TempAllocator &alloc, const Value &v, MIRType type)
{ {
JS_ASSERT(!IsSimdType(type));
MConstant *constant = new(alloc) MConstant(v, nullptr); MConstant *constant = new(alloc) MConstant(v, nullptr);
constant->setResultType(type); constant->setResultType(type);
return constant; return constant;

View File

@ -11187,7 +11187,7 @@ class MAsmJSLoadGlobalVar : public MNullaryInstruction
MAsmJSLoadGlobalVar(MIRType type, unsigned globalDataOffset, bool isConstant) MAsmJSLoadGlobalVar(MIRType type, unsigned globalDataOffset, bool isConstant)
: globalDataOffset_(globalDataOffset), isConstant_(isConstant) : globalDataOffset_(globalDataOffset), isConstant_(isConstant)
{ {
JS_ASSERT(IsNumberType(type) || IsSimdType(type)); JS_ASSERT(IsNumberType(type));
setResultType(type); setResultType(type);
setMovable(); setMovable();
} }

View File

@ -145,8 +145,9 @@ static MOZ_CONSTEXPR_VAR FloatRegister d15 = {FloatRegisters::d15, VFPRegister::
// load/store) operate in a single cycle when the address they are dealing with // load/store) operate in a single cycle when the address they are dealing with
// is 8 byte aligned. Also, the ARM abi wants the stack to be 8 byte aligned at // is 8 byte aligned. Also, the ARM abi wants the stack to be 8 byte aligned at
// function boundaries. I'm trying to make sure this is always true. // function boundaries. I'm trying to make sure this is always true.
static const uint32_t ABIStackAlignment = 8; static const uint32_t StackAlignment = 8;
static const uint32_t CodeAlignment = 8; static const uint32_t CodeAlignment = 8;
static const bool StackKeptAligned = true;
// This boolean indicates whether we support SIMD instructions flavoured for // This boolean indicates whether we support SIMD instructions flavoured for
// this architecture or not. Rather than a method in the LIRGenerator, it is // this architecture or not. Rather than a method in the LIRGenerator, it is
@ -155,8 +156,6 @@ static const uint32_t CodeAlignment = 8;
static const bool SupportsSimd = false; static const bool SupportsSimd = false;
static const uint32_t SimdStackAlignment = 8; static const uint32_t SimdStackAlignment = 8;
static const uint32_t AsmJSStackAlignment = SimdStackAlignment;
static const Scale ScalePointer = TimesFour; static const Scale ScalePointer = TimesFour;
class Instruction; class Instruction;
@ -1553,9 +1552,6 @@ class Assembler : public AssemblerShared
static bool SupportsFloatingPoint() { static bool SupportsFloatingPoint() {
return HasVFP(); return HasVFP();
} }
static bool SupportsSimd() {
return js::jit::SupportsSimd;
}
protected: protected:
void addPendingJump(BufferOffset src, ImmPtr target, Relocation::Kind kind) { void addPendingJump(BufferOffset src, ImmPtr target, Relocation::Kind kind) {

View File

@ -1880,7 +1880,7 @@ MacroAssemblerARMCompat::freeStack(Register amount)
void void
MacroAssembler::PushRegsInMask(RegisterSet set, FloatRegisterSet simdSet) MacroAssembler::PushRegsInMask(RegisterSet set, FloatRegisterSet simdSet)
{ {
JS_ASSERT(!SupportsSimd() && simdSet.size() == 0); JS_ASSERT(!SupportsSimd && simdSet.size() == 0);
int32_t diffF = set.fpus().getPushSizeInBytes(); int32_t diffF = set.fpus().getPushSizeInBytes();
int32_t diffG = set.gprs().size() * sizeof(intptr_t); int32_t diffG = set.gprs().size() * sizeof(intptr_t);
@ -1909,7 +1909,7 @@ MacroAssembler::PushRegsInMask(RegisterSet set, FloatRegisterSet simdSet)
void void
MacroAssembler::PopRegsInMaskIgnore(RegisterSet set, RegisterSet ignore, FloatRegisterSet simdSet) MacroAssembler::PopRegsInMaskIgnore(RegisterSet set, RegisterSet ignore, FloatRegisterSet simdSet)
{ {
JS_ASSERT(!SupportsSimd() && simdSet.size() == 0); JS_ASSERT(!SupportsSimd && simdSet.size() == 0);
int32_t diffG = set.gprs().size() * sizeof(intptr_t); int32_t diffG = set.gprs().size() * sizeof(intptr_t);
int32_t diffF = set.fpus().getPushSizeInBytes(); int32_t diffF = set.fpus().getPushSizeInBytes();
const int32_t reservedG = diffG; const int32_t reservedG = diffG;
@ -3778,7 +3778,7 @@ MacroAssemblerARMCompat::setupUnalignedABICall(uint32_t args, Register scratch)
ma_mov(sp, scratch); ma_mov(sp, scratch);
// Force sp to be aligned. // Force sp to be aligned.
ma_and(Imm32(~(ABIStackAlignment - 1)), sp, sp); ma_and(Imm32(~(StackAlignment - 1)), sp, sp);
ma_push(scratch); ma_push(scratch);
} }
@ -3937,7 +3937,7 @@ MacroAssemblerARMCompat::passABIArg(FloatRegister freg, MoveOp::Type type)
void MacroAssemblerARMCompat::checkStackAlignment() void MacroAssemblerARMCompat::checkStackAlignment()
{ {
#ifdef DEBUG #ifdef DEBUG
ma_tst(sp, Imm32(ABIStackAlignment - 1)); ma_tst(sp, Imm32(StackAlignment - 1));
breakpoint(NonZero); breakpoint(NonZero);
#endif #endif
} }
@ -3956,11 +3956,11 @@ MacroAssemblerARMCompat::callWithABIPre(uint32_t *stackAdjust, bool callFromAsmJ
if (!dynamicAlignment_) { if (!dynamicAlignment_) {
*stackAdjust += ComputeByteAlignment(framePushed_ + *stackAdjust + alignmentAtPrologue, *stackAdjust += ComputeByteAlignment(framePushed_ + *stackAdjust + alignmentAtPrologue,
ABIStackAlignment); StackAlignment);
} else { } else {
// sizeof(intptr_t) accounts for the saved stack pointer pushed by // sizeof(intptr_t) accounts for the saved stack pointer pushed by
// setupUnalignedABICall. // setupUnalignedABICall.
*stackAdjust += ComputeByteAlignment(*stackAdjust + sizeof(intptr_t), ABIStackAlignment); *stackAdjust += ComputeByteAlignment(*stackAdjust + sizeof(intptr_t), StackAlignment);
} }
reserveStack(*stackAdjust); reserveStack(*stackAdjust);

View File

@ -2117,7 +2117,7 @@ Simulator::softwareInterrupt(SimInstruction *instr)
int32_t saved_lr = get_register(lr); int32_t saved_lr = get_register(lr);
intptr_t external = reinterpret_cast<intptr_t>(redirection->nativeFunction()); intptr_t external = reinterpret_cast<intptr_t>(redirection->nativeFunction());
bool stack_aligned = (get_register(sp) & (ABIStackAlignment - 1)) == 0; bool stack_aligned = (get_register(sp) & (StackAlignment - 1)) == 0;
if (!stack_aligned) { if (!stack_aligned) {
fprintf(stderr, "Runtime call with unaligned stack!\n"); fprintf(stderr, "Runtime call with unaligned stack!\n");
MOZ_CRASH(); MOZ_CRASH();
@ -4258,7 +4258,7 @@ Simulator::call(uint8_t* entry, int argument_count, ...)
if (argument_count >= 4) if (argument_count >= 4)
entry_stack -= (argument_count - 4) * sizeof(int32_t); entry_stack -= (argument_count - 4) * sizeof(int32_t);
entry_stack &= ~ABIStackAlignment; entry_stack &= ~StackAlignment;
// Store remaining arguments on stack, from low to high memory. // Store remaining arguments on stack, from low to high memory.
intptr_t *stack_argument = reinterpret_cast<intptr_t*>(entry_stack); intptr_t *stack_argument = reinterpret_cast<intptr_t*>(entry_stack);

View File

@ -158,8 +158,9 @@ static MOZ_CONSTEXPR_VAR FloatRegister f30 = { FloatRegisters::f30, FloatRegiste
// MIPS CPUs can only load multibyte data that is "naturally" // MIPS CPUs can only load multibyte data that is "naturally"
// four-byte-aligned, sp register should be eight-byte-aligned. // four-byte-aligned, sp register should be eight-byte-aligned.
static const uint32_t ABIStackAlignment = 8; static const uint32_t StackAlignment = 8;
static const uint32_t CodeAlignment = 4; static const uint32_t CodeAlignment = 4;
static const bool StackKeptAligned = true;
// This boolean indicates whether we support SIMD instructions flavoured for // This boolean indicates whether we support SIMD instructions flavoured for
// this architecture or not. Rather than a method in the LIRGenerator, it is // this architecture or not. Rather than a method in the LIRGenerator, it is
@ -170,8 +171,6 @@ static const bool SupportsSimd = false;
// alignment requirements still need to be explored. // alignment requirements still need to be explored.
static const uint32_t SimdStackAlignment = 8; static const uint32_t SimdStackAlignment = 8;
static const uint32_t AsmJSStackAlignment = SimdStackAlignment;
static const Scale ScalePointer = TimesFour; static const Scale ScalePointer = TimesFour;
// MIPS instruction types // MIPS instruction types
@ -239,6 +238,7 @@ static const uint32_t RDMask = ((1 << RDBits) - 1) << RDShift;
static const uint32_t SAMask = ((1 << SABits) - 1) << SAShift; static const uint32_t SAMask = ((1 << SABits) - 1) << SAShift;
static const uint32_t FunctionMask = ((1 << FunctionBits) - 1) << FunctionShift; static const uint32_t FunctionMask = ((1 << FunctionBits) - 1) << FunctionShift;
static const uint32_t RegMask = Registers::Total - 1; static const uint32_t RegMask = Registers::Total - 1;
static const uint32_t StackAlignmentMask = StackAlignment - 1;
static const uint32_t MAX_BREAK_CODE = 1024 - 1; static const uint32_t MAX_BREAK_CODE = 1024 - 1;

View File

@ -1574,7 +1574,7 @@ MacroAssembler::PushRegsInMask(RegisterSet set, FloatRegisterSet simdSet)
// Double values have to be aligned. We reserve extra space so that we can // Double values have to be aligned. We reserve extra space so that we can
// start writing from the first aligned location. // start writing from the first aligned location.
// We reserve a whole extra double so that the buffer has even size. // We reserve a whole extra double so that the buffer has even size.
ma_and(SecondScratchReg, sp, Imm32(~(ABIStackAlignment - 1))); ma_and(SecondScratchReg, sp, Imm32(~(StackAlignment - 1)));
reserveStack(diffF + sizeof(double)); reserveStack(diffF + sizeof(double));
for (FloatRegisterForwardIterator iter(set.fpus().reduceSetForPush()); iter.more(); iter++) { for (FloatRegisterForwardIterator iter(set.fpus().reduceSetForPush()); iter.more(); iter++) {
@ -1596,7 +1596,7 @@ MacroAssembler::PopRegsInMaskIgnore(RegisterSet set, RegisterSet ignore, FloatRe
// Read the buffer form the first aligned location. // Read the buffer form the first aligned location.
ma_addu(SecondScratchReg, sp, Imm32(reservedF + sizeof(double))); ma_addu(SecondScratchReg, sp, Imm32(reservedF + sizeof(double)));
ma_and(SecondScratchReg, SecondScratchReg, Imm32(~(ABIStackAlignment - 1))); ma_and(SecondScratchReg, SecondScratchReg, Imm32(~(StackAlignment - 1)));
for (FloatRegisterForwardIterator iter(set.fpus().reduceSetForPush()); iter.more(); iter++) { for (FloatRegisterForwardIterator iter(set.fpus().reduceSetForPush()); iter.more(); iter++) {
if (!ignore.has(*iter) && ((*iter).code() % 2 == 0)) if (!ignore.has(*iter) && ((*iter).code() % 2 == 0))
@ -3158,7 +3158,7 @@ MacroAssemblerMIPSCompat::setupUnalignedABICall(uint32_t args, Register scratch)
// Force sp to be aligned // Force sp to be aligned
ma_subu(StackPointer, StackPointer, Imm32(sizeof(uint32_t))); ma_subu(StackPointer, StackPointer, Imm32(sizeof(uint32_t)));
ma_and(StackPointer, StackPointer, Imm32(~(ABIStackAlignment - 1))); ma_and(StackPointer, StackPointer, Imm32(~(StackAlignment - 1)));
as_sw(scratch, StackPointer, 0); as_sw(scratch, StackPointer, 0);
} }
@ -3259,7 +3259,7 @@ MacroAssemblerMIPSCompat::checkStackAlignment()
{ {
#ifdef DEBUG #ifdef DEBUG
Label aligned; Label aligned;
as_andi(ScratchRegister, sp, ABIStackAlignment - 1); as_andi(ScratchRegister, sp, StackAlignment - 1);
ma_b(ScratchRegister, zero, &aligned, Equal, ShortJump); ma_b(ScratchRegister, zero, &aligned, Equal, ShortJump);
as_break(MAX_BREAK_CODE); as_break(MAX_BREAK_CODE);
bind(&aligned); bind(&aligned);
@ -3271,7 +3271,7 @@ MacroAssemblerMIPSCompat::alignStackPointer()
{ {
movePtr(StackPointer, SecondScratchReg); movePtr(StackPointer, SecondScratchReg);
subPtr(Imm32(sizeof(uintptr_t)), StackPointer); subPtr(Imm32(sizeof(uintptr_t)), StackPointer);
andPtr(Imm32(~(ABIStackAlignment - 1)), StackPointer); andPtr(Imm32(~(StackAlignment - 1)), StackPointer);
storePtr(SecondScratchReg, Address(StackPointer, 0)); storePtr(SecondScratchReg, Address(StackPointer, 0));
} }
@ -3284,13 +3284,13 @@ MacroAssemblerMIPSCompat::restoreStackPointer()
void void
MacroAssembler::alignFrameForICArguments(AfterICSaveLive &aic) MacroAssembler::alignFrameForICArguments(AfterICSaveLive &aic)
{ {
if (framePushed() % ABIStackAlignment != 0) { if (framePushed() % StackAlignment != 0) {
aic.alignmentPadding = ABIStackAlignment - (framePushed() % StackAlignment); aic.alignmentPadding = StackAlignment - (framePushed() % StackAlignment);
reserveStack(aic.alignmentPadding); reserveStack(aic.alignmentPadding);
} else { } else {
aic.alignmentPadding = 0; aic.alignmentPadding = 0;
} }
MOZ_ASSERT(framePushed() % ABIStackAlignment == 0); MOZ_ASSERT(framePushed() % StackAlignment == 0);
checkStackAlignment(); checkStackAlignment();
} }
@ -3316,10 +3316,10 @@ MacroAssemblerMIPSCompat::callWithABIPre(uint32_t *stackAdjust, bool callFromAsm
uint32_t alignmentAtPrologue = callFromAsmJS ? sizeof(AsmJSFrame) : 0; uint32_t alignmentAtPrologue = callFromAsmJS ? sizeof(AsmJSFrame) : 0;
if (dynamicAlignment_) { if (dynamicAlignment_) {
*stackAdjust += ComputeByteAlignment(*stackAdjust, ABIStackAlignment); *stackAdjust += ComputeByteAlignment(*stackAdjust, StackAlignment);
} else { } else {
*stackAdjust += ComputeByteAlignment(framePushed_ + alignmentAtPrologue + *stackAdjust, *stackAdjust += ComputeByteAlignment(framePushed_ + alignmentAtPrologue + *stackAdjust,
ABIStackAlignment); StackAlignment);
} }
reserveStack(*stackAdjust); reserveStack(*stackAdjust);
@ -3444,7 +3444,7 @@ void
MacroAssemblerMIPSCompat::handleFailureWithHandler(void *handler) MacroAssemblerMIPSCompat::handleFailureWithHandler(void *handler)
{ {
// Reserve space for exception information. // Reserve space for exception information.
int size = (sizeof(ResumeFromException) + ABIStackAlignment) & ~(ABIStackAlignment - 1); int size = (sizeof(ResumeFromException) + StackAlignment) & ~(StackAlignment - 1);
ma_subu(StackPointer, StackPointer, Imm32(size)); ma_subu(StackPointer, StackPointer, Imm32(size));
ma_move(a0, StackPointer); // Use a0 since it is a first function argument ma_move(a0, StackPointer); // Use a0 since it is a first function argument

View File

@ -1871,7 +1871,7 @@ Simulator::softwareInterrupt(SimInstruction *instr)
intptr_t external = reinterpret_cast<intptr_t>(redirection->nativeFunction()); intptr_t external = reinterpret_cast<intptr_t>(redirection->nativeFunction());
bool stack_aligned = (getRegister(sp) & (ABIStackAlignment - 1)) == 0; bool stack_aligned = (getRegister(sp) & (StackAlignment - 1)) == 0;
if (!stack_aligned) { if (!stack_aligned) {
fprintf(stderr, "Runtime call with unaligned stack!\n"); fprintf(stderr, "Runtime call with unaligned stack!\n");
MOZ_CRASH(); MOZ_CRASH();
@ -3405,7 +3405,7 @@ Simulator::call(uint8_t *entry, int argument_count, ...)
else else
entry_stack = entry_stack - kCArgsSlotsSize; entry_stack = entry_stack - kCArgsSlotsSize;
entry_stack &= ~ABIStackAlignment; entry_stack &= ~StackAlignment;
intptr_t *stack_argument = reinterpret_cast<intptr_t*>(entry_stack); intptr_t *stack_argument = reinterpret_cast<intptr_t*>(entry_stack);

View File

@ -16,7 +16,6 @@ namespace jit {
static const bool SupportsSimd = false; static const bool SupportsSimd = false;
static const uint32_t SimdStackAlignment = 0; static const uint32_t SimdStackAlignment = 0;
static const uint32_t AsmJSStackAlignment = 0;
class Registers class Registers
{ {

View File

@ -68,8 +68,9 @@ static MOZ_CONSTEXPR_VAR ValueOperand JSReturnOperand(InvalidReg);
#error "Bad architecture" #error "Bad architecture"
#endif #endif
static const uint32_t ABIStackAlignment = 4; static const uint32_t StackAlignment = 8;
static const uint32_t CodeAlignment = 4; static const uint32_t CodeAlignment = 4;
static const bool StackKeptAligned = false;
static const Scale ScalePointer = TimesOne; static const Scale ScalePointer = TimesOne;

View File

@ -640,9 +640,9 @@ class CallSite : public CallSiteDesc
typedef Vector<CallSite, 0, SystemAllocPolicy> CallSiteVector; typedef Vector<CallSite, 0, SystemAllocPolicy> CallSiteVector;
// As an invariant across architectures, within asm.js code: // As an invariant across architectures, within asm.js code:
// $sp % AsmJSStackAlignment = (sizeof(AsmJSFrame) + masm.framePushed) % AsmJSStackAlignment // $sp % StackAlignment = (sizeof(AsmJSFrame) + masm.framePushed) % StackAlignment
// Thus, AsmJSFrame represents the bytes pushed after the call (which occurred // Thus, AsmJSFrame represents the bytes pushed after the call (which occurred
// with a AsmJSStackAlignment-aligned StackPointer) that are not included in // with a StackAlignment-aligned StackPointer) that are not included in
// masm.framePushed. // masm.framePushed.
struct AsmJSFrame struct AsmJSFrame
{ {

View File

@ -926,7 +926,6 @@ class AssemblerX86Shared : public AssemblerShared
static bool HasSSE3() { return CPUInfo::IsSSE3Present(); } static bool HasSSE3() { return CPUInfo::IsSSE3Present(); }
static bool HasSSE41() { return CPUInfo::IsSSE41Present(); } static bool HasSSE41() { return CPUInfo::IsSSE41Present(); }
static bool SupportsFloatingPoint() { return CPUInfo::IsSSE2Present(); } static bool SupportsFloatingPoint() { return CPUInfo::IsSSE2Present(); }
static bool SupportsSimd() { return CPUInfo::IsSSE2Present(); }
// The below cmpl methods switch the lhs and rhs when it invokes the // The below cmpl methods switch the lhs and rhs when it invokes the
// macroassembler to conform with intel standard. When calling this // macroassembler to conform with intel standard. When calling this

View File

@ -2978,21 +2978,6 @@ public:
m_formatter.prefix(PRE_SSE_F3); m_formatter.prefix(PRE_SSE_F3);
m_formatter.twoByteOp(OP2_MOVSD_WsdVsd, (RegisterID)src, address); m_formatter.twoByteOp(OP2_MOVSD_WsdVsd, (RegisterID)src, address);
} }
void movdqa_rm(XMMRegisterID src, const void* address)
{
spew("movdqa %s, %p",
nameFPReg(src), address);
m_formatter.prefix(PRE_SSE_66);
m_formatter.twoByteOp(OP2_MOVDQ_WdqVdq, (RegisterID)src, address);
}
void movaps_rm(XMMRegisterID src, const void* address)
{
spew("movaps %s, %p",
nameFPReg(src), address);
m_formatter.twoByteOp(OP2_MOVPS_WpsVps, (RegisterID)src, address);
}
#else #else
JmpSrc movsd_ripr(XMMRegisterID dst) JmpSrc movsd_ripr(XMMRegisterID dst)
{ {
@ -3018,29 +3003,6 @@ public:
m_formatter.twoByteRipOp(OP2_MOVSD_WsdVsd, (RegisterID)src, 0); m_formatter.twoByteRipOp(OP2_MOVSD_WsdVsd, (RegisterID)src, 0);
return JmpSrc(m_formatter.size()); return JmpSrc(m_formatter.size());
} }
JmpSrc movss_rrip(XMMRegisterID src)
{
spew("movss %s, ?(%%rip)",
nameFPReg(src));
m_formatter.prefix(PRE_SSE_F3);
m_formatter.twoByteRipOp(OP2_MOVSD_WsdVsd, (RegisterID)src, 0);
return JmpSrc(m_formatter.size());
}
JmpSrc movdqa_rrip(XMMRegisterID src)
{
spew("movdqa %s, ?(%%rip)",
nameFPReg(src));
m_formatter.prefix(PRE_SSE_66);
m_formatter.twoByteRipOp(OP2_MOVDQ_WdqVdq, (RegisterID)src, 0);
return JmpSrc(m_formatter.size());
}
JmpSrc movaps_rrip(XMMRegisterID src)
{
spew("movaps %s, ?(%%rip)",
nameFPReg(src));
m_formatter.twoByteRipOp(OP2_MOVPS_WpsVps, (RegisterID)src, 0);
return JmpSrc(m_formatter.size());
}
#endif #endif
void movaps_rr(XMMRegisterID src, XMMRegisterID dst) void movaps_rr(XMMRegisterID src, XMMRegisterID dst)

View File

@ -69,25 +69,25 @@ CodeGeneratorShared::CodeGeneratorShared(MIRGenerator *gen, LIRGraph *graph, Mac
if (!gen->compilingAsmJS()) if (!gen->compilingAsmJS())
masm.setInstrumentation(&sps_); masm.setInstrumentation(&sps_);
if (gen->compilingAsmJS()) {
// Since asm.js uses the system ABI which does not necessarily use a // Since asm.js uses the system ABI which does not necessarily use a
// regular array where all slots are sizeof(Value), it maintains the max // regular array where all slots are sizeof(Value), it maintains the max
// argument stack depth separately. // argument stack depth separately.
if (gen->compilingAsmJS()) {
JS_ASSERT(graph->argumentSlotCount() == 0); JS_ASSERT(graph->argumentSlotCount() == 0);
frameDepth_ += gen->maxAsmJSStackArgBytes(); frameDepth_ += gen->maxAsmJSStackArgBytes();
// If the function uses any SIMD, we may need to insert padding so that
// local slots are aligned for SIMD.
if (gen->usesSimd()) {
frameInitialAdjustment_ = ComputeByteAlignment(sizeof(AsmJSFrame), AsmJSStackAlignment);
frameDepth_ += frameInitialAdjustment_;
}
// An MAsmJSCall does not align the stack pointer at calls sites but instead // An MAsmJSCall does not align the stack pointer at calls sites but instead
// relies on the a priori stack adjustment. This must be the last // relies on the a priori stack adjustment (in the prologue) on platforms
// adjustment of frameDepth_. // (like x64) which require the stack to be aligned.
if (gen->performsCall()) if (StackKeptAligned || gen->performsCall() || gen->usesSimd()) {
frameDepth_ += ComputeByteAlignment(sizeof(AsmJSFrame) + frameDepth_, AsmJSStackAlignment); unsigned alignmentAtCall = sizeof(AsmJSFrame) + frameDepth_;
unsigned firstFixup = 0;
if (unsigned rem = alignmentAtCall % StackAlignment)
frameDepth_ += (firstFixup = StackAlignment - rem);
if (gen->usesSimd())
setupSimdAlignment(firstFixup);
}
// FrameSizeClass is only used for bailing, which cannot happen in // FrameSizeClass is only used for bailing, which cannot happen in
// asm.js code. // asm.js code.
@ -97,6 +97,38 @@ CodeGeneratorShared::CodeGeneratorShared(MIRGenerator *gen, LIRGraph *graph, Mac
} }
} }
void
CodeGeneratorShared::setupSimdAlignment(unsigned fixup)
{
JS_STATIC_ASSERT(SimdStackAlignment % StackAlignment == 0);
// At this point, we have:
// (frameDepth_ + sizeof(AsmJSFrame)) % StackAlignment == 0
// which means we can add as many SimdStackAlignment as needed.
// The next constraint is to have all stack slots
// aligned for SIMD. That's done by having the first stack slot
// aligned. We need an offset such that:
// (frameDepth_ - offset) % SimdStackAlignment == 0
frameInitialAdjustment_ = frameDepth_ % SimdStackAlignment;
// We need to ensure that the first stack slot is actually
// located in this frame and not beforehand, when taking this
// offset into account, i.e.:
// frameDepth_ - initial adjustment >= frameDepth_ - fixup
// <=> fixup >= initial adjustment
//
// For instance, on x86 with gcc, if the initial frameDepth
// % 16 is 8, then the fixup is 0, although the initial
// adjustment is 8. The first stack slot would be located at
// frameDepth - 8 in this case, which is obviously before
// frameDepth.
//
// If that's not the case, we add SimdStackAlignment to the
// fixup, which will keep on satisfying other constraints.
if (frameInitialAdjustment_ > int32_t(fixup))
frameDepth_ += SimdStackAlignment;
}
bool bool
CodeGeneratorShared::generateOutOfLineCode() CodeGeneratorShared::generateOutOfLineCode()
{ {

View File

@ -496,6 +496,8 @@ class CodeGeneratorShared : public LInstructionVisitor
private: private:
void generateInvalidateEpilogue(); void generateInvalidateEpilogue();
void setupSimdAlignment(unsigned fixup);
public: public:
CodeGeneratorShared(MIRGenerator *gen, LIRGraph *graph, MacroAssembler *masm); CodeGeneratorShared(MIRGenerator *gen, LIRGraph *graph, MacroAssembler *masm);

View File

@ -319,26 +319,10 @@ CodeGeneratorX86Shared::visitAsmJSPassStackArg(LAsmJSPassStackArg *ins)
if (ins->arg()->isConstant()) { if (ins->arg()->isConstant()) {
masm.storePtr(ImmWord(ToInt32(ins->arg())), dst); masm.storePtr(ImmWord(ToInt32(ins->arg())), dst);
} else { } else {
if (ins->arg()->isGeneralReg()) { if (ins->arg()->isGeneralReg())
masm.storePtr(ToRegister(ins->arg()), dst); masm.storePtr(ToRegister(ins->arg()), dst);
} else { else
switch (mir->input()->type()) {
case MIRType_Double:
case MIRType_Float32:
masm.storeDouble(ToFloatRegister(ins->arg()), dst); masm.storeDouble(ToFloatRegister(ins->arg()), dst);
return true;
// StackPointer is SimdStackAlignment-aligned and ABIArgGenerator guarantees stack
// offsets are SimdStackAlignment-aligned.
case MIRType_Int32x4:
masm.storeAlignedInt32x4(ToFloatRegister(ins->arg()), dst);
return true;
case MIRType_Float32x4:
masm.storeAlignedFloat32x4(ToFloatRegister(ins->arg()), dst);
return true;
default: break;
}
MOZ_MAKE_COMPILER_ASSUME_IS_UNREACHABLE("unexpected mir type in AsmJSPassStackArg");
}
} }
return true; return true;
} }

View File

@ -154,12 +154,6 @@ LIRGeneratorShared::defineReturn(LInstruction *lir, MDefinition *mir)
case MIRType_Double: case MIRType_Double:
lir->setDef(0, LDefinition(vreg, LDefinition::DOUBLE, LFloatReg(ReturnDoubleReg))); lir->setDef(0, LDefinition(vreg, LDefinition::DOUBLE, LFloatReg(ReturnDoubleReg)));
break; break;
case MIRType_Int32x4:
lir->setDef(0, LDefinition(vreg, LDefinition::INT32X4, LFloatReg(ReturnSimdReg)));
break;
case MIRType_Float32x4:
lir->setDef(0, LDefinition(vreg, LDefinition::FLOAT32X4, LFloatReg(ReturnSimdReg)));
break;
default: default:
LDefinition::Type type = LDefinition::TypeFrom(mir->type()); LDefinition::Type type = LDefinition::TypeFrom(mir->type());
JS_ASSERT(type != LDefinition::DOUBLE && type != LDefinition::FLOAT32); JS_ASSERT(type != LDefinition::DOUBLE && type != LDefinition::FLOAT32);

View File

@ -30,17 +30,8 @@ ABIArgGenerator::next(MIRType type)
#if defined(XP_WIN) #if defined(XP_WIN)
JS_STATIC_ASSERT(NumIntArgRegs == NumFloatArgRegs); JS_STATIC_ASSERT(NumIntArgRegs == NumFloatArgRegs);
if (regIndex_ == NumIntArgRegs) { if (regIndex_ == NumIntArgRegs) {
if (IsSimdType(type)) {
// On Win64, >64 bit args need to be passed by reference, but asm.js
// doesn't allow passing SIMD values to FFIs. The only way to reach
// here is asm to asm calls, so we can break the ABI here.
stackOffset_ = AlignBytes(stackOffset_, SimdStackAlignment);
current_ = ABIArg(stackOffset_); current_ = ABIArg(stackOffset_);
stackOffset_ += Simd128DataSize;
} else {
stackOffset_ += sizeof(uint64_t); stackOffset_ += sizeof(uint64_t);
current_ = ABIArg(stackOffset_);
}
return current_; return current_;
} }
switch (type) { switch (type) {
@ -52,13 +43,6 @@ ABIArgGenerator::next(MIRType type)
case MIRType_Double: case MIRType_Double:
current_ = ABIArg(FloatArgRegs[regIndex_++]); current_ = ABIArg(FloatArgRegs[regIndex_++]);
break; break;
case MIRType_Int32x4:
case MIRType_Float32x4:
// On Win64, >64 bit args need to be passed by reference, but asm.js
// doesn't allow passing SIMD values to FFIs. The only way to reach
// here is asm to asm calls, so we can break the ABI here.
current_ = ABIArg(FloatArgRegs[regIndex_++]);
break;
default: default:
MOZ_CRASH("Unexpected argument type"); MOZ_CRASH("Unexpected argument type");
} }
@ -83,16 +67,6 @@ ABIArgGenerator::next(MIRType type)
} }
current_ = ABIArg(FloatArgRegs[floatRegIndex_++]); current_ = ABIArg(FloatArgRegs[floatRegIndex_++]);
break; break;
case MIRType_Int32x4:
case MIRType_Float32x4:
if (floatRegIndex_ == NumFloatArgRegs) {
stackOffset_ = AlignBytes(stackOffset_, SimdStackAlignment);
current_ = ABIArg(stackOffset_);
stackOffset_ += Simd128DataSize;
break;
}
current_ = ABIArg(FloatArgRegs[floatRegIndex_++]);
break;
default: default:
MOZ_CRASH("Unexpected argument type"); MOZ_CRASH("Unexpected argument type");
} }

View File

@ -184,7 +184,10 @@ static MOZ_CONSTEXPR_VAR Register OsrFrameReg = IntArgReg3;
static MOZ_CONSTEXPR_VAR Register PreBarrierReg = rdx; static MOZ_CONSTEXPR_VAR Register PreBarrierReg = rdx;
static const uint32_t ABIStackAlignment = 16; // GCC stack is aligned on 16 bytes, but we don't maintain the invariant in
// jitted code.
static const uint32_t StackAlignment = 16;
static const bool StackKeptAligned = false;
static const uint32_t CodeAlignment = 8; static const uint32_t CodeAlignment = 8;
// This boolean indicates whether we support SIMD instructions flavoured for // This boolean indicates whether we support SIMD instructions flavoured for
@ -194,8 +197,6 @@ static const uint32_t CodeAlignment = 8;
static const bool SupportsSimd = true; static const bool SupportsSimd = true;
static const uint32_t SimdStackAlignment = 16; static const uint32_t SimdStackAlignment = 16;
static const uint32_t AsmJSStackAlignment = SimdStackAlignment;
static const Scale ScalePointer = TimesEight; static const Scale ScalePointer = TimesEight;
} // namespace jit } // namespace jit
@ -602,30 +603,12 @@ class Assembler : public AssemblerX86Shared
CodeOffsetLabel loadRipRelativeDouble(FloatRegister dest) { CodeOffsetLabel loadRipRelativeDouble(FloatRegister dest) {
return CodeOffsetLabel(masm.movsd_ripr(dest.code()).offset()); return CodeOffsetLabel(masm.movsd_ripr(dest.code()).offset());
} }
CodeOffsetLabel loadRipRelativeFloat32(FloatRegister dest) {
return CodeOffsetLabel(masm.movss_ripr(dest.code()).offset());
}
CodeOffsetLabel loadRipRelativeInt32x4(FloatRegister dest) {
return CodeOffsetLabel(masm.movdqa_ripr(dest.code()).offset());
}
CodeOffsetLabel loadRipRelativeFloat32x4(FloatRegister dest) {
return CodeOffsetLabel(masm.movaps_ripr(dest.code()).offset());
}
CodeOffsetLabel storeRipRelativeInt32(Register dest) { CodeOffsetLabel storeRipRelativeInt32(Register dest) {
return CodeOffsetLabel(masm.movl_rrip(dest.code()).offset()); return CodeOffsetLabel(masm.movl_rrip(dest.code()).offset());
} }
CodeOffsetLabel storeRipRelativeDouble(FloatRegister dest) { CodeOffsetLabel storeRipRelativeDouble(FloatRegister dest) {
return CodeOffsetLabel(masm.movsd_rrip(dest.code()).offset()); return CodeOffsetLabel(masm.movsd_rrip(dest.code()).offset());
} }
CodeOffsetLabel storeRipRelativeFloat32(FloatRegister dest) {
return CodeOffsetLabel(masm.movss_rrip(dest.code()).offset());
}
CodeOffsetLabel storeRipRelativeInt32x4(FloatRegister dest) {
return CodeOffsetLabel(masm.movdqa_rrip(dest.code()).offset());
}
CodeOffsetLabel storeRipRelativeFloat32x4(FloatRegister dest) {
return CodeOffsetLabel(masm.movaps_rrip(dest.code()).offset());
}
CodeOffsetLabel leaRipRelative(Register dest) { CodeOffsetLabel leaRipRelative(Register dest) {
return CodeOffsetLabel(masm.leaq_rip(dest.code()).offset()); return CodeOffsetLabel(masm.leaq_rip(dest.code()).offset());
} }

View File

@ -349,32 +349,11 @@ CodeGeneratorX64::visitAsmJSLoadGlobalVar(LAsmJSLoadGlobalVar *ins)
{ {
MAsmJSLoadGlobalVar *mir = ins->mir(); MAsmJSLoadGlobalVar *mir = ins->mir();
MIRType type = mir->type();
JS_ASSERT(IsNumberType(type) || IsSimdType(type));
CodeOffsetLabel label; CodeOffsetLabel label;
switch (type) { if (mir->type() == MIRType_Int32)
case MIRType_Int32:
label = masm.loadRipRelativeInt32(ToRegister(ins->output())); label = masm.loadRipRelativeInt32(ToRegister(ins->output()));
break; else
case MIRType_Float32:
label = masm.loadRipRelativeFloat32(ToFloatRegister(ins->output()));
break;
case MIRType_Double:
label = masm.loadRipRelativeDouble(ToFloatRegister(ins->output())); label = masm.loadRipRelativeDouble(ToFloatRegister(ins->output()));
break;
// Aligned access: code is aligned on PageSize + there is padding
// before the global data section.
case MIRType_Int32x4:
label = masm.loadRipRelativeInt32x4(ToFloatRegister(ins->output()));
break;
case MIRType_Float32x4:
label = masm.loadRipRelativeFloat32x4(ToFloatRegister(ins->output()));
break;
default:
MOZ_ASSUME_UNREACHABLE("unexpected type in visitAsmJSLoadGlobalVar");
}
masm.append(AsmJSGlobalAccess(label, mir->globalDataOffset())); masm.append(AsmJSGlobalAccess(label, mir->globalDataOffset()));
return true; return true;
} }
@ -385,31 +364,13 @@ CodeGeneratorX64::visitAsmJSStoreGlobalVar(LAsmJSStoreGlobalVar *ins)
MAsmJSStoreGlobalVar *mir = ins->mir(); MAsmJSStoreGlobalVar *mir = ins->mir();
MIRType type = mir->value()->type(); MIRType type = mir->value()->type();
JS_ASSERT(IsNumberType(type) || IsSimdType(type)); JS_ASSERT(IsNumberType(type));
CodeOffsetLabel label; CodeOffsetLabel label;
switch (type) { if (type == MIRType_Int32)
case MIRType_Int32:
label = masm.storeRipRelativeInt32(ToRegister(ins->value())); label = masm.storeRipRelativeInt32(ToRegister(ins->value()));
break; else
case MIRType_Float32:
label = masm.storeRipRelativeFloat32(ToFloatRegister(ins->value()));
break;
case MIRType_Double:
label = masm.storeRipRelativeDouble(ToFloatRegister(ins->value())); label = masm.storeRipRelativeDouble(ToFloatRegister(ins->value()));
break;
// Aligned access: code is aligned on PageSize + there is padding
// before the global data section.
case MIRType_Int32x4:
label = masm.storeRipRelativeInt32x4(ToFloatRegister(ins->value()));
break;
case MIRType_Float32x4:
label = masm.storeRipRelativeFloat32x4(ToFloatRegister(ins->value()));
break;
default:
MOZ_ASSUME_UNREACHABLE("unexpected type in visitAsmJSStoreGlobalVar");
}
masm.append(AsmJSGlobalAccess(label, mir->globalDataOffset())); masm.append(AsmJSGlobalAccess(label, mir->globalDataOffset()));
return true; return true;
} }

View File

@ -200,7 +200,7 @@ MacroAssemblerX64::setupUnalignedABICall(uint32_t args, Register scratch)
dynamicAlignment_ = true; dynamicAlignment_ = true;
movq(rsp, scratch); movq(rsp, scratch);
andq(Imm32(~(ABIStackAlignment - 1)), rsp); andq(Imm32(~(StackAlignment - 1)), rsp);
push(scratch); push(scratch);
} }
@ -270,11 +270,11 @@ MacroAssemblerX64::callWithABIPre(uint32_t *stackAdjust)
if (dynamicAlignment_) { if (dynamicAlignment_) {
*stackAdjust = stackForCall_ *stackAdjust = stackForCall_
+ ComputeByteAlignment(stackForCall_ + sizeof(intptr_t), + ComputeByteAlignment(stackForCall_ + sizeof(intptr_t),
ABIStackAlignment); StackAlignment);
} else { } else {
*stackAdjust = stackForCall_ *stackAdjust = stackForCall_
+ ComputeByteAlignment(stackForCall_ + framePushed_, + ComputeByteAlignment(stackForCall_ + framePushed_,
ABIStackAlignment); StackAlignment);
} }
reserveStack(*stackAdjust); reserveStack(*stackAdjust);
@ -293,7 +293,7 @@ MacroAssemblerX64::callWithABIPre(uint32_t *stackAdjust)
#ifdef DEBUG #ifdef DEBUG
{ {
Label good; Label good;
testq(rsp, Imm32(ABIStackAlignment - 1)); testq(rsp, Imm32(StackAlignment - 1));
j(Equal, &good); j(Equal, &good);
breakpoint(); breakpoint();
bind(&good); bind(&good);

View File

@ -551,6 +551,7 @@ JitRuntime::generateBailoutHandler(JSContext *cx, ExecutionMode mode)
JitCode * JitCode *
JitRuntime::generateVMWrapper(JSContext *cx, const VMFunction &f) JitRuntime::generateVMWrapper(JSContext *cx, const VMFunction &f)
{ {
JS_ASSERT(!StackKeptAligned);
JS_ASSERT(functionWrappers_); JS_ASSERT(functionWrappers_);
JS_ASSERT(functionWrappers_->initialized()); JS_ASSERT(functionWrappers_->initialized());
VMWrapperMap::AddPtr p = functionWrappers_->lookupForAdd(&f); VMWrapperMap::AddPtr p = functionWrappers_->lookupForAdd(&f);

View File

@ -19,26 +19,16 @@ ABIArgGenerator::ABIArgGenerator()
ABIArg ABIArg
ABIArgGenerator::next(MIRType type) ABIArgGenerator::next(MIRType type)
{ {
current_ = ABIArg(stackOffset_);
switch (type) { switch (type) {
case MIRType_Int32: case MIRType_Int32:
case MIRType_Pointer: case MIRType_Pointer:
current_ = ABIArg(stackOffset_);
stackOffset_ += sizeof(uint32_t); stackOffset_ += sizeof(uint32_t);
break; break;
case MIRType_Float32: // Float32 moves are actually double moves case MIRType_Float32: // Float32 moves are actually double moves
case MIRType_Double: case MIRType_Double:
current_ = ABIArg(stackOffset_);
stackOffset_ += sizeof(uint64_t); stackOffset_ += sizeof(uint64_t);
break; break;
case MIRType_Int32x4:
case MIRType_Float32x4:
// SIMD values aren't passed in or out of C++, so we can make up
// whatever internal ABI we like. visitAsmJSPassArg assumes
// SimdStackAlignment.
stackOffset_ = AlignBytes(stackOffset_, SimdStackAlignment);
current_ = ABIArg(stackOffset_);
stackOffset_ += Simd128DataSize;
break;
default: default:
MOZ_CRASH("Unexpected argument type"); MOZ_CRASH("Unexpected argument type");
} }

View File

@ -108,13 +108,14 @@ static MOZ_CONSTEXPR_VAR Register AsmJSIonExitRegD0 = edi;
static MOZ_CONSTEXPR_VAR Register AsmJSIonExitRegD1 = eax; static MOZ_CONSTEXPR_VAR Register AsmJSIonExitRegD1 = eax;
static MOZ_CONSTEXPR_VAR Register AsmJSIonExitRegD2 = esi; static MOZ_CONSTEXPR_VAR Register AsmJSIonExitRegD2 = esi;
// GCC stack is aligned on 16 bytes. Ion does not maintain this for internal // GCC stack is aligned on 16 bytes, but we don't maintain the invariant in
// calls. asm.js code does. // jitted code.
#if defined(__GNUC__) #if defined(__GNUC__)
static const uint32_t ABIStackAlignment = 16; static const uint32_t StackAlignment = 16;
#else #else
static const uint32_t ABIStackAlignment = 4; static const uint32_t StackAlignment = 4;
#endif #endif
static const bool StackKeptAligned = false;
static const uint32_t CodeAlignment = 8; static const uint32_t CodeAlignment = 8;
// This boolean indicates whether we support SIMD instructions flavoured for // This boolean indicates whether we support SIMD instructions flavoured for
@ -124,8 +125,6 @@ static const uint32_t CodeAlignment = 8;
static const bool SupportsSimd = true; static const bool SupportsSimd = true;
static const uint32_t SimdStackAlignment = 16; static const uint32_t SimdStackAlignment = 16;
static const uint32_t AsmJSStackAlignment = SimdStackAlignment;
struct ImmTag : public Imm32 struct ImmTag : public Imm32
{ {
ImmTag(JSValueTag mask) ImmTag(JSValueTag mask)
@ -523,16 +522,6 @@ class Assembler : public AssemblerX86Shared
masm.movsd_mr(src.addr, dest.code()); masm.movsd_mr(src.addr, dest.code());
return CodeOffsetLabel(masm.currentOffset()); return CodeOffsetLabel(masm.currentOffset());
} }
CodeOffsetLabel movdqaWithPatch(PatchedAbsoluteAddress src, FloatRegister dest) {
JS_ASSERT(HasSSE2());
masm.movdqa_mr(src.addr, dest.code());
return CodeOffsetLabel(masm.currentOffset());
}
CodeOffsetLabel movapsWithPatch(PatchedAbsoluteAddress src, FloatRegister dest) {
JS_ASSERT(HasSSE2());
masm.movaps_mr(src.addr, dest.code());
return CodeOffsetLabel(masm.currentOffset());
}
// Store to *dest where dest can be patched. // Store to *dest where dest can be patched.
CodeOffsetLabel movbWithPatch(Register src, PatchedAbsoluteAddress dest) { CodeOffsetLabel movbWithPatch(Register src, PatchedAbsoluteAddress dest) {
@ -557,16 +546,6 @@ class Assembler : public AssemblerX86Shared
masm.movsd_rm(src.code(), dest.addr); masm.movsd_rm(src.code(), dest.addr);
return CodeOffsetLabel(masm.currentOffset()); return CodeOffsetLabel(masm.currentOffset());
} }
CodeOffsetLabel movdqaWithPatch(FloatRegister src, PatchedAbsoluteAddress dest) {
JS_ASSERT(HasSSE2());
masm.movdqa_rm(src.code(), dest.addr);
return CodeOffsetLabel(masm.currentOffset());
}
CodeOffsetLabel movapsWithPatch(FloatRegister src, PatchedAbsoluteAddress dest) {
JS_ASSERT(HasSSE2());
masm.movaps_rm(src.code(), dest.addr);
return CodeOffsetLabel(masm.currentOffset());
}
void loadAsmJSActivation(Register dest) { void loadAsmJSActivation(Register dest) {
CodeOffsetLabel label = movlWithPatch(PatchedAbsoluteAddress(), dest); CodeOffsetLabel label = movlWithPatch(PatchedAbsoluteAddress(), dest);

View File

@ -462,30 +462,15 @@ CodeGeneratorX86::visitAsmJSLoadGlobalVar(LAsmJSLoadGlobalVar *ins)
{ {
MAsmJSLoadGlobalVar *mir = ins->mir(); MAsmJSLoadGlobalVar *mir = ins->mir();
MIRType type = mir->type(); MIRType type = mir->type();
JS_ASSERT(IsNumberType(type) || IsSimdType(type)); JS_ASSERT(IsNumberType(type));
CodeOffsetLabel label; CodeOffsetLabel label;
switch (type) { if (type == MIRType_Int32)
case MIRType_Int32:
label = masm.movlWithPatch(PatchedAbsoluteAddress(), ToRegister(ins->output())); label = masm.movlWithPatch(PatchedAbsoluteAddress(), ToRegister(ins->output()));
break; else if (type == MIRType_Float32)
case MIRType_Float32:
label = masm.movssWithPatch(PatchedAbsoluteAddress(), ToFloatRegister(ins->output())); label = masm.movssWithPatch(PatchedAbsoluteAddress(), ToFloatRegister(ins->output()));
break; else
case MIRType_Double:
label = masm.movsdWithPatch(PatchedAbsoluteAddress(), ToFloatRegister(ins->output())); label = masm.movsdWithPatch(PatchedAbsoluteAddress(), ToFloatRegister(ins->output()));
break;
// Aligned access: code is aligned on PageSize + there is padding
// before the global data section.
case MIRType_Int32x4:
label = masm.movdqaWithPatch(PatchedAbsoluteAddress(), ToFloatRegister(ins->output()));
break;
case MIRType_Float32x4:
label = masm.movapsWithPatch(PatchedAbsoluteAddress(), ToFloatRegister(ins->output()));
break;
default:
MOZ_ASSUME_UNREACHABLE("unexpected type in visitAsmJSLoadGlobalVar");
}
masm.append(AsmJSGlobalAccess(label, mir->globalDataOffset())); masm.append(AsmJSGlobalAccess(label, mir->globalDataOffset()));
return true; return true;
} }
@ -496,30 +481,15 @@ CodeGeneratorX86::visitAsmJSStoreGlobalVar(LAsmJSStoreGlobalVar *ins)
MAsmJSStoreGlobalVar *mir = ins->mir(); MAsmJSStoreGlobalVar *mir = ins->mir();
MIRType type = mir->value()->type(); MIRType type = mir->value()->type();
JS_ASSERT(IsNumberType(type) || IsSimdType(type)); JS_ASSERT(IsNumberType(type));
CodeOffsetLabel label; CodeOffsetLabel label;
switch (type) { if (type == MIRType_Int32)
case MIRType_Int32:
label = masm.movlWithPatch(ToRegister(ins->value()), PatchedAbsoluteAddress()); label = masm.movlWithPatch(ToRegister(ins->value()), PatchedAbsoluteAddress());
break; else if (type == MIRType_Float32)
case MIRType_Float32:
label = masm.movssWithPatch(ToFloatRegister(ins->value()), PatchedAbsoluteAddress()); label = masm.movssWithPatch(ToFloatRegister(ins->value()), PatchedAbsoluteAddress());
break; else
case MIRType_Double:
label = masm.movsdWithPatch(ToFloatRegister(ins->value()), PatchedAbsoluteAddress()); label = masm.movsdWithPatch(ToFloatRegister(ins->value()), PatchedAbsoluteAddress());
break;
// Aligned access: code is aligned on PageSize + there is padding
// before the global data section.
case MIRType_Int32x4:
label = masm.movdqaWithPatch(ToFloatRegister(ins->value()), PatchedAbsoluteAddress());
break;
case MIRType_Float32x4:
label = masm.movapsWithPatch(ToFloatRegister(ins->value()), PatchedAbsoluteAddress());
break;
default:
MOZ_ASSUME_UNREACHABLE("unexpected type in visitAsmJSStoreGlobalVar");
}
masm.append(AsmJSGlobalAccess(label, mir->globalDataOffset())); masm.append(AsmJSGlobalAccess(label, mir->globalDataOffset()));
return true; return true;
} }

View File

@ -227,7 +227,7 @@ MacroAssemblerX86::setupUnalignedABICall(uint32_t args, Register scratch)
dynamicAlignment_ = true; dynamicAlignment_ = true;
movl(esp, scratch); movl(esp, scratch);
andl(Imm32(~(ABIStackAlignment - 1)), esp); andl(Imm32(~(StackAlignment - 1)), esp);
push(scratch); push(scratch);
} }
@ -267,11 +267,11 @@ MacroAssemblerX86::callWithABIPre(uint32_t *stackAdjust)
if (dynamicAlignment_) { if (dynamicAlignment_) {
*stackAdjust = stackForCall_ *stackAdjust = stackForCall_
+ ComputeByteAlignment(stackForCall_ + sizeof(intptr_t), + ComputeByteAlignment(stackForCall_ + sizeof(intptr_t),
ABIStackAlignment); StackAlignment);
} else { } else {
*stackAdjust = stackForCall_ *stackAdjust = stackForCall_
+ ComputeByteAlignment(stackForCall_ + framePushed_, + ComputeByteAlignment(stackForCall_ + framePushed_,
ABIStackAlignment); StackAlignment);
} }
reserveStack(*stackAdjust); reserveStack(*stackAdjust);
@ -291,7 +291,7 @@ MacroAssemblerX86::callWithABIPre(uint32_t *stackAdjust)
{ {
// Check call alignment. // Check call alignment.
Label good; Label good;
testl(esp, Imm32(ABIStackAlignment - 1)); testl(esp, Imm32(StackAlignment - 1));
j(Equal, &good); j(Equal, &good);
breakpoint(); breakpoint();
bind(&good); bind(&good);

View File

@ -590,6 +590,7 @@ JitRuntime::generateBailoutHandler(JSContext *cx, ExecutionMode mode)
JitCode * JitCode *
JitRuntime::generateVMWrapper(JSContext *cx, const VMFunction &f) JitRuntime::generateVMWrapper(JSContext *cx, const VMFunction &f)
{ {
JS_ASSERT(!StackKeptAligned);
JS_ASSERT(functionWrappers_); JS_ASSERT(functionWrappers_);
JS_ASSERT(functionWrappers_->initialized()); JS_ASSERT(functionWrappers_->initialized());
VMWrapperMap::AddPtr p = functionWrappers_->lookupForAdd(&f); VMWrapperMap::AddPtr p = functionWrappers_->lookupForAdd(&f);

View File

@ -291,7 +291,6 @@ MSG_DEF(JSMSG_STRICT_CODE_WITH, 0, JSEXN_SYNTAXERR, "strict mode code may
MSG_DEF(JSMSG_STRICT_FUNCTION_STATEMENT, 0, JSEXN_SYNTAXERR, "in strict mode code, functions may be declared only at top level or immediately within another function") MSG_DEF(JSMSG_STRICT_FUNCTION_STATEMENT, 0, JSEXN_SYNTAXERR, "in strict mode code, functions may be declared only at top level or immediately within another function")
MSG_DEF(JSMSG_SYNTAX_ERROR, 0, JSEXN_SYNTAXERR, "syntax error") MSG_DEF(JSMSG_SYNTAX_ERROR, 0, JSEXN_SYNTAXERR, "syntax error")
MSG_DEF(JSMSG_TEMPLSTR_UNTERM_EXPR, 0, JSEXN_SYNTAXERR, "missing } in template string") MSG_DEF(JSMSG_TEMPLSTR_UNTERM_EXPR, 0, JSEXN_SYNTAXERR, "missing } in template string")
MSG_DEF(JSMSG_SIMD_NOT_A_VECTOR, 0, JSEXN_TYPEERR, "value isn't a SIMD value object")
MSG_DEF(JSMSG_TOO_MANY_CASES, 0, JSEXN_INTERNALERR, "too many switch cases") MSG_DEF(JSMSG_TOO_MANY_CASES, 0, JSEXN_INTERNALERR, "too many switch cases")
MSG_DEF(JSMSG_TOO_MANY_CATCH_VARS, 0, JSEXN_SYNTAXERR, "too many catch variables") MSG_DEF(JSMSG_TOO_MANY_CATCH_VARS, 0, JSEXN_SYNTAXERR, "too many catch variables")
MSG_DEF(JSMSG_TOO_MANY_CON_ARGS, 0, JSEXN_SYNTAXERR, "too many constructor arguments") MSG_DEF(JSMSG_TOO_MANY_CON_ARGS, 0, JSEXN_SYNTAXERR, "too many constructor arguments")

View File

@ -295,7 +295,6 @@ struct ThreadSafeContext : ContextFriendFields,
bool signalHandlersInstalled() const { return runtime_->signalHandlersInstalled(); } bool signalHandlersInstalled() const { return runtime_->signalHandlersInstalled(); }
bool canUseSignalHandlers() const { return runtime_->canUseSignalHandlers(); } bool canUseSignalHandlers() const { return runtime_->canUseSignalHandlers(); }
bool jitSupportsFloatingPoint() const { return runtime_->jitSupportsFloatingPoint; } bool jitSupportsFloatingPoint() const { return runtime_->jitSupportsFloatingPoint; }
bool jitSupportsSimd() const { return runtime_->jitSupportsSimd; }
// Thread local data that may be accessed freely. // Thread local data that may be accessed freely.
DtoaState *dtoaState() { DtoaState *dtoaState() {

View File

@ -211,7 +211,6 @@ JSRuntime::JSRuntime(JSRuntime *parentRuntime)
wrapObjectCallbacks(&DefaultWrapObjectCallbacks), wrapObjectCallbacks(&DefaultWrapObjectCallbacks),
preserveWrapperCallback(nullptr), preserveWrapperCallback(nullptr),
jitSupportsFloatingPoint(false), jitSupportsFloatingPoint(false),
jitSupportsSimd(false),
ionPcScriptCache(nullptr), ionPcScriptCache(nullptr),
threadPool(this), threadPool(this),
defaultJSContextCallback(nullptr), defaultJSContextCallback(nullptr),
@ -316,7 +315,6 @@ JSRuntime::init(uint32_t maxbytes, uint32_t maxNurseryBytes)
nativeStackBase = GetNativeStackBase(); nativeStackBase = GetNativeStackBase();
jitSupportsFloatingPoint = js::jit::JitSupportsFloatingPoint(); jitSupportsFloatingPoint = js::jit::JitSupportsFloatingPoint();
jitSupportsSimd = js::jit::JitSupportsSimd();
signalHandlersInstalled_ = EnsureAsmJSSignalHandlersInstalled(this); signalHandlersInstalled_ = EnsureAsmJSSignalHandlersInstalled(this);
canUseSignalHandlers_ = signalHandlersInstalled_ && !SignalBasedTriggersDisabled(); canUseSignalHandlers_ = signalHandlersInstalled_ && !SignalBasedTriggersDisabled();

View File

@ -1274,7 +1274,6 @@ struct JSRuntime : public JS::shadow::Runtime,
} }
bool jitSupportsFloatingPoint; bool jitSupportsFloatingPoint;
bool jitSupportsSimd;
// Used to reset stack limit after a signaled interrupt (i.e. jitStackLimit_ = -1) // Used to reset stack limit after a signaled interrupt (i.e. jitStackLimit_ = -1)
// has been noticed by Ion/Baseline. // has been noticed by Ion/Baseline.

View File

@ -1550,7 +1550,7 @@ jit::JitActivation::markRematerializedFrames(JSTracer *trc)
AsmJSActivation::AsmJSActivation(JSContext *cx, AsmJSModule &module) AsmJSActivation::AsmJSActivation(JSContext *cx, AsmJSModule &module)
: Activation(cx, AsmJS), : Activation(cx, AsmJS),
module_(module), module_(module),
entrySP_(nullptr), errorRejoinSP_(nullptr),
profiler_(nullptr), profiler_(nullptr),
resumePC_(nullptr), resumePC_(nullptr),
fp_(nullptr), fp_(nullptr),
@ -1573,7 +1573,7 @@ AsmJSActivation::AsmJSActivation(JSContext *cx, AsmJSModule &module)
JSRuntime::AutoLockForInterrupt lock(cx->runtime()); JSRuntime::AutoLockForInterrupt lock(cx->runtime());
cx->mainThread().asmJSActivationStack_ = this; cx->mainThread().asmJSActivationStack_ = this;
(void) entrySP_; // squelch GCC warning (void) errorRejoinSP_; // squelch GCC warning
} }
AsmJSActivation::~AsmJSActivation() AsmJSActivation::~AsmJSActivation()

View File

@ -1482,7 +1482,7 @@ class AsmJSActivation : public Activation
AsmJSModule &module_; AsmJSModule &module_;
AsmJSActivation *prevAsmJS_; AsmJSActivation *prevAsmJS_;
AsmJSActivation *prevAsmJSForModule_; AsmJSActivation *prevAsmJSForModule_;
void *entrySP_; void *errorRejoinSP_;
SPSProfiler *profiler_; SPSProfiler *profiler_;
void *resumePC_; void *resumePC_;
uint8_t *fp_; uint8_t *fp_;
@ -1512,7 +1512,7 @@ class AsmJSActivation : public Activation
static unsigned offsetOfResumePC() { return offsetof(AsmJSActivation, resumePC_); } static unsigned offsetOfResumePC() { return offsetof(AsmJSActivation, resumePC_); }
// Written by JIT code: // Written by JIT code:
static unsigned offsetOfEntrySP() { return offsetof(AsmJSActivation, entrySP_); } static unsigned offsetOfErrorRejoinSP() { return offsetof(AsmJSActivation, errorRejoinSP_); }
static unsigned offsetOfFP() { return offsetof(AsmJSActivation, fp_); } static unsigned offsetOfFP() { return offsetof(AsmJSActivation, fp_); }
static unsigned offsetOfExitReason() { return offsetof(AsmJSActivation, exitReason_); } static unsigned offsetOfExitReason() { return offsetof(AsmJSActivation, exitReason_); }