Merge inbound to m-c. a=merge

Ryan VanderMeulen 2014-08-30 22:36:52 -04:00
commit f11f3732ac
71 changed files with 2759 additions and 399 deletions

View File

@@ -550,6 +550,10 @@ GK_ATOM(lwthemetextcolor, "lwthemetextcolor")
GK_ATOM(main, "main")
GK_ATOM(map, "map")
GK_ATOM(manifest, "manifest")
GK_ATOM(marginBottom, "margin-bottom")
GK_ATOM(marginLeft, "margin-left")
GK_ATOM(marginRight, "margin-right")
GK_ATOM(marginTop, "margin-top")
GK_ATOM(marginheight, "marginheight")
GK_ATOM(marginwidth, "marginwidth")
GK_ATOM(mark, "mark")
@@ -1111,6 +1115,7 @@ GK_ATOM(text_decoration, "text-decoration")
GK_ATOM(terminate, "terminate")
GK_ATOM(test, "test")
GK_ATOM(text, "text")
GK_ATOM(textAlign, "text-align")
GK_ATOM(textarea, "textarea")
GK_ATOM(textbox, "textbox")
GK_ATOM(textnode, "textnode")
@@ -2234,10 +2239,6 @@ GK_ATOM(itemset, "itemset")
GK_ATOM(lineNumber, "line-number")
GK_ATOM(linkedPanel, "linkedpanel")
GK_ATOM(live, "live")
GK_ATOM(marginBottom, "margin-bottom")
GK_ATOM(marginLeft, "margin-left")
GK_ATOM(marginRight, "margin-right")
GK_ATOM(marginTop, "margin-top")
GK_ATOM(menuitemcheckbox, "menuitemcheckbox")
GK_ATOM(menuitemradio, "menuitemradio")
GK_ATOM(mixed, "mixed")
@@ -2256,7 +2257,6 @@ GK_ATOM(spinbutton, "spinbutton")
GK_ATOM(status, "status")
GK_ATOM(tableCellIndex, "table-cell-index")
GK_ATOM(tablist, "tablist")
GK_ATOM(textAlign, "text-align")
GK_ATOM(textIndent, "text-indent")
GK_ATOM(textInputType, "text-input-type")
GK_ATOM(textLineThroughColor, "text-line-through-color")

View File

@@ -259,15 +259,24 @@ nsImageLoadingContent::OnStopRequest(imgIRequest* aRequest,
if (shell && shell->IsVisible() &&
(!shell->DidInitialize() || shell->IsPaintingSuppressed())) {
// If we've gotten a frame and that frame has called FrameCreate and that
// frame has been reflowed, then we know that it checked its own visibility,
// so we can trust our visible count and we don't start a decode if we are
// not visible.
nsIFrame* f = GetOurPrimaryFrame();
if (!mFrameCreateCalled || !f || (f->GetStateBits() & NS_FRAME_FIRST_REFLOW) ||
mVisibleCount > 0 || shell->AssumeAllImagesVisible()) {
if (NS_SUCCEEDED(mCurrentRequest->StartDecoding())) {
startedDecoding = true;
// If we haven't gotten a frame yet, either we aren't going to (so don't
// bother kicking off a decode), or we will get one very soon, on the next
// refresh driver tick when it flushes. And it will most likely be a
// specific image type frame (we only create generic (i.e. inline) type
// frames for images that don't have a size, and since we have all the data
// we should have the size), which will check its own visibility on its
// first reflow.
if (f) {
// If we've gotten a frame and that frame has called FrameCreate and that
// frame has been reflowed, then we know that it checked its own visibility,
// so we can trust our visible count and we don't start a decode if we are
// not visible.
if (!mFrameCreateCalled || (f->GetStateBits() & NS_FRAME_FIRST_REFLOW) ||
mVisibleCount > 0 || shell->AssumeAllImagesVisible()) {
if (NS_SUCCEEDED(mCurrentRequest->StartDecoding())) {
startedDecoding = true;
}
}
}
}

View File

@@ -27,6 +27,10 @@ SurfaceFormatToDXGIFormat(gfx::SurfaceFormat aFormat)
return DXGI_FORMAT_B8G8R8A8_UNORM;
case SurfaceFormat::B8G8R8X8:
return DXGI_FORMAT_B8G8R8A8_UNORM;
case SurfaceFormat::R8G8B8A8:
return DXGI_FORMAT_R8G8B8A8_UNORM;
case SurfaceFormat::R8G8B8X8:
return DXGI_FORMAT_R8G8B8A8_UNORM;
case SurfaceFormat::A8:
return DXGI_FORMAT_A8_UNORM;
default:

View File

@@ -3385,9 +3385,26 @@ gfxFont::DrawGlyphs(gfxShapedText *aShapedText,
gfxFloat height = GetMetrics().maxAscent;
gfxRect glyphRect(pt.x, pt.y - height,
advanceDevUnits, height);
// If there's a fake-italic skew in effect as part
// of the drawTarget's transform, we need to remove
// this before drawing the hexbox. (Bug 983985)
Matrix oldMat;
if (aFontParams.passedInvMatrix) {
oldMat = aRunParams.dt->GetTransform();
aRunParams.dt->SetTransform(
*aFontParams.passedInvMatrix * oldMat);
}
gfxFontMissingGlyphs::DrawMissingGlyph(
aRunParams.context, glyphRect, details->mGlyphID,
aShapedText->GetAppUnitsPerDevUnit());
// Restore the matrix, if we modified it before
// drawing the hexbox.
if (aFontParams.passedInvMatrix) {
aRunParams.dt->SetTransform(oldMat);
}
}
} else {
gfxPoint glyphXY(*aPt);

View File

@@ -356,11 +356,11 @@ js::GenerateAsmJSStackOverflowExit(MacroAssembler &masm, Label *overflowExit, La
masm.storePtr(StackPointer, Address(activation, AsmJSActivation::offsetOfFP()));
// Prepare the stack for calling C++.
if (unsigned stackDec = StackDecrementForCall(sizeof(AsmJSFrame), ShadowStackSpace))
masm.subPtr(Imm32(stackDec), StackPointer);
if (uint32_t d = StackDecrementForCall(ABIStackAlignment, sizeof(AsmJSFrame), ShadowStackSpace))
masm.subPtr(Imm32(d), StackPointer);
// No need to restore the stack; the throw stub pops everything.
masm.assertStackAlignment();
masm.assertStackAlignment(ABIStackAlignment);
masm.call(AsmJSImmPtr(AsmJSImm_ReportOverRecursed));
masm.jump(throwLabel);
}

View File

@@ -170,7 +170,6 @@ void
GenerateAsmJSExitEpilogue(jit::MacroAssembler &masm, unsigned framePushed, AsmJSExit::Reason reason,
jit::Label *profilingReturn);
} // namespace js
#endif // asmjs_AsmJSFrameIterator_h

View File

@@ -30,6 +30,7 @@
#include "jswrapper.h"
#include "asmjs/AsmJSModule.h"
#include "builtin/SIMD.h"
#include "frontend/BytecodeCompiler.h"
#include "jit/Ion.h"
#include "jit/JitCommon.h"
@@ -96,59 +97,75 @@ GetDataProperty(JSContext *cx, HandleValue objVal, HandlePropertyName field, Mut
return true;
}
static bool
HasPureCoercion(JSContext *cx, HandleValue v)
{
if (IsVectorObject<Int32x4>(v) || IsVectorObject<Float32x4>(v))
return true;
// Ideally, we'd reject all non-SIMD non-primitives, but Emscripten has a
// bug that generates code that passes functions for some imports. To avoid
// breaking all the code that contains this bug, we make an exception for
// functions that don't have user-defined valueOf or toString, for their
// coercions are not observable and coercion via ToNumber/ToInt32
// definitely produces NaN/0. We should remove this special case later once
// most apps have been built with newer Emscripten.
jsid toString = NameToId(cx->names().toString);
if (v.toObject().is<JSFunction>() &&
HasObjectValueOf(&v.toObject(), cx) &&
ClassMethodIsNative(cx, &v.toObject(), &JSFunction::class_, toString, fun_toString))
{
return true;
}
return false;
}
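
For context, here is a minimal sketch of the import pattern this exception tolerates. This is a hypothetical example, not part of the commit, and assumes an asm.js-enabled shell:

// A scalar import coerced via ToNumber (+ffi.x). Passing a plain function
// for ffi.x would normally fail the "primitives only" link rule, but a
// function with no user-defined valueOf/toString has no observable
// coercion behavior, so HasPureCoercion() lets it link and ToNumber
// produces NaN for the global's datum.
var m = function(glob, ffi) {
    "use asm";
    var g = +ffi.x;
    function f() { return +g; }
    return f;
};
var f = m(this, { x: function(){} });  // links despite the function import
f();  // NaN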
static bool
ValidateGlobalVariable(JSContext *cx, const AsmJSModule &module, AsmJSModule::Global &global,
HandleValue importVal)
{
JS_ASSERT(global.which() == AsmJSModule::Global::Variable);
void *datum = module.globalVarIndexToGlobalDatum(global.varIndex());
void *datum = module.globalVarToGlobalDatum(global);
switch (global.varInitKind()) {
case AsmJSModule::Global::InitConstant: {
const AsmJSNumLit &lit = global.varInitNumLit();
const Value &v = lit.value();
switch (lit.which()) {
case AsmJSNumLit::Fixnum:
case AsmJSNumLit::NegativeInt:
case AsmJSNumLit::BigUnsigned:
*(int32_t *)datum = v.toInt32();
*(int32_t *)datum = lit.scalarValue().toInt32();
break;
case AsmJSNumLit::Double:
*(double *)datum = v.toDouble();
*(double *)datum = lit.scalarValue().toDouble();
break;
case AsmJSNumLit::Float:
*(float *)datum = static_cast<float>(v.toDouble());
*(float *)datum = static_cast<float>(lit.scalarValue().toDouble());
break;
case AsmJSNumLit::Int32x4:
memcpy(datum, lit.simdValue().asInt32x4(), Simd128DataSize);
break;
case AsmJSNumLit::Float32x4:
memcpy(datum, lit.simdValue().asFloat32x4(), Simd128DataSize);
break;
case AsmJSNumLit::OutOfRangeInt:
MOZ_MAKE_COMPILER_ASSUME_IS_UNREACHABLE("OutOfRangeInt isn't valid in the first place");
}
break;
}
case AsmJSModule::Global::InitImport: {
RootedPropertyName field(cx, global.varImportField());
RootedValue v(cx);
if (!GetDataProperty(cx, importVal, field, &v))
return false;
if (!v.isPrimitive()) {
// Ideally, we'd reject all non-primitives, but Emscripten has a bug
// that generates code that passes functions for some imports. To
// avoid breaking all the code that contains this bug, we make an
// exception for functions that don't have user-defined valueOf or
// toString, for their coercions are not observable and coercion via
// ToNumber/ToInt32 definitely produces NaN/0. We should remove this
// special case later once most apps have been built with newer
// Emscripten.
jsid toString = NameToId(cx->names().toString);
if (!v.toObject().is<JSFunction>() ||
!HasObjectValueOf(&v.toObject(), cx) ||
!ClassMethodIsNative(cx, &v.toObject(), &JSFunction::class_, toString, fun_toString))
{
return LinkFail(cx, "Imported values must be primitives");
}
}
if (!v.isPrimitive() && !HasPureCoercion(cx, v))
return LinkFail(cx, "Imported values must be primitives");
SimdConstant simdConstant;
switch (global.varInitCoercion()) {
case AsmJS_ToInt32:
if (!ToInt32(cx, v, (int32_t *)datum))
@@ -162,6 +179,16 @@ ValidateGlobalVariable(JSContext *cx, const AsmJSModule &module, AsmJSModule::Gl
if (!RoundFloat32(cx, v, (float *)datum))
return false;
break;
case AsmJS_ToInt32x4:
if (!ToSimdConstant<Int32x4>(cx, v, &simdConstant))
return false;
memcpy(datum, simdConstant.asInt32x4(), Simd128DataSize);
break;
case AsmJS_ToFloat32x4:
if (!ToSimdConstant<Float32x4>(cx, v, &simdConstant))
return false;
memcpy(datum, simdConstant.asFloat32x4(), Simd128DataSize);
break;
}
break;
}
@@ -241,6 +268,103 @@ ValidateMathBuiltinFunction(JSContext *cx, AsmJSModule::Global &global, HandleVa
return true;
}
static PropertyName *
SimdTypeToName(JSContext *cx, AsmJSSimdType type)
{
switch (type) {
case AsmJSSimdType_int32x4: return cx->names().int32x4;
case AsmJSSimdType_float32x4: return cx->names().float32x4;
}
MOZ_MAKE_COMPILER_ASSUME_IS_UNREACHABLE("unexpected SIMD type");
}
static X4TypeDescr::Type
AsmJSSimdTypeToTypeDescrType(AsmJSSimdType type)
{
switch (type) {
case AsmJSSimdType_int32x4: return Int32x4::type;
case AsmJSSimdType_float32x4: return Float32x4::type;
}
MOZ_MAKE_COMPILER_ASSUME_IS_UNREACHABLE("unexpected AsmJSSimdType");
}
static bool
ValidateSimdType(JSContext *cx, AsmJSModule::Global &global, HandleValue globalVal,
MutableHandleValue out)
{
RootedValue v(cx);
if (!GetDataProperty(cx, globalVal, cx->names().SIMD, &v))
return false;
AsmJSSimdType type;
if (global.which() == AsmJSModule::Global::SimdCtor)
type = global.simdCtorType();
else
type = global.simdOperationType();
RootedPropertyName simdTypeName(cx, SimdTypeToName(cx, type));
if (!GetDataProperty(cx, v, simdTypeName, &v))
return false;
if (!v.isObject())
return LinkFail(cx, "bad SIMD type");
RootedObject x4desc(cx, &v.toObject());
if (!x4desc->is<X4TypeDescr>())
return LinkFail(cx, "bad SIMD type");
if (AsmJSSimdTypeToTypeDescrType(type) != x4desc->as<X4TypeDescr>().type())
return LinkFail(cx, "bad SIMD type");
out.set(v);
return true;
}
static bool
ValidateSimdType(JSContext *cx, AsmJSModule::Global &global, HandleValue globalVal)
{
RootedValue _(cx);
return ValidateSimdType(cx, global, globalVal, &_);
}
static bool
ValidateSimdOperation(JSContext *cx, AsmJSModule::Global &global, HandleValue globalVal)
{
// SIMD operations are loaded from the SIMD type, so the type must have been
// validated before the operation.
RootedValue v(cx);
JS_ALWAYS_TRUE(ValidateSimdType(cx, global, globalVal, &v));
RootedPropertyName opName(cx, global.simdOperationName());
if (!GetDataProperty(cx, v, opName, &v))
return false;
Native native = nullptr;
switch (global.simdOperationType()) {
case AsmJSSimdType_int32x4:
switch (global.simdOperation()) {
case AsmJSSimdOperation_add: native = simd_int32x4_add; break;
case AsmJSSimdOperation_sub: native = simd_int32x4_sub; break;
case AsmJSSimdOperation_mul:
case AsmJSSimdOperation_div:
MOZ_MAKE_COMPILER_ASSUME_IS_UNREACHABLE("Mul and div shouldn't have been validated in "
"the first place");
}
break;
case AsmJSSimdType_float32x4:
switch (global.simdOperation()) {
case AsmJSSimdOperation_add: native = simd_float32x4_add; break;
case AsmJSSimdOperation_sub: native = simd_float32x4_sub; break;
case AsmJSSimdOperation_mul: native = simd_float32x4_mul; break;
case AsmJSSimdOperation_div: native = simd_float32x4_div; break;
}
break;
}
if (!native || !IsNativeFunction(v, native))
return LinkFail(cx, "bad SIMD.type.* operation");
return true;
}
static bool
ValidateConstant(JSContext *cx, AsmJSModule::Global &global, HandleValue globalVal)
{
@@ -377,6 +501,14 @@ DynamicallyLinkModule(JSContext *cx, CallArgs args, AsmJSModule &module)
if (!ValidateConstant(cx, global, globalVal))
return false;
break;
case AsmJSModule::Global::SimdCtor:
if (!ValidateSimdType(cx, global, globalVal))
return false;
break;
case AsmJSModule::Global::SimdOperation:
if (!ValidateSimdOperation(cx, global, globalVal))
return false;
break;
}
}
@@ -437,14 +569,14 @@ CallAsmJS(JSContext *cx, unsigned argc, Value *vp)
const AsmJSModule::ExportedFunction &func = FunctionToExportedFunction(callee, module);
// The calling convention for an external call into asm.js is to pass an
// array of 8-byte values where each value contains either a coerced int32
// (in the low word) or double value, with the coercions specified by the
// asm.js signature. The external entry point unpacks this array into the
// system-ABI-specified registers and stack memory and then calls into the
// internal entry point. The return value is stored in the first element of
// the array (which, therefore, must have length >= 1).
js::Vector<uint64_t, 8> coercedArgs(cx);
// array of 16-byte values where each value contains either a coerced int32
// (in the low word), a double value (in the low dword) or a SIMD vector
// value, with the coercions specified by the asm.js signature. The
// external entry point unpacks this array into the system-ABI-specified
// registers and stack memory and then calls into the internal entry point.
// The return value is stored in the first element of the array (which,
// therefore, must have length >= 1).
js::Vector<AsmJSModule::EntryArg, 8> coercedArgs(cx);
if (!coercedArgs.resize(Max<size_t>(1, func.numArgs())))
return false;
@@ -464,6 +596,20 @@ CallAsmJS(JSContext *cx, unsigned argc, Value *vp)
if (!RoundFloat32(cx, v, (float *)&coercedArgs[i]))
return false;
break;
case AsmJS_ToInt32x4: {
SimdConstant simd;
if (!ToSimdConstant<Int32x4>(cx, v, &simd))
return false;
memcpy(&coercedArgs[i], simd.asInt32x4(), Simd128DataSize);
break;
}
case AsmJS_ToFloat32x4: {
SimdConstant simd;
if (!ToSimdConstant<Float32x4>(cx, v, &simd))
return false;
memcpy(&coercedArgs[i], simd.asFloat32x4(), Simd128DataSize);
break;
}
}
}
@@ -501,6 +647,7 @@ CallAsmJS(JSContext *cx, unsigned argc, Value *vp)
return true;
}
JSObject *x4obj;
switch (func.returnType()) {
case AsmJSModule::Return_Void:
callArgs.rval().set(UndefinedValue());
@@ -511,6 +658,18 @@ CallAsmJS(JSContext *cx, unsigned argc, Value *vp)
case AsmJSModule::Return_Double:
callArgs.rval().set(NumberValue(*(double*)&coercedArgs[0]));
break;
case AsmJSModule::Return_Int32x4:
x4obj = CreateSimd<Int32x4>(cx, (int32_t*)&coercedArgs[0]);
if (!x4obj)
return false;
callArgs.rval().set(ObjectValue(*x4obj));
break;
case AsmJSModule::Return_Float32x4:
x4obj = CreateSimd<Float32x4>(cx, (float*)&coercedArgs[0]);
if (!x4obj)
return false;
callArgs.rval().set(ObjectValue(*x4obj));
break;
}
return true;
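
Taken together with the widened EntryArg, the entry stub can now shuttle SIMD values in both directions across the JS boundary. A sketch in the style of the new jit-tests below (assumes SIMD support, i.e. isSimdAvailable()):

// An export that takes and returns int32x4: the argument goes through the
// AsmJS_ToInt32x4 coercion into a 16-byte coercedArgs slot, and the
// Return_Int32x4 path rebuilds a fresh SIMD object from coercedArgs[0].
var m = function(glob) {
    "use asm";
    var i4 = glob.SIMD.int32x4;
    var i4a = i4.add;
    function f(x) {
        x = i4(x);
        return i4(i4a(x, x));
    }
    return f;
};
var doubled = m(this)(SIMD.int32x4(1, 2, 3, 4));  // int32x4(2, 4, 6, 8)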

View File

@@ -303,8 +303,8 @@ AsmJSModule::finish(ExclusiveContext *cx, TokenStream &tokenStream, MacroAssembl
// The global data section sits immediately after the executable (and
// other) data allocated by the MacroAssembler, so ensure it is
// double-aligned.
pod.codeBytes_ = AlignBytes(masm.bytesNeeded(), sizeof(double));
// SIMD-aligned.
pod.codeBytes_ = AlignBytes(masm.bytesNeeded(), SimdStackAlignment);
// The entire region is allocated via mmap/VirtualAlloc which requires
// units of pages.
@@ -518,11 +518,11 @@ TryEnablingIon(JSContext *cx, AsmJSModule &module, HandleFunction fun, uint32_t
if (fun->nargs() > size_t(argc))
return true;
// Normally the types should corresond, since we just ran with those types,
// Normally the types should correspond, since we just ran with those types,
// but there are reports this is asserting. Therefore doing it as a check, instead of DEBUG only.
if (!types::TypeScript::ThisTypes(script)->hasType(types::Type::UndefinedType()))
return true;
for(uint32_t i = 0; i < fun->nargs(); i++) {
for (uint32_t i = 0; i < fun->nargs(); i++) {
types::StackTypeSet *typeset = types::TypeScript::ArgTypes(script, i);
types::Type type = types::Type::DoubleType();
if (!argv[i].isDouble())

View File

@@ -27,8 +27,10 @@
#include "asmjs/AsmJSFrameIterator.h"
#include "asmjs/AsmJSValidate.h"
#include "builtin/SIMD.h"
#include "gc/Marking.h"
#include "jit/IonMacroAssembler.h"
#include "jit/IonTypes.h"
#ifdef JS_ION_PERF
# include "jit/PerfSpewer.h"
#endif
@@ -47,7 +49,9 @@ enum AsmJSCoercion
{
AsmJS_ToInt32,
AsmJS_ToNumber,
AsmJS_FRound
AsmJS_FRound,
AsmJS_ToInt32x4,
AsmJS_ToFloat32x4
};
// The asm.js spec recognizes this set of builtin Math functions.
@@ -62,6 +66,22 @@ enum AsmJSMathBuiltinFunction
AsmJSMathBuiltin_clz32
};
// Set of known attributes of the global SIMD object, i.e. its types
enum AsmJSSimdType
{
AsmJSSimdType_int32x4,
AsmJSSimdType_float32x4
};
// Set of known operations, for a given SIMD type (int32x4, float32x4,...)
enum AsmJSSimdOperation
{
AsmJSSimdOperation_add,
AsmJSSimdOperation_sub,
AsmJSSimdOperation_mul,
AsmJSSimdOperation_div
};
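
In asm.js source these enums correspond to module-level imports off the global SIMD object. A sketch of the declarations they model (per ValidateSimdOperation in AsmJSLink.cpp above, mul and div are only accepted for float32x4):

var i4 = glob.SIMD.int32x4;    // Global::SimdCtor, AsmJSSimdType_int32x4
var i4a = i4.add;              // Global::SimdOperation {int32x4, add}
var f4 = glob.SIMD.float32x4;  // Global::SimdCtor, AsmJSSimdType_float32x4
var f4m = f4.mul;              // Global::SimdOperation {float32x4, mul}
// var i4m = i4.mul;           // rejected: mul/div don't validate for int32x4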
// These labels describe positions in the prologue/epilogue of functions while
// compiling an AsmJSModule.
struct AsmJSFunctionLabels
@@ -98,18 +118,32 @@ class AsmJSNumLit
BigUnsigned,
Double,
Float,
Int32x4,
Float32x4,
OutOfRangeInt = -1
};
private:
Which which_;
Value value_;
union {
Value scalar_;
jit::SimdConstant simd_;
} value;
public:
static AsmJSNumLit Create(Which w, Value v) {
AsmJSNumLit lit;
lit.which_ = w;
lit.value_ = v;
lit.value.scalar_ = v;
JS_ASSERT(!lit.isSimd());
return lit;
}
static AsmJSNumLit Create(Which w, jit::SimdConstant c) {
AsmJSNumLit lit;
lit.which_ = w;
lit.value.simd_ = c;
JS_ASSERT(lit.isSimd());
return lit;
}
@@ -119,22 +153,31 @@ class AsmJSNumLit
int32_t toInt32() const {
JS_ASSERT(which_ == Fixnum || which_ == NegativeInt || which_ == BigUnsigned);
return value_.toInt32();
return value.scalar_.toInt32();
}
double toDouble() const {
JS_ASSERT(which_ == Double);
return value_.toDouble();
return value.scalar_.toDouble();
}
float toFloat() const {
JS_ASSERT(which_ == Float);
return float(value_.toDouble());
return float(value.scalar_.toDouble());
}
Value value() const {
Value scalarValue() const {
JS_ASSERT(which_ != OutOfRangeInt);
return value_;
return value.scalar_;
}
bool isSimd() const {
return which_ == Int32x4 || which_ == Float32x4;
}
const jit::SimdConstant &simdValue() const {
JS_ASSERT(isSimd());
return value.simd_;
}
bool hasType() const {
@@ -158,7 +201,8 @@ class AsmJSModule
class Global
{
public:
enum Which { Variable, FFI, ArrayView, MathBuiltinFunction, Constant };
enum Which { Variable, FFI, ArrayView, MathBuiltinFunction, Constant,
SimdCtor, SimdOperation};
enum VarInitKind { InitConstant, InitImport };
enum ConstantKind { GlobalConstant, MathConstant };
@@ -177,6 +221,11 @@ class AsmJSModule
uint32_t ffiIndex_;
Scalar::Type viewType_;
AsmJSMathBuiltinFunction mathBuiltinFunc_;
AsmJSSimdType simdCtorType_;
struct {
AsmJSSimdType type_;
AsmJSSimdOperation which_;
} simdOp;
struct {
ConstantKind kind_;
double value_;
@@ -197,7 +246,7 @@ class AsmJSModule
if (name_)
MarkStringUnbarriered(trc, &name_, "asm.js global name");
JS_ASSERT_IF(pod.which_ == Variable && pod.u.var.initKind_ == InitConstant,
!pod.u.var.u.numLit_.value().isMarkable());
!pod.u.var.u.numLit_.scalarValue().isMarkable());
}
public:
@@ -252,6 +301,26 @@ class AsmJSModule
JS_ASSERT(pod.which_ == MathBuiltinFunction);
return pod.u.mathBuiltinFunc_;
}
AsmJSSimdType simdCtorType() const {
JS_ASSERT(pod.which_ == SimdCtor);
return pod.u.simdCtorType_;
}
PropertyName *simdCtorName() const {
JS_ASSERT(pod.which_ == SimdCtor);
return name_;
}
PropertyName *simdOperationName() const {
JS_ASSERT(pod.which_ == SimdOperation);
return name_;
}
AsmJSSimdOperation simdOperation() const {
JS_ASSERT(pod.which_ == SimdOperation);
return pod.u.simdOp.which_;
}
AsmJSSimdType simdOperationType() const {
JS_ASSERT(pod.which_ == SimdOperation);
return pod.u.simdOp.type_;
}
PropertyName *constantName() const {
JS_ASSERT(pod.which_ == Constant);
return name_;
@@ -310,7 +379,13 @@ class AsmJSModule
const uint8_t *deserialize(ExclusiveContext *cx, const uint8_t *cursor);
bool clone(ExclusiveContext *cx, Exit *out) const;
};
typedef int32_t (*CodePtr)(uint64_t *args, uint8_t *global);
struct EntryArg {
uint64_t lo;
uint64_t hi;
};
JS_STATIC_ASSERT(sizeof(EntryArg) >= jit::Simd128DataSize);
typedef int32_t (*CodePtr)(EntryArg *args, uint8_t *global);
// An Exit holds bookkeeping information about an exit; the ExitDatum
// struct overlays the actual runtime data stored in the global data
@@ -323,7 +398,7 @@ class AsmJSModule
typedef Vector<AsmJSCoercion, 0, SystemAllocPolicy> ArgCoercionVector;
enum ReturnType { Return_Int32, Return_Double, Return_Void };
enum ReturnType { Return_Int32, Return_Double, Return_Int32x4, Return_Float32x4, Return_Void };
class ExportedFunction
{
@@ -673,7 +748,8 @@ class AsmJSModule
size_t codeBytes_; // function bodies and stubs
size_t totalBytes_; // function bodies, stubs, and global data
uint32_t minHeapLength_;
uint32_t numGlobalVars_;
uint32_t numGlobalScalarVars_;
uint32_t numGlobalSimdVars_;
uint32_t numFFIs_;
uint32_t srcLength_;
uint32_t srcLengthWithRightBrace_;
@@ -820,20 +896,43 @@ class AsmJSModule
}
bool addGlobalVarInit(const AsmJSNumLit &lit, uint32_t *globalIndex) {
JS_ASSERT(!isFinishedWithModulePrologue());
if (pod.numGlobalVars_ == UINT32_MAX)
return false;
Global g(Global::Variable, nullptr);
g.pod.u.var.initKind_ = Global::InitConstant;
g.pod.u.var.u.numLit_ = lit;
g.pod.u.var.index_ = *globalIndex = pod.numGlobalVars_++;
if (lit.isSimd()) {
if (pod.numGlobalSimdVars_ == UINT32_MAX)
return false;
*globalIndex = pod.numGlobalSimdVars_++;
} else {
if (pod.numGlobalScalarVars_ == UINT32_MAX)
return false;
*globalIndex = pod.numGlobalScalarVars_++;
}
g.pod.u.var.index_ = *globalIndex;
return globals_.append(g);
}
static bool IsSimdCoercion(AsmJSCoercion c) {
switch (c) {
case AsmJS_ToInt32:
case AsmJS_ToNumber:
case AsmJS_FRound:
return false;
case AsmJS_ToInt32x4:
case AsmJS_ToFloat32x4:
return true;
}
MOZ_MAKE_COMPILER_ASSUME_IS_UNREACHABLE("unexpected AsmJSCoercion");
}
bool addGlobalVarImport(PropertyName *name, AsmJSCoercion coercion, uint32_t *globalIndex) {
JS_ASSERT(!isFinishedWithModulePrologue());
Global g(Global::Variable, name);
g.pod.u.var.initKind_ = Global::InitImport;
g.pod.u.var.u.coercion_ = coercion;
g.pod.u.var.index_ = *globalIndex = pod.numGlobalVars_++;
*globalIndex = IsSimdCoercion(coercion) ? pod.numGlobalSimdVars_++
: pod.numGlobalScalarVars_++;
g.pod.u.var.index_ = *globalIndex;
return globals_.append(g);
}
bool addFFI(PropertyName *field, uint32_t *ffiIndex) {
@@ -864,6 +963,17 @@ class AsmJSModule
g.pod.u.constant.kind_ = Global::MathConstant;
return globals_.append(g);
}
bool addSimdCtor(AsmJSSimdType type, PropertyName *field) {
Global g(Global::SimdCtor, field);
g.pod.u.simdCtorType_ = type;
return globals_.append(g);
}
bool addSimdOperation(AsmJSSimdType type, AsmJSSimdOperation op, PropertyName *field) {
Global g(Global::SimdOperation, field);
g.pod.u.simdOp.type_ = type;
g.pod.u.simdOp.which_ = op;
return globals_.append(g);
}
bool addGlobalConstant(double value, PropertyName *name) {
JS_ASSERT(!isFinishedWithModulePrologue());
Global g(Global::Constant, name);
@@ -1110,10 +1220,11 @@ class AsmJSModule
// are laid out in this order:
// 0. a pointer to the current AsmJSActivation
// 1. a pointer to the heap that was linked to the module
// 2. the double float constant NaN.
// 3. the float32 constant NaN, padded to sizeof(double).
// 4. global variable state (elements are sizeof(uint64_t))
// 5. interleaved function-pointer tables and exits. These are allocated
// 2. the double float constant NaN
// 3. the float32 constant NaN, padded to Simd128DataSize
// 4. global SIMD variable state (elements are Simd128DataSize)
// 5. global variable state (elements are sizeof(uint64_t))
// 6. interleaved function-pointer tables and exits. These are allocated
// while type checking function bodies (as exits and uses of
// function-pointer tables are encountered).
size_t offsetOfGlobalData() const {
@@ -1124,13 +1235,18 @@ class AsmJSModule
JS_ASSERT(isFinished());
return code_ + offsetOfGlobalData();
}
size_t globalSimdVarsOffset() const {
return AlignBytes(/* 0 */ sizeof(void*) +
/* 1 */ sizeof(void*) +
/* 2 */ sizeof(double) +
/* 3 */ sizeof(float),
jit::Simd128DataSize);
}
size_t globalDataBytes() const {
return sizeof(void*) +
sizeof(void*) +
sizeof(double) +
sizeof(double) +
pod.numGlobalVars_ * sizeof(uint64_t) +
pod.funcPtrTableAndExitBytes_;
return globalSimdVarsOffset() +
/* 4 */ pod.numGlobalSimdVars_ * jit::Simd128DataSize +
/* 5 */ pod.numGlobalScalarVars_ * sizeof(uint64_t) +
/* 6 */ pod.funcPtrTableAndExitBytes_;
}
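
As a worked example of the new layout, here is a sketch of the resulting offsets assuming a 64-bit target (8-byte pointers) and a Simd128DataSize of 16:

// Mirrors globalSimdVarsOffset()/globalDataBytes() above (hypothetical
// helper; JS used for the arithmetic only).
function alignBytes(bytes, align) { return (bytes + align - 1) & ~(align - 1); }
// 0,1: two pointers; 2: double NaN; 3: float NaN, then padding.
var simdVarsOffset = alignBytes(8 + 8 + 8 + 4, 16);            // == 32
function globalDataBytes(numSimdVars, numScalarVars, funcPtrTableAndExitBytes) {
    return simdVarsOffset +
           numSimdVars * 16 +        // 4. SIMD globals, Simd128DataSize each
           numScalarVars * 8 +       // 5. scalar globals, uint64_t slots
           funcPtrTableAndExitBytes; // 6. tables and exits
}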
static unsigned activationGlobalDataOffset() {
JS_STATIC_ASSERT(jit::AsmJSActivationGlobalDataOffset == 0);
@@ -1165,20 +1281,39 @@ class AsmJSModule
*(double *)(globalData() + nan64GlobalDataOffset()) = GenericNaN();
*(float *)(globalData() + nan32GlobalDataOffset()) = GenericNaN();
}
unsigned globalVariableOffset() const {
static_assert((2 * sizeof(void*) + 2 * sizeof(double)) % sizeof(double) == 0,
"Global data should be aligned");
return 2 * sizeof(void*) + 2 * sizeof(double);
}
unsigned globalVarIndexToGlobalDataOffset(unsigned i) const {
unsigned globalSimdVarIndexToGlobalDataOffset(unsigned i) const {
JS_ASSERT(isFinishedWithModulePrologue());
JS_ASSERT(i < pod.numGlobalVars_);
return globalVariableOffset() +
JS_ASSERT(i < pod.numGlobalSimdVars_);
return globalSimdVarsOffset() +
i * jit::Simd128DataSize;
}
unsigned globalScalarVarIndexToGlobalDataOffset(unsigned i) const {
JS_ASSERT(isFinishedWithModulePrologue());
JS_ASSERT(i < pod.numGlobalScalarVars_);
return globalSimdVarsOffset() +
pod.numGlobalSimdVars_ * jit::Simd128DataSize +
i * sizeof(uint64_t);
}
void *globalVarIndexToGlobalDatum(unsigned i) const {
void *globalScalarVarIndexToGlobalDatum(unsigned i) const {
JS_ASSERT(isFinished());
return (void *)(globalData() + globalVarIndexToGlobalDataOffset(i));
return (void *)(globalData() + globalScalarVarIndexToGlobalDataOffset(i));
}
void *globalSimdVarIndexToGlobalDatum(unsigned i) const {
JS_ASSERT(isFinished());
return (void *)(globalData() + globalSimdVarIndexToGlobalDataOffset(i));
}
void *globalVarToGlobalDatum(const Global &g) const {
unsigned index = g.varIndex();
if (g.varInitKind() == Global::VarInitKind::InitConstant) {
return g.varInitNumLit().isSimd()
? globalSimdVarIndexToGlobalDatum(index)
: globalScalarVarIndexToGlobalDatum(index);
}
JS_ASSERT(g.varInitKind() == Global::VarInitKind::InitImport);
return IsSimdCoercion(g.varInitCoercion())
? globalSimdVarIndexToGlobalDatum(index)
: globalScalarVarIndexToGlobalDatum(index);
}
uint8_t **globalDataOffsetToFuncPtrTable(unsigned globalDataOffset) const {
JS_ASSERT(isFinished());

File diff suppressed because it is too large

View File

@@ -39,8 +39,8 @@ extern const JSFunctionSpec Int32x4Methods[];
static const char *laneNames[] = {"lane 0", "lane 1", "lane 2", "lane 3"};
template<typename V>
static bool
IsVectorObject(HandleValue v)
bool
js::IsVectorObject(HandleValue v)
{
if (!v.isObject())
return false;
@@ -56,6 +56,27 @@ IsVectorObject(HandleValue v)
return typeRepr.as<X4TypeDescr>().type() == V::type;
}
template bool js::IsVectorObject<Int32x4>(HandleValue v);
template bool js::IsVectorObject<Float32x4>(HandleValue v);
template<typename V>
bool
js::ToSimdConstant(JSContext *cx, HandleValue v, jit::SimdConstant *out)
{
typedef typename V::Elem Elem;
if (!IsVectorObject<V>(v)) {
JS_ReportErrorNumber(cx, js_GetErrorMessage, nullptr, JSMSG_SIMD_NOT_A_VECTOR);
return false;
}
Elem *mem = reinterpret_cast<Elem *>(v.toObject().as<TypedObject>().typedMem());
*out = jit::SimdConstant::CreateX4(mem);
return true;
}
template bool js::ToSimdConstant<Int32x4>(JSContext *cx, HandleValue v, jit::SimdConstant *out);
template bool js::ToSimdConstant<Float32x4>(JSContext *cx, HandleValue v, jit::SimdConstant *out);
template<typename Elem>
static Elem
TypedObjectMemory(HandleValue v)

View File

@@ -166,6 +166,12 @@ struct Int32x4 {
template<typename V>
JSObject *CreateSimd(JSContext *cx, typename V::Elem *data);
template<typename V>
bool IsVectorObject(HandleValue v);
template<typename V>
bool ToSimdConstant(JSContext *cx, HandleValue v, jit::SimdConstant *out);
#define DECLARE_SIMD_FLOAT32X4_FUNCTION(Name, Func, Operands, Flags) \
extern bool \
simd_float32x4_##Name(JSContext *cx, unsigned argc, Value *vp);

View File

@@ -1974,6 +1974,19 @@ EvalReturningScope(JSContext *cx, unsigned argc, jsval *vp)
return true;
}
static bool
IsSimdAvailable(JSContext *cx, unsigned argc, Value *vp)
{
CallArgs args = CallArgsFromVp(argc, vp);
#ifdef JS_CODEGEN_NONE
bool available = false;
#else
bool available = cx->jitSupportsSimd();
#endif
args.rval().set(BooleanValue(available));
return true;
}
static const JSFunctionSpecWithHelp TestingFunctions[] = {
JS_FN_HELP("gc", ::GC, 0, 0,
"gc([obj] | 'compartment')",
@@ -2157,6 +2170,10 @@ static const JSFunctionSpecWithHelp TestingFunctions[] = {
" Returns whether asm.js compilation is currently available or whether it is disabled\n"
" (e.g., by the debugger)."),
JS_FN_HELP("isSimdAvailable", IsSimdAvailable, 0, 0,
"isSimdAvailable",
" Returns true if SIMD extensions are supported on this platform."),
JS_FN_HELP("getJitCompilerOptions", GetJitCompilerOptions, 0, 0,
"getCompilerOptions()",
"Return an object describing some of the JIT compiler options.\n"),

View File

@@ -144,7 +144,7 @@ NativeRegExpMacroAssembler::GenerateCode(JSContext *cx)
#endif
size_t frameSize = sizeof(FrameData) + num_registers_ * sizeof(void *);
frameSize = JS_ROUNDUP(frameSize + masm.framePushed(), StackAlignment) - masm.framePushed();
frameSize = JS_ROUNDUP(frameSize + masm.framePushed(), ABIStackAlignment) - masm.framePushed();
// Actually emit code to start a new stack frame.
masm.reserveStack(frameSize);

View File

@@ -108,8 +108,8 @@ function assertAsmLinkFail(f)
assertEq(isAsmJSFunction(ret), false);
if (typeof ret === 'object')
for (f of ret)
assertEq(isAsmJSFunction(f), false);
for (var i in ret)
assertEq(isAsmJSFunction(ret[i]), false);
// Turn on warnings-as-errors
var oldOpts = options("werror");

View File

@@ -0,0 +1,657 @@
load(libdir + "asm.js");
var heap = new ArrayBuffer(4096);
// Set to true to see more JS debugging spew
const DEBUG = false;
if (!isSimdAvailable() || typeof SIMD === 'undefined') {
DEBUG && print("won't run tests as simd extensions aren't activated yet");
quit(0);
}
const I32 = 'var i4 = glob.SIMD.int32x4;'
const I32A = 'var i4a = i4.add;'
const I32S = 'var i4s = i4.sub;'
const F32 = 'var f4 = glob.SIMD.float32x4;'
const F32A = 'var f4a = f4.add;'
const F32S = 'var f4s = f4.sub;'
const F32M = 'var f4m = f4.mul;'
const F32D = 'var f4d = f4.div;'
const FROUND = 'var f32=glob.Math.fround;'
const INT32_MAX = Math.pow(2, 31) - 1;
const INT32_MIN = INT32_MAX + 1 | 0;
const assertEqFFI = {assertEq:assertEq};
function assertEqX4(real, expected) {
assertEq(real.x, expected[0]);
assertEq(real.y, expected[1]);
assertEq(real.z, expected[2]);
assertEq(real.w, expected[3]);
}
function CheckI4(header, code, expected) {
// code needs to contain a local called x
header = USE_ASM + I32 + header;
var lanes = ['x', 'y', 'z', 'w'];
for (var i = 0; i < 4; ++i) {
var lane = lanes[i];
assertEq(asmLink(asmCompile('glob', header + ';function f() {' + code + ';return x.' + lane + '|0} return f'), this)(), expected[i]);
}
}
function CheckF4(header, code, expected) {
// code needs to contain a local called x
var lanes = ['x', 'y', 'z', 'w'];
header = USE_ASM + F32 + header;
for (var i = 0; i < 4; ++i) {
var lane = lanes[i];
assertEq(asmLink(asmCompile('glob', header + ';function f() {' + code + ';return +x.' + lane + '} return f'), this)(), Math.fround(expected[i]));
}
}
try {
// 1. Constructors
// 1.1 Compilation
assertAsmTypeFail('glob', USE_ASM + "var i4 = int32x4 ; return {}") ;
assertAsmTypeFail('glob', USE_ASM + "var i4 = glob.int32x4 ; return {}") ;
assertAsmTypeFail('glob', USE_ASM + "var i4 = glob.globglob.int32x4 ; return {}") ;
assertAsmTypeFail('glob', USE_ASM + "var i4 = glob.Math.int32x4 ; return {}") ;
assertAsmTypeFail('glob', USE_ASM + "var herd = glob.SIMD.ponyX4 ; return {}") ;
// 1.2 Linking
assertAsmLinkAlwaysFail(asmCompile('glob', USE_ASM + I32 + "return {}"), {});
assertAsmLinkFail(asmCompile('glob', USE_ASM + I32 + "return {}"), {SIMD: 42});
assertAsmLinkFail(asmCompile('glob', USE_ASM + I32 + "return {}"), {SIMD: Math.fround});
assertAsmLinkFail(asmCompile('glob', USE_ASM + I32 + "return {}"), {SIMD: {}});
assertAsmLinkFail(asmCompile('glob', USE_ASM + I32 + "return {}"), {SIMD: {int32x4: 42}});
assertAsmLinkFail(asmCompile('glob', USE_ASM + I32 + "return {}"), {SIMD: {int32x4: Math.fround}});
assertAsmLinkFail(asmCompile('glob', USE_ASM + I32 + "return {}"), {SIMD: {int32x4: new Array}});
assertAsmLinkFail(asmCompile('glob', USE_ASM + I32 + "return {}"), {SIMD: {int32x4: SIMD.float32x4}});
[Type, int32] = [TypedObject.StructType, TypedObject.int32];
var MyStruct = new Type({'x': int32, 'y': int32, 'z': int32, 'w': int32});
assertAsmLinkFail(asmCompile('glob', USE_ASM + I32 + "return {}"), {SIMD: {int32x4: MyStruct}});
assertAsmLinkFail(asmCompile('glob', USE_ASM + I32 + "return {}"), {SIMD: {int32x4: new MyStruct}});
assertEq(asmLink(asmCompile('glob', USE_ASM + I32 + "function f() {} return f"), {SIMD:{int32x4: SIMD.int32x4}})(), undefined);
assertAsmLinkFail(asmCompile('glob', USE_ASM + F32 + "return {}"), {SIMD: {float32x4: 42}});
assertAsmLinkFail(asmCompile('glob', USE_ASM + F32 + "return {}"), {SIMD: {float32x4: Math.fround}});
assertAsmLinkFail(asmCompile('glob', USE_ASM + F32 + "return {}"), {SIMD: {float32x4: new Array}});
assertEq(asmLink(asmCompile('glob', USE_ASM + F32 + "function f() {} return f"), {SIMD:{float32x4: SIMD.float32x4}})(), undefined);
// 1.3 Correctness
// 1.3.1 Local variables declarations
assertAsmTypeFail('glob', USE_ASM + "function f() {var x=Int32x4(1,2,3,4);} return f");
assertAsmTypeFail('glob', USE_ASM + I32 + "function f() {var x=i4;} return f");
assertAsmTypeFail('glob', USE_ASM + I32 + "function f() {var x=i4();} return f");
assertAsmTypeFail('glob', USE_ASM + I32 + "function f() {var x=i4(1);} return f");
assertAsmTypeFail('glob', USE_ASM + I32 + "function f() {var x=i4(1, 2);} return f");
assertAsmTypeFail('glob', USE_ASM + I32 + "function f() {var x=i4(1, 2, 3);} return f");
assertAsmTypeFail('glob', USE_ASM + I32 + "function f() {var x=i4(1, 2, 3, 4.0);} return f");
assertAsmTypeFail('glob', USE_ASM + I32 + "function f() {var x=i4(1, 2.0, 3, 4);} return f");
assertAsmTypeFail('glob', USE_ASM + I32 + I32A + "function f() {var x=i4a(1,2,3,4);} return f");
assertAsmTypeFail('glob', USE_ASM + I32 + "function f() {var x=i4(1,2,3,2+2|0);} return f");
assertAsmTypeFail('glob', USE_ASM + I32 + "function f() {var x=i4(1,2,3," + (INT32_MIN - 1) + ");} return f");
assertAsmTypeFail('glob', USE_ASM + I32 + "function f() {var x=i4(i4(1,2,3,4));} return f");
assertEq(asmLink(asmCompile('glob', USE_ASM + I32 + "function f() {var x=i4(1,2,3,4);} return f"), this)(), undefined);
assertEq(asmLink(asmCompile('glob', USE_ASM + I32 + "function f() {var x=i4(1,2,3," + (INT32_MAX + 1) + ");} return f"), this)(), undefined);
assertAsmTypeFail('glob', USE_ASM + F32 + "function f() {var x=f4;} return f");
assertAsmTypeFail('glob', USE_ASM + F32 + "function f() {var x=f4();} return f");
assertAsmTypeFail('glob', USE_ASM + F32 + "function f() {var x=f4(1,2,3);} return f");
assertAsmTypeFail('glob', USE_ASM + F32 + "function f() {var x=f4(1.,2.,3.);} return f");
assertAsmTypeFail('glob', USE_ASM + F32 + "function f() {var x=f4(1.,2.,f32(3.),4.);} return f");
assertEq(asmLink(asmCompile('glob', USE_ASM + F32 + "function f() {var x=f4(1.,2.,3.,4.);} return f"), this)(), undefined);
assertEq(asmLink(asmCompile('glob', USE_ASM + F32 + "function f() {var x=f4(1,2,3,4);} return f"), this)(), undefined);
assertEq(asmLink(asmCompile('glob', USE_ASM + F32 + "function f() {var x=f4(1,2,3," + (INT32_MIN - 1) + ");} return f"), this)(), undefined);
assertEq(asmLink(asmCompile('glob', USE_ASM + F32 + "function f() {var x=f4(1,2,3," + (INT32_MAX + 1) + ");} return f"), this)(), undefined);
// Places where NumLit can creep in
assertAsmTypeFail('glob', USE_ASM + I32 + "function f(i) {i=i|0; var z=0; switch(i|0) {case i4(1,2,3,4): z=1; break; default: z=2; break;}} return f");
assertAsmTypeFail('glob', USE_ASM + I32 + "function f(i) {i=i|0; var z=0; return i * i4(1,2,3,4) | 0;} return f");
assertAsmTypeFail('glob', USE_ASM + I32 + "function f(i) {var x=i4(1,2,3,i4(4,5,6,7))} return f");
assertAsmTypeFail('glob', USE_ASM + I32 + F32 + "function f(i) {var x=i4(1,2,3,f4(4,5,6,7))} return f");
assertAsmTypeFail('glob', USE_ASM + I32 + F32 + "function f(i) {var x=f4(1,2,3,i4(4,5,6,7))} return f");
assertEqX4(asmLink(asmCompile('glob', USE_ASM + I32 + "function f() {return i4(1,2,3,4);} return f"), this)(), [1, 2, 3, 4]);
assertEqX4(asmLink(asmCompile('glob', USE_ASM + I32 + "function f() {return i4(i4(1,2,3,4));} return f"), this)(), [1, 2, 3, 4]);
assertEqX4(asmLink(asmCompile('glob', USE_ASM + F32 + "function f() {return f4(1,2,3,4);} return f"), this)(), [1, 2, 3, 4]);
assertEqX4(asmLink(asmCompile('glob', USE_ASM + F32 + "function f() {return f4(f4(1,2,3,4));} return f"), this)(), [1, 2, 3, 4]);
// Int32x4 ctor should accept int?
assertEqX4(asmLink(asmCompile('glob', 'ffi', 'heap', USE_ASM + I32 + "var i32=new glob.Int32Array(heap); function f(i) {i=i|0; return i4(i4(i32[i>>2], 2, 3, 4))} return f"), this, {}, new ArrayBuffer(0x10000))(0x20000), [0, 2, 3, 4]);
// Float32x4 ctor should accept floatish, i.e. float || float? || floatish
assertEqX4(asmLink(asmCompile('glob', 'ffi', 'heap', USE_ASM + F32 + "var f32=new glob.Float32Array(heap); function f(i) {i=i|0; return f4(f4(f32[i>>2], 2, 3, 4))} return f"), this, {}, new ArrayBuffer(0x10000))(0x20000), [NaN, 2, 3, 4]);
assertEqX4(asmLink(asmCompile('glob', USE_ASM + F32 + "var f32=glob.Math.fround; function f(i) {i=i|0; return f4(f4(f32(1) + f32(2), 2, 3, 4))} return f"), this, {}, new ArrayBuffer(0x10000))(0x20000), [3, 2, 3, 4]);
// 1.3.2 Reading values out of lanes
assertAsmTypeFail('glob', USE_ASM + "function f() {var x=1; return x.y | 0;} return f");
assertAsmTypeFail('glob', USE_ASM + "function f() {var x=1; return (x + x).y | 0;} return f");
assertAsmTypeFail('glob', USE_ASM + "function f() {var x=1.; return x.y | 0;} return f");
assertAsmTypeFail('glob', USE_ASM + "var f32=glob.Math.fround;" + I32 + "function f() {var x=f32(1); return x.y | 0;} return f");
assertAsmTypeFail('glob', USE_ASM + I32 + "function f() {var x=i4(1,2,3,4); return x.length|0;} return f");
assertAsmTypeFail('glob', USE_ASM + I32 + "function f() {var x=i4(1,2,3,4).y; return x|0;} return f");
CheckI4('', 'var x=i4(0,0,0,0)', [0,0,0,0]);
CheckI4('', 'var x=i4(1,2,3,4)', [1,2,3,4]);
CheckI4('', 'var x=i4(' + INT32_MIN + ',2,3,' + INT32_MAX + ')', [INT32_MIN,2,3,INT32_MAX]);
CheckI4('', 'var x=i4(1,2,3,4); var y=i4(5,6,7,8)', [1,2,3,4]);
CheckI4('', 'var a=1; var b=i4(9,8,7,6); var c=13.37; var x=i4(1,2,3,4); var y=i4(5,6,7,8)', [1,2,3,4]);
CheckI4('', 'var y=i4(5,6,7,8); var x=i4(1,2,3,4)', [1,2,3,4]);
CheckF4('', 'var x=f4(' + INT32_MAX + ', 2, 3, ' + INT32_MIN + ')', [INT32_MAX, 2, 3, INT32_MIN]);
CheckF4('', 'var x=f4(' + (INT32_MAX + 1) + ', 2, 3, 4)', [INT32_MAX + 1, 2, 3, 4]);
CheckF4('', 'var x=f4(1.3, 2.4, 3.5, 98.76)', [1.3, 2.4, 3.5, 98.76]);
CheckF4('', 'var x=f4(13.37, 2., 3., -0)', [13.37, 2, 3, -0]);
// 1.3.3. Variable assignments
assertAsmTypeFail('glob', USE_ASM + I32 + I32A + "function f() {var x=i4(1,2,3,4); x=i4();} return f");
assertAsmTypeFail('glob', USE_ASM + I32 + I32A + "function f() {var x=i4(1,2,3,4); x=i4(1);} return f");
assertAsmTypeFail('glob', USE_ASM + I32 + I32A + "function f() {var x=i4(1,2,3,4); x=i4(1, 2);} return f");
assertAsmTypeFail('glob', USE_ASM + I32 + I32A + "function f() {var x=i4(1,2,3,4); x=i4(1, 2, 3);} return f");
assertAsmTypeFail('glob', USE_ASM + I32 + I32A + "function f() {var x=i4(1,2,3,4); x=i4(1.0, 2, 3, 4);} return f");
assertAsmTypeFail('glob', USE_ASM + I32 + I32A + "function f() {var x=i4(1,2,3,4); x=i4(1, 2.0, 3, 4);} return f");
assertAsmTypeFail('glob', USE_ASM + I32 + I32A + "function f() {var x=i4(1,2,3,4); x=i4(1, 2, 3.0, 4);} return f");
assertAsmTypeFail('glob', USE_ASM + I32 + I32A + "function f() {var x=i4(1,2,3,4); x=i4(1, 2, 3, 4.0);} return f");
assertAsmTypeFail('glob', USE_ASM + I32 + I32A + "function f() {var x=i4(1,2,3,4); x=i4(1, 2, 3, x);} return f");
assertAsmTypeFail('glob', USE_ASM + I32 + I32A + "function f() {var x=i4(1,2,3,4); var c=4.0; x=i4(1, 2, 3, +c);} return f");
assertAsmTypeFail('glob', 'ffi', 'heap', USE_ASM + I32 + "var i32=new glob.Int32Array(heap); function f() {var x=i4(1,2,3,4); i32[0] = x;} return f");
assertAsmTypeFail('glob', 'ffi', 'heap', USE_ASM + I32 + "var i32=new glob.Int32Array(heap); function f() {var x=i4(1,2,3,4); x = i32[0];} return f");
assertAsmTypeFail('glob', 'ffi', 'heap', USE_ASM + F32 + "var f32=new glob.Float32Array(heap); function f() {var x=f4(1,2,3,4); f32[0] = x;} return f");
assertAsmTypeFail('glob', 'ffi', 'heap', USE_ASM + F32 + "var f32=new glob.Int32Array(heap); function f() {var x=f4(1,2,3,4); x = f32[0];} return f");
CheckI4('', 'var x=i4(1,2,3,4); x=i4(5,6,7,8)', [5, 6, 7, 8]);
CheckI4('', 'var x=i4(1,2,3,4); var c=6; x=i4(5,c|0,7,8)', [5, 6, 7, 8]);
CheckI4('', 'var x=i4(8,7,6,5); x=i4(x.w|0,x.z|0,x.y|0,x.x|0)', [5, 6, 7, 8]);
CheckF4(FROUND, 'var x=f4(1,2,3,4); var y=f32(7.); x=f4(5,6,y,8)', [5, 6, 7, 8]);
CheckF4(FROUND, 'var x=f4(1,2,3,4); x=f4(f32(5.),6.,7.,8.)', [5, 6, 7, 8]);
CheckF4(FROUND, 'var x=f4(1,2,3,4); x=f4(f32(5),6,7,8)', [5, 6, 7, 8]);
CheckF4(FROUND, 'var x=f4(1,2,3,4); x=f4(f32(5.),f32(6.),f32(7.),f32(8.))', [5, 6, 7, 8]);
CheckF4('', 'var x=f4(1.,2.,3.,4.); x=f4(5.,6.,7.,8.)', [5, 6, 7, 8]);
CheckF4('', 'var x=f4(1.,2.,3.,4.); x=f4(1,2,3,4)', [1, 2, 3, 4]);
CheckF4(FROUND, 'var x=f4(1.,2.,3.,4.); var y=f32(7.); x=f4(9, 4, 2, 1)', [9, 4, 2, 1]);
CheckF4('', 'var x=f4(8.,7.,6.,5.); x=f4(x.w, x.z, x.y, x.x)', [5, 6, 7, 8]);
// 1.3.4 Return values
assertAsmTypeFail('glob', USE_ASM + I32 + "function f() {var x=1; return i4(x)} return f");
assertAsmTypeFail('glob', USE_ASM + I32 + "function f() {var x=1; return i4(x + x)} return f");
assertAsmTypeFail('glob', USE_ASM + I32 + "function f() {var x=1.; return i4(x)} return f");
assertAsmTypeFail('glob', USE_ASM + I32 + FROUND + "function f() {var x=f32(1.); return i4(x)} return f");
assertEqX4(asmLink(asmCompile('glob', USE_ASM + I32 + "function f() {var x=i4(1,2,3,4); return i4(x)} return f"), this)(), [1,2,3,4]);
assertEqX4(asmLink(asmCompile('glob', USE_ASM + F32 + "function f() {var x=f4(1,2,3,4); return f4(x)} return f"), this)(), [1,2,3,4]);
// 1.3.5 Coerce and pass arguments
assertAsmTypeFail('glob', USE_ASM + I32 + "function f(x) {x=i4();} return f");
assertAsmTypeFail('glob', USE_ASM + I32 + "function f(x) {x=i4(1,2,3,4);} return f");
assertAsmTypeFail('glob', USE_ASM + I32 + "function f(x,y) {x=i4(y);y=+y} return f");
var i32x4 = SIMD.int32x4(1, 3, 3, 7);
assertEq(asmLink(asmCompile('glob', USE_ASM + I32 + "function f(x) {x=i4(x)} return f"), this)(i32x4), undefined);
assertEqX4(asmLink(asmCompile('glob', USE_ASM + I32 + "function f(x) {x=i4(x); return i4(x);} return f"), this)(i32x4), [1,3,3,7]);
var f32x4 = SIMD.float32x4(13.37, 42.42, -0, NaN);
assertEq(asmLink(asmCompile('glob', USE_ASM + F32 + "function f(x) {x=f4(x)} return f"), this)(f32x4), undefined);
assertEqX4(asmLink(asmCompile('glob', USE_ASM + F32 + "function f(x) {x=f4(x); return f4(x);} return f"), this)(f32x4),
[Math.fround(13.37), Math.fround(42.42), -0, NaN]);
function assertCaught(f) {
var caught = false;
try {
f.apply(null, Array.prototype.slice.call(arguments, 1));
} catch (e) {
DEBUG && print('Assert caught: ', e, '\n', e.stack);
assertEq(e instanceof TypeError, true);
caught = true;
}
assertEq(caught, true);
}
var f = asmLink(asmCompile('glob', USE_ASM + F32 + "function f(x) {x=f4(x); return f4(x);} return f"), this);
assertCaught(f);
assertCaught(f, 1);
assertCaught(f, {});
assertCaught(f, "I sincerely am a SIMD typed object.");
assertCaught(f, SIMD.int32x4(1,2,3,4));
var f = asmLink(asmCompile('glob', USE_ASM + I32 + "function f(x) {x=i4(x); return i4(x);} return f"), this);
assertCaught(f);
assertCaught(f, 1);
assertCaught(f, {});
assertCaught(f, "I sincerely am a SIMD typed object.");
assertCaught(f, SIMD.float32x4(4,3,2,1));
// 1.3.6 Globals
// 1.3.6.1 Local globals
// Read
assertAsmTypeFail('glob', USE_ASM + I32 + "var g=i4(1,2,3,4); function f() {var x=4; x=g|0;} return f");
assertAsmTypeFail('glob', USE_ASM + I32 + "var g=i4(1,2,3,4); function f() {var x=4.; x=+g;} return f");
assertAsmTypeFail('glob', USE_ASM + I32 + "var g=i4(1,2,3,4); var f32=glob.Math.fround; function f() {var x=f32(4.); x=f32(g);} return f");
assertAsmTypeFail('glob', USE_ASM + F32 + "var g=f4(1., 2., 3., 4.); function f() {var x=4; x=g|0;} return f");
assertAsmTypeFail('glob', USE_ASM + F32 + "var g=f4(1., 2., 3., 4.); function f() {var x=4.; x=+g;} return f");
assertAsmTypeFail('glob', USE_ASM + F32 + "var g=f4(1., 2., 3., 4.); var f32=glob.Math.fround; function f() {var x=f32(4.); x=f32(g);} return f");
assertAsmTypeFail('glob', USE_ASM + F32 + I32 + "var g=f4(1., 2., 3., 4.); function f() {var x=i4(1,2,3,4); x=i4(g);} return f");
assertAsmTypeFail('glob', USE_ASM + I32 + F32 + "var g=i4(1,2,3,4); function f() {var x=f4(1.,2.,3.,4.); x=f4(g);} return f");
assertAsmTypeFail('glob', USE_ASM + I32 + "var g=0; function f() {var x=i4(1,2,3,4); x=g|0;} return f");
assertAsmTypeFail('glob', USE_ASM + I32 + "var g=0.; function f() {var x=i4(1,2,3,4); x=+g;} return f");
assertAsmTypeFail('glob', USE_ASM + I32 + "var f32=glob.Math.fround; var g=f32(0.); function f() {var x=i4(1,2,3,4); x=f32(g);} return f");
assertAsmTypeFail('glob', USE_ASM + F32 + "var g=0; function f() {var x=f4(0.,0.,0.,0.); x=g|0;} return f");
assertAsmTypeFail('glob', USE_ASM + F32 + "var g=0.; function f() {var x=f4(0.,0.,0.,0.); x=+g;} return f");
assertAsmTypeFail('glob', USE_ASM + F32 + "var f32=glob.Math.fround; var g=f32(0.); function f() {var x=f4(0.,0.,0.,0.); x=f32(g);} return f");
CheckI4('var x=i4(1,2,3,4)', '', [1, 2, 3, 4]);
CheckI4('var _=42; var h=i4(5,5,5,5); var __=13.37; var x=i4(4,7,9,2);', '', [4,7,9,2]);
CheckF4('var x=f4(1.,2.,3.,4.)', '', [1, 2, 3, 4]);
CheckF4('var _=42; var h=f4(5.,5.,5.,5.); var __=13.37; var x=f4(4.,13.37,9.,-0.);', '', [4, 13.37, 9, -0]);
CheckF4('var x=f4(1,2,3,4)', '', [1, 2, 3, 4]);
// Write
assertAsmTypeFail('glob', USE_ASM + I32 + "var g=i4(1,2,3,4); function f() {var x=4; g=x|0;} return f");
assertAsmTypeFail('glob', USE_ASM + I32 + "var g=i4(1,2,3,4); function f() {var x=4.; g=+x;} return f");
assertAsmTypeFail('glob', USE_ASM + I32 + "var g=i4(1,2,3,4); var f32=glob.Math.fround; function f() {var x=f32(4.); g=f32(x);} return f");
assertAsmTypeFail('glob', USE_ASM + F32 + "var g=f4(1., 2., 3., 4.); function f() {var x=4; g=x|0;} return f");
assertAsmTypeFail('glob', USE_ASM + F32 + "var g=f4(1., 2., 3., 4.); function f() {var x=4.; g=+x;} return f");
assertAsmTypeFail('glob', USE_ASM + F32 + "var g=f4(1., 2., 3., 4.); var f32=glob.Math.fround; function f() {var x=f32(4.); g=f32(x);} return f");
assertAsmTypeFail('glob', USE_ASM + F32 + I32 + "var g=f4(1., 2., 3., 4.); function f() {var x=i4(1,2,3,4); g=i4(x);} return f");
assertAsmTypeFail('glob', USE_ASM + F32 + I32 + "var g=f4(1., 2., 3., 4.); function f() {var x=i4(1,2,3,4); g=f4(x);} return f");
assertAsmTypeFail('glob', USE_ASM + I32 + F32 + "var g=i4(1,2,3,4); function f() {var x=f4(1.,2.,3.,4.); g=f4(x);} return f");
assertAsmTypeFail('glob', USE_ASM + I32 + F32 + "var g=i4(1,2,3,4); function f() {var x=f4(1.,2.,3.,4.); g=i4(x);} return f");
CheckI4('var x=i4(0,0,0,0);', 'x=i4(1,2,3,4)', [1,2,3,4]);
CheckF4('var x=f4(0.,0.,0.,0.);', 'x=f4(5.,3.,4.,2.)', [5,3,4,2]);
CheckI4('var x=i4(0,0,0,0); var y=42; var z=3.9; var w=13.37', 'x=i4(1,2,3,4); y=24; z=4.9; w=23.10;', [1,2,3,4]);
CheckF4('var x=f4(0,0,0,0); var y=42; var z=3.9; var w=13.37', 'x=f4(1,2,3,4); y=24; z=4.9; w=23.10;', [1,2,3,4]);
// 1.3.6.2 Imported globals
// Read
var int32x4 = asmLink(asmCompile('glob', 'ffi', USE_ASM + I32 + "var g=i4(ffi.g); function f() {return i4(g)} return f"), this, {g: SIMD.int32x4(1,2,3,4)})();
assertEq(int32x4.x, 1);
assertEq(int32x4.y, 2);
assertEq(int32x4.z, 3);
assertEq(int32x4.w, 4);
for (var v of [1, {}, "totally legit SIMD variable", SIMD.float32x4(1,2,3,4)])
assertCaught(asmCompile('glob', 'ffi', USE_ASM + I32 + "var g=i4(ffi.g); function f() {return i4(g)} return f"), this, {g: v});
var float32x4 = asmLink(asmCompile('glob', 'ffi', USE_ASM + F32 + "var g=f4(ffi.g); function f() {return f4(g)} return f"), this, {g: SIMD.float32x4(1,2,3,4)})();
assertEq(float32x4.x, 1);
assertEq(float32x4.y, 2);
assertEq(float32x4.z, 3);
assertEq(float32x4.w, 4);
for (var v of [1, {}, "totally legit SIMD variable", SIMD.int32x4(1,2,3,4)])
assertCaught(asmCompile('glob', 'ffi', USE_ASM + F32 + "var g=f4(ffi.g); function f() {return f4(g)} return f"), this, {g: v});
// Write
var int32x4 = asmLink(asmCompile('glob', 'ffi', USE_ASM + I32 + "var g=i4(ffi.g); function f() {g=i4(4,5,6,7); return i4(g)} return f"), this, {g: SIMD.int32x4(1,2,3,4)})();
assertEq(int32x4.x, 4);
assertEq(int32x4.y, 5);
assertEq(int32x4.z, 6);
assertEq(int32x4.w, 7);
var float32x4 = asmLink(asmCompile('glob', 'ffi', USE_ASM + F32 + "var g=f4(ffi.g); function f() {g=f4(4.,5.,6.,7.); return f4(g)} return f"), this, {g: SIMD.float32x4(1,2,3,4)})();
assertEq(float32x4.x, 4);
assertEq(float32x4.y, 5);
assertEq(float32x4.z, 6);
assertEq(float32x4.w, 7);
// 2. SIMD operations
// 2.1 Compilation
assertAsmTypeFail('glob', USE_ASM + "var add = int32x4.add; return {}");
assertAsmTypeFail('glob', USE_ASM + I32A + I32 + "return {}");
assertAsmTypeFail('glob', USE_ASM + "var g = 3; var add = g.add; return {}");
assertAsmTypeFail('glob', USE_ASM + I32 + "var func = i4.doTheHarlemShake; return {}");
assertAsmTypeFail('glob', USE_ASM + I32 + "var div = i4.div; return {}");
assertAsmTypeFail('glob', USE_ASM + "var f32 = glob.Math.fround; var i4a = f32.add; return {}");
// 2.2 Linking
assertAsmLinkAlwaysFail(asmCompile('glob', USE_ASM + I32 + I32A + "function f() {} return f"), {});
assertAsmLinkAlwaysFail(asmCompile('glob', USE_ASM + I32 + I32A + "function f() {} return f"), {SIMD: Math.fround});
var oldInt32x4Add = SIMD.int32x4.add;
var code = asmCompile('glob', USE_ASM + I32 + I32A + "return {}");
for (var v of [42, Math.fround, SIMD.float32x4.add, function(){}, SIMD.int32x4.mul]) {
SIMD.int32x4.add = v;
assertAsmLinkFail(code, {SIMD: {int32x4: SIMD.int32x4}});
}
SIMD.int32x4.add = oldInt32x4Add; // finally replace the add function with the original one
assertEq(asmLink(asmCompile('glob', USE_ASM + I32 + I32A + "function f() {} return f"), {SIMD: {int32x4: SIMD.int32x4}})(), undefined);
// 2.3. Binary arithmetic operations
// 2.3.1 Additions
assertAsmTypeFail('glob', USE_ASM + I32 + I32A + "function f() {var x=i4(1,2,3,4); x=i4a();} return f");
assertAsmTypeFail('glob', USE_ASM + I32 + I32A + "function f() {var x=i4(1,2,3,4); x=i4a(x);} return f");
assertAsmTypeFail('glob', USE_ASM + I32 + I32A + "function f() {var x=i4(1,2,3,4); x=i4a(x, x, x);} return f");
assertAsmTypeFail('glob', USE_ASM + I32 + I32A + "function f() {var x=i4(1,2,3,4); x=i4a(13, 37);} return f");
assertAsmTypeFail('glob', USE_ASM + I32 + I32A + "function f() {var x=i4(1,2,3,4); x=i4a(23.10, 19.89);} return f");
assertAsmTypeFail('glob', USE_ASM + I32 + I32A + "function f() {var x=i4(1,2,3,4); x=i4a(x, 42);} return f");
assertAsmTypeFail('glob', USE_ASM + I32 + I32A + "function f() {var x=i4(1,2,3,4); x=i4a(x, 13.37);} return f");
assertAsmTypeFail('glob', USE_ASM + I32 + I32A + "function f() {var x=i4(1,2,3,4); var y=4; x=i4a(x, y);} return f");
assertAsmTypeFail('glob', USE_ASM + I32 + I32A + "function f() {var x=i4(0,0,0,0); var y=4; x=i4a(y, y);} return f");
assertAsmTypeFail('glob', USE_ASM + I32 + I32A + "function f() {var x=i4(0,0,0,0); var y=4; y=i4a(x, x);} return f");
assertAsmTypeFail('glob', USE_ASM + I32 + F32 + I32A + "function f() {var x=i4(0,0,0,0); var y=f4(4,3,2,1); x=i4a(x, y);} return f");
assertAsmTypeFail('glob', USE_ASM + I32 + F32 + I32A + "function f() {var x=i4(0,0,0,0); var y=f4(4,3,2,1); y=i4a(x, y);} return f");
assertAsmTypeFail('glob', USE_ASM + I32 + F32 + I32A + "function f() {var x=i4(0,0,0,0); var y=f4(4,3,2,1); y=i4a(x, x);} return f");
assertAsmTypeFail('glob', USE_ASM + I32 + F32 + F32A + "function f() {var x=i4(0,0,0,0); var y=f4(4,3,2,1); y=f4a(x, x);} return f");
assertAsmTypeFail('glob', USE_ASM + I32 + F32 + F32A + "function f() {var x=i4(0,0,0,0); var y=f4(4,3,2,1); y=f4a(x, y);} return f");
assertAsmTypeFail('glob', USE_ASM + I32 + F32 + F32A + "function f() {var x=i4(0,0,0,0); var y=f4(4,3,2,1); x=f4a(y, y);} return f");
assertAsmTypeFail('glob', USE_ASM + I32 + I32A + 'function f() {var x=i4(1,2,3,4); var y=0; y=i4(x,x)|0} return f');
assertAsmTypeFail('glob', USE_ASM + I32 + I32A + 'function f() {var x=i4(1,2,3,4); var y=0.; y=+i4(x,x)} return f');
CheckI4(I32A, 'var z=i4(1,2,3,4); var y=i4(0,1,0,3); var x=i4(0,0,0,0); x=i4a(z,y)', [1,3,3,7]);
CheckI4(I32A, 'var x=i4(2,3,4,5); var y=i4(0,1,0,3); x=i4a(x,y)', [2,4,4,8]);
CheckI4(I32A, 'var x=i4(1,2,3,4); x=i4a(x,x)', [2,4,6,8]);
CheckI4(I32A, 'var x=i4(' + INT32_MAX + ',2,3,4); var y=i4(1,1,0,3); x=i4a(x,y)', [INT32_MIN,3,3,7]);
CheckI4(I32A, 'var x=i4(' + INT32_MAX + ',2,3,4); var y=i4(1,1,0,3); x=i4(i4a(x,y))', [INT32_MIN,3,3,7]);
CheckF4(F32A, 'var x=f4(1,2,3,4); x=f4a(x,x)', [2,4,6,8]);
CheckF4(F32A, 'var x=f4(1,2,3,4); var y=f4(4,3,5,2); x=f4a(x,y)', [5,5,8,6]);
CheckF4(F32A, 'var x=f4(13.37,2,3,4); var y=f4(4,3,5,2); x=f4a(x,y)', [Math.fround(13.37) + 4,5,8,6]);
CheckF4(F32A, 'var x=f4(13.37,2,3,4); var y=f4(4,3,5,2); x=f4(f4a(x,y))', [Math.fround(13.37) + 4,5,8,6]);
// 2.3.2. Subtracts
CheckI4(I32S, 'var x=i4(1,2,3,4); var y=i4(-1,1,0,2); x=i4s(x,y)', [2,1,3,2]);
CheckI4(I32S, 'var x=i4(5,4,3,2); var y=i4(1,2,3,4); x=i4s(x,y)', [4,2,0,-2]);
CheckI4(I32S, 'var x=i4(1,2,3,4); x=i4s(x,x)', [0,0,0,0]);
CheckI4(I32S, 'var x=i4(' + INT32_MIN + ',2,3,4); var y=i4(1,1,0,3); x=i4s(x,y)', [INT32_MAX,1,3,1]);
CheckI4(I32S, 'var x=i4(' + INT32_MIN + ',2,3,4); var y=i4(1,1,0,3); x=i4(i4s(x,y))', [INT32_MAX,1,3,1]);
CheckF4(F32S, 'var x=f4(1,2,3,4); x=f4s(x,x)', [0,0,0,0]);
CheckF4(F32S, 'var x=f4(1,2,3,4); var y=f4(4,3,5,2); x=f4s(x,y)', [-3,-1,-2,2]);
CheckF4(F32S, 'var x=f4(13.37,2,3,4); var y=f4(4,3,5,2); x=f4s(x,y)', [Math.fround(13.37) - 4,-1,-2,2]);
CheckF4(F32S, 'var x=f4(13.37,2,3,4); var y=f4(4,3,5,2); x=f4(f4s(x,y))', [Math.fround(13.37) - 4,-1,-2,2]);
// 2.3.3. Multiplications / Divisions
assertAsmTypeFail('glob', USE_ASM + I32 + "var f4m=i4.mul; function f() {} return f");
assertAsmTypeFail('glob', USE_ASM + I32 + "var f4d=i4.div; function f() {} return f");
CheckF4(F32M, 'var x=f4(1,2,3,4); x=f4m(x,x)', [1,4,9,16]);
CheckF4(F32M, 'var x=f4(1,2,3,4); var y=f4(4,3,5,2); x=f4m(x,y)', [4,6,15,8]);
CheckF4(F32M, 'var x=f4(13.37,2,3,4); var y=f4(4,3,5,2); x=f4m(x,y)', [Math.fround(13.37) * 4,6,15,8]);
CheckF4(F32M, 'var x=f4(13.37,2,3,4); var y=f4(4,3,5,2); x=f4(f4m(x,y))', [Math.fround(13.37) * 4,6,15,8]);
// Test NaN
var f32x4 = SIMD.float32x4(0, NaN, -0, NaN);
var another = SIMD.float32x4(NaN, -1, -0, NaN);
assertEqX4(asmLink(asmCompile('glob', USE_ASM + F32 + F32M + "function f(x, y) {x=f4(x); y=f4(y); x=f4m(x,y); return f4(x);} return f"), this)(f32x4, another), [NaN, NaN, 0, NaN]);
CheckF4(F32D, 'var x=f4(1,2,3,4); x=f4d(x,x)', [1,1,1,1]);
CheckF4(F32D, 'var x=f4(1,2,3,4); var y=f4(4,3,5,2); x=f4d(x,y)', [1/4,2/3,3/5,2]);
CheckF4(F32D, 'var x=f4(13.37,1,1,4); var y=f4(4,0,-0.,2); x=f4d(x,y)', [Math.fround(13.37) / 4,+Infinity,-Infinity,2]);
// Test NaN
var f32x4 = SIMD.float32x4(0, 0, -0, NaN);
var another = SIMD.float32x4(0, -0, 0, 0);
assertEqX4(asmLink(asmCompile('glob', USE_ASM + F32 + F32D + "function f(x,y) {x=f4(x); y=f4(y); x=f4d(x,y); return f4(x);} return f"), this)(f32x4, another), [NaN, NaN, NaN, NaN]);
// Dead code
assertEqX4(asmLink(asmCompile('glob', USE_ASM + I32 + 'function f(){var x=i4(1,2,3,4); return i4(x); x=i4(5,6,7,8); return i4(x);} return f'), this)(), [1, 2, 3, 4]);
assertEqX4(asmLink(asmCompile('glob', USE_ASM + I32 + 'function f(){var x=i4(1,2,3,4); var c=0; return i4(x); c=x.x|0; return i4(x);} return f'), this)(), [1, 2, 3, 4]);
assertEqX4(asmLink(asmCompile('glob', USE_ASM + I32 + I32A + 'function f(){var x=i4(1,2,3,4); var c=0; return i4(x); x=i4a(x,x); return i4(x);} return f'), this)(), [1, 2, 3, 4]);
assertEqX4(asmLink(asmCompile('glob', USE_ASM + I32 + I32S + 'function f(){var x=i4(1,2,3,4); var c=0; return i4(x); x=i4s(x,x); return i4(x);} return f'), this)(), [1, 2, 3, 4]);
// 3. Function calls
// 3.1. No math builtins
assertAsmTypeFail('glob', USE_ASM + I32 + "var fround=glob.Math.fround; function f() {var x=i4(1,2,3,4); return +fround(x);} return f");
assertAsmTypeFail('glob', USE_ASM + I32 + "var sin=glob.Math.sin; function f() {var x=i4(1,2,3,4); return +sin(x);} return f");
assertAsmTypeFail('glob', USE_ASM + I32 + "var ceil=glob.Math.ceil; function f() {var x=i4(1,2,3,4); return +ceil(x);} return f");
assertAsmTypeFail('glob', USE_ASM + I32 + "var pow=glob.Math.pow; function f() {var x=i4(1,2,3,4); return +pow(1.0, x);} return f");
assertAsmTypeFail('glob', USE_ASM + I32 + "var fround=glob.Math.fround; function f() {var x=i4(1,2,3,4); x=i4(fround(3));} return f");
assertAsmTypeFail('glob', USE_ASM + I32 + "var sin=glob.Math.sin; function f() {var x=i4(1,2,3,4); x=i4(sin(3.0));} return f");
assertAsmTypeFail('glob', USE_ASM + I32 + "var ceil=glob.Math.sin; function f() {var x=i4(1,2,3,4); x=i4(ceil(3.0));} return f");
assertAsmTypeFail('glob', USE_ASM + I32 + "var pow=glob.Math.pow; function f() {var x=i4(1,2,3,4); x=i4(pow(1.0, 2.0));} return f");
// 3.2. FFI calls
// Can't pass SIMD arguments to FFI
assertAsmTypeFail('glob', 'ffi', USE_ASM + I32 + "var func=ffi.func; function f() {var x=i4(1,2,3,4); func(x);} return f");
assertAsmTypeFail('glob', 'ffi', USE_ASM + F32 + "var func=ffi.func; function f() {var x=f4(1,2,3,4); func(x);} return f");
// Can't have FFI return SIMD values
assertAsmTypeFail('glob', 'ffi', USE_ASM + I32 + "var func=ffi.func; function f() {var x=i4(1,2,3,4); x=i4(func());} return f");
assertAsmTypeFail('glob', 'ffi', USE_ASM + F32 + "var func=ffi.func; function f() {var x=f4(1,2,3,4); x=f4(func());} return f");
// 3.3 Internal calls
// asm.js -> asm.js
// Retrieving values from asm.js
var code = USE_ASM + I32 + I32A + `
var check = ffi.check;
function g() {
var i = 0;
var y = i4(0,0,0,0);
var tmp = i4(0,0,0,0); var z = i4(1,1,1,1);
var w = i4(5,5,5,5);
for (; (i|0) < 30; i = i + 1 |0)
y = i4a(z, y);
y = i4a(w, y);
check(y.x | 0, y.y | 0, y.z | 0, y.w | 0);
return i4(y);
}
function f(x) {
x = i4(x);
var y = i4(0,0,0,0);
y = i4(g());
check(y.x | 0, y.y | 0, y.z | 0, y.w | 0);
return i4(x);
}
return f;
`;
var v4 = SIMD.int32x4(1,2,3,4);
function check(x, y, z, w) {
assertEq(x, 35);
assertEq(y, 35);
assertEq(z, 35);
assertEq(w, 35);
}
var ffi = {check};
assertEqX4(asmLink(asmCompile('glob', 'ffi', code), this, ffi)(v4), [1,2,3,4]);
// Passing arguments from asm.js to asm.js
// TODO: make this code look better with template strings
var code = USE_ASM + I32 + I32A + `
var assertEq = ffi.assertEq;
function internal([args]) {
[coerc]
assertEq([last].x | 0, [i] | 0);
assertEq([last].y | 0, [i] + 1 |0);
assertEq([last].z | 0, [i] + 2 |0);
assertEq([last].w | 0, [i] + 3 |0);
}
function external() {
[decls]
internal([args]);
}
return external;
`;
var ffi = {assertEq};
var args = '';
var decls = '';
var coerc = '';
var last = '';
for (var i = 1; i < 10; ++i) {
var j = i;
args += ((i > 1) ? ', ':'') + 'x' + i;
decls += 'var x' + i + ' = i4(' + j++ + ', ' + j++ + ', ' + j++ + ', ' + j++ + ');\n';
coerc += 'x' + i + ' = i4(x' + i + ');\n';
last = 'x' + i;
var c = code.replace(/\[args\]/g, args)
.replace(/\[last\]/g, last)
.replace(/\[decls\]/i, decls)
.replace(/\[coerc\]/i, coerc)
.replace(/\[i\]/g, i);
asmLink(asmCompile('glob', 'ffi', c), this, ffi)();
}
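// For illustration, this is roughly what the template expands to for i == 1:
//   function internal(x1) {
//     x1 = i4(x1);
//     assertEq(x1.x | 0, 1 | 0);
//     assertEq(x1.y | 0, 1 + 1 | 0);
//     assertEq(x1.z | 0, 1 + 2 | 0);
//     assertEq(x1.w | 0, 1 + 3 | 0);
//   }
//   function external() {
//     var x1 = i4(1, 2, 3, 4);
//     internal(x1);
//   }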
// Stress-test for register spilling code and stack depth checks
var code = `
"use asm";
var i4 = glob.SIMD.int32x4;
var i4a = i4.add;
var assertEq = ffi.assertEq;
function g() {
var x = i4(1,2,3,4);
var y = i4(2,3,4,5);
var z = i4(0,0,0,0);
z = i4a(x, y);
assertEq(z.x | 0, 3);
assertEq(z.y | 0, 5);
assertEq(z.z | 0, 7);
assertEq(z.w | 0, 9);
}
return g
`
asmLink(asmCompile('glob', 'ffi', code), this, assertEqFFI)();
(function() {
var code = `
"use asm";
var i4 = glob.SIMD.int32x4;
var i4a = i4.add;
var assertEq = ffi.assertEq;
var one = ffi.one;
// Function call with arguments on the stack (1 on x64, 3 on x86)
function h(x1, x2, x3, x4, x5, x6, x7) {
x1=x1|0
x2=x2|0
x3=x3|0
x4=x4|0
x5=x5|0
x6=x6|0
x7=x7|0
return x1 + x2 |0
}
function g() {
var x = i4(1,2,3,4);
var y = i4(2,3,4,5);
var z = i4(0,0,0,0);
var w = 1;
z = i4a(x, y);
w = w + (one() | 0) | 0;
assertEq(z.x | 0, 3);
assertEq(z.y | 0, 5);
assertEq(z.z | 0, 7);
assertEq(z.w | 0, 9);
h(1, 2, 3, 4, 42, 42, 42)|0
return w | 0;
}
return g
`;
asmLink(asmCompile('glob', 'ffi', code), this, {assertEq: assertEq, one: () => 1})();
})();
// Function calls with mixed arguments on the stack (SIMD and scalar). In the
// worst case (x64), we have 6 int arg registers and 8 float registers.
(function() {
var code = `
"use asm";
var i4 = glob.SIMD.int32x4;
function h(
// In registers:
gpr1, gpr2, gpr3, gpr4, xmm1, xmm2, xmm3, xmm4, xmm5, xmm6, xmm7, xmm8,
// On the stack:
sint1, ssimd1, sdouble1, ssimd2, sint2, sint3, sint4, ssimd3, sdouble2
)
{
gpr1=gpr1|0;
gpr2=gpr2|0;
gpr3=gpr3|0;
gpr4=gpr4|0;
xmm1=+xmm1;
xmm2=+xmm2;
xmm3=+xmm3;
xmm4=+xmm4;
xmm5=+xmm5;
xmm6=+xmm6;
xmm7=+xmm7;
xmm8=+xmm8;
sint1=sint1|0;
ssimd1=i4(ssimd1);
sdouble1=+sdouble1;
ssimd2=i4(ssimd2);
sint2=sint2|0;
sint3=sint3|0;
sint4=sint4|0;
ssimd3=i4(ssimd3);
sdouble2=+sdouble2;
return (ssimd1.x|0) + (ssimd2.y|0) + (ssimd3.z|0) + sint2 + gpr3 | 0;
}
function g() {
var simd1 = i4(1,2,3,4);
var simd2 = i4(5,6,7,8);
var simd3 = i4(9,10,11,12);
return h(1, 2, 3, 4,
1., 2., 3., 4., 5., 6., 7., 8.,
5, simd1, 9., simd2, 6, 7, 8, simd3, 10.) | 0;
}
return g
`;
assertEq(asmLink(asmCompile('glob', 'ffi', code), this)(), 1 + 6 + 11 + 6 + 3);
})();
// Check that the interrupt callback doesn't erase high components of simd
// registers:
// WARNING: must be the last test in this file
(function() {
var iters = 2000000;
var code = `
"use asm";
var i4 = glob.SIMD.int32x4;
var i4a = i4.add;
function _() {
var i = 0;
var n = i4(0,0,0,0);
var one = i4(1,1,1,1);
for (; (i>>>0) < ` + iters + `; i=(i+1)>>>0) {
n = i4a(n, one);
}
return i4(n);
}
return _;`;
// This test relies on the fact that setting the timeout will call the
// interrupt callback at fixed intervals, even before the timeout.
timeout(1000);
var x4 = asmLink(asmCompile('glob', code), this)();
assertEq(x4.x, iters);
assertEq(x4.y, iters);
assertEq(x4.z, iters);
assertEq(x4.w, iters);
})();
} catch(e) {
print('Stack:', e.stack)
print('Error:', e)
throw e;
}

View File

@ -8822,12 +8822,15 @@ CodeGenerator::visitAsmJSCall(LAsmJSCall *ins)
if (mir->spIncrement())
masm.freeStack(mir->spIncrement());
JS_ASSERT((sizeof(AsmJSFrame) + masm.framePushed()) % StackAlignment == 0);
JS_ASSERT((sizeof(AsmJSFrame) + masm.framePushed()) % AsmJSStackAlignment == 0);
#ifdef DEBUG
static_assert(AsmJSStackAlignment >= ABIStackAlignment,
"The asm.js stack alignment should subsume the ABI-required alignment");
static_assert(AsmJSStackAlignment % ABIStackAlignment == 0,
"The asm.js stack alignment should subsume the ABI-required alignment");
Label ok;
JS_ASSERT(IsPowerOfTwo(StackAlignment));
masm.branchTestPtr(Assembler::Zero, StackPointer, Imm32(StackAlignment - 1), &ok);
masm.branchTestPtr(Assembler::Zero, StackPointer, Imm32(AsmJSStackAlignment - 1), &ok);
masm.breakpoint();
masm.bind(&ok);
#endif
@ -9066,7 +9069,7 @@ CodeGenerator::visitAsmJSInterruptCheck(LAsmJSInterruptCheck *lir)
masm.branch32(Assembler::Equal, scratch, Imm32(0), &rejoin);
{
uint32_t stackFixup = ComputeByteAlignment(masm.framePushed() + sizeof(AsmJSFrame),
StackAlignment);
ABIStackAlignment);
masm.reserveStack(stackFixup);
masm.call(lir->funcDesc(), lir->interruptExit());
masm.freeStack(stackFixup);

View File

@ -3214,3 +3214,9 @@ jit::JitSupportsFloatingPoint()
{
return js::jit::MacroAssembler::SupportsFloatingPoint();
}
bool
jit::JitSupportsSimd()
{
return js::jit::MacroAssembler::SupportsSimd();
}

View File

@ -207,6 +207,7 @@ bool UpdateForDebugMode(JSContext *maybecx, JSCompartment *comp,
AutoDebugModeInvalidation &invalidate);
bool JitSupportsFloatingPoint();
bool JitSupportsSimd();
} // namespace jit
} // namespace js

View File

@ -1075,7 +1075,7 @@ uint8_t *
alignDoubleSpillWithOffset(uint8_t *pointer, int32_t offset)
{
uint32_t address = reinterpret_cast<uint32_t>(pointer);
address = (address - offset) & ~(StackAlignment - 1);
address = (address - offset) & ~(ABIStackAlignment - 1);
return reinterpret_cast<uint8_t *>(address);
}

View File

@ -1426,11 +1426,11 @@ class MacroAssembler : public MacroAssemblerSpecific
PopRegsInMask(liveRegs);
}
void assertStackAlignment() {
void assertStackAlignment(uint32_t alignment) {
#ifdef DEBUG
Label ok;
JS_ASSERT(IsPowerOfTwo(StackAlignment));
branchTestPtr(Assembler::Zero, StackPointer, Imm32(StackAlignment - 1), &ok);
JS_ASSERT(IsPowerOfTwo(alignment));
branchTestPtr(Assembler::Zero, StackPointer, Imm32(alignment - 1), &ok);
breakpoint();
bind(&ok);
#endif
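// Illustration (not part of the patch): with alignment == 16 the branch masks
// the low four bits (Imm32(15)), so the ok path is taken exactly when
// sp % 16 == 0.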
@ -1508,10 +1508,10 @@ JSOpToCondition(JSOp op, bool isSigned)
}
static inline size_t
StackDecrementForCall(size_t bytesAlreadyPushed, size_t bytesToPush)
StackDecrementForCall(uint32_t alignment, size_t bytesAlreadyPushed, size_t bytesToPush)
{
return bytesToPush +
ComputeByteAlignment(bytesAlreadyPushed + bytesToPush, StackAlignment);
ComputeByteAlignment(bytesAlreadyPushed + bytesToPush, alignment);
}
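// Worked example (illustrative values): StackDecrementForCall(16, 24, 36)
// returns 36 + ComputeByteAlignment(24 + 36, 16) == 36 + 4 == 40; pushing 40
// more bytes brings the total to 64, a multiple of the requested alignment.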
} // namespace jit

View File

@ -1586,10 +1586,10 @@ class LIRGraph
// platform stack alignment requirement, and so that it's a multiple of
// the number of slots per Value.
uint32_t paddedLocalSlotCount() const {
// Round to StackAlignment, but also round to at least sizeof(Value) in
// case that's greater, because StackOffsetOfPassedArg rounds argument
// slots to 8-byte boundaries.
size_t Alignment = Max(size_t(StackAlignment), sizeof(Value));
// Round to ABIStackAlignment, but also round to at least sizeof(Value)
// in case that's greater, because StackOffsetOfPassedArg rounds
// argument slots to 8-byte boundaries.
size_t Alignment = Max(size_t(ABIStackAlignment), sizeof(Value));
return AlignBytes(localSlotCount(), Alignment);
}
size_t paddedLocalSlotsSize() const {

View File

@ -3553,7 +3553,7 @@ LIRGenerator::visitAsmJSParameter(MAsmJSParameter *ins)
if (abi.argInRegister())
return defineFixed(new(alloc()) LAsmJSParameter, ins, LAllocation(abi.reg()));
JS_ASSERT(IsNumberType(ins->type()));
JS_ASSERT(IsNumberType(ins->type()) || IsSimdType(ins->type()));
return defineFixed(new(alloc()) LAsmJSParameter, ins, LArgument(abi.offsetFromArgBase()));
}
@ -3566,6 +3566,8 @@ LIRGenerator::visitAsmJSReturn(MAsmJSReturn *ins)
lir->setOperand(0, useFixed(rval, ReturnFloat32Reg));
else if (rval->type() == MIRType_Double)
lir->setOperand(0, useFixed(rval, ReturnDoubleReg));
else if (IsSimdType(rval->type()))
lir->setOperand(0, useFixed(rval, ReturnSimdReg));
else if (rval->type() == MIRType_Int32)
lir->setOperand(0, useFixed(rval, ReturnReg));
else
@ -3582,7 +3584,7 @@ LIRGenerator::visitAsmJSVoidReturn(MAsmJSVoidReturn *ins)
bool
LIRGenerator::visitAsmJSPassStackArg(MAsmJSPassStackArg *ins)
{
if (IsFloatingPointType(ins->arg()->type())) {
if (IsFloatingPointType(ins->arg()->type()) || IsSimdType(ins->arg()->type())) {
JS_ASSERT(!ins->arg()->isEmittedAtUses());
return add(new(alloc()) LAsmJSPassStackArg(useRegisterAtStart(ins->arg())), ins);
}

View File

@ -493,6 +493,7 @@ MConstant::New(TempAllocator &alloc, const Value &v, types::CompilerConstraintLi
MConstant *
MConstant::NewAsmJS(TempAllocator &alloc, const Value &v, MIRType type)
{
JS_ASSERT(!IsSimdType(type));
MConstant *constant = new(alloc) MConstant(v, nullptr);
constant->setResultType(type);
return constant;

View File

@ -11187,7 +11187,7 @@ class MAsmJSLoadGlobalVar : public MNullaryInstruction
MAsmJSLoadGlobalVar(MIRType type, unsigned globalDataOffset, bool isConstant)
: globalDataOffset_(globalDataOffset), isConstant_(isConstant)
{
JS_ASSERT(IsNumberType(type));
JS_ASSERT(IsNumberType(type) || IsSimdType(type));
setResultType(type);
setMovable();
}

View File

@ -145,9 +145,8 @@ static MOZ_CONSTEXPR_VAR FloatRegister d15 = {FloatRegisters::d15, VFPRegister::
// load/store) operate in a single cycle when the address they are dealing with
// is 8 byte aligned. Also, the ARM abi wants the stack to be 8 byte aligned at
// function boundaries. I'm trying to make sure this is always true.
static const uint32_t StackAlignment = 8;
static const uint32_t ABIStackAlignment = 8;
static const uint32_t CodeAlignment = 8;
static const bool StackKeptAligned = true;
// This boolean indicates whether we support SIMD instructions flavoured for
// this architecture or not. Rather than a method in the LIRGenerator, it is
@ -156,6 +155,8 @@ static const bool StackKeptAligned = true;
static const bool SupportsSimd = false;
static const uint32_t SimdStackAlignment = 8;
static const uint32_t AsmJSStackAlignment = SimdStackAlignment;
static const Scale ScalePointer = TimesFour;
class Instruction;
@ -1552,6 +1553,9 @@ class Assembler : public AssemblerShared
static bool SupportsFloatingPoint() {
return HasVFP();
}
static bool SupportsSimd() {
return js::jit::SupportsSimd;
}
protected:
void addPendingJump(BufferOffset src, ImmPtr target, Relocation::Kind kind) {

View File

@ -1880,7 +1880,7 @@ MacroAssemblerARMCompat::freeStack(Register amount)
void
MacroAssembler::PushRegsInMask(RegisterSet set, FloatRegisterSet simdSet)
{
JS_ASSERT(!SupportsSimd && simdSet.size() == 0);
JS_ASSERT(!SupportsSimd() && simdSet.size() == 0);
int32_t diffF = set.fpus().getPushSizeInBytes();
int32_t diffG = set.gprs().size() * sizeof(intptr_t);
@ -1909,7 +1909,7 @@ MacroAssembler::PushRegsInMask(RegisterSet set, FloatRegisterSet simdSet)
void
MacroAssembler::PopRegsInMaskIgnore(RegisterSet set, RegisterSet ignore, FloatRegisterSet simdSet)
{
JS_ASSERT(!SupportsSimd && simdSet.size() == 0);
JS_ASSERT(!SupportsSimd() && simdSet.size() == 0);
int32_t diffG = set.gprs().size() * sizeof(intptr_t);
int32_t diffF = set.fpus().getPushSizeInBytes();
const int32_t reservedG = diffG;
@ -3778,7 +3778,7 @@ MacroAssemblerARMCompat::setupUnalignedABICall(uint32_t args, Register scratch)
ma_mov(sp, scratch);
// Force sp to be aligned.
ma_and(Imm32(~(StackAlignment - 1)), sp, sp);
ma_and(Imm32(~(ABIStackAlignment - 1)), sp, sp);
ma_push(scratch);
}
@ -3937,7 +3937,7 @@ MacroAssemblerARMCompat::passABIArg(FloatRegister freg, MoveOp::Type type)
void MacroAssemblerARMCompat::checkStackAlignment()
{
#ifdef DEBUG
ma_tst(sp, Imm32(StackAlignment - 1));
ma_tst(sp, Imm32(ABIStackAlignment - 1));
breakpoint(NonZero);
#endif
}
@ -3956,11 +3956,11 @@ MacroAssemblerARMCompat::callWithABIPre(uint32_t *stackAdjust, bool callFromAsmJ
if (!dynamicAlignment_) {
*stackAdjust += ComputeByteAlignment(framePushed_ + *stackAdjust + alignmentAtPrologue,
StackAlignment);
ABIStackAlignment);
} else {
// sizeof(intptr_t) accounts for the saved stack pointer pushed by
// setupUnalignedABICall.
*stackAdjust += ComputeByteAlignment(*stackAdjust + sizeof(intptr_t), StackAlignment);
*stackAdjust += ComputeByteAlignment(*stackAdjust + sizeof(intptr_t), ABIStackAlignment);
}
reserveStack(*stackAdjust);

View File

@ -2117,7 +2117,7 @@ Simulator::softwareInterrupt(SimInstruction *instr)
int32_t saved_lr = get_register(lr);
intptr_t external = reinterpret_cast<intptr_t>(redirection->nativeFunction());
bool stack_aligned = (get_register(sp) & (StackAlignment - 1)) == 0;
bool stack_aligned = (get_register(sp) & (ABIStackAlignment - 1)) == 0;
if (!stack_aligned) {
fprintf(stderr, "Runtime call with unaligned stack!\n");
MOZ_CRASH();
@ -4258,7 +4258,7 @@ Simulator::call(uint8_t* entry, int argument_count, ...)
if (argument_count >= 4)
entry_stack -= (argument_count - 4) * sizeof(int32_t);
entry_stack &= ~StackAlignment;
entry_stack &= ~ABIStackAlignment;
// Store remaining arguments on stack, from low to high memory.
intptr_t *stack_argument = reinterpret_cast<intptr_t*>(entry_stack);

View File

@ -158,9 +158,8 @@ static MOZ_CONSTEXPR_VAR FloatRegister f30 = { FloatRegisters::f30, FloatRegiste
// MIPS CPUs can only load multibyte data that is "naturally"
// four-byte-aligned, sp register should be eight-byte-aligned.
static const uint32_t StackAlignment = 8;
static const uint32_t ABIStackAlignment = 8;
static const uint32_t CodeAlignment = 4;
static const bool StackKeptAligned = true;
// This boolean indicates whether we support SIMD instructions flavoured for
// this architecture or not. Rather than a method in the LIRGenerator, it is
@ -171,6 +170,8 @@ static const bool SupportsSimd = false;
// alignment requirements still need to be explored.
static const uint32_t SimdStackAlignment = 8;
static const uint32_t AsmJSStackAlignment = SimdStackAlignment;
static const Scale ScalePointer = TimesFour;
// MIPS instruction types
@ -238,7 +239,6 @@ static const uint32_t RDMask = ((1 << RDBits) - 1) << RDShift;
static const uint32_t SAMask = ((1 << SABits) - 1) << SAShift;
static const uint32_t FunctionMask = ((1 << FunctionBits) - 1) << FunctionShift;
static const uint32_t RegMask = Registers::Total - 1;
static const uint32_t StackAlignmentMask = StackAlignment - 1;
static const uint32_t MAX_BREAK_CODE = 1024 - 1;

View File

@ -1574,7 +1574,7 @@ MacroAssembler::PushRegsInMask(RegisterSet set, FloatRegisterSet simdSet)
// Double values have to be aligned. We reserve extra space so that we can
// start writing from the first aligned location.
// We reserve a whole extra double so that the buffer has even size.
ma_and(SecondScratchReg, sp, Imm32(~(StackAlignment - 1)));
ma_and(SecondScratchReg, sp, Imm32(~(ABIStackAlignment - 1)));
reserveStack(diffF + sizeof(double));
for (FloatRegisterForwardIterator iter(set.fpus().reduceSetForPush()); iter.more(); iter++) {
@ -1596,7 +1596,7 @@ MacroAssembler::PopRegsInMaskIgnore(RegisterSet set, RegisterSet ignore, FloatRe
// Read the buffer from the first aligned location.
ma_addu(SecondScratchReg, sp, Imm32(reservedF + sizeof(double)));
ma_and(SecondScratchReg, SecondScratchReg, Imm32(~(StackAlignment - 1)));
ma_and(SecondScratchReg, SecondScratchReg, Imm32(~(ABIStackAlignment - 1)));
for (FloatRegisterForwardIterator iter(set.fpus().reduceSetForPush()); iter.more(); iter++) {
if (!ignore.has(*iter) && ((*iter).code() % 2 == 0))
@ -3158,7 +3158,7 @@ MacroAssemblerMIPSCompat::setupUnalignedABICall(uint32_t args, Register scratch)
// Force sp to be aligned
ma_subu(StackPointer, StackPointer, Imm32(sizeof(uint32_t)));
ma_and(StackPointer, StackPointer, Imm32(~(StackAlignment - 1)));
ma_and(StackPointer, StackPointer, Imm32(~(ABIStackAlignment - 1)));
as_sw(scratch, StackPointer, 0);
}
@ -3259,7 +3259,7 @@ MacroAssemblerMIPSCompat::checkStackAlignment()
{
#ifdef DEBUG
Label aligned;
as_andi(ScratchRegister, sp, StackAlignment - 1);
as_andi(ScratchRegister, sp, ABIStackAlignment - 1);
ma_b(ScratchRegister, zero, &aligned, Equal, ShortJump);
as_break(MAX_BREAK_CODE);
bind(&aligned);
@ -3271,7 +3271,7 @@ MacroAssemblerMIPSCompat::alignStackPointer()
{
movePtr(StackPointer, SecondScratchReg);
subPtr(Imm32(sizeof(uintptr_t)), StackPointer);
andPtr(Imm32(~(StackAlignment - 1)), StackPointer);
andPtr(Imm32(~(ABIStackAlignment - 1)), StackPointer);
storePtr(SecondScratchReg, Address(StackPointer, 0));
}
@ -3284,13 +3284,13 @@ MacroAssemblerMIPSCompat::restoreStackPointer()
void
MacroAssembler::alignFrameForICArguments(AfterICSaveLive &aic)
{
if (framePushed() % StackAlignment != 0) {
aic.alignmentPadding = StackAlignment - (framePushed() % StackAlignment);
if (framePushed() % ABIStackAlignment != 0) {
aic.alignmentPadding = ABIStackAlignment - (framePushed() % ABIStackAlignment);
reserveStack(aic.alignmentPadding);
} else {
aic.alignmentPadding = 0;
}
MOZ_ASSERT(framePushed() % StackAlignment == 0);
MOZ_ASSERT(framePushed() % ABIStackAlignment == 0);
checkStackAlignment();
}
@ -3316,10 +3316,10 @@ MacroAssemblerMIPSCompat::callWithABIPre(uint32_t *stackAdjust, bool callFromAsm
uint32_t alignmentAtPrologue = callFromAsmJS ? sizeof(AsmJSFrame) : 0;
if (dynamicAlignment_) {
*stackAdjust += ComputeByteAlignment(*stackAdjust, StackAlignment);
*stackAdjust += ComputeByteAlignment(*stackAdjust, ABIStackAlignment);
} else {
*stackAdjust += ComputeByteAlignment(framePushed_ + alignmentAtPrologue + *stackAdjust,
StackAlignment);
ABIStackAlignment);
}
reserveStack(*stackAdjust);
@ -3444,7 +3444,7 @@ void
MacroAssemblerMIPSCompat::handleFailureWithHandler(void *handler)
{
// Reserve space for exception information.
int size = (sizeof(ResumeFromException) + StackAlignment) & ~(StackAlignment - 1);
int size = (sizeof(ResumeFromException) + ABIStackAlignment) & ~(ABIStackAlignment - 1);
ma_subu(StackPointer, StackPointer, Imm32(size));
ma_move(a0, StackPointer); // Use a0 since it is a first function argument

View File

@ -1871,7 +1871,7 @@ Simulator::softwareInterrupt(SimInstruction *instr)
intptr_t external = reinterpret_cast<intptr_t>(redirection->nativeFunction());
bool stack_aligned = (getRegister(sp) & (StackAlignment - 1)) == 0;
bool stack_aligned = (getRegister(sp) & (ABIStackAlignment - 1)) == 0;
if (!stack_aligned) {
fprintf(stderr, "Runtime call with unaligned stack!\n");
MOZ_CRASH();
@ -3405,7 +3405,7 @@ Simulator::call(uint8_t *entry, int argument_count, ...)
else
entry_stack = entry_stack - kCArgsSlotsSize;
entry_stack &= ~StackAlignment;
entry_stack &= ~ABIStackAlignment;
intptr_t *stack_argument = reinterpret_cast<intptr_t*>(entry_stack);

View File

@ -16,6 +16,7 @@ namespace jit {
static const bool SupportsSimd = false;
static const uint32_t SimdStackAlignment = 0;
static const uint32_t AsmJSStackAlignment = 0;
class Registers
{

View File

@ -68,9 +68,8 @@ static MOZ_CONSTEXPR_VAR ValueOperand JSReturnOperand(InvalidReg);
#error "Bad architecture"
#endif
static const uint32_t StackAlignment = 8;
static const uint32_t ABIStackAlignment = 4;
static const uint32_t CodeAlignment = 4;
static const bool StackKeptAligned = false;
static const Scale ScalePointer = TimesOne;

View File

@ -640,9 +640,9 @@ class CallSite : public CallSiteDesc
typedef Vector<CallSite, 0, SystemAllocPolicy> CallSiteVector;
// As an invariant across architectures, within asm.js code:
// $sp % StackAlignment = (sizeof(AsmJSFrame) + masm.framePushed) % StackAlignment
// $sp % AsmJSStackAlignment = (sizeof(AsmJSFrame) + masm.framePushed) % AsmJSStackAlignment
// Thus, AsmJSFrame represents the bytes pushed after the call (which occurred
// with a StackAlignment-aligned StackPointer) that are not included in
// with an AsmJSStackAlignment-aligned StackPointer) that are not included in
// masm.framePushed.
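// Illustration (sizes assumed, not taken from this patch): with
// AsmJSStackAlignment == 16 and sizeof(AsmJSFrame) == 8, a point where
// masm.framePushed == 24 has $sp % 16 == (8 + 24) % 16 == 0.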
struct AsmJSFrame
{

View File

@ -926,6 +926,7 @@ class AssemblerX86Shared : public AssemblerShared
static bool HasSSE3() { return CPUInfo::IsSSE3Present(); }
static bool HasSSE41() { return CPUInfo::IsSSE41Present(); }
static bool SupportsFloatingPoint() { return CPUInfo::IsSSE2Present(); }
static bool SupportsSimd() { return CPUInfo::IsSSE2Present(); }
// The below cmpl methods switch the lhs and rhs when it invokes the
// macroassembler to conform with intel standard. When calling this

View File

@ -2978,6 +2978,21 @@ public:
m_formatter.prefix(PRE_SSE_F3);
m_formatter.twoByteOp(OP2_MOVSD_WsdVsd, (RegisterID)src, address);
}
void movdqa_rm(XMMRegisterID src, const void* address)
{
spew("movdqa %s, %p",
nameFPReg(src), address);
m_formatter.prefix(PRE_SSE_66);
m_formatter.twoByteOp(OP2_MOVDQ_WdqVdq, (RegisterID)src, address);
}
void movaps_rm(XMMRegisterID src, const void* address)
{
spew("movaps %s, %p",
nameFPReg(src), address);
m_formatter.twoByteOp(OP2_MOVPS_WpsVps, (RegisterID)src, address);
}
#else
JmpSrc movsd_ripr(XMMRegisterID dst)
{
@ -3003,6 +3018,29 @@ public:
m_formatter.twoByteRipOp(OP2_MOVSD_WsdVsd, (RegisterID)src, 0);
return JmpSrc(m_formatter.size());
}
JmpSrc movss_rrip(XMMRegisterID src)
{
spew("movss %s, ?(%%rip)",
nameFPReg(src));
m_formatter.prefix(PRE_SSE_F3);
m_formatter.twoByteRipOp(OP2_MOVSD_WsdVsd, (RegisterID)src, 0);
return JmpSrc(m_formatter.size());
}
JmpSrc movdqa_rrip(XMMRegisterID src)
{
spew("movdqa %s, ?(%%rip)",
nameFPReg(src));
m_formatter.prefix(PRE_SSE_66);
m_formatter.twoByteRipOp(OP2_MOVDQ_WdqVdq, (RegisterID)src, 0);
return JmpSrc(m_formatter.size());
}
JmpSrc movaps_rrip(XMMRegisterID src)
{
spew("movaps %s, ?(%%rip)",
nameFPReg(src));
m_formatter.twoByteRipOp(OP2_MOVPS_WpsVps, (RegisterID)src, 0);
return JmpSrc(m_formatter.size());
}
#endif
void movaps_rr(XMMRegisterID src, XMMRegisterID dst)

View File

@ -69,26 +69,26 @@ CodeGeneratorShared::CodeGeneratorShared(MIRGenerator *gen, LIRGraph *graph, Mac
if (!gen->compilingAsmJS())
masm.setInstrumentation(&sps_);
// Since asm.js uses the system ABI which does not necessarily use a
// regular array where all slots are sizeof(Value), it maintains the max
// argument stack depth separately.
if (gen->compilingAsmJS()) {
// Since asm.js uses the system ABI which does not necessarily use a
// regular array where all slots are sizeof(Value), it maintains the max
// argument stack depth separately.
JS_ASSERT(graph->argumentSlotCount() == 0);
frameDepth_ += gen->maxAsmJSStackArgBytes();
// An MAsmJSCall does not align the stack pointer at calls sites but instead
// relies on the a priori stack adjustment (in the prologue) on platforms
// (like x64) which require the stack to be aligned.
if (StackKeptAligned || gen->performsCall() || gen->usesSimd()) {
unsigned alignmentAtCall = sizeof(AsmJSFrame) + frameDepth_;
unsigned firstFixup = 0;
if (unsigned rem = alignmentAtCall % StackAlignment)
frameDepth_ += (firstFixup = StackAlignment - rem);
if (gen->usesSimd())
setupSimdAlignment(firstFixup);
// If the function uses any SIMD, we may need to insert padding so that
// local slots are aligned for SIMD.
if (gen->usesSimd()) {
frameInitialAdjustment_ = ComputeByteAlignment(sizeof(AsmJSFrame), AsmJSStackAlignment);
frameDepth_ += frameInitialAdjustment_;
}
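// Illustration (sizes assumed): if sizeof(AsmJSFrame) were 8 with
// AsmJSStackAlignment == 16, frameInitialAdjustment_ would be 8, placing the
// first local slot on a 16-byte boundary.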
// An MAsmJSCall does not align the stack pointer at calls sites but instead
// relies on the a priori stack adjustment. This must be the last
// adjustment of frameDepth_.
if (gen->performsCall())
frameDepth_ += ComputeByteAlignment(sizeof(AsmJSFrame) + frameDepth_, AsmJSStackAlignment);
// FrameSizeClass is only used for bailing, which cannot happen in
// asm.js code.
frameClass_ = FrameSizeClass::None();
@ -97,38 +97,6 @@ CodeGeneratorShared::CodeGeneratorShared(MIRGenerator *gen, LIRGraph *graph, Mac
}
}
void
CodeGeneratorShared::setupSimdAlignment(unsigned fixup)
{
JS_STATIC_ASSERT(SimdStackAlignment % StackAlignment == 0);
// At this point, we have:
// (frameDepth_ + sizeof(AsmJSFrame)) % StackAlignment == 0
// which means we can add as many SimdStackAlignment as needed.
// The next constraint is to have all stack slots
// aligned for SIMD. That's done by having the first stack slot
// aligned. We need an offset such that:
// (frameDepth_ - offset) % SimdStackAlignment == 0
frameInitialAdjustment_ = frameDepth_ % SimdStackAlignment;
// We need to ensure that the first stack slot is actually
// located in this frame and not beforehand, when taking this
// offset into account, i.e.:
// frameDepth_ - initial adjustment >= frameDepth_ - fixup
// <=> fixup >= initial adjustment
//
// For instance, on x86 with gcc, if the initial frameDepth
// % 16 is 8, then the fixup is 0, although the initial
// adjustment is 8. The first stack slot would be located at
// frameDepth - 8 in this case, which is obviously before
// frameDepth.
//
// If that's not the case, we add SimdStackAlignment to the
// fixup, which will keep on satisfying other constraints.
if (frameInitialAdjustment_ > int32_t(fixup))
frameDepth_ += SimdStackAlignment;
}
bool
CodeGeneratorShared::generateOutOfLineCode()
{

View File

@ -496,8 +496,6 @@ class CodeGeneratorShared : public LInstructionVisitor
private:
void generateInvalidateEpilogue();
void setupSimdAlignment(unsigned fixup);
public:
CodeGeneratorShared(MIRGenerator *gen, LIRGraph *graph, MacroAssembler *masm);

View File

@ -319,10 +319,26 @@ CodeGeneratorX86Shared::visitAsmJSPassStackArg(LAsmJSPassStackArg *ins)
if (ins->arg()->isConstant()) {
masm.storePtr(ImmWord(ToInt32(ins->arg())), dst);
} else {
if (ins->arg()->isGeneralReg())
if (ins->arg()->isGeneralReg()) {
masm.storePtr(ToRegister(ins->arg()), dst);
else
masm.storeDouble(ToFloatRegister(ins->arg()), dst);
} else {
switch (mir->input()->type()) {
case MIRType_Double:
case MIRType_Float32:
masm.storeDouble(ToFloatRegister(ins->arg()), dst);
return true;
// StackPointer is SimdStackAlignment-aligned and ABIArgGenerator guarantees stack
// offsets are SimdStackAlignment-aligned.
case MIRType_Int32x4:
masm.storeAlignedInt32x4(ToFloatRegister(ins->arg()), dst);
return true;
case MIRType_Float32x4:
masm.storeAlignedFloat32x4(ToFloatRegister(ins->arg()), dst);
return true;
default: break;
}
MOZ_MAKE_COMPILER_ASSUME_IS_UNREACHABLE("unexpected mir type in AsmJSPassStackArg");
}
}
return true;
}

View File

@ -154,6 +154,12 @@ LIRGeneratorShared::defineReturn(LInstruction *lir, MDefinition *mir)
case MIRType_Double:
lir->setDef(0, LDefinition(vreg, LDefinition::DOUBLE, LFloatReg(ReturnDoubleReg)));
break;
case MIRType_Int32x4:
lir->setDef(0, LDefinition(vreg, LDefinition::INT32X4, LFloatReg(ReturnSimdReg)));
break;
case MIRType_Float32x4:
lir->setDef(0, LDefinition(vreg, LDefinition::FLOAT32X4, LFloatReg(ReturnSimdReg)));
break;
default:
LDefinition::Type type = LDefinition::TypeFrom(mir->type());
JS_ASSERT(type != LDefinition::DOUBLE && type != LDefinition::FLOAT32);

View File

@ -30,8 +30,17 @@ ABIArgGenerator::next(MIRType type)
#if defined(XP_WIN)
JS_STATIC_ASSERT(NumIntArgRegs == NumFloatArgRegs);
if (regIndex_ == NumIntArgRegs) {
current_ = ABIArg(stackOffset_);
stackOffset_ += sizeof(uint64_t);
if (IsSimdType(type)) {
// On Win64, >64 bit args need to be passed by reference, but asm.js
// doesn't allow passing SIMD values to FFIs. The only way to reach
// here is asm to asm calls, so we can break the ABI here.
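// Illustration (offset assumed): with stackOffset_ == 40, AlignBytes(40, 16)
// bumps it to 48, so the SIMD value occupies bytes [48, 64) of the arg area.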
stackOffset_ = AlignBytes(stackOffset_, SimdStackAlignment);
current_ = ABIArg(stackOffset_);
stackOffset_ += Simd128DataSize;
} else {
current_ = ABIArg(stackOffset_);
stackOffset_ += sizeof(uint64_t);
}
return current_;
}
switch (type) {
@ -43,6 +52,13 @@ ABIArgGenerator::next(MIRType type)
case MIRType_Double:
current_ = ABIArg(FloatArgRegs[regIndex_++]);
break;
case MIRType_Int32x4:
case MIRType_Float32x4:
// On Win64, >64 bit args need to be passed by reference, but asm.js
// doesn't allow passing SIMD values to FFIs. The only way to reach
// here is asm to asm calls, so we can break the ABI here.
current_ = ABIArg(FloatArgRegs[regIndex_++]);
break;
default:
MOZ_CRASH("Unexpected argument type");
}
@ -67,6 +83,16 @@ ABIArgGenerator::next(MIRType type)
}
current_ = ABIArg(FloatArgRegs[floatRegIndex_++]);
break;
case MIRType_Int32x4:
case MIRType_Float32x4:
if (floatRegIndex_ == NumFloatArgRegs) {
stackOffset_ = AlignBytes(stackOffset_, SimdStackAlignment);
current_ = ABIArg(stackOffset_);
stackOffset_ += Simd128DataSize;
break;
}
current_ = ABIArg(FloatArgRegs[floatRegIndex_++]);
break;
default:
MOZ_CRASH("Unexpected argument type");
}

View File

@ -184,10 +184,7 @@ static MOZ_CONSTEXPR_VAR Register OsrFrameReg = IntArgReg3;
static MOZ_CONSTEXPR_VAR Register PreBarrierReg = rdx;
// GCC stack is aligned on 16 bytes, but we don't maintain the invariant in
// jitted code.
static const uint32_t StackAlignment = 16;
static const bool StackKeptAligned = false;
static const uint32_t ABIStackAlignment = 16;
static const uint32_t CodeAlignment = 8;
// This boolean indicates whether we support SIMD instructions flavoured for
@ -197,6 +194,8 @@ static const uint32_t CodeAlignment = 8;
static const bool SupportsSimd = true;
static const uint32_t SimdStackAlignment = 16;
static const uint32_t AsmJSStackAlignment = SimdStackAlignment;
static const Scale ScalePointer = TimesEight;
} // namespace jit
@ -603,12 +602,30 @@ class Assembler : public AssemblerX86Shared
CodeOffsetLabel loadRipRelativeDouble(FloatRegister dest) {
return CodeOffsetLabel(masm.movsd_ripr(dest.code()).offset());
}
CodeOffsetLabel loadRipRelativeFloat32(FloatRegister dest) {
return CodeOffsetLabel(masm.movss_ripr(dest.code()).offset());
}
CodeOffsetLabel loadRipRelativeInt32x4(FloatRegister dest) {
return CodeOffsetLabel(masm.movdqa_ripr(dest.code()).offset());
}
CodeOffsetLabel loadRipRelativeFloat32x4(FloatRegister dest) {
return CodeOffsetLabel(masm.movaps_ripr(dest.code()).offset());
}
CodeOffsetLabel storeRipRelativeInt32(Register dest) {
return CodeOffsetLabel(masm.movl_rrip(dest.code()).offset());
}
CodeOffsetLabel storeRipRelativeDouble(FloatRegister dest) {
return CodeOffsetLabel(masm.movsd_rrip(dest.code()).offset());
}
CodeOffsetLabel storeRipRelativeFloat32(FloatRegister dest) {
return CodeOffsetLabel(masm.movss_rrip(dest.code()).offset());
}
CodeOffsetLabel storeRipRelativeInt32x4(FloatRegister dest) {
return CodeOffsetLabel(masm.movdqa_rrip(dest.code()).offset());
}
CodeOffsetLabel storeRipRelativeFloat32x4(FloatRegister dest) {
return CodeOffsetLabel(masm.movaps_rrip(dest.code()).offset());
}
CodeOffsetLabel leaRipRelative(Register dest) {
return CodeOffsetLabel(masm.leaq_rip(dest.code()).offset());
}

View File

@ -349,11 +349,32 @@ CodeGeneratorX64::visitAsmJSLoadGlobalVar(LAsmJSLoadGlobalVar *ins)
{
MAsmJSLoadGlobalVar *mir = ins->mir();
MIRType type = mir->type();
JS_ASSERT(IsNumberType(type) || IsSimdType(type));
CodeOffsetLabel label;
if (mir->type() == MIRType_Int32)
switch (type) {
case MIRType_Int32:
label = masm.loadRipRelativeInt32(ToRegister(ins->output()));
else
break;
case MIRType_Float32:
label = masm.loadRipRelativeFloat32(ToFloatRegister(ins->output()));
break;
case MIRType_Double:
label = masm.loadRipRelativeDouble(ToFloatRegister(ins->output()));
break;
// Aligned access: code is aligned on PageSize + there is padding
// before the global data section.
case MIRType_Int32x4:
label = masm.loadRipRelativeInt32x4(ToFloatRegister(ins->output()));
break;
case MIRType_Float32x4:
label = masm.loadRipRelativeFloat32x4(ToFloatRegister(ins->output()));
break;
default:
MOZ_ASSUME_UNREACHABLE("unexpected type in visitAsmJSLoadGlobalVar");
}
masm.append(AsmJSGlobalAccess(label, mir->globalDataOffset()));
return true;
}
@ -364,13 +385,31 @@ CodeGeneratorX64::visitAsmJSStoreGlobalVar(LAsmJSStoreGlobalVar *ins)
MAsmJSStoreGlobalVar *mir = ins->mir();
MIRType type = mir->value()->type();
JS_ASSERT(IsNumberType(type));
JS_ASSERT(IsNumberType(type) || IsSimdType(type));
CodeOffsetLabel label;
if (type == MIRType_Int32)
switch (type) {
case MIRType_Int32:
label = masm.storeRipRelativeInt32(ToRegister(ins->value()));
else
break;
case MIRType_Float32:
label = masm.storeRipRelativeFloat32(ToFloatRegister(ins->value()));
break;
case MIRType_Double:
label = masm.storeRipRelativeDouble(ToFloatRegister(ins->value()));
break;
// Aligned access: code is aligned on PageSize + there is padding
// before the global data section.
case MIRType_Int32x4:
label = masm.storeRipRelativeInt32x4(ToFloatRegister(ins->value()));
break;
case MIRType_Float32x4:
label = masm.storeRipRelativeFloat32x4(ToFloatRegister(ins->value()));
break;
default:
MOZ_ASSUME_UNREACHABLE("unexpected type in visitAsmJSStoreGlobalVar");
}
masm.append(AsmJSGlobalAccess(label, mir->globalDataOffset()));
return true;
}

View File

@ -200,7 +200,7 @@ MacroAssemblerX64::setupUnalignedABICall(uint32_t args, Register scratch)
dynamicAlignment_ = true;
movq(rsp, scratch);
andq(Imm32(~(StackAlignment - 1)), rsp);
andq(Imm32(~(ABIStackAlignment - 1)), rsp);
push(scratch);
}
@ -270,11 +270,11 @@ MacroAssemblerX64::callWithABIPre(uint32_t *stackAdjust)
if (dynamicAlignment_) {
*stackAdjust = stackForCall_
+ ComputeByteAlignment(stackForCall_ + sizeof(intptr_t),
StackAlignment);
ABIStackAlignment);
} else {
*stackAdjust = stackForCall_
+ ComputeByteAlignment(stackForCall_ + framePushed_,
StackAlignment);
ABIStackAlignment);
}
reserveStack(*stackAdjust);
@ -293,7 +293,7 @@ MacroAssemblerX64::callWithABIPre(uint32_t *stackAdjust)
#ifdef DEBUG
{
Label good;
testq(rsp, Imm32(StackAlignment - 1));
testq(rsp, Imm32(ABIStackAlignment - 1));
j(Equal, &good);
breakpoint();
bind(&good);

View File

@ -551,7 +551,6 @@ JitRuntime::generateBailoutHandler(JSContext *cx, ExecutionMode mode)
JitCode *
JitRuntime::generateVMWrapper(JSContext *cx, const VMFunction &f)
{
JS_ASSERT(!StackKeptAligned);
JS_ASSERT(functionWrappers_);
JS_ASSERT(functionWrappers_->initialized());
VMWrapperMap::AddPtr p = functionWrappers_->lookupForAdd(&f);

View File

@ -19,16 +19,26 @@ ABIArgGenerator::ABIArgGenerator()
ABIArg
ABIArgGenerator::next(MIRType type)
{
current_ = ABIArg(stackOffset_);
switch (type) {
case MIRType_Int32:
case MIRType_Pointer:
current_ = ABIArg(stackOffset_);
stackOffset_ += sizeof(uint32_t);
break;
case MIRType_Float32: // Float32 moves are actually double moves
case MIRType_Double:
current_ = ABIArg(stackOffset_);
stackOffset_ += sizeof(uint64_t);
break;
case MIRType_Int32x4:
case MIRType_Float32x4:
// SIMD values aren't passed in or out of C++, so we can make up
// whatever internal ABI we like. visitAsmJSPassStackArg assumes
// SimdStackAlignment.
stackOffset_ = AlignBytes(stackOffset_, SimdStackAlignment);
current_ = ABIArg(stackOffset_);
stackOffset_ += Simd128DataSize;
break;
default:
MOZ_CRASH("Unexpected argument type");
}
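// Illustration (not from the patch): starting from stackOffset_ == 0,
//   next(MIRType_Int32)     -> offset 0,  stackOffset_ becomes 4
//   next(MIRType_Float32x4) -> AlignBytes(4, 16) == 16, so offset 16,
//                              stackOffset_ becomes 32
//   next(MIRType_Double)    -> offset 32, stackOffset_ becomes 40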

View File

@ -108,14 +108,13 @@ static MOZ_CONSTEXPR_VAR Register AsmJSIonExitRegD0 = edi;
static MOZ_CONSTEXPR_VAR Register AsmJSIonExitRegD1 = eax;
static MOZ_CONSTEXPR_VAR Register AsmJSIonExitRegD2 = esi;
// GCC stack is aligned on 16 bytes, but we don't maintain the invariant in
// jitted code.
// GCC stack is aligned on 16 bytes. Ion does not maintain this for internal
// calls. asm.js code does.
#if defined(__GNUC__)
static const uint32_t StackAlignment = 16;
static const uint32_t ABIStackAlignment = 16;
#else
static const uint32_t StackAlignment = 4;
static const uint32_t ABIStackAlignment = 4;
#endif
static const bool StackKeptAligned = false;
static const uint32_t CodeAlignment = 8;
// This boolean indicates whether we support SIMD instructions flavoured for
@ -125,6 +124,8 @@ static const uint32_t CodeAlignment = 8;
static const bool SupportsSimd = true;
static const uint32_t SimdStackAlignment = 16;
static const uint32_t AsmJSStackAlignment = SimdStackAlignment;
struct ImmTag : public Imm32
{
ImmTag(JSValueTag mask)
@ -522,6 +523,16 @@ class Assembler : public AssemblerX86Shared
masm.movsd_mr(src.addr, dest.code());
return CodeOffsetLabel(masm.currentOffset());
}
CodeOffsetLabel movdqaWithPatch(PatchedAbsoluteAddress src, FloatRegister dest) {
JS_ASSERT(HasSSE2());
masm.movdqa_mr(src.addr, dest.code());
return CodeOffsetLabel(masm.currentOffset());
}
CodeOffsetLabel movapsWithPatch(PatchedAbsoluteAddress src, FloatRegister dest) {
JS_ASSERT(HasSSE2());
masm.movaps_mr(src.addr, dest.code());
return CodeOffsetLabel(masm.currentOffset());
}
// Store to *dest where dest can be patched.
CodeOffsetLabel movbWithPatch(Register src, PatchedAbsoluteAddress dest) {
@ -546,6 +557,16 @@ class Assembler : public AssemblerX86Shared
masm.movsd_rm(src.code(), dest.addr);
return CodeOffsetLabel(masm.currentOffset());
}
CodeOffsetLabel movdqaWithPatch(FloatRegister src, PatchedAbsoluteAddress dest) {
JS_ASSERT(HasSSE2());
masm.movdqa_rm(src.code(), dest.addr);
return CodeOffsetLabel(masm.currentOffset());
}
CodeOffsetLabel movapsWithPatch(FloatRegister src, PatchedAbsoluteAddress dest) {
JS_ASSERT(HasSSE2());
masm.movaps_rm(src.code(), dest.addr);
return CodeOffsetLabel(masm.currentOffset());
}
void loadAsmJSActivation(Register dest) {
CodeOffsetLabel label = movlWithPatch(PatchedAbsoluteAddress(), dest);

View File

@ -462,15 +462,30 @@ CodeGeneratorX86::visitAsmJSLoadGlobalVar(LAsmJSLoadGlobalVar *ins)
{
MAsmJSLoadGlobalVar *mir = ins->mir();
MIRType type = mir->type();
JS_ASSERT(IsNumberType(type));
JS_ASSERT(IsNumberType(type) || IsSimdType(type));
CodeOffsetLabel label;
if (type == MIRType_Int32)
switch (type) {
case MIRType_Int32:
label = masm.movlWithPatch(PatchedAbsoluteAddress(), ToRegister(ins->output()));
else if (type == MIRType_Float32)
break;
case MIRType_Float32:
label = masm.movssWithPatch(PatchedAbsoluteAddress(), ToFloatRegister(ins->output()));
else
break;
case MIRType_Double:
label = masm.movsdWithPatch(PatchedAbsoluteAddress(), ToFloatRegister(ins->output()));
break;
// Aligned access: code is aligned on PageSize + there is padding
// before the global data section.
case MIRType_Int32x4:
label = masm.movdqaWithPatch(PatchedAbsoluteAddress(), ToFloatRegister(ins->output()));
break;
case MIRType_Float32x4:
label = masm.movapsWithPatch(PatchedAbsoluteAddress(), ToFloatRegister(ins->output()));
break;
default:
MOZ_ASSUME_UNREACHABLE("unexpected type in visitAsmJSLoadGlobalVar");
}
masm.append(AsmJSGlobalAccess(label, mir->globalDataOffset()));
return true;
}
@ -481,15 +496,30 @@ CodeGeneratorX86::visitAsmJSStoreGlobalVar(LAsmJSStoreGlobalVar *ins)
MAsmJSStoreGlobalVar *mir = ins->mir();
MIRType type = mir->value()->type();
JS_ASSERT(IsNumberType(type));
JS_ASSERT(IsNumberType(type) || IsSimdType(type));
CodeOffsetLabel label;
if (type == MIRType_Int32)
switch (type) {
case MIRType_Int32:
label = masm.movlWithPatch(ToRegister(ins->value()), PatchedAbsoluteAddress());
else if (type == MIRType_Float32)
break;
case MIRType_Float32:
label = masm.movssWithPatch(ToFloatRegister(ins->value()), PatchedAbsoluteAddress());
else
break;
case MIRType_Double:
label = masm.movsdWithPatch(ToFloatRegister(ins->value()), PatchedAbsoluteAddress());
break;
// Aligned access: code is aligned on PageSize + there is padding
// before the global data section.
case MIRType_Int32x4:
label = masm.movdqaWithPatch(ToFloatRegister(ins->value()), PatchedAbsoluteAddress());
break;
case MIRType_Float32x4:
label = masm.movapsWithPatch(ToFloatRegister(ins->value()), PatchedAbsoluteAddress());
break;
default:
MOZ_ASSUME_UNREACHABLE("unexpected type in visitAsmJSStoreGlobalVar");
}
masm.append(AsmJSGlobalAccess(label, mir->globalDataOffset()));
return true;
}

View File

@ -227,7 +227,7 @@ MacroAssemblerX86::setupUnalignedABICall(uint32_t args, Register scratch)
dynamicAlignment_ = true;
movl(esp, scratch);
andl(Imm32(~(StackAlignment - 1)), esp);
andl(Imm32(~(ABIStackAlignment - 1)), esp);
push(scratch);
}
@ -267,11 +267,11 @@ MacroAssemblerX86::callWithABIPre(uint32_t *stackAdjust)
if (dynamicAlignment_) {
*stackAdjust = stackForCall_
+ ComputeByteAlignment(stackForCall_ + sizeof(intptr_t),
StackAlignment);
ABIStackAlignment);
} else {
*stackAdjust = stackForCall_
+ ComputeByteAlignment(stackForCall_ + framePushed_,
StackAlignment);
ABIStackAlignment);
}
reserveStack(*stackAdjust);
@ -291,7 +291,7 @@ MacroAssemblerX86::callWithABIPre(uint32_t *stackAdjust)
{
// Check call alignment.
Label good;
testl(esp, Imm32(StackAlignment - 1));
testl(esp, Imm32(ABIStackAlignment - 1));
j(Equal, &good);
breakpoint();
bind(&good);

View File

@ -590,7 +590,6 @@ JitRuntime::generateBailoutHandler(JSContext *cx, ExecutionMode mode)
JitCode *
JitRuntime::generateVMWrapper(JSContext *cx, const VMFunction &f)
{
JS_ASSERT(!StackKeptAligned);
JS_ASSERT(functionWrappers_);
JS_ASSERT(functionWrappers_->initialized());
VMWrapperMap::AddPtr p = functionWrappers_->lookupForAdd(&f);

View File

@ -291,6 +291,7 @@ MSG_DEF(JSMSG_STRICT_CODE_WITH, 0, JSEXN_SYNTAXERR, "strict mode code may
MSG_DEF(JSMSG_STRICT_FUNCTION_STATEMENT, 0, JSEXN_SYNTAXERR, "in strict mode code, functions may be declared only at top level or immediately within another function")
MSG_DEF(JSMSG_SYNTAX_ERROR, 0, JSEXN_SYNTAXERR, "syntax error")
MSG_DEF(JSMSG_TEMPLSTR_UNTERM_EXPR, 0, JSEXN_SYNTAXERR, "missing } in template string")
MSG_DEF(JSMSG_SIMD_NOT_A_VECTOR, 0, JSEXN_TYPEERR, "value isn't a SIMD value object")
MSG_DEF(JSMSG_TOO_MANY_CASES, 0, JSEXN_INTERNALERR, "too many switch cases")
MSG_DEF(JSMSG_TOO_MANY_CATCH_VARS, 0, JSEXN_SYNTAXERR, "too many catch variables")
MSG_DEF(JSMSG_TOO_MANY_CON_ARGS, 0, JSEXN_SYNTAXERR, "too many constructor arguments")

View File

@ -295,6 +295,7 @@ struct ThreadSafeContext : ContextFriendFields,
bool signalHandlersInstalled() const { return runtime_->signalHandlersInstalled(); }
bool canUseSignalHandlers() const { return runtime_->canUseSignalHandlers(); }
bool jitSupportsFloatingPoint() const { return runtime_->jitSupportsFloatingPoint; }
bool jitSupportsSimd() const { return runtime_->jitSupportsSimd; }
// Thread local data that may be accessed freely.
DtoaState *dtoaState() {

View File

@ -211,6 +211,7 @@ JSRuntime::JSRuntime(JSRuntime *parentRuntime)
wrapObjectCallbacks(&DefaultWrapObjectCallbacks),
preserveWrapperCallback(nullptr),
jitSupportsFloatingPoint(false),
jitSupportsSimd(false),
ionPcScriptCache(nullptr),
threadPool(this),
defaultJSContextCallback(nullptr),
@ -315,6 +316,7 @@ JSRuntime::init(uint32_t maxbytes, uint32_t maxNurseryBytes)
nativeStackBase = GetNativeStackBase();
jitSupportsFloatingPoint = js::jit::JitSupportsFloatingPoint();
jitSupportsSimd = js::jit::JitSupportsSimd();
signalHandlersInstalled_ = EnsureAsmJSSignalHandlersInstalled(this);
canUseSignalHandlers_ = signalHandlersInstalled_ && !SignalBasedTriggersDisabled();

View File

@ -1274,6 +1274,7 @@ struct JSRuntime : public JS::shadow::Runtime,
}
bool jitSupportsFloatingPoint;
bool jitSupportsSimd;
// Used to reset stack limit after a signaled interrupt (i.e. jitStackLimit_ = -1)
// has been noticed by Ion/Baseline.

View File

@ -1550,7 +1550,7 @@ jit::JitActivation::markRematerializedFrames(JSTracer *trc)
AsmJSActivation::AsmJSActivation(JSContext *cx, AsmJSModule &module)
: Activation(cx, AsmJS),
module_(module),
errorRejoinSP_(nullptr),
entrySP_(nullptr),
profiler_(nullptr),
resumePC_(nullptr),
fp_(nullptr),
@ -1573,7 +1573,7 @@ AsmJSActivation::AsmJSActivation(JSContext *cx, AsmJSModule &module)
JSRuntime::AutoLockForInterrupt lock(cx->runtime());
cx->mainThread().asmJSActivationStack_ = this;
(void) errorRejoinSP_; // squelch GCC warning
(void) entrySP_; // squelch GCC warning
}
AsmJSActivation::~AsmJSActivation()

View File

@ -1482,7 +1482,7 @@ class AsmJSActivation : public Activation
AsmJSModule &module_;
AsmJSActivation *prevAsmJS_;
AsmJSActivation *prevAsmJSForModule_;
void *errorRejoinSP_;
void *entrySP_;
SPSProfiler *profiler_;
void *resumePC_;
uint8_t *fp_;
@ -1512,7 +1512,7 @@ class AsmJSActivation : public Activation
static unsigned offsetOfResumePC() { return offsetof(AsmJSActivation, resumePC_); }
// Written by JIT code:
static unsigned offsetOfErrorRejoinSP() { return offsetof(AsmJSActivation, errorRejoinSP_); }
static unsigned offsetOfEntrySP() { return offsetof(AsmJSActivation, entrySP_); }
static unsigned offsetOfFP() { return offsetof(AsmJSActivation, fp_); }
static unsigned offsetOfExitReason() { return offsetof(AsmJSActivation, exitReason_); }

View File

@ -27,6 +27,7 @@
#include "gfxMatrix.h"
#include "gfxPrefs.h"
#include "nsSVGIntegrationUtils.h"
#include "nsSVGUtils.h"
#include "nsLayoutUtils.h"
#include "nsIScrollableFrame.h"
#include "nsIFrameInlines.h"
@ -5499,6 +5500,40 @@ bool nsDisplaySVGEffects::TryMerge(nsDisplayListBuilder* aBuilder, nsDisplayItem
return true;
}
gfxRect
nsDisplaySVGEffects::BBoxInUserSpace() const
{
return nsSVGUtils::GetBBox(mFrame);
}
gfxPoint
nsDisplaySVGEffects::UserSpaceOffset() const
{
return nsSVGUtils::FrameSpaceInCSSPxToUserSpaceOffset(mFrame);
}
void
nsDisplaySVGEffects::ComputeInvalidationRegion(nsDisplayListBuilder* aBuilder,
const nsDisplayItemGeometry* aGeometry,
nsRegion* aInvalidRegion)
{
const nsDisplaySVGEffectsGeometry* geometry =
static_cast<const nsDisplaySVGEffectsGeometry*>(aGeometry);
bool snap;
nsRect bounds = GetBounds(aBuilder, &snap);
if (geometry->mFrameOffsetToReferenceFrame != ToReferenceFrame() ||
geometry->mUserSpaceOffset != UserSpaceOffset() ||
!geometry->mBBox.IsEqualInterior(BBoxInUserSpace())) {
// Filter and mask output can depend on the location of the frame's user
// space and on the frame's BBox. We need to invalidate if either of these
// change relative to the reference frame.
// Invalidations from our inactive layer manager are not enough to catch
// some of these cases because filters can produce output even if there's
// nothing in the filter input.
aInvalidRegion->Or(bounds, geometry->mBounds);
}
}
#ifdef MOZ_DUMP_PAINTING
void
nsDisplaySVGEffects::PrintEffects(nsACString& aTo)

View File

@ -3150,13 +3150,17 @@ public:
virtual already_AddRefed<Layer> BuildLayer(nsDisplayListBuilder* aBuilder,
LayerManager* aManager,
const ContainerLayerParameters& aContainerParameters) MOZ_OVERRIDE;
gfxRect BBoxInUserSpace() const;
gfxPoint UserSpaceOffset() const;
virtual nsDisplayItemGeometry* AllocateGeometry(nsDisplayListBuilder* aBuilder) MOZ_OVERRIDE
{
return new nsDisplaySVGEffectsGeometry(this, aBuilder);
}
virtual void ComputeInvalidationRegion(nsDisplayListBuilder* aBuilder,
const nsDisplayItemGeometry* aGeometry,
nsRegion* aInvalidRegion) MOZ_OVERRIDE
{
// We don't need to compute an invalidation region since we have LayerTreeInvalidation
}
nsRegion* aInvalidRegion) MOZ_OVERRIDE;
void PaintAsLayer(nsDisplayListBuilder* aBuilder,
nsRenderingContext* aCtx,

View File

@ -94,3 +94,17 @@ nsDisplayBoxShadowOuterGeometry::nsDisplayBoxShadowOuterGeometry(nsDisplayItem*
: nsDisplayItemGenericGeometry(aItem, aBuilder)
, mOpacity(aOpacity)
{}
nsDisplaySVGEffectsGeometry::nsDisplaySVGEffectsGeometry(nsDisplaySVGEffects* aItem, nsDisplayListBuilder* aBuilder)
: nsDisplayItemGeometry(aItem, aBuilder)
, mBBox(aItem->BBoxInUserSpace())
, mUserSpaceOffset(aItem->UserSpaceOffset())
, mFrameOffsetToReferenceFrame(aItem->ToReferenceFrame())
{}
void
nsDisplaySVGEffectsGeometry::MoveBy(const nsPoint& aOffset)
{
mBounds.MoveBy(aOffset);
mFrameOffsetToReferenceFrame += aOffset;
}

View File

@ -9,11 +9,13 @@
#include "mozilla/Attributes.h"
#include "nsRect.h"
#include "nsColor.h"
#include "gfxRect.h"
class nsDisplayItem;
class nsDisplayListBuilder;
class nsDisplayBackgroundImage;
class nsDisplayThemedBackground;
class nsDisplaySVGEffects;
/**
* This stores the geometry of an nsDisplayItem, and the area
@ -141,4 +143,16 @@ public:
nscolor mColor;
};
class nsDisplaySVGEffectsGeometry : public nsDisplayItemGeometry
{
public:
nsDisplaySVGEffectsGeometry(nsDisplaySVGEffects* aItem, nsDisplayListBuilder* aBuilder);
virtual void MoveBy(const nsPoint& aOffset) MOZ_OVERRIDE;
gfxRect mBBox;
gfxPoint mUserSpaceOffset;
nsPoint mFrameOffsetToReferenceFrame;
};
#endif /*NSDISPLAYLISTINVALIDATION_H_*/

View File

@ -471,6 +471,7 @@ nsMathMLContainerFrame::FinalizeReflow(nsRenderingContext& aRenderingContext,
// If placeOrigin is false we should reach Place() with aPlaceOrigin == true
// through Stretch() eventually.
if (NS_MATHML_HAS_ERROR(mPresentationData.flags) || NS_FAILED(rv)) {
GatherAndStoreOverflow(&aDesiredSize);
DidReflowChildren(GetFirstPrincipalChild());
return rv;
}

View File

@ -0,0 +1,46 @@
<!--
Any copyright is dedicated to the Public Domain.
http://creativecommons.org/publicdomain/zero/1.0/
-->
<!DOCTYPE html>
<html lang="en" class="reftest-wait">
<meta charset="utf-8">
<title>When the filtered element's BBox relative to the page changes, the filtered element needs to be invalidated</title>
<style>
#spacer {
height: 100px;
}
#filtered {
width: 100px;
height: 100px;
background-color: lime;
filter: url(#filter);
}
</style>
<svg height="0">
<defs>
<filter id="filter" filterUnits="objectBoundingBox"
x="0%" y="0%" width="100%" height="100%"
color-interpolation-filters="sRGB">
<feMerge><feMergeNode/></feMerge>
</filter>
</defs>
</svg>
<div id="spacer"></div>
<div id="filtered"></div>
<script>
window.addEventListener("MozReftestInvalidate", function () {
document.getElementById("spacer").style.height = "50px";
document.documentElement.removeAttribute("class");
});
</script>

View File

@ -0,0 +1,45 @@
<!--
Any copyright is dedicated to the Public Domain.
http://creativecommons.org/publicdomain/zero/1.0/
-->
<!DOCTYPE html>
<html lang="en" class="reftest-wait">
<meta charset="utf-8">
<title>When the filtered element's BBox relative to the page changes, the filtered element needs to be invalidated</title>
<style>
#spacer {
height: 100px;
}
#filtered {
width: 100px;
height: 100px;
filter: url(#filter);
}
</style>
<svg height="0">
<defs>
<filter id="filter" filterUnits="objectBoundingBox"
x="0%" y="0%" width="100%" height="100%"
color-interpolation-filters="sRGB">
<feFlood flood-color="lime"/>
</filter>
</defs>
</svg>
<div id="spacer"></div>
<div id="filtered"></div>
<script>
window.addEventListener("MozReftestInvalidate", function () {
document.getElementById("spacer").style.height = "50px";
document.documentElement.removeAttribute("class");
});
</script>

View File

@ -0,0 +1,46 @@
<!--
Any copyright is dedicated to the Public Domain.
http://creativecommons.org/publicdomain/zero/1.0/
-->
<!DOCTYPE html>
<html lang="en" class="reftest-wait">
<meta charset="utf-8">
<title>When the filtered element's BBox relative to the page changes, the filtered element needs to be invalidated</title>
<style>
#spacer {
height: 100px;
}
#filtered {
width: 100px;
height: 100px;
background-color: lime;
filter: url(#filter);
}
</style>
<svg height="0">
<defs>
<filter id="filter" filterUnits="userSpaceOnUse"
x="0" y="0" width="100" height="100"
color-interpolation-filters="sRGB">
<feMerge><feMergeNode/></feMerge>
</filter>
</defs>
</svg>
<div id="spacer"></div>
<div id="filtered"></div>
<script>
window.addEventListener("MozReftestInvalidate", function () {
document.getElementById("spacer").style.height = "50px";
document.documentElement.removeAttribute("class");
});
</script>

View File

@ -0,0 +1,45 @@
<!--
Any copyright is dedicated to the Public Domain.
http://creativecommons.org/publicdomain/zero/1.0/
-->
<!DOCTYPE html>
<html lang="en" class="reftest-wait">
<meta charset="utf-8">
<title>When the filtered element's BBox relative to the page changes, the filtered element needs to be invalidated</title>
<style>
#spacer {
height: 100px;
}
#filtered {
width: 100px;
height: 100px;
filter: url(#filter);
}
</style>
<svg height="0">
<defs>
<filter id="filter" filterUnits="userSpaceOnUse"
x="0" y="0" width="100" height="100"
color-interpolation-filters="sRGB">
<feFlood flood-color="lime"/>
</filter>
</defs>
</svg>
<div id="spacer"></div>
<div id="filtered"></div>
<script>
window.addEventListener("MozReftestInvalidate", function () {
document.getElementById("spacer").style.height = "50px";
document.documentElement.removeAttribute("class");
});
</script>

View File

@ -0,0 +1,28 @@
<!--
Any copyright is dedicated to the Public Domain.
http://creativecommons.org/publicdomain/zero/1.0/
-->
<!DOCTYPE html>
<html lang="en">
<meta charset="utf-8">
<title>When the filtered element's BBox relative to the page changes, the filtered element needs to be invalidated</title>
<style>
#spacer {
height: 50px;
}
#filtered {
width: 100px;
height: 100px;
background-color: lime;
}
</style>
<svg height="0"></svg>
<div id="spacer"></div>
<div id="filtered"></div>

View File

@ -0,0 +1,44 @@
<!--
Any copyright is dedicated to the Public Domain.
http://creativecommons.org/publicdomain/zero/1.0/
-->
<!DOCTYPE html>
<html lang="en">
<meta charset="utf-8">
<title>Make sure that scrolling #scrolledBox into view paints the scrolled strip even while #coveringFixedBar covers that strip</title>
<style>
html {
overflow: hidden;
}
body {
margin: 0;
}
#coveringFixedBar {
position: absolute;
left: 10px;
top: 0;
width: 380px;
height: 20px;
background: blue;
z-index: 100;
}
#scrolledBox {
position: relative;
margin: 0 100px;
opacity: 0.9;
width: 200px;
height: 200px;
background: lime;
border: 1px solid black;
}
</style>
<div id="coveringFixedBar"></div>
<div id="scrolledBox"></div>

View File

@ -0,0 +1,59 @@
<!--
Any copyright is dedicated to the Public Domain.
http://creativecommons.org/publicdomain/zero/1.0/
-->
<!DOCTYPE html>
<html lang="en" class="reftest-wait">
<meta charset="utf-8">
<title>Make sure that scrolling #scrolledBox into view paints the scrolled strip even while #coveringFixedBar covers that strip</title>
<style>
html {
overflow: hidden;
}
body {
margin: 0;
height: 2000px;
}
#coveringFixedBar {
position: fixed;
left: 10px;
top: 0;
width: 380px;
height: 20px;
background: blue;
z-index: 100;
}
#scrolledBox {
position: relative;
margin: 0 100px;
opacity: 0.9;
width: 200px;
height: 200px;
background: lime;
border: 1px solid black;
}
</style>
<div id="coveringFixedBar"></div>
<div id="scrolledBox"></div>
<script>
document.documentElement.scrollTop = 40;
window.addEventListener("MozReftestInvalidate", function () {
document.documentElement.scrollTop = 20;
window.requestAnimationFrame(function () {
document.documentElement.scrollTop = 0;
document.documentElement.removeAttribute("class");
});
});
</script>

View File

@ -1815,9 +1815,14 @@ pref(layout.css.overflow-clip-box.enabled,true) == 992447.html 992447-ref.html
pref(layout.css.sticky.enabled,true) == 1005405-1.html 1005405-1-ref.html
fuzzy-if(/^Windows\x20NT\x205\.1/.test(http.oscpu),255,1) == 1013054-1.html 1013054-1-ref.html
pref(layout.css.will-change.enabled,true) == 1018522-1.html 1018522-1-ref.html
== 1021564-1.html 1021564-ref.html
== 1021564-2.html 1021564-ref.html
== 1021564-3.html 1021564-ref.html
== 1021564-4.html 1021564-ref.html
pref(browser.display.use_document_fonts,0) == 1022481-1.html 1022481-1-ref.html
== 1022612-1.html 1022612-1-ref.html
== 1024473-1.html 1024473-1-ref.html
== 1025914-1.html 1025914-1-ref.html
== 1042104-1.html 1042104-1-ref.html
== 1044198-1.html 1044198-1-ref.html
== 1049499-1.html 1049499-1-ref.html

View File

@ -135,6 +135,7 @@ TamperOnce(SECItem& item,
alreadyFoundMatch = true;
memmove(foundFirstByte, to, toLen);
p = foundFirstByte + toLen;
remaining -= toLen;
} else {
p = foundFirstByte + 1;
--remaining;

View File

@ -92,7 +92,7 @@ PatchModuleImports(HMODULE module, PIMAGE_NT_HEADERS headers)
RVAPtr<IMAGE_THUNK_DATA> thunk(module, descriptor->OriginalFirstThunk);
for (; thunk->u1.AddressOfData; ++thunk) {
RVAPtr<IMAGE_IMPORT_BY_NAME> import(module, thunk->u1.AddressOfData);
if (!strcmp(import->Name, "GetLogicalProcessorInformation")) {
if (!strcmp((char*)import->Name, "GetLogicalProcessorInformation")) {
memcpy(import->Name, "DebugBreak", sizeof("DebugBreak"));
}
}