mirror of
https://gitlab.winehq.org/wine/wine-gecko.git
synced 2024-09-13 09:24:08 -07:00
Bug 712939 - Make a bunch of JIT code that depends on element-count * sizeof(Value), or slot-count * sizeof(Value), not overflowing int32_t, have markers indicating such. Also add BaseValueIndex for computing Value addresses from a base/index/offset triple, BaseObjectElementIndex for computing addresses of elements, and BaseObjectSlotIndex for computing addresses of slots. r=jandem
This commit is contained in:
parent
d3a173ce72
commit
cccf19997c
@ -4539,8 +4539,7 @@ ICGetElem_Dense::Compiler::generateStubCode(MacroAssembler &masm)
|
||||
masm.branch32(Assembler::BelowOrEqual, initLength, key, &failure);
|
||||
|
||||
// Hole check and load value.
|
||||
JS_STATIC_ASSERT(sizeof(Value) == 8);
|
||||
BaseIndex element(scratchReg, key, TimesEight);
|
||||
BaseObjectElementIndex element(scratchReg, key);
|
||||
masm.branchTestMagic(Assembler::Equal, element, &failure);
|
||||
|
||||
// Check if __noSuchMethod__ should be called.
|
||||
@ -4710,10 +4709,9 @@ ICGetElem_Arguments::Compiler::generateStubCode(MacroAssembler &masm)
|
||||
masm.branch32(Assembler::AboveOrEqual, idx, scratch, &failure);
|
||||
|
||||
// Load argval
|
||||
JS_STATIC_ASSERT(sizeof(Value) == 8);
|
||||
masm.movePtr(BaselineFrameReg, scratch);
|
||||
masm.addPtr(Imm32(BaselineFrame::offsetOfArg(0)), scratch);
|
||||
BaseIndex element(scratch, idx, TimesEight);
|
||||
BaseValueIndex element(scratch, idx);
|
||||
masm.loadValue(element, R0);
|
||||
|
||||
// Enter type monitor IC to type-check result.
|
||||
@ -4787,7 +4785,7 @@ ICGetElem_Arguments::Compiler::generateStubCode(MacroAssembler &masm)
|
||||
regs.add(scratchReg);
|
||||
regs.add(tempReg);
|
||||
ValueOperand tempVal = regs.takeAnyValue();
|
||||
masm.loadValue(BaseIndex(argData, idxReg, ScaleFromElemWidth(sizeof(Value))), tempVal);
|
||||
masm.loadValue(BaseValueIndex(argData, idxReg), tempVal);
|
||||
|
||||
// Make sure that this is not a FORWARD_TO_CALL_SLOT magic value.
|
||||
masm.branchTestMagic(Assembler::Equal, tempVal, &failureReconstructInputs);
|
||||
@ -9267,8 +9265,7 @@ ICCallStubCompiler::pushCallerArguments(MacroAssembler &masm, GeneralRegisterSet
|
||||
masm.loadPtr(Address(BaselineFrameReg, 0), startReg);
|
||||
masm.loadPtr(Address(startReg, BaselineFrame::offsetOfNumActualArgs()), endReg);
|
||||
masm.addPtr(Imm32(BaselineFrame::offsetOfArg(0)), startReg);
|
||||
JS_STATIC_ASSERT(sizeof(Value) == 8);
|
||||
masm.lshiftPtr(Imm32(3), endReg);
|
||||
masm.lshiftPtr(Imm32(ValueShift), endReg);
|
||||
masm.addPtr(startReg, endReg);
|
||||
|
||||
// Copying pre-decrements endReg by 8 until startReg is reached
|
||||
@ -9294,8 +9291,7 @@ ICCallStubCompiler::pushArrayArguments(MacroAssembler &masm, Address arrayVal,
|
||||
masm.extractObject(arrayVal, startReg);
|
||||
masm.loadPtr(Address(startReg, NativeObject::offsetOfElements()), startReg);
|
||||
masm.load32(Address(startReg, ObjectElements::offsetOfInitializedLength()), endReg);
|
||||
JS_STATIC_ASSERT(sizeof(Value) == 8);
|
||||
masm.lshiftPtr(Imm32(3), endReg);
|
||||
masm.lshiftPtr(Imm32(ValueShift), endReg);
|
||||
masm.addPtr(startReg, endReg);
|
||||
|
||||
// Copying pre-decrements endReg by 8 until startReg is reached
|
||||
@ -9454,8 +9450,7 @@ ICCallScriptedCompiler::generateStubCode(MacroAssembler &masm)
|
||||
if (isSpread_) {
|
||||
masm.loadValue(Address(BaselineStackReg, 2 * sizeof(Value) + ICStackValueOffset), R1);
|
||||
} else {
|
||||
BaseIndex calleeSlot(BaselineStackReg, argcReg, TimesEight,
|
||||
ICStackValueOffset + sizeof(Value));
|
||||
BaseValueIndex calleeSlot(BaselineStackReg, argcReg, ICStackValueOffset + sizeof(Value));
|
||||
masm.loadValue(calleeSlot, R1);
|
||||
}
|
||||
regs.take(R1);
|
||||
@ -9515,8 +9510,8 @@ ICCallScriptedCompiler::generateStubCode(MacroAssembler &masm)
|
||||
masm.loadValue(Address(BaselineStackReg,
|
||||
2 * sizeof(Value) + STUB_FRAME_SIZE + sizeof(size_t)), R1);
|
||||
} else {
|
||||
BaseIndex calleeSlot2(BaselineStackReg, argcReg, TimesEight,
|
||||
sizeof(Value) + STUB_FRAME_SIZE + sizeof(size_t));
|
||||
BaseValueIndex calleeSlot2(BaselineStackReg, argcReg,
|
||||
sizeof(Value) + STUB_FRAME_SIZE + sizeof(size_t));
|
||||
masm.loadValue(calleeSlot2, R1);
|
||||
}
|
||||
masm.push(masm.extractObject(R1, ExtractTemp0));
|
||||
@ -9548,7 +9543,7 @@ ICCallScriptedCompiler::generateStubCode(MacroAssembler &masm)
|
||||
if (isSpread_) {
|
||||
masm.storeValue(R0, Address(BaselineStackReg, sizeof(Value) + STUB_FRAME_SIZE));
|
||||
} else {
|
||||
BaseIndex thisSlot(BaselineStackReg, argcReg, TimesEight, STUB_FRAME_SIZE);
|
||||
BaseValueIndex thisSlot(BaselineStackReg, argcReg, STUB_FRAME_SIZE);
|
||||
masm.storeValue(R0, thisSlot);
|
||||
}
|
||||
|
||||
@ -9564,8 +9559,7 @@ ICCallScriptedCompiler::generateStubCode(MacroAssembler &masm)
|
||||
if (isSpread_) {
|
||||
masm.loadValue(Address(BaselineStackReg, 2 * sizeof(Value) + STUB_FRAME_SIZE), R0);
|
||||
} else {
|
||||
BaseIndex calleeSlot3(BaselineStackReg, argcReg, TimesEight,
|
||||
sizeof(Value) + STUB_FRAME_SIZE);
|
||||
BaseValueIndex calleeSlot3(BaselineStackReg, argcReg, sizeof(Value) + STUB_FRAME_SIZE);
|
||||
masm.loadValue(calleeSlot3, R0);
|
||||
}
|
||||
callee = masm.extractObject(R0, ExtractTemp0);
|
||||
@ -9664,8 +9658,8 @@ ICCallScriptedCompiler::generateStubCode(MacroAssembler &masm)
|
||||
masm.add32(Imm32(1), scratchReg);
|
||||
else
|
||||
masm.lshiftPtr(Imm32(1), scratchReg);
|
||||
BaseIndex reloadThisSlot(BaselineStackReg, scratchReg, TimesEight,
|
||||
STUB_FRAME_SIZE + sizeof(Value) + 3*sizeof(size_t));
|
||||
BaseValueIndex reloadThisSlot(BaselineStackReg, scratchReg,
|
||||
STUB_FRAME_SIZE + sizeof(Value) + 3 * sizeof(size_t));
|
||||
masm.loadValue(reloadThisSlot, JSReturnOperand);
|
||||
#ifdef DEBUG
|
||||
masm.branchTestObject(Assembler::Equal, JSReturnOperand, &skipThisReplace);
|
||||
@ -9843,7 +9837,7 @@ ICCall_Native::Compiler::generateStubCode(MacroAssembler &masm)
|
||||
if (isSpread_) {
|
||||
masm.loadValue(Address(BaselineStackReg, ICStackValueOffset + 2 * sizeof(Value)), R1);
|
||||
} else {
|
||||
BaseIndex calleeSlot(BaselineStackReg, argcReg, TimesEight, ICStackValueOffset + sizeof(Value));
|
||||
BaseValueIndex calleeSlot(BaselineStackReg, argcReg, ICStackValueOffset + sizeof(Value));
|
||||
masm.loadValue(calleeSlot, R1);
|
||||
}
|
||||
regs.take(R1);
|
||||
@ -9945,7 +9939,7 @@ ICCall_ClassHook::Compiler::generateStubCode(MacroAssembler &masm)
|
||||
regs.takeUnchecked(BaselineTailCallReg);
|
||||
|
||||
// Load the callee in R1.
|
||||
BaseIndex calleeSlot(BaselineStackReg, argcReg, TimesEight, ICStackValueOffset + sizeof(Value));
|
||||
BaseValueIndex calleeSlot(BaselineStackReg, argcReg, ICStackValueOffset + sizeof(Value));
|
||||
masm.loadValue(calleeSlot, R1);
|
||||
regs.take(R1);
|
||||
|
||||
@ -10249,7 +10243,7 @@ ICCall_ScriptedFunCall::Compiler::generateStubCode(MacroAssembler &masm)
|
||||
|
||||
// Load the callee in R1.
|
||||
// Stack Layout: [ ..., CalleeVal, ThisVal, Arg0Val, ..., ArgNVal, +ICStackValueOffset+ ]
|
||||
BaseIndex calleeSlot(BaselineStackReg, argcReg, TimesEight, ICStackValueOffset + sizeof(Value));
|
||||
BaseValueIndex calleeSlot(BaselineStackReg, argcReg, ICStackValueOffset + sizeof(Value));
|
||||
masm.loadValue(calleeSlot, R1);
|
||||
regs.take(R1);
|
||||
|
||||
|
@ -3278,9 +3278,9 @@ CodeGenerator::emitPushArguments(LApplyArgsGeneric *apply, Register extraStackSp
|
||||
Label loop;
|
||||
masm.bind(&loop);
|
||||
|
||||
// We remove sizeof(void*) from argvOffset because withtout it we target
|
||||
// We remove sizeof(void*) from argvOffset because without it we target
|
||||
// the address after the memory area that we want to copy.
|
||||
BaseIndex disp(StackPointer, argcreg, ScaleFromElemWidth(sizeof(Value)), argvOffset - sizeof(void*));
|
||||
BaseValueIndex disp(StackPointer, argcreg, argvOffset - sizeof(void*));
|
||||
|
||||
// Do not use Push here because otherwise it would account for 1 in framePushed
|
||||
// instead of 0. These pushes are only counted by argcreg.
|
||||
@ -3298,7 +3298,8 @@ CodeGenerator::emitPushArguments(LApplyArgsGeneric *apply, Register extraStackSp
|
||||
|
||||
// Compute the stack usage.
|
||||
masm.movePtr(argcreg, extraStackSpace);
|
||||
masm.lshiftPtr(Imm32::ShiftOf(ScaleFromElemWidth(sizeof(Value))), extraStackSpace);
|
||||
NativeObject::elementsSizeMustNotOverflow();
|
||||
masm.lshiftPtr(Imm32(ValueShift), extraStackSpace);
|
||||
|
||||
// Join with all arguments copied and the extra stack usage computed.
|
||||
masm.bind(&end);
|
||||
@ -7238,7 +7239,7 @@ CodeGenerator::visitGetFrameArgument(LGetFrameArgument *lir)
|
||||
masm.loadValue(argPtr, result);
|
||||
} else {
|
||||
Register i = ToRegister(index);
|
||||
BaseIndex argPtr(StackPointer, i, ScaleFromElemWidth(sizeof(Value)), argvOffset);
|
||||
BaseValueIndex argPtr(StackPointer, i, argvOffset);
|
||||
masm.loadValue(argPtr, result);
|
||||
}
|
||||
return true;
|
||||
@ -8847,10 +8848,12 @@ CodeGenerator::visitLoadElementV(LLoadElementV *load)
|
||||
Register elements = ToRegister(load->elements());
|
||||
const ValueOperand out = ToOutValue(load);
|
||||
|
||||
if (load->index()->isConstant())
|
||||
if (load->index()->isConstant()) {
|
||||
NativeObject::elementsSizeMustNotOverflow();
|
||||
masm.loadValue(Address(elements, ToInt32(load->index()) * sizeof(Value)), out);
|
||||
else
|
||||
masm.loadValue(BaseIndex(elements, ToRegister(load->index()), TimesEight), out);
|
||||
} else {
|
||||
masm.loadValue(BaseObjectElementIndex(elements, ToRegister(load->index())), out);
|
||||
}
|
||||
|
||||
if (load->mir()->needsHoleCheck()) {
|
||||
Label testMagic;
|
||||
@ -8876,10 +8879,11 @@ CodeGenerator::visitLoadElementHole(LLoadElementHole *lir)
|
||||
Label undefined, done;
|
||||
if (lir->index()->isConstant()) {
|
||||
masm.branch32(Assembler::BelowOrEqual, initLength, Imm32(ToInt32(lir->index())), &undefined);
|
||||
NativeObject::elementsSizeMustNotOverflow();
|
||||
masm.loadValue(Address(elements, ToInt32(lir->index()) * sizeof(Value)), out);
|
||||
} else {
|
||||
masm.branch32(Assembler::BelowOrEqual, initLength, ToRegister(lir->index()), &undefined);
|
||||
masm.loadValue(BaseIndex(elements, ToRegister(lir->index()), TimesEight), out);
|
||||
masm.loadValue(BaseObjectElementIndex(elements, ToRegister(lir->index())), out);
|
||||
}
|
||||
|
||||
// If a hole check is needed, and the value wasn't a hole, we're done.
|
||||
@ -9241,6 +9245,7 @@ CodeGenerator::visitInArray(LInArray *lir)
|
||||
|
||||
masm.branch32(Assembler::BelowOrEqual, initLength, Imm32(index), failedInitLength);
|
||||
if (mir->needsHoleCheck()) {
|
||||
NativeObject::elementsSizeMustNotOverflow();
|
||||
Address address = Address(elements, index * sizeof(Value));
|
||||
masm.branchTestMagic(Assembler::Equal, address, &falseBranch);
|
||||
}
|
||||
|
@ -659,6 +659,7 @@ EmitLoadSlot(MacroAssembler &masm, NativeObject *holder, Shape *shape, Register
|
||||
TypedOrValueRegister output, Register scratchReg)
|
||||
{
|
||||
MOZ_ASSERT(holder);
|
||||
NativeObject::slotsSizeMustNotOverflow();
|
||||
if (holder->isFixedSlot(shape->slot())) {
|
||||
Address addr(holderReg, NativeObject::getFixedSlotOffset(shape->slot()));
|
||||
masm.loadTypedOrValue(addr, output);
|
||||
@ -2006,6 +2007,7 @@ GenerateSetSlot(JSContext *cx, MacroAssembler &masm, IonCache::StubAttacher &att
|
||||
}
|
||||
}
|
||||
|
||||
NativeObject::slotsSizeMustNotOverflow();
|
||||
if (obj->isFixedSlot(shape->slot())) {
|
||||
Address addr(object, NativeObject::getFixedSlotOffset(shape->slot()));
|
||||
|
||||
@ -2612,6 +2614,7 @@ GenerateAddSlot(JSContext *cx, MacroAssembler &masm, IonCache::StubAttacher &att
|
||||
|
||||
// Set the value on the object. Since this is an add, obj->lastProperty()
|
||||
// must be the shape of the property we are adding.
|
||||
NativeObject::slotsSizeMustNotOverflow();
|
||||
if (obj->isFixedSlot(newShape->slot())) {
|
||||
Address addr(object, NativeObject::getFixedSlotOffset(newShape->slot()));
|
||||
masm.storeConstantOrRegister(value, addr);
|
||||
@ -3182,7 +3185,7 @@ GenerateDenseElement(JSContext *cx, MacroAssembler &masm, IonCache::StubAttacher
|
||||
masm.branch32(Assembler::BelowOrEqual, initLength, indexReg, &hole);
|
||||
|
||||
// Check for holes & load the value.
|
||||
masm.loadElementTypedOrValue(BaseIndex(object, indexReg, TimesEight),
|
||||
masm.loadElementTypedOrValue(BaseObjectElementIndex(object, indexReg),
|
||||
output, true, &hole);
|
||||
|
||||
masm.pop(object);
|
||||
@ -3426,7 +3429,7 @@ GetElementIC::attachArgumentsElement(JSContext *cx, HandleScript outerScript, Io
|
||||
|
||||
// Restore original index register value, to use for indexing element.
|
||||
masm.pop(indexReg);
|
||||
BaseIndex elemIdx(tmpReg, indexReg, ScaleFromElemWidth(sizeof(Value)));
|
||||
BaseValueIndex elemIdx(tmpReg, indexReg);
|
||||
|
||||
// Ensure result is not magic value, and type-check result.
|
||||
masm.branchTestMagic(Assembler::Equal, elemIdx, &failureRestoreIndex);
|
||||
@ -3600,7 +3603,7 @@ IsTypedArrayElementSetInlineable(JSObject *obj, const Value &idval, const Value
|
||||
|
||||
static void
|
||||
StoreDenseElement(MacroAssembler &masm, ConstantOrRegister value, Register elements,
|
||||
BaseIndex target)
|
||||
BaseObjectElementIndex target)
|
||||
{
|
||||
// If the ObjectElements::CONVERT_DOUBLE_ELEMENTS flag is set, int32 values
|
||||
// have to be converted to double first. If the value is not int32, it can
|
||||
@ -3684,7 +3687,7 @@ GenerateSetDenseElement(JSContext *cx, MacroAssembler &masm, IonCache::StubAttac
|
||||
masm.loadPtr(Address(object, NativeObject::offsetOfElements()), elements);
|
||||
|
||||
// Compute the location of the element.
|
||||
BaseIndex target(elements, index, TimesEight);
|
||||
BaseObjectElementIndex target(elements, index);
|
||||
|
||||
// If TI cannot help us deal with HOLES by preventing indexed properties
|
||||
// on the prototype chain, we have to be very careful to check for ourselves
|
||||
|
@ -38,6 +38,11 @@ enum Scale {
|
||||
TimesEight = 3
|
||||
};
|
||||
|
||||
static_assert(sizeof(JS::Value) == 8,
|
||||
"required for TimesEight and 3 below to be correct");
|
||||
static const Scale ValueScale = TimesEight;
|
||||
static const size_t ValueShift = 3;
|
||||
|
||||
static inline unsigned
|
||||
ScaleToShift(Scale scale)
|
||||
{
|
||||
@ -309,6 +314,40 @@ struct BaseIndex
|
||||
BaseIndex() { mozilla::PodZero(this); }
|
||||
};
|
||||
|
||||
// A BaseIndex used to access Values. Note that |offset| is *not* scaled by
|
||||
// sizeof(Value). Use this *only* if you're indexing into a series of Values
|
||||
// that aren't object elements or object slots (for example, values on the
|
||||
// stack, values in an arguments object, &c.). If you're indexing into an
|
||||
// object's elements or slots, don't use this directly! Use
|
||||
// BaseObject{Element,Slot}Index instead.
|
||||
struct BaseValueIndex : BaseIndex
|
||||
{
|
||||
BaseValueIndex(Register base, Register index, int32_t offset = 0)
|
||||
: BaseIndex(base, index, ValueScale, offset)
|
||||
{ }
|
||||
};
|
||||
|
||||
// Specifies the address of an indexed Value within object elements from a
|
||||
// base. The index must not already be scaled by sizeof(Value)!
|
||||
struct BaseObjectElementIndex : BaseValueIndex
|
||||
{
|
||||
BaseObjectElementIndex(Register base, Register index)
|
||||
: BaseValueIndex(base, index)
|
||||
{
|
||||
NativeObject::elementsSizeMustNotOverflow();
|
||||
}
|
||||
};
|
||||
|
||||
// Like BaseObjectElementIndex, except for object slots.
|
||||
struct BaseObjectSlotIndex : BaseValueIndex
|
||||
{
|
||||
BaseObjectSlotIndex(Register base, Register index)
|
||||
: BaseValueIndex(base, index)
|
||||
{
|
||||
NativeObject::slotsSizeMustNotOverflow();
|
||||
}
|
||||
};
|
||||
|
||||
class Relocation {
|
||||
public:
|
||||
enum Kind {
|
||||
|
@ -26,9 +26,6 @@ using JS::GenericNaN;
|
||||
using mozilla::DebugOnly;
|
||||
using mozilla::RoundUpPow2;
|
||||
|
||||
JS_STATIC_ASSERT(int32_t((NativeObject::NELEMENTS_LIMIT - 1) * sizeof(Value)) ==
|
||||
int64_t((NativeObject::NELEMENTS_LIMIT - 1) * sizeof(Value)));
|
||||
|
||||
PropDesc::PropDesc()
|
||||
{
|
||||
setUndefined();
|
||||
@ -461,6 +458,7 @@ NativeObject::growSlots(ThreadSafeContext *cx, HandleNativeObject obj, uint32_t
|
||||
* the limited number of bits to store shape slots, object growth is
|
||||
* throttled well before the slot capacity can overflow.
|
||||
*/
|
||||
NativeObject::slotsSizeMustNotOverflow();
|
||||
MOZ_ASSERT(newCount < NELEMENTS_LIMIT);
|
||||
|
||||
if (!oldCount) {
|
||||
|
@ -550,6 +550,14 @@ class NativeObject : public JSObject
|
||||
bool shadowingShapeChange(ExclusiveContext *cx, const Shape &shape);
|
||||
bool clearFlag(ExclusiveContext *cx, BaseShape::Flag flag);
|
||||
|
||||
static void slotsSizeMustNotOverflow() {
|
||||
static_assert((NativeObject::NELEMENTS_LIMIT - 1) <= INT32_MAX / sizeof(JS::Value),
|
||||
"every caller of this method requires that a slot "
|
||||
"number (or slot count) multiplied by "
|
||||
"sizeof(Value) can't overflow uint32_t (and sometimes "
|
||||
"int32_t, too)");
|
||||
}
|
||||
|
||||
uint32_t numFixedSlots() const {
|
||||
return reinterpret_cast<const shadow::Object *>(this)->numFixedSlots();
|
||||
}
|
||||
@ -892,6 +900,13 @@ class NativeObject : public JSObject
|
||||
/* Upper bound on the number of elements in an object. */
|
||||
static const uint32_t NELEMENTS_LIMIT = JS_BIT(28);
|
||||
|
||||
static void elementsSizeMustNotOverflow() {
|
||||
static_assert((NativeObject::NELEMENTS_LIMIT - 1) <= INT32_MAX / sizeof(JS::Value),
|
||||
"every caller of this method requires that an element "
|
||||
"count multiplied by sizeof(Value) can't overflow "
|
||||
"uint32_t (and sometimes int32_t, too)");
|
||||
}
|
||||
|
||||
ObjectElements * getElementsHeader() const {
|
||||
return ObjectElements::fromElements(elements_);
|
||||
}
|
||||
|
Loading…
Reference in New Issue
Block a user