Implement typed arrays on ARM (bug 740733, r=jbramley,dvander)

This commit is contained in:
Marty Rosenberg 2012-04-06 17:10:46 -07:00
parent e8fdddb988
commit aa697837d1
8 changed files with 289 additions and 123 deletions

View File

@ -531,7 +531,7 @@ CodeGenerator::visitCallGeneric(LCallGeneric *call)
// This is equivalent to testing if any of the bits in JSFUN_KINDMASK are set.
if (!call->hasSingleTarget()) {
Address flags(calleereg, offsetof(JSFunction, flags));
masm.load16_mask(flags, Imm32(JSFUN_KINDMASK), nargsreg);
masm.load16ZeroExtend_mask(flags, Imm32(JSFUN_KINDMASK), nargsreg);
masm.branch32(Assembler::LessThan, nargsreg, Imm32(JSFUN_INTERPRETED), &invoke);
} else {
// Native single targets are handled by LCallNative.
@ -561,7 +561,7 @@ CodeGenerator::visitCallGeneric(LCallGeneric *call)
JS_ASSERT(call->getSingleTarget()->nargs <= call->nargs());
} else {
// Check whether the provided arguments satisfy target argc.
masm.load16(Address(calleereg, offsetof(JSFunction, nargs)), nargsreg);
masm.load16ZeroExtend(Address(calleereg, offsetof(JSFunction, nargs)), nargsreg);
masm.cmp32(nargsreg, Imm32(call->nargs()));
masm.j(Assembler::Above, &thunk);
}
@ -1316,7 +1316,7 @@ CodeGenerator::visitCharCodeAt(LCharCodeAt *lir)
// getChars
Address charsAddr(str, JSString::offsetOfChars());
masm.loadPtr(charsAddr, output);
masm.load16(BaseIndex(output, index, TimesTwo, 0), output);
masm.load16ZeroExtend(BaseIndex(output, index, TimesTwo, 0), output);
return true;
}

View File

@ -3524,11 +3524,9 @@ IonBuilder::jsop_getelem()
if (oracle->elementReadIsDenseArray(script, pc))
return jsop_getelem_dense();
#ifndef JS_CPU_ARM
int arrayType = TypedArray::TYPE_MAX;
if (oracle->elementReadIsTypedArray(script, pc, &arrayType))
return jsop_getelem_typed(arrayType);
#endif
MDefinition *rhs = current->pop();
MDefinition *lhs = current->pop();

View File

@ -208,22 +208,19 @@ void
MacroAssembler::loadFromTypedArray(int arrayType, const T &src, AnyRegister dest, Register temp,
Label *fail)
{
#ifdef JS_CPU_ARM
JS_NOT_REACHED("NYI typed arrays ARM");
#else
switch (arrayType) {
case TypedArray::TYPE_INT8:
load8SignExtend(src, dest.gpr());
break;
case TypedArray::TYPE_UINT8:
case TypedArray::TYPE_UINT8_CLAMPED:
load8(src, dest.gpr());
load8ZeroExtend(src, dest.gpr());
break;
case TypedArray::TYPE_INT16:
load16SignExtend(src, dest.gpr());
break;
case TypedArray::TYPE_UINT16:
load16(src, dest.gpr());
load16ZeroExtend(src, dest.gpr());
break;
case TypedArray::TYPE_INT32:
load32(src, dest.gpr());
@ -242,7 +239,7 @@ MacroAssembler::loadFromTypedArray(int arrayType, const T &src, AnyRegister dest
case TypedArray::TYPE_FLOAT64:
{
if (arrayType == js::TypedArray::TYPE_FLOAT32)
loadFloat(src, dest.fpu());
loadFloatAsDouble(src, dest.fpu());
else
loadDouble(src, dest.fpu());
@ -259,7 +256,6 @@ MacroAssembler::loadFromTypedArray(int arrayType, const T &src, AnyRegister dest
JS_NOT_REACHED("Invalid typed array type");
break;
}
#endif
}
template void MacroAssembler::loadFromTypedArray(int arrayType, const Address &src, AnyRegister dest,
@ -272,9 +268,6 @@ void
MacroAssembler::loadFromTypedArray(int arrayType, const T &src, const ValueOperand &dest,
bool allowDouble, Label *fail)
{
#ifdef JS_CPU_ARM
JS_NOT_REACHED("NYI typed arrays ARM");
#else
switch (arrayType) {
case TypedArray::TYPE_INT8:
case TypedArray::TYPE_UINT8:
@ -318,7 +311,6 @@ MacroAssembler::loadFromTypedArray(int arrayType, const T &src, const ValueOpera
JS_NOT_REACHED("Invalid typed array type");
break;
}
#endif
}
template void MacroAssembler::loadFromTypedArray(int arrayType, const Address &src, const ValueOperand &dest,

View File

@ -1377,6 +1377,8 @@ Assembler::as_extdtr(LoadStore ls, int size, bool IsSigned, Index mode,
case 8:
JS_ASSERT(IsSigned);
JS_ASSERT(ls!=IsStore);
extra_bits1 = 0x1;
extra_bits2 = 0x2;
break;
case 16:
//case 32:

View File

@ -66,6 +66,16 @@ MacroAssemblerARM::convertInt32ToDouble(const Register &src, const FloatRegister
as_vcvt(dest, dest.sintOverlay());
}
// Convert an unsigned 32-bit integer in a core register into a double.
// There is no direct core-register -> double conversion, so the raw value is
// first transferred into the unsigned-int overlay of the destination VFP
// register and then converted in place with vcvt.
void
MacroAssemblerARM::convertUInt32ToDouble(const Register &src, const FloatRegister &dest_)
{
    // direct conversions aren't possible.
    VFPRegister dest = VFPRegister(dest_);
    // Move the raw 32 bits from the core register into the VFP register bank
    // (pure data transfer; no reinterpretation happens here).
    as_vxfer(src, InvalidReg, dest.uintOverlay(),
             CoreToFloat);
    // Convert the unsigned-integer bit pattern to a double, in place.
    as_vcvt(dest, dest.uintOverlay());
}
// there are two options for implementing emitTruncateDouble.
// 1) convert the floating point value to an integer, if it did not fit,
// then it was clamped to INT_MIN/INT_MAX, and we can test it.
@ -789,21 +799,7 @@ void
MacroAssemblerARM::ma_dtr(LoadStore ls, Register rn, Imm32 offset, Register rt,
Index mode, Assembler::Condition cc)
{
int off = offset.value;
if (off < 4096 && off > -4096) {
// simplest offset, just use an immediate
as_dtr(ls, 32, mode, rt, DTRAddr(rn, DtrOffImm(off)), cc);
return;
}
// see if we can attempt to encode it as a standard imm8m offset
datastore::Imm8mData imm = Imm8::encodeImm(off & (~0xfff));
if (!imm.invalid) {
as_add(ScratchRegister, rn, imm);
as_dtr(ls, 32, mode, rt, DTRAddr(ScratchRegister, DtrOffImm(off & 0xfff)), cc);
} else {
ma_mov(offset, ScratchRegister);
as_dtr(ls, 32, mode, rt, DTRAddr(rn, DtrRegImmShift(ScratchRegister, LSL, 0)));
}
ma_dataTransferN(ls, 32, true, rn, offset, rt, mode, cc);
}
void
@ -822,47 +818,11 @@ MacroAssemblerARM::ma_str(Register rt, DTRAddr addr, Index mode, Condition cc)
void
MacroAssemblerARM::ma_dtr(LoadStore ls, Register rt, const Operand &addr, Index mode, Condition cc)
{
int off = addr.disp();
Register base = Register::FromCode(addr.base());
if (off > -4096 && off < 4096) {
as_dtr(ls, 32, mode, rt, addr.toDTRAddr(), cc);
return;
}
// We cannot encode this offset in a single ldr. Try to encode it as
// an add scratch, base, imm; ldr dest, [scratch, +offset].
int bottom = off & 0xfff;
int neg_bottom = 0x1000 - bottom;
// at this point, both off - bottom and off + neg_bottom will be reasonable-ish
// quantities.
if (off < 0) {
Operand2 sub_off = Imm8(-(off-bottom)); // sub_off = bottom - off
if (!sub_off.invalid) {
as_sub(ScratchRegister, base, sub_off, NoSetCond, cc); // - sub_off = off - bottom
as_dtr(ls, 32, Offset, rt, DTRAddr(ScratchRegister, DtrOffImm(bottom)), cc);
return;
}
sub_off = Imm8(-(off+neg_bottom));// sub_off = -neg_bottom - off
if (!sub_off.invalid) {
as_sub(ScratchRegister, base, sub_off, NoSetCond, cc); // - sub_off = neg_bottom + off
as_dtr(ls, 32, Offset, rt, DTRAddr(ScratchRegister, DtrOffImm(-neg_bottom)), cc);
return;
}
} else {
Operand2 sub_off = Imm8(off-bottom); // sub_off = off - bottom
if (!sub_off.invalid) {
as_add(ScratchRegister, base, sub_off, NoSetCond, cc); // sub_off = off - bottom
as_dtr(ls, 32, Offset, rt, DTRAddr(ScratchRegister, DtrOffImm(bottom)), cc);
return;
}
sub_off = Imm8(off+neg_bottom);// sub_off = neg_bottom + off
if (!sub_off.invalid) {
as_add(ScratchRegister, base, sub_off, NoSetCond, cc); // sub_off = neg_bottom + off
as_dtr(ls, 32, Offset, rt, DTRAddr(ScratchRegister, DtrOffImm(-neg_bottom)), cc);
return;
}
}
JS_NOT_REACHED("TODO: implement bigger offsets :(");
ma_dataTransferN(ls, 32, true,
Register::FromCode(addr.base()), Imm32(addr.disp()),
rt, mode, cc);
}
void
MacroAssemblerARM::ma_str(Register rt, const Operand &addr, Index mode, Condition cc)
{
@ -928,24 +888,98 @@ MacroAssemblerARM::ma_dataTransferN(LoadStore ls, int size, bool IsSigned,
// Emit a load or store of `size` bits (8, 16 or 32; sign- or zero-extended)
// between rt and [rn + offset], synthesizing out-of-range offsets through
// ScratchRegister.  `mode`/`cc` are forwarded to the emitted access; when the
// offset must be split across an add + access, the access itself uses Offset
// mode on the scratch base.
void
MacroAssemblerARM::ma_dataTransferN(LoadStore ls, int size, bool IsSigned,
                                    Register rn, Imm32 offset, Register rt,
                                    Index mode, Assembler::Condition cc)
{
    int off = offset.value;
    // 32-bit and unsigned-8-bit accesses can be encoded as a standard
    // ldr/str, whose immediate offset field covers +/-4095.
    if (size == 32 || (size == 8 && !IsSigned)) {
        if (off < 4096 && off > -4096) {
            // simplest offset, just use an immediate
            as_dtr(ls, size, mode, rt, DTRAddr(rn, DtrOffImm(off)), cc);
            return;
        }
        // We cannot encode this offset in a single ldr. Try to encode it as
        // an add scratch, base, imm; ldr dest, [scratch, +offset].
        int bottom = off & 0xfff;
        int neg_bottom = 0x1000 - bottom;
        // at this point, both off - bottom and off + neg_bottom will be
        // reasonable-ish quantities.
        if (off < 0) {
            Operand2 sub_off = Imm8(-(off-bottom)); // sub_off = bottom - off
            if (!sub_off.invalid) {
                as_sub(ScratchRegister, rn, sub_off, NoSetCond, cc); // - sub_off = off - bottom
                as_dtr(ls, size, Offset, rt, DTRAddr(ScratchRegister, DtrOffImm(bottom)), cc);
                return;
            }
            sub_off = Imm8(-(off+neg_bottom)); // sub_off = -neg_bottom - off
            if (!sub_off.invalid) {
                as_sub(ScratchRegister, rn, sub_off, NoSetCond, cc); // - sub_off = neg_bottom + off
                as_dtr(ls, size, Offset, rt, DTRAddr(ScratchRegister, DtrOffImm(-neg_bottom)), cc);
                return;
            }
        } else {
            Operand2 sub_off = Imm8(off-bottom); // sub_off = off - bottom
            if (!sub_off.invalid) {
                as_add(ScratchRegister, rn, sub_off, NoSetCond, cc); // sub_off = off - bottom
                as_dtr(ls, size, Offset, rt, DTRAddr(ScratchRegister, DtrOffImm(bottom)), cc);
                return;
            }
            sub_off = Imm8(off+neg_bottom); // sub_off = neg_bottom + off
            if (!sub_off.invalid) {
                as_add(ScratchRegister, rn, sub_off, NoSetCond, cc); // sub_off = neg_bottom + off
                as_dtr(ls, size, Offset, rt, DTRAddr(ScratchRegister, DtrOffImm(-neg_bottom)), cc);
                return;
            }
        }
        JS_NOT_REACHED("TODO: implement bigger offsets :(");
    } else {
        // Everything else (signed byte, all halfwords) needs the extended
        // load/store encodings, whose immediate offset is only 8 bits.
        if (off < 256 && off > -256) {
            as_extdtr(ls, size, IsSigned, mode, rt, EDtrAddr(rn, EDtrOffImm(off)), cc);
            // BUG FIX: this return was missing, so the fall-back path below
            // also ran, emitting a second, redundant access (and a wrong one
            // when `mode` performed base writeback). Mirrors the dtr branch.
            return;
        }
        // We cannot encode this offset in a single extldr. Try to encode it
        // as an add scratch, base, imm; extldr dest, [scratch, +offset].
        int bottom = off & 0xff;
        int neg_bottom = 0x100 - bottom;
        // at this point, both off - bottom and off + neg_bottom will be
        // reasonable-ish quantities.
        if (off < 0) {
            Operand2 sub_off = Imm8(-(off-bottom)); // sub_off = bottom - off
            if (!sub_off.invalid) {
                as_sub(ScratchRegister, rn, sub_off, NoSetCond, cc); // - sub_off = off - bottom
                as_extdtr(ls, size, IsSigned, Offset, rt,
                          EDtrAddr(ScratchRegister, EDtrOffImm(bottom)),
                          cc);
                return;
            }
            sub_off = Imm8(-(off+neg_bottom)); // sub_off = -neg_bottom - off
            if (!sub_off.invalid) {
                as_sub(ScratchRegister, rn, sub_off, NoSetCond, cc); // - sub_off = neg_bottom + off
                as_extdtr(ls, size, IsSigned, Offset, rt,
                          EDtrAddr(ScratchRegister, EDtrOffImm(-neg_bottom)),
                          cc);
                return;
            }
        } else {
            Operand2 sub_off = Imm8(off-bottom); // sub_off = off - bottom
            if (!sub_off.invalid) {
                as_add(ScratchRegister, rn, sub_off, NoSetCond, cc); // sub_off = off - bottom
                as_extdtr(ls, size, IsSigned, Offset, rt,
                          EDtrAddr(ScratchRegister, EDtrOffImm(bottom)),
                          cc);
                return;
            }
            sub_off = Imm8(off+neg_bottom); // sub_off = neg_bottom + off
            if (!sub_off.invalid) {
                as_add(ScratchRegister, rn, sub_off, NoSetCond, cc); // sub_off = neg_bottom + off
                as_extdtr(ls, size, IsSigned, Offset, rt,
                          EDtrAddr(ScratchRegister, EDtrOffImm(-neg_bottom)),
                          cc);
                return;
            }
        }
        JS_NOT_REACHED("TODO: implement bigger offsets :(");
    }
}
void
@ -1139,7 +1173,7 @@ MacroAssemblerARM::ma_vxfer(VFPRegister src, Register dest1, Register dest2)
}
void
MacroAssemblerARM::ma_vdtr(LoadStore ls, const Operand &addr, FloatRegister rt, Condition cc)
MacroAssemblerARM::ma_vdtr(LoadStore ls, const Operand &addr, VFPRegister rt, Condition cc)
{
int off = addr.disp();
JS_ASSERT((off & 3) == 0);
@ -1339,22 +1373,94 @@ MacroAssemblerARMCompat::movePtr(const Address &src, const Register &dest)
{
loadPtr(src, dest);
}
// Load a byte from [address] and zero-extend it into dest.
void
MacroAssemblerARMCompat::load8ZeroExtend(const Address &address, const Register &dest)
{
    ma_dataTransferN(IsLoad, 8, false, address.base, Imm32(address.offset), dest);
}
void
MacroAssemblerARMCompat::load16(const Address &address, const Register &dest)
// Load a byte from [base + (index << scale) + offset], zero-extending into
// dest.  ldrb accepts a shifted register offset directly, so only a non-zero
// displacement needs to be folded into a scratch base first.
MacroAssemblerARMCompat::load8ZeroExtend(const BaseIndex &src, const Register &dest)
{
    Register base = src.base;
    uint32 scale = Imm32::ShiftOf(src.scale).value;
    if (src.offset != 0) {
        // scratch = base + offset; the scaled index is applied by the ldrb.
        ma_mov(base, ScratchRegister);
        base = ScratchRegister;
        ma_add(base, Imm32(src.offset), base);
    }
    ma_ldrb(DTRAddr(base, DtrRegImmShift(src.index, LSL, scale)), dest);
}
// Load a byte from [address] and sign-extend it into dest.
void
MacroAssemblerARMCompat::load8SignExtend(const Address &address, const Register &dest)
{
    ma_dataTransferN(IsLoad, 8, true, address.base, Imm32(address.offset), dest);
}
// Load a byte from [base + (index << scale) + offset] and sign-extend into
// dest.  Sign-extending loads use the extended encodings, which only accept a
// plain register offset, so scale and displacement are pre-computed into the
// scratch register.
void
MacroAssemblerARMCompat::load8SignExtend(const BaseIndex &src, const Register &dest)
{
    Register index = src.index;
    // ARMv7 does not have LSL on an index register with an extended load.
    if (src.scale != TimesOne) {
        ma_lsl(Imm32::ShiftOf(src.scale), index, ScratchRegister);
        index = ScratchRegister;
    }
    // Fold any displacement into the (copied) index register as well.
    if (src.offset != 0) {
        if (index != ScratchRegister) {
            ma_mov(index, ScratchRegister);
            index = ScratchRegister;
        }
        ma_add(Imm32(src.offset), index);
    }
    ma_ldrsb(EDtrAddr(src.base, EDtrOffReg(index)), dest);
}
// Load a halfword from [address] and zero-extend it into dest.
void
MacroAssemblerARMCompat::load16ZeroExtend(const Address &address, const Register &dest)
{
    ma_dataTransferN(IsLoad, 16, false, address.base, Imm32(address.offset), dest);
}
void
MacroAssemblerARMCompat::load16_mask(const Address &address, Imm32 mask, const Register &dest)
MacroAssemblerARMCompat::load16ZeroExtend_mask(const Address &address, Imm32 mask, const Register &dest)
{
load16(address, dest);
load16ZeroExtend(address, dest);
ma_and(mask, dest, dest);
}
void
MacroAssemblerARMCompat::load16(const BaseIndex &src, const Register &dest)
// Load a halfword from [base + (index << scale) + offset], zero-extending
// into dest.  Halfword loads use the extended encodings (register offset
// only), so scale and displacement are pre-computed into the scratch register.
MacroAssemblerARMCompat::load16ZeroExtend(const BaseIndex &src, const Register &dest)
{
    Register index = src.index;
    // ARMv7 does not have LSL on an index register with an extended load.
    if (src.scale != TimesOne) {
        ma_lsl(Imm32::ShiftOf(src.scale), index, ScratchRegister);
        index = ScratchRegister;
    }
    // Fold any displacement into the (copied) index register as well.
    if (src.offset != 0) {
        if (index != ScratchRegister) {
            ma_mov(index, ScratchRegister);
            index = ScratchRegister;
        }
        ma_add(Imm32(src.offset), index);
    }
    ma_ldrh(EDtrAddr(src.base, EDtrOffReg(index)), dest);
}
// Load a halfword from [address] and sign-extend it into dest.
void
MacroAssemblerARMCompat::load16SignExtend(const Address &address, const Register &dest)
{
    ma_dataTransferN(IsLoad, 16, true, address.base, Imm32(address.offset), dest);
}
void
MacroAssemblerARMCompat::load16SignExtend(const BaseIndex &src, const Register &dest)
{
Register index = src.index;
@ -1370,7 +1476,7 @@ MacroAssemblerARMCompat::load16(const BaseIndex &src, const Register &dest)
}
ma_add(Imm32(src.offset), index);
}
ma_ldrh(EDtrAddr(src.base, EDtrOffReg(index)), dest);
ma_ldrsh(EDtrAddr(src.base, EDtrOffReg(index)), dest);
}
void
@ -1390,7 +1496,6 @@ MacroAssemblerARMCompat::load32(const AbsoluteAddress &address, const Register &
{
loadPtr(address, dest);
}
void
MacroAssemblerARMCompat::loadPtr(const Address &address, const Register &dest)
{
@ -1430,6 +1535,50 @@ MacroAssemblerARMCompat::loadPrivate(const Address &address, const Register &des
ma_ldr(payloadOf(address), dest);
}
// Load a double from [address] into the destination VFP register.
void
MacroAssemblerARMCompat::loadDouble(const Address &address, const FloatRegister &dest)
{
    ma_vldr(Operand(address), dest);
}
// Load a double from [base + (index << scale) + offset] into dest.
void
MacroAssemblerARMCompat::loadDouble(const BaseIndex &src, const FloatRegister &dest)
{
    // VFP instructions don't even support register Base + register Index modes, so
    // just add the index, then handle the offset like normal
    Register base = src.base;
    Register index = src.index;
    uint32 scale = Imm32::ShiftOf(src.scale).value;
    int32 offset = src.offset;
    // scratch = base + (index << scale); the displacement is left to ma_vldr.
    as_add(ScratchRegister, base, lsl(index, scale));
    ma_vldr(Operand(ScratchRegister, offset), dest);
}
// Load a single-precision float from [address] and widen it to a double in
// dest: load into dest's single-precision overlay, then vcvt in place.
void
MacroAssemblerARMCompat::loadFloatAsDouble(const Address &address, const FloatRegister &dest)
{
    VFPRegister rt = dest;
    ma_vdtr(IsLoad, address, rt.singleOverlay());
    as_vcvt(rt, rt.singleOverlay());
}
// Load a single-precision float from [base + (index << scale) + offset] and
// widen it to a double in dest.
void
MacroAssemblerARMCompat::loadFloatAsDouble(const BaseIndex &src, const FloatRegister &dest)
{
    // VFP instructions don't even support register Base + register Index modes, so
    // just add the index, then handle the offset like normal
    Register base = src.base;
    Register index = src.index;
    uint32 scale = Imm32::ShiftOf(src.scale).value;
    int32 offset = src.offset;
    VFPRegister rt = dest;
    // scratch = base + (index << scale); load the float into the
    // single-precision overlay, then convert to double in place.
    as_add(ScratchRegister, base, lsl(index, scale));
    ma_vdtr(IsLoad, Operand(ScratchRegister, offset), rt.singleOverlay());
    as_vcvt(rt, rt.singleOverlay());
}
void
MacroAssemblerARMCompat::store16(const Register &src, const Address &address)
{
@ -1828,6 +1977,14 @@ MacroAssemblerARMCompat::unboxValue(const ValueOperand &src, AnyRegister dest)
}
}
// Box a double: transfer the 64-bit payload of src out of the VFP register
// into the value's two 32-bit core registers (payload and type halves).
void
MacroAssemblerARMCompat::boxDouble(const FloatRegister &src, const ValueOperand &dest)
{
    as_vxfer(dest.payloadReg(), dest.typeReg(),
             VFPRegister(src), FloatToCore);
}
void
MacroAssemblerARMCompat::boolValueToDouble(const ValueOperand &operand, const FloatRegister &dest)
{
@ -1893,18 +2050,16 @@ MacroAssemblerARMCompat::loadInt32OrDouble(Register base, Register index, const
bind(&end);
}
// This function almost certainly should not be called.
// Its two uses in independent code are loading NaN and loading 0.0;
// loading 1.0 and then subtracting it from itself will almost certainly be faster.
// Load the immediate double `dp` into dest via the 64-bit constant pool.
void
MacroAssemblerARMCompat::loadConstantDouble(double dp, const FloatRegister &dest)
{
    as_FImm64Pool(dest, dp);
}
void
MacroAssemblerARMCompat::loadStaticDouble(const double *dp, const FloatRegister &dest)
{
ma_mov(Imm32((uint32)dp), ScratchRegister);
as_vdtr(IsLoad, dest, VFPAddr(ScratchRegister, VFPOffImm(0)));
#if 0
_vldr()
movsd(dp, dest);
#endif
loadConstantDouble(*dp, dest);
}
// treat the value as a boolean, and set condition codes accordingly

View File

@ -59,7 +59,7 @@ class MacroAssemblerARM : public Assembler
{
public:
void convertInt32ToDouble(const Register &src, const FloatRegister &dest);
void convertUInt32ToDouble(const Register &src, const FloatRegister &dest);
void branchTruncateDouble(const FloatRegister &src, const Register &dest, Label *fail);
// somewhat direct wrappers for the low-level assembler functions
@ -308,7 +308,7 @@ class MacroAssemblerARM : public Assembler
void ma_vxfer(VFPRegister src, Register dest);
void ma_vxfer(VFPRegister src, Register dest1, Register dest2);
void ma_vdtr(LoadStore ls, const Operand &addr, FloatRegister dest, Condition cc = Always);
void ma_vdtr(LoadStore ls, const Operand &addr, VFPRegister dest, Condition cc = Always);
void ma_vldr(VFPAddr addr, FloatRegister dest);
void ma_vldr(const Operand &addr, FloatRegister dest);
@ -544,6 +544,9 @@ class MacroAssemblerARMCompat : public MacroAssemblerARM
void unboxDouble(const ValueOperand &operand, const FloatRegister &dest);
void unboxValue(const ValueOperand &src, AnyRegister dest);
// boxing code
void boxDouble(const FloatRegister &src, const ValueOperand &dest);
// Extended unboxing API. If the payload is already in a register, returns
// that register. Otherwise, provides a move to the given scratch register,
// and returns that.
@ -563,6 +566,7 @@ class MacroAssemblerARMCompat : public MacroAssemblerARM
void loadInt32OrDouble(Register base, Register index,
const FloatRegister &dest, int32 shift = defaultShift);
void loadStaticDouble(const double *dp, const FloatRegister &dest);
void loadConstantDouble(double dp, const FloatRegister &dest);
// treat the value as a boolean, and set condition codes accordingly
Condition testInt32Truthy(bool truthy, const ValueOperand &operand);
Condition testBooleanTruthy(bool truthy, const ValueOperand &operand);
@ -809,18 +813,6 @@ class MacroAssemblerARMCompat : public MacroAssemblerARM
ma_orr(Imm32(type), frameSizeReg);
}
void loadDouble(Address addr, FloatRegister dest) {
ma_vldr(Operand(addr), dest);
}
void storeDouble(FloatRegister src, Address addr) {
ma_vstr(src, Operand(addr));
}
void storeDouble(FloatRegister src, BaseIndex addr) {
// Harder cases not handled yet.
JS_ASSERT(addr.offset == 0);
ma_vstr(src, addr.base, addr.index);
}
void linkExitFrame();
void handleException();
@ -892,17 +884,36 @@ class MacroAssemblerARMCompat : public MacroAssemblerARM
void movePtr(const ImmGCPtr &imm, const Register &dest);
void movePtr(const Address &src, const Register &dest);
void load16(const Address &address, const Register &dest);
void load16_mask(const Address &address, Imm32 mask, const Register &dest);
void load16(const BaseIndex &src, const Register &dest);
void load8SignExtend(const Address &address, const Register &dest);
void load8SignExtend(const BaseIndex &src, const Register &dest);
void load8ZeroExtend(const Address &address, const Register &dest);
void load8ZeroExtend(const BaseIndex &src, const Register &dest);
void load16SignExtend(const Address &address, const Register &dest);
void load16SignExtend(const BaseIndex &src, const Register &dest);
void load16ZeroExtend(const Address &address, const Register &dest);
void load16ZeroExtend_mask(const Address &address, Imm32 mask, const Register &dest);
void load16ZeroExtend(const BaseIndex &src, const Register &dest);
void load32(const Address &address, const Register &dest);
void load32(const BaseIndex &address, const Register &dest);
void load32(const AbsoluteAddress &address, const Register &dest);
void loadPtr(const Address &address, const Register &dest);
void loadPtr(const BaseIndex &src, const Register &dest);
void loadPtr(const AbsoluteAddress &address, const Register &dest);
void loadPrivate(const Address &address, const Register &dest);
void loadDouble(const Address &addr, const FloatRegister &dest);
void loadDouble(const BaseIndex &src, const FloatRegister &dest);
// Load a float value into a register, then expand it to a double.
void loadFloatAsDouble(const Address &addr, const FloatRegister &dest);
void loadFloatAsDouble(const BaseIndex &src, const FloatRegister &dest);
void store16(const Register &src, const Address &address);
void store32(Register src, const AbsoluteAddress &address);
void store32(Register src, const Address &address);
@ -911,6 +922,14 @@ class MacroAssemblerARMCompat : public MacroAssemblerARM
void storePtr(ImmGCPtr imm, const Address &address);
void storePtr(Register src, const Address &address);
void storePtr(const Register &src, const AbsoluteAddress &dest);
void storeDouble(FloatRegister src, Address addr) {
ma_vstr(src, Operand(addr));
}
void storeDouble(FloatRegister src, BaseIndex addr) {
// Harder cases not handled yet.
JS_ASSERT(addr.offset == 0);
ma_vstr(src, addr.base, addr.index);
}
void clampIntToUint8(Register src, Register dest) {
JS_NOT_REACHED("NYI clampIntToUint8");

View File

@ -627,7 +627,7 @@ struct AssemblerBufferWithConstantPool : public AssemblerBuffer<SliceSize, Inst>
for (LoadOffsets::Iterator iter = p->loadOffsets.begin();
iter != p->loadOffsets.end(); ++iter, ++idx)
{
JS_ASSERT(iter->getOffset() > perforation.getOffset());
JS_ASSERT(iter->getOffset() >= perforation.getOffset());
// Everything here is known, we can safely do the necessary substitutions
Inst * inst = this->getInst(*iter);
// Manually compute the offset, including a possible bias.

View File

@ -211,10 +211,10 @@ class MacroAssemblerX86Shared : public Assembler
cmpl(dest, Imm32(INT_MIN));
j(Assembler::Equal, fail);
}
void load8(const Address &src, const Register &dest) {
// Zero-extending byte load: movzbl clears the upper 24 bits of dest.
void load8ZeroExtend(const Address &src, const Register &dest) {
    movzbl(Operand(src), dest);
}
void load8(const BaseIndex &src, const Register &dest) {
// Zero-extending byte load from a base+index address.
void load8ZeroExtend(const BaseIndex &src, const Register &dest) {
    movzbl(Operand(src), dest);
}
void load8SignExtend(const Address &src, const Register &dest) {
@ -227,17 +227,17 @@ class MacroAssemblerX86Shared : public Assembler
void store8(const S &src, const T &dest) {
movb(src, Operand(dest));
}
void load16(const Address &src, const Register &dest) {
// Zero-extending halfword load: movzwl clears the upper 16 bits of dest.
void load16ZeroExtend(const Address &src, const Register &dest) {
    movzwl(Operand(src), dest);
}
void load16(const BaseIndex &src, const Register &dest) {
// Zero-extending halfword load from a base+index address.
void load16ZeroExtend(const BaseIndex &src, const Register &dest) {
    movzwl(Operand(src), dest);
}
// Store the low 16 bits of src to dest (movw).
template <typename S, typename T>
void store16(const S &src, const T &dest) {
    movw(src, Operand(dest));
}
void load16_mask(const Address &src, Imm32 mask, const Register &dest) {
// Load a full 32-bit word and apply mask, rather than a 16-bit load.
// NOTE(review): this reads 32 bits at `src`, not 16 — assumes the extra
// bytes are readable and that callers' masks fit in the low 16 bits; confirm.
void load16ZeroExtend_mask(const Address &src, Imm32 mask, const Register &dest) {
    load32(src, dest);
    and32(mask, dest);
}
@ -278,11 +278,11 @@ class MacroAssemblerX86Shared : public Assembler
// Narrow a double to single precision (cvtsd2ss).
void convertDoubleToFloat(const FloatRegister &src, const FloatRegister &dest) {
    cvtsd2ss(src, dest);
}
void loadFloat(const Address &src, FloatRegister dest) {
// Load a single-precision float, then widen it to a double in dest.
void loadFloatAsDouble(const Address &src, FloatRegister dest) {
    movss(Operand(src), dest);
    cvtss2sd(dest, dest);
}
void loadFloat(const BaseIndex &src, FloatRegister dest) {
// Load a single-precision float from a base+index address, widening to double.
void loadFloatAsDouble(const BaseIndex &src, FloatRegister dest) {
    movss(Operand(src), dest);
    cvtss2sd(dest, dest);
}