Bug 569441: Update Nitro assembler to SVN rev 60501 + our changes, no_r=me

This commit is contained in:
David Mandelin 2010-06-01 18:09:55 -07:00
parent 7bdb82c9d9
commit e7faa0e1c8
20 changed files with 753 additions and 156 deletions

View File

@ -25,7 +25,7 @@
*/
#include <wtf/Platform.h> // MOCO
#if ENABLE_ASSEMBLER && WTF_CPU_ARM_TRADITIONAL
#include "ARMAssembler.h"
@ -34,39 +34,6 @@ namespace JSC {
// Patching helpers
ARMWord* ARMAssembler::getLdrImmAddress(ARMWord* insn, uint32_t* constPool)
{
// Must be an ldr ..., [pc +/- imm]
ASSERT((*insn & 0x0f7f0000) == 0x051f0000);
if (constPool && (*insn & 0x1))
return reinterpret_cast<ARMWord*>(constPool + ((*insn & SDT_OFFSET_MASK) >> 1));
ARMWord addr = reinterpret_cast<ARMWord>(insn) + 2 * sizeof(ARMWord);
if (*insn & DT_UP)
return reinterpret_cast<ARMWord*>(addr + (*insn & SDT_OFFSET_MASK));
else
return reinterpret_cast<ARMWord*>(addr - (*insn & SDT_OFFSET_MASK));
}
void ARMAssembler::linkBranch(void* code, JmpSrc from, void* to, int useConstantPool)
{
ARMWord* insn = reinterpret_cast<ARMWord*>(code) + (from.m_offset / sizeof(ARMWord));
if (!useConstantPool) {
int diff = reinterpret_cast<ARMWord*>(to) - reinterpret_cast<ARMWord*>(insn + 2);
if ((diff <= BOFFSET_MAX && diff >= BOFFSET_MIN)) {
*insn = B | getConditionalField(*insn) | (diff & BRANCH_MASK);
ExecutableAllocator::cacheFlush(insn, sizeof(ARMWord));
return;
}
}
ARMWord* addr = getLdrImmAddress(insn);
*addr = reinterpret_cast<ARMWord>(to);
ExecutableAllocator::cacheFlush(addr, sizeof(ARMWord));
}
void ARMAssembler::patchConstantPoolLoad(void* loadAddr, void* constPoolAddr)
{
ARMWord *ldr = reinterpret_cast<ARMWord*>(loadAddr);
@ -272,10 +239,8 @@ void ARMAssembler::moveImm(ARMWord imm, int dest)
ARMWord ARMAssembler::encodeComplexImm(ARMWord imm, int dest)
{
ARMWord tmp;
#if WTF_ARM_ARCH_VERSION >= 7
tmp = getImm16Op2(imm);
ARMWord tmp = getImm16Op2(imm);
if (tmp != INVALID_IMM) {
movw_r(dest, tmp);
return dest;
@ -297,28 +262,29 @@ ARMWord ARMAssembler::encodeComplexImm(ARMWord imm, int dest)
// Memory load/store helpers
void ARMAssembler::dataTransfer32(bool isLoad, RegisterID srcDst, RegisterID base, int32_t offset)
void ARMAssembler::dataTransfer32(bool isLoad, RegisterID srcDst, RegisterID base, int32_t offset, bool bytes)
{
ARMWord transferFlag = bytes ? DT_BYTE : 0;
if (offset >= 0) {
if (offset <= 0xfff)
dtr_u(isLoad, srcDst, base, offset);
dtr_u(isLoad, srcDst, base, offset | transferFlag);
else if (offset <= 0xfffff) {
add_r(ARMRegisters::S0, base, OP2_IMM | (offset >> 12) | (10 << 8));
dtr_u(isLoad, srcDst, ARMRegisters::S0, offset & 0xfff);
dtr_u(isLoad, srcDst, ARMRegisters::S0, (offset & 0xfff) | transferFlag);
} else {
ARMWord reg = getImm(offset, ARMRegisters::S0);
dtr_ur(isLoad, srcDst, base, reg);
dtr_ur(isLoad, srcDst, base, reg | transferFlag);
}
} else {
offset = -offset;
if (offset <= 0xfff)
dtr_d(isLoad, srcDst, base, offset);
dtr_d(isLoad, srcDst, base, offset | transferFlag);
else if (offset <= 0xfffff) {
sub_r(ARMRegisters::S0, base, OP2_IMM | (offset >> 12) | (10 << 8));
dtr_d(isLoad, srcDst, ARMRegisters::S0, offset & 0xfff);
dtr_d(isLoad, srcDst, ARMRegisters::S0, (offset & 0xfff) | transferFlag);
} else {
ARMWord reg = getImm(offset, ARMRegisters::S0);
dtr_dr(isLoad, srcDst, base, reg);
dtr_dr(isLoad, srcDst, base, reg | transferFlag);
}
}
}
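The new bytes flag works because DT_BYTE (bit 22, added to the flag enum in ARMAssembler.h below) lies well above the 12-bit immediate offset field, so OR-ing it into the operand passed to dtr_u/dtr_d turns a word transfer into a byte transfer without disturbing the offset. A minimal standalone check of that bit layout (illustrative only, not part of the patch):

#include <cassert>
#include <cstdint>

int main()
{
    const uint32_t DT_BYTE = 1u << 22;              // byte/word bit of LDR/STR
    const uint32_t SDT_OFFSET_MASK = 0xfff;         // 12-bit immediate offset field
    uint32_t offset = 0xabc;                        // any offset <= 0xfff
    uint32_t operand = offset | DT_BYTE;            // what dataTransfer32 now passes
    assert((operand & SDT_OFFSET_MASK) == offset);  // offset field untouched
    assert(operand & DT_BYTE);                      // byte-transfer bit set
    return 0;
}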
@ -390,10 +356,17 @@ void* ARMAssembler::executableCopy(ExecutablePool* allocator)
// The last bit is set if the constant must be placed in the constant pool.
int pos = (*iter) & (~0x1);
ARMWord* ldrAddr = reinterpret_cast<ARMWord*>(data + pos);
ARMWord offset = *getLdrImmAddress(ldrAddr);
if (offset != 0xffffffff) {
JmpSrc jmpSrc(pos);
linkBranch(data, jmpSrc, data + offset, ((*iter) & 1));
ARMWord* addr = getLdrImmAddress(ldrAddr);
if (*addr != InvalidBranchTarget) {
if (!(*iter & 1)) {
int diff = reinterpret_cast<ARMWord*>(data + *addr) - (ldrAddr + DefaultPrefetching);
if ((diff <= BOFFSET_MAX && diff >= BOFFSET_MIN)) {
*ldrAddr = B | getConditionalField(*ldrAddr) | (diff & BRANCH_MASK);
continue;
}
}
*addr = reinterpret_cast<ARMWord>(data + *addr);
}
}
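Both the removed linkBranch and the new inline path in executableCopy perform the same short-branch rewrite: if the word distance to the target fits in the signed 24-bit field, the ldr placeholder is overwritten with a plain B instruction. A self-contained sketch of that encoding, with constants copied from ARMAssembler.h and a hypothetical helper name (not part of the patch):

#include <cstdint>

static const uint32_t B           = 0x0a000000;   // ARM branch opcode
static const uint32_t BRANCH_MASK = 0x00ffffff;   // 24-bit signed word-offset field
static const int32_t  BOFFSET_MIN = -0x00800000;
static const int32_t  BOFFSET_MAX =  0x007fffff;

// Try to rewrite the word at 'insn' as a PC-relative B to 'target'.
// Returns false when the offset does not fit and the ldr path must be kept.
static bool encodeShortBranch(uint32_t* insn, const uint32_t* target, uint32_t conditionField)
{
    // The ARM pipeline prefetches two words, so PC reads as insn + 2.
    int32_t diff = static_cast<int32_t>(target - (insn + 2));
    if (diff < BOFFSET_MIN || diff > BOFFSET_MAX)
        return false;
    *insn = B | conditionField | (static_cast<uint32_t>(diff) & BRANCH_MASK);
    return true;
}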

View File

@ -1,5 +1,5 @@
/*
* Copyright (C) 2009 University of Szeged
* Copyright (C) 2009, 2010 University of Szeged
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@ -143,6 +143,7 @@ namespace JSC {
FSUBD = 0x0e300b40,
FMULD = 0x0e200b00,
FCMPD = 0x0eb40b40,
FSQRTD = 0x0eb10bc0,
DTR = 0x05000000,
LDRH = 0x00100090,
STRH = 0x00000090,
@ -151,22 +152,22 @@ namespace JSC {
FDTR = 0x0d000b00,
B = 0x0a000000,
BL = 0x0b000000,
#ifndef __ARM_ARCH_4__
BX = 0x012fff10, // Only on ARMv4T+!
#if WTF_ARM_ARCH_VERSION >= 5 || defined(__ARM_ARCH_4T__)
BX = 0x012fff10,
#endif
FMSR = 0x0e000a10,
FMRS = 0x0e100a10,
FSITOD = 0x0eb80bc0,
FTOSID = 0x0ebd0b40,
FMSTAT = 0x0ef1fa10
FMSTAT = 0x0ef1fa10,
#if WTF_ARM_ARCH_VERSION >= 5
,CLZ = 0x016f0f10,
CLZ = 0x016f0f10,
BKPT = 0xe120070,
BLX_R = 0x012fff30
BLX = 0x012fff30,
#endif
#if WTF_ARM_ARCH_VERSION >= 7
,MOVW = 0x03000000,
MOVT = 0x03400000
MOVW = 0x03000000,
MOVT = 0x03400000,
#endif
};
@ -177,11 +178,12 @@ namespace JSC {
SET_CC = (1 << 20),
OP2_OFSREG = (1 << 25),
DT_UP = (1 << 23),
DT_BYTE = (1 << 22),
DT_WB = (1 << 21),
// This flag is included in LDR and STR
DT_PRE = (1 << 24),
HDT_UH = (1 << 5),
DT_LOAD = (1 << 20)
DT_LOAD = (1 << 20),
};
// Masks of ARM instructions
@ -189,19 +191,19 @@ namespace JSC {
BRANCH_MASK = 0x00ffffff,
NONARM = 0xf0000000,
SDT_MASK = 0x0c000000,
SDT_OFFSET_MASK = 0xfff
SDT_OFFSET_MASK = 0xfff,
};
enum {
BOFFSET_MIN = -0x00800000,
BOFFSET_MAX = 0x007fffff,
SDT = 0x04000000
SDT = 0x04000000,
};
enum {
padForAlign8 = 0x00,
padForAlign16 = 0x0000,
padForAlign32 = 0xee120070
padForAlign32 = 0xee120070,
};
typedef enum {
@ -212,6 +214,8 @@ namespace JSC {
} Shift;
static const ARMWord INVALID_IMM = 0xf0000000;
static const ARMWord InvalidBranchTarget = 0xffffffff;
static const int DefaultPrefetching = 2;
class JmpSrc {
friend class ARMAssembler;
@ -491,6 +495,12 @@ namespace JSC {
emitInst(static_cast<ARMWord>(cc) | FCMPD, dd, 0, dm);
}
void fsqrtd_r(int dd, int dm, Condition cc = AL)
{
FIXME_INSN_PRINTING;
emitInst(static_cast<ARMWord>(cc) | FSQRTD, dd, 0, dm);
}
void ldr_imm(int rd, ARMWord imm, Condition cc = AL)
{
char mnemonic[16];
@ -671,6 +681,30 @@ namespace JSC {
#endif
}
void bx(int rm, Condition cc = AL)
{
#if WTF_ARM_ARCH_VERSION >= 5 || defined(__ARM_ARCH_4T__)
emitInst(static_cast<ARMWord>(cc) | BX, 0, 0, RM(rm));
#else
mov_r(ARMRegisters::pc, RM(rm), cc);
#endif
}
JmpSrc blx(int rm, Condition cc = AL)
{
#if WTF_ARM_ARCH_AT_LEAST(5)
int s = m_buffer.uncheckedSize();
emitInst(static_cast<ARMWord>(cc) | BLX, 0, 0, RM(rm));
#else
ASSERT(rm != 14);
ensureSpace(2 * sizeof(ARMWord), 0);
mov_r(ARMRegisters::lr, ARMRegisters::pc, cc);
int s = m_buffer.uncheckedSize();
bx(rm, cc);
#endif
return JmpSrc(s);
}
// BX is emitted where possible, or an equivalent sequence on ARMv4.
void bx_r(int rm, Condition cc = AL)
{
@ -786,7 +820,7 @@ namespace JSC {
{
ensureSpace(sizeof(ARMWord), sizeof(ARMWord));
int s = m_buffer.uncheckedSize();
ldr_un_imm(rd, 0xffffffff, cc);
ldr_un_imm(rd, InvalidBranchTarget, cc);
m_jumps.append(s | (useConstantPool & 0x1));
return JmpSrc(s);
}
@ -800,15 +834,40 @@ namespace JSC {
// Patching helpers
static ARMWord* getLdrImmAddress(ARMWord* insn, uint32_t* constPool = 0);
static void linkBranch(void* code, JmpSrc from, void* to, int useConstantPool = 0);
static ARMWord* getLdrImmAddress(ARMWord* insn)
{
#if WTF_ARM_ARCH_AT_LEAST(5)
// Check for call
if ((*insn & 0x0f7f0000) != 0x051f0000) {
// Must be BLX
ASSERT((*insn & 0x012fff30) == 0x012fff30);
insn--;
}
#endif
// Must be an ldr ..., [pc +/- imm]
ASSERT((*insn & 0x0f7f0000) == 0x051f0000);
ARMWord addr = reinterpret_cast<ARMWord>(insn) + DefaultPrefetching * sizeof(ARMWord);
if (*insn & DT_UP)
return reinterpret_cast<ARMWord*>(addr + (*insn & SDT_OFFSET_MASK));
return reinterpret_cast<ARMWord*>(addr - (*insn & SDT_OFFSET_MASK));
}
static ARMWord* getLdrImmAddressOnPool(ARMWord* insn, uint32_t* constPool)
{
// Must be an ldr ..., [pc +/- imm]
ASSERT((*insn & 0x0f7f0000) == 0x051f0000);
if (*insn & 0x1)
return reinterpret_cast<ARMWord*>(constPool + ((*insn & SDT_OFFSET_MASK) >> 1));
return getLdrImmAddress(insn);
}
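A worked example of the PC-relative literal addressing these helpers resolve (the address and register fields are made up, not taken from the patch):

#include <cassert>
#include <cstdint>

int main()
{
    // An "ldr r0, [pc, #+0x10]" located at byte address 0x1000 reads its
    // literal from 0x1000 + 8 (two words of prefetch) + 0x10 = 0x1018,
    // exactly what getLdrImmAddress computes.
    uintptr_t insnAddr = 0x1000;
    uint32_t insn = 0x051f0000 | (1u << 23) | 0x010;    // DT_UP set, offset 0x10
    assert((insn & 0x0f7f0000) == 0x051f0000);          // same sanity check as above
    uintptr_t literal = insnAddr + 2 * sizeof(uint32_t) + (insn & 0xfff);
    assert(literal == 0x1018);
    return 0;
}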
static void patchPointerInternal(intptr_t from, void* to)
{
ARMWord* insn = reinterpret_cast<ARMWord*>(from);
ARMWord* addr = getLdrImmAddress(insn);
*addr = reinterpret_cast<ARMWord>(to);
ExecutableAllocator::cacheFlush(addr, sizeof(ARMWord));
}
static ARMWord patchConstantPoolLoad(ARMWord load, ARMWord value)
@ -865,12 +924,13 @@ namespace JSC {
void linkJump(JmpSrc from, JmpDst to)
{
ARMWord* insn = reinterpret_cast<ARMWord*>(m_buffer.data()) + (from.m_offset / sizeof(ARMWord));
*getLdrImmAddress(insn, m_buffer.poolAddress()) = static_cast<ARMWord>(to.m_offset);
ARMWord* addr = getLdrImmAddressOnPool(insn, m_buffer.poolAddress());
*addr = static_cast<ARMWord>(to.m_offset);
}
static void linkJump(void* code, JmpSrc from, void* to)
{
linkBranch(code, from, to);
patchPointerInternal(reinterpret_cast<intptr_t>(code) + from.m_offset, to);
}
static void relinkJump(void* from, void* to)
@ -880,12 +940,12 @@ namespace JSC {
static void linkCall(void* code, JmpSrc from, void* to)
{
linkBranch(code, from, to, true);
patchPointerInternal(reinterpret_cast<intptr_t>(code) + from.m_offset, to);
}
static void relinkCall(void* from, void* to)
{
relinkJump(from, to);
patchPointerInternal(reinterpret_cast<intptr_t>(from) - sizeof(ARMWord), to);
}
// Address operations
@ -939,9 +999,18 @@ namespace JSC {
void moveImm(ARMWord imm, int dest);
ARMWord encodeComplexImm(ARMWord imm, int dest);
ARMWord getOffsetForHalfwordDataTransfer(ARMWord imm, int tmpReg)
{
// Encode immediate data in the instruction if it is possible
if (imm <= 0xff)
return getOp2Byte(imm);
// Otherwise, store the data in a temporary register
return encodeComplexImm(imm, tmpReg);
}
// Memory load/store helpers
void dataTransfer32(bool isLoad, RegisterID srcDst, RegisterID base, int32_t offset);
void dataTransfer32(bool isLoad, RegisterID srcDst, RegisterID base, int32_t offset, bool bytes = false);
void baseIndexTransfer32(bool isLoad, RegisterID srcDst, RegisterID base, RegisterID index, int scale, int32_t offset);
void doubleTransfer(bool isLoad, FPRegisterID srcDst, RegisterID base, int32_t offset);

View File

@ -1,5 +1,6 @@
/*
* Copyright (C) 2009 Apple Inc. All rights reserved.
* Copyright (C) 2010 University of Szeged
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@ -201,10 +202,10 @@ class ARMThumbImmediate {
ALWAYS_INLINE static void countLeadingZerosPartial(uint32_t& value, int32_t& zeros, const int N)
{
if (value & ~((1<<N)-1)) /* check for any of the top N bits (of 2N bits) are set */ \
value >>= N; /* if any were set, lose the bottom N */ \
else /* if none of the top N bits are set, */ \
zeros += N; /* then we have identified N leading zeros */
if (value & ~((1 << N) - 1)) /* check for any of the top N bits (of 2N bits) are set */
value >>= N; /* if any were set, lose the bottom N */
else /* if none of the top N bits are set, */
zeros += N; /* then we have identified N leading zeros */
}
static int32_t countLeadingZeros(uint32_t value)
@ -582,11 +583,13 @@ private:
OP_MOV_reg_T1 = 0x4600,
OP_BLX = 0x4700,
OP_BX = 0x4700,
OP_LDRH_reg_T1 = 0x5A00,
OP_STR_reg_T1 = 0x5000,
OP_LDR_reg_T1 = 0x5800,
OP_LDRH_reg_T1 = 0x5A00,
OP_LDRB_reg_T1 = 0x5C00,
OP_STR_imm_T1 = 0x6000,
OP_LDR_imm_T1 = 0x6800,
OP_LDRB_imm_T1 = 0x7800,
OP_LDRH_imm_T1 = 0x8800,
OP_STR_imm_T2 = 0x9000,
OP_LDR_imm_T2 = 0x9800,
@ -631,12 +634,15 @@ private:
OP_SUB_imm_T4 = 0xF2A0,
OP_MOVT = 0xF2C0,
OP_NOP_T2a = 0xF3AF,
OP_LDRB_imm_T3 = 0xF810,
OP_LDRB_reg_T2 = 0xF810,
OP_LDRH_reg_T2 = 0xF830,
OP_LDRH_imm_T3 = 0xF830,
OP_STR_imm_T4 = 0xF840,
OP_STR_reg_T2 = 0xF840,
OP_LDR_imm_T4 = 0xF850,
OP_LDR_reg_T2 = 0xF850,
OP_LDRB_imm_T2 = 0xF890,
OP_LDRH_imm_T2 = 0xF8B0,
OP_STR_imm_T3 = 0xF8C0,
OP_LDR_imm_T3 = 0xF8D0,
@ -1080,6 +1086,52 @@ public:
m_formatter.twoWordOp12Reg4FourFours(OP_LDRH_reg_T2, rn, FourFours(rt, 0, shift, rm));
}
void ldrb(RegisterID rt, RegisterID rn, ARMThumbImmediate imm)
{
ASSERT(rn != ARMRegisters::pc); // LDR (literal)
ASSERT(imm.isUInt12());
if (!((rt | rn) & 8) && imm.isUInt5())
m_formatter.oneWordOp5Imm5Reg3Reg3(OP_LDRB_imm_T1, imm.getUInt5(), rn, rt);
else
m_formatter.twoWordOp12Reg4Reg4Imm12(OP_LDRB_imm_T2, rn, rt, imm.getUInt12());
}
void ldrb(RegisterID rt, RegisterID rn, int offset, bool index, bool wback)
{
ASSERT(rt != ARMRegisters::pc);
ASSERT(rn != ARMRegisters::pc);
ASSERT(index || wback);
ASSERT(!wback | (rt != rn));
bool add = true;
if (offset < 0) {
add = false;
offset = -offset;
}
ASSERT(!(offset & ~0xff));
offset |= (wback << 8);
offset |= (add << 9);
offset |= (index << 10);
offset |= (1 << 11);
m_formatter.twoWordOp12Reg4Reg4Imm12(OP_LDRB_imm_T3, rn, rt, offset);
}
void ldrb(RegisterID rt, RegisterID rn, RegisterID rm, unsigned shift = 0)
{
ASSERT(rn != ARMRegisters::pc); // LDR (literal)
ASSERT(!BadReg(rm));
ASSERT(shift <= 3);
if (!shift && !((rt | rn | rm) & 8))
m_formatter.oneWordOp7Reg3Reg3Reg3(OP_LDRB_reg_T1, rm, rn, rt);
else
m_formatter.twoWordOp12Reg4FourFours(OP_LDRB_reg_T2, rn, FourFours(rt, 0, shift, rm));
}
void lsl(RegisterID rd, RegisterID rm, int32_t shiftAmount)
{
ASSERT(!BadReg(rd));
@ -1718,7 +1770,20 @@ private:
|| (isNOP_T1(instruction - 5) && isNOP_T2(instruction - 4) && isB(instruction - 2)) );
intptr_t relative = reinterpret_cast<intptr_t>(target) - (reinterpret_cast<intptr_t>(instruction));
if (((relative << 7) >> 7) == relative) {
// From Cortex-A8 errata:
// If the 32-bit Thumb-2 branch instruction spans two 4KiB regions and
// the target of the branch falls within the first region, it is
// possible for the processor to incorrectly determine the branch
// instruction, and it is also possible in some cases for the processor
// to enter a deadlock state.
// The instruction spans two pages if it ends at an address ending in 0x002
bool spansTwo4K = ((reinterpret_cast<intptr_t>(instruction) & 0xfff) == 0x002);
// The target is in the first page if the branch jumps back by [3..0x1002] bytes
bool targetInFirstPage = (relative >= -0x1002) && (relative < -2);
bool wouldTriggerA8Errata = spansTwo4K && targetInFirstPage;
if (((relative << 7) >> 7) == relative && !wouldTriggerA8Errata) {
// ARM encoding for the top two bits below the sign bit is 'peculiar'.
if (relative >= 0)
relative ^= 0xC00000;
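As a worked example of the erratum check added above (addresses are invented, not from the patch): a four-byte Thumb-2 branch that straddles a 4KiB boundary and targets the page it starts in must take the long encoding.

#include <cassert>
#include <cstdint>

int main()
{
    // The branch occupies the four bytes just before 'instruction'
    // (0x40000ffe..0x40001001), straddling the page boundary at 0x40001000.
    intptr_t instruction = 0x40001002;
    intptr_t relative = -0x800;                      // target 0x40000802: first page
    bool spansTwo4K = (instruction & 0xfff) == 0x002;
    bool targetInFirstPage = (relative >= -0x1002) && (relative < -2);
    assert(spansTwo4K && targetInFirstPage);         // would trigger the erratum
    relative = 0x40;                                 // a forward branch is unaffected
    targetInFirstPage = (relative >= -0x1002) && (relative < -2);
    assert(!(spansTwo4K && targetInFirstPage));
    return 0;
}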

View File

@ -64,7 +64,7 @@ public:
TimesOne,
TimesTwo,
TimesFour,
TimesEight
TimesEight,
};
// Address:
@ -81,6 +81,17 @@ public:
int32_t offset;
};
struct ExtendedAddress {
explicit ExtendedAddress(RegisterID base, intptr_t offset = 0)
: base(base)
, offset(offset)
{
}
RegisterID base;
intptr_t offset;
};
// ImplicitAddress:
//
// This class is used for explicit 'load' and 'store' operations
@ -149,7 +160,7 @@ public:
// in a class requiring explicit construction in order to differentiate
// from pointers used as absolute addresses to memory operations
struct ImmPtr {
explicit ImmPtr(void* value)
explicit ImmPtr(const void* value)
: m_value(value)
{
}
@ -159,7 +170,7 @@ public:
return reinterpret_cast<intptr_t>(m_value);
}
void* m_value;
const void* m_value;
};
// Imm32:
@ -171,7 +182,7 @@ public:
struct Imm32 {
explicit Imm32(int32_t value)
: m_value(value)
#if WTF_CPU_ARM
#if WTF_CPU_ARM || WTF_CPU_MIPS
, m_isPointer(false)
#endif
{
@ -180,7 +191,7 @@ public:
#if !WTF_CPU_X86_64
explicit Imm32(ImmPtr ptr)
: m_value(ptr.asIntptr())
#if WTF_CPU_ARM
#if WTF_CPU_ARM || WTF_CPU_MIPS
, m_isPointer(true)
#endif
{
@ -188,13 +199,14 @@ public:
#endif
int32_t m_value;
#if WTF_CPU_ARM
#if WTF_CPU_ARM || WTF_CPU_MIPS
// We rely on being able to regenerate code to recover exception handling
// information. Since ARMv7 supports 16-bit immediates, there is a danger
// that, if pointer values change, the layout of the generated code will change.
// To avoid this problem, always generate pointers (and thus Imm32s constructed
// from ImmPtrs) with a code sequence that is able to represent any pointer
// value - don't use a more compact form in these cases.
// Same for MIPS.
bool m_isPointer;
#endif
};
@ -295,7 +307,7 @@ public:
None = 0x0,
Linkable = 0x1,
Near = 0x2,
LinkableNear = 0x3
LinkableNear = 0x3,
};
Call()

View File

@ -50,7 +50,6 @@ namespace JSC {
~AssemblerBuffer()
{
if (m_buffer != m_inlineBuffer)
//fastFree(m_buffer);
free(m_buffer);
}
@ -158,11 +157,9 @@ namespace JSC {
m_capacity += m_capacity / 2 + extraCapacity;
if (m_buffer == m_inlineBuffer) {
//char* newBuffer = static_cast<char*>(fastMalloc(m_capacity));
char* newBuffer = static_cast<char*>(malloc(m_capacity));
m_buffer = static_cast<char*>(memcpy(newBuffer, m_buffer, m_size));
} else
//m_buffer = static_cast<char*>(fastRealloc(m_buffer, m_capacity));
m_buffer = static_cast<char*>(realloc(m_buffer, m_capacity));
}

View File

@ -91,7 +91,7 @@ public:
enum {
UniqueConst,
ReusableConst,
UnusedEntry
UnusedEntry,
};
AssemblerBufferWithConstantPool()
@ -100,17 +100,13 @@ public:
, m_maxDistance(maxPoolSize)
, m_lastConstDelta(0)
{
//m_pool = static_cast<uint32_t*>(fastMalloc(maxPoolSize));
m_pool = static_cast<uint32_t*>(malloc(maxPoolSize));
//m_mask = static_cast<char*>(fastMalloc(maxPoolSize / sizeof(uint32_t)));
m_mask = static_cast<char*>(malloc(maxPoolSize / sizeof(uint32_t)));
}
~AssemblerBufferWithConstantPool()
{
//fastFree(m_mask);
free(m_mask);
//fastFree(m_pool);
free(m_pool);
}

View File

@ -61,10 +61,8 @@ public:
// Note: Initialization sequence is significant, since executablePool is a PassRefPtr.
// First, executablePool is copied into m_executablePool, then the initialization of
// m_code uses m_executablePool, *not* executablePool, since this is no longer valid.
//LinkBuffer(MacroAssembler* masm, PassRefPtr<ExecutablePool> executablePool)
LinkBuffer(MacroAssembler* masm, ExecutablePool* executablePool)
: m_executablePool(executablePool)
//, m_code(masm->m_assembler.executableCopy(m_executablePool.get()))
, m_code(masm->m_assembler.executableCopy(m_executablePool))
, m_size(masm->m_assembler.size())
#ifndef NDEBUG
@ -103,7 +101,6 @@ public:
void link(JumpList list, CodeLocationLabel label)
{
//for (unsigned i = 0; i < list.m_jumps.size(); ++i)
for (unsigned i = 0; i < list.m_jumps.length(); ++i)
MacroAssembler::linkJump(code(), list.m_jumps[i], label);
}
@ -192,7 +189,6 @@ private:
ExecutableAllocator::cacheFlush(code(), m_size);
}
//RefPtr<ExecutablePool> m_executablePool;
ExecutablePool* m_executablePool;
void* m_code;
size_t m_size;

View File

@ -38,6 +38,10 @@ namespace JSC { typedef MacroAssemblerARMv7 MacroAssemblerBase; }
#include "MacroAssemblerARM.h"
namespace JSC { typedef MacroAssemblerARM MacroAssemblerBase; }
#elif WTF_CPU_MIPS
#include "MacroAssemblerMIPS.h"
namespace JSC { typedef MacroAssemblerMIPS MacroAssemblerBase; }
#elif WTF_CPU_X86
#include "MacroAssemblerX86.h"
namespace JSC { typedef MacroAssemblerX86 MacroAssemblerBase; }
@ -362,6 +366,12 @@ public:
{
return branchSub32(cond, imm, dest);
}
using MacroAssemblerBase::branchTest8;
Jump branchTest8(Condition cond, ExtendedAddress address, Imm32 mask = Imm32(-1))
{
return MacroAssemblerBase::branchTest8(cond, Address(address.base, address.offset), mask);
}
void rshiftPtr(Imm32 imm, RegisterID dest)
{

View File

@ -1,6 +1,6 @@
/*
* Copyright (C) 2008 Apple Inc.
* Copyright (C) 2009 University of Szeged
* Copyright (C) 2009, 2010 University of Szeged
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@ -73,7 +73,7 @@ public:
DoubleGreaterThanOrUnordered = ARMAssembler::HI,
DoubleGreaterThanOrEqualOrUnordered = ARMAssembler::CS,
DoubleLessThanOrUnordered = ARMAssembler::LT,
DoubleLessThanOrEqualOrUnordered = ARMAssembler::LE
DoubleLessThanOrEqualOrUnordered = ARMAssembler::LE,
};
static const RegisterID stackPointerRegister = ARMRegisters::sp;
@ -187,6 +187,20 @@ public:
{
m_assembler.movs_r(dest, m_assembler.asr(dest, imm.m_value & 0x1f));
}
void urshift32(RegisterID shift_amount, RegisterID dest)
{
ARMWord w = ARMAssembler::getOp2(0x1f);
ASSERT(w != ARMAssembler::INVALID_IMM);
m_assembler.and_r(ARMRegisters::S0, shift_amount, w);
m_assembler.movs_r(dest, m_assembler.lsr_r(dest, ARMRegisters::S0));
}
void urshift32(Imm32 imm, RegisterID dest)
{
m_assembler.movs_r(dest, m_assembler.lsr(dest, imm.m_value & 0x1f));
}
void sub32(RegisterID src, RegisterID dest)
{
@ -227,6 +241,11 @@ public:
m_assembler.eors_r(dest, dest, m_assembler.getImm(imm.m_value, ARMRegisters::S0));
}
void load8(ImplicitAddress address, RegisterID dest)
{
m_assembler.dataTransfer32(true, dest, address.base, address.offset, true);
}
void load32(ImplicitAddress address, RegisterID dest)
{
m_assembler.dataTransfer32(true, dest, address.base, address.offset);
@ -263,11 +282,16 @@ public:
void load16(BaseIndex address, RegisterID dest)
{
m_assembler.add_r(ARMRegisters::S0, address.base, m_assembler.lsl(address.index, address.scale));
if (address.offset>=0)
m_assembler.ldrh_u(dest, ARMRegisters::S0, ARMAssembler::getOp2Byte(address.offset));
m_assembler.add_r(ARMRegisters::S1, address.base, m_assembler.lsl(address.index, address.scale));
load16(Address(ARMRegisters::S1, address.offset), dest);
}
void load16(ImplicitAddress address, RegisterID dest)
{
if (address.offset >= 0)
m_assembler.ldrh_u(dest, address.base, m_assembler.getOffsetForHalfwordDataTransfer(address.offset, ARMRegisters::S0));
else
m_assembler.ldrh_d(dest, ARMRegisters::S0, ARMAssembler::getOp2Byte(-address.offset));
m_assembler.ldrh_d(dest, address.base, m_assembler.getOffsetForHalfwordDataTransfer(-address.offset, ARMRegisters::S0));
}
DataLabel32 store32WithAddressOffsetPatch(RegisterID src, Address address)
@ -381,6 +405,12 @@ public:
move(src, dest);
}
Jump branch8(Condition cond, Address left, Imm32 right)
{
load8(left, ARMRegisters::S1);
return branch32(cond, ARMRegisters::S1, right);
}
Jump branch32(Condition cond, RegisterID left, RegisterID right, int useConstantPool = 0)
{
m_assembler.cmp_r(left, right);
@ -444,6 +474,12 @@ public:
return m_assembler.jmp(ARMCondition(cond));
}
Jump branchTest8(Condition cond, Address address, Imm32 mask = Imm32(-1))
{
load8(address, ARMRegisters::S1);
return branchTest32(cond, ARMRegisters::S1, mask);
}
Jump branchTest32(Condition cond, RegisterID reg, RegisterID mask)
{
ASSERT((cond == Zero) || (cond == NonZero));
@ -552,6 +588,13 @@ public:
return Jump(m_assembler.jmp(ARMCondition(cond)));
}
Jump branchNeg32(Condition cond, RegisterID srcDest)
{
ASSERT((cond == Overflow) || (cond == Signed) || (cond == Zero) || (cond == NonZero));
neg32(srcDest);
return Jump(m_assembler.jmp(ARMCondition(cond)));
}
Jump branchOr32(Condition cond, RegisterID src, RegisterID dest)
{
ASSERT((cond == Signed) || (cond == Zero) || (cond == NonZero));
@ -780,12 +823,17 @@ public:
return false;
}
bool supportsFloatingPointSqrt() const
{
return s_isVFPPresent;
}
void loadDouble(ImplicitAddress address, FPRegisterID dest)
{
m_assembler.doubleTransfer(true, dest, address.base, address.offset);
}
void loadDouble(void* address, FPRegisterID dest)
void loadDouble(const void* address, FPRegisterID dest)
{
m_assembler.ldr_un_imm(ARMRegisters::S0, (ARMWord)address);
m_assembler.fdtr_u(true, dest, ARMRegisters::S0, 0);
@ -841,6 +889,11 @@ public:
mulDouble(ARMRegisters::SD0, dest);
}
void sqrtDouble(FPRegisterID src, FPRegisterID dest)
{
m_assembler.fsqrtd_r(dest, src);
}
void convertInt32ToDouble(RegisterID src, FPRegisterID dest)
{
m_assembler.fmsr_r(dest, src);
@ -879,8 +932,8 @@ public:
// (specifically, in this case, INT_MIN).
Jump branchTruncateDoubleToInt32(FPRegisterID src, RegisterID dest)
{
(void)(src);
(void)(dest);
UNUSED_PARAM(src);
UNUSED_PARAM(dest);
ASSERT_NOT_REACHED();
return jump();
}
@ -927,46 +980,58 @@ protected:
#if WTF_ARM_ARCH_VERSION < 5
void prepareCall()
{
#if WTF_ARM_ARCH_VERSION < 5
ensureSpace(2 * sizeof(ARMWord), sizeof(ARMWord));
m_assembler.mov_r(linkRegister, ARMRegisters::pc);
#endif
}
#endif
#if WTF_ARM_ARCH_VERSION < 5
void call32(RegisterID base, int32_t offset)
{
#if WTF_ARM_ARCH_VERSION >= 5
int targetReg = ARMRegisters::S1;
#else
int targetReg = ARMRegisters::pc;
#endif
int tmpReg = ARMRegisters::S1;
if (base == ARMRegisters::sp)
offset += 4;
if (offset >= 0) {
if (offset <= 0xfff) {
prepareCall();
m_assembler.dtr_u(true, ARMRegisters::pc, base, offset);
m_assembler.dtr_u(true, targetReg, base, offset);
} else if (offset <= 0xfffff) {
m_assembler.add_r(ARMRegisters::S0, base, ARMAssembler::OP2_IMM | (offset >> 12) | (10 << 8));
m_assembler.add_r(tmpReg, base, ARMAssembler::OP2_IMM | (offset >> 12) | (10 << 8));
prepareCall();
m_assembler.dtr_u(true, ARMRegisters::pc, ARMRegisters::S0, offset & 0xfff);
m_assembler.dtr_u(true, targetReg, tmpReg, offset & 0xfff);
} else {
ARMWord reg = m_assembler.getImm(offset, ARMRegisters::S0);
ARMWord reg = m_assembler.getImm(offset, tmpReg);
prepareCall();
m_assembler.dtr_ur(true, ARMRegisters::pc, base, reg);
m_assembler.dtr_ur(true, targetReg, base, reg);
}
} else {
offset = -offset;
if (offset <= 0xfff) {
prepareCall();
m_assembler.dtr_d(true, ARMRegisters::pc, base, offset);
m_assembler.dtr_d(true, targetReg, base, offset);
} else if (offset <= 0xfffff) {
m_assembler.sub_r(ARMRegisters::S0, base, ARMAssembler::OP2_IMM | (offset >> 12) | (10 << 8));
m_assembler.sub_r(tmpReg, base, ARMAssembler::OP2_IMM | (offset >> 12) | (10 << 8));
prepareCall();
m_assembler.dtr_d(true, ARMRegisters::pc, ARMRegisters::S0, offset & 0xfff);
m_assembler.dtr_d(true, targetReg, tmpReg, offset & 0xfff);
} else {
ARMWord reg = m_assembler.getImm(offset, ARMRegisters::S0);
ARMWord reg = m_assembler.getImm(offset, tmpReg);
prepareCall();
m_assembler.dtr_dr(true, ARMRegisters::pc, base, reg);
m_assembler.dtr_dr(true, targetReg, base, reg);
}
}
#if WTF_ARM_ARCH_VERSION >= 5
m_assembler.blx(targetReg);
#endif
}
#else
void call32(RegisterID base, int32_t offset)

View File

@ -1,5 +1,6 @@
/*
* Copyright (C) 2009 Apple Inc. All rights reserved.
* Copyright (C) 2010 University of Szeged
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@ -259,6 +260,21 @@ public:
{
m_assembler.asr(dest, dest, imm.m_value & 0x1f);
}
void urshift32(RegisterID shift_amount, RegisterID dest)
{
// Clamp the shift to the range 0..31
ARMThumbImmediate armImm = ARMThumbImmediate::makeEncodedImm(0x1f);
ASSERT(armImm.isValid());
m_assembler.ARM_and(dataTempRegister, shift_amount, armImm);
m_assembler.lsr(dest, dest, dataTempRegister);
}
void urshift32(Imm32 imm, RegisterID dest)
{
m_assembler.lsr(dest, dest, imm.m_value & 0x1f);
}
void sub32(RegisterID src, RegisterID dest)
{
@ -369,6 +385,20 @@ private:
}
}
void load8(ArmAddress address, RegisterID dest)
{
if (address.type == ArmAddress::HasIndex)
m_assembler.ldrb(dest, address.base, address.u.index, address.u.scale);
else if (address.u.offset >= 0) {
ARMThumbImmediate armImm = ARMThumbImmediate::makeUInt12(address.u.offset);
ASSERT(armImm.isValid());
m_assembler.ldrb(dest, address.base, armImm);
} else {
ASSERT(address.u.offset >= -255);
m_assembler.ldrb(dest, address.base, address.u.offset, true, false);
}
}
void store32(RegisterID src, ArmAddress address)
{
if (address.type == ArmAddress::HasIndex)
@ -405,6 +435,11 @@ public:
m_assembler.ldr(dest, addressTempRegister, ARMThumbImmediate::makeUInt16(0));
}
void load8(ImplicitAddress address, RegisterID dest)
{
load8(setupArmAddress(address), dest);
}
DataLabel32 load32WithAddressOffsetPatch(Address address, RegisterID dest)
{
DataLabel32 label = moveWithPatch(Imm32(address.offset), dataTempRegister);
@ -424,6 +459,11 @@ public:
{
m_assembler.ldrh(dest, makeBaseIndexBase(address), address.index, address.scale);
}
void load16(ImplicitAddress address, RegisterID dest)
{
m_assembler.ldrh(dest, address.base, address.offset);
}
DataLabel32 store32WithAddressOffsetPatch(RegisterID src, Address address)
{
@ -477,6 +517,11 @@ public:
// In short, FIXME:.
bool supportsFloatingPointTruncate() const { return false; }
bool supportsFloatingPointSqrt() const
{
return false;
}
void loadDouble(ImplicitAddress address, FPRegisterID dest)
{
RegisterID base = address.base;
@ -540,6 +585,11 @@ public:
mulDouble(fpTempRegister, dest);
}
void sqrtDouble(FPRegisterID, FPRegisterID)
{
ASSERT_NOT_REACHED();
}
void convertInt32ToDouble(RegisterID src, FPRegisterID dest)
{
m_assembler.vmov(fpTempRegister, src);
@ -794,6 +844,19 @@ public:
return branch32(cond, addressTempRegister, Imm32(right.m_value << 16));
}
Jump branch8(Condition cond, RegisterID left, Imm32 right)
{
compare32(left, right);
return Jump(makeBranch(cond));
}
Jump branch8(Condition cond, Address left, Imm32 right)
{
// use addressTempRegister in case the branch8 we call uses dataTempRegister. :-/
load8(left, addressTempRegister);
return branch8(cond, addressTempRegister, right);
}
Jump branchTest32(Condition cond, RegisterID reg, RegisterID mask)
{
ASSERT((cond == Zero) || (cond == NonZero));
@ -824,6 +887,21 @@ public:
return branchTest32(cond, addressTempRegister, mask);
}
Jump branchTest8(Condition cond, RegisterID reg, Imm32 mask = Imm32(-1))
{
ASSERT((cond == Zero) || (cond == NonZero));
test32(reg, mask);
return Jump(makeBranch(cond));
}
Jump branchTest8(Condition cond, Address address, Imm32 mask = Imm32(-1))
{
ASSERT((cond == Zero) || (cond == NonZero));
// use addressTempRegister in case the branchTest8 we call uses dataTempRegister. :-/
load8(address, addressTempRegister);
return branchTest8(cond, addressTempRegister, mask);
}
Jump jump()
{
return Jump(makeJump());
@ -974,6 +1052,14 @@ public:
m_assembler.mov(dest, ARMThumbImmediate::makeUInt16(0));
}
void setTest8(Condition cond, Address address, Imm32 mask, RegisterID dest)
{
load8(address, dataTempRegister);
test32(dataTempRegister, mask);
m_assembler.it(armV7Condition(cond), false);
m_assembler.mov(dest, ARMThumbImmediate::makeUInt16(1));
m_assembler.mov(dest, ARMThumbImmediate::makeUInt16(0));
}
DataLabel32 moveWithPatch(Imm32 imm, RegisterID dst)
{

View File

@ -171,7 +171,6 @@ public:
{
}
//MacroAssemblerCodeRef(void* code, PassRefPtr<ExecutablePool> executablePool, size_t size)
MacroAssemblerCodeRef(void* code, ExecutablePool* executablePool, size_t size)
: m_code(code)
, m_executablePool(executablePool)
@ -180,7 +179,6 @@ public:
}
MacroAssemblerCodePtr m_code;
//RefPtr<ExecutablePool> m_executablePool;
ExecutablePool* m_executablePool;
size_t m_size;
};

View File

@ -100,7 +100,7 @@ public:
m_assembler.movl_mr(address, dest);
}
void loadDouble(void* address, FPRegisterID dest)
void loadDouble(const void* address, FPRegisterID dest)
{
ASSERT(isSSE2Present());
m_assembler.movsd_mr(address, dest);
@ -185,6 +185,7 @@ public:
bool supportsFloatingPoint() const { return m_isSSE2Present; }
// See comment on MacroAssemblerARMv7::supportsFloatingPointTruncate()
bool supportsFloatingPointTruncate() const { return m_isSSE2Present; }
bool supportsFloatingPointSqrt() const { return m_isSSE2Present; }
private:
const bool m_isSSE2Present;

View File

@ -73,7 +73,7 @@ public:
DoubleGreaterThanOrUnordered = X86Assembler::ConditionB | DoubleConditionBitInvert,
DoubleGreaterThanOrEqualOrUnordered = X86Assembler::ConditionBE | DoubleConditionBitInvert,
DoubleLessThanOrUnordered = X86Assembler::ConditionB,
DoubleLessThanOrEqualOrUnordered = X86Assembler::ConditionBE
DoubleLessThanOrEqualOrUnordered = X86Assembler::ConditionBE,
};
COMPILE_ASSERT(
!((X86Assembler::ConditionE | X86Assembler::ConditionNE | X86Assembler::ConditionA | X86Assembler::ConditionAE | X86Assembler::ConditionB | X86Assembler::ConditionBE) & DoubleConditionBits),
@ -258,6 +258,33 @@ public:
{
m_assembler.sarl_i8r(imm.m_value, dest);
}
void urshift32(RegisterID shift_amount, RegisterID dest)
{
// On x86 we can only shift by ecx; if asked to shift by another register we'll
// need to rejig the shift amount into ecx first, and restore the registers afterwards.
if (shift_amount != X86Registers::ecx) {
swap(shift_amount, X86Registers::ecx);
// E.g. transform "shrl %eax, %eax" -> "xchgl %eax, %ecx; shrl %ecx, %ecx; xchgl %eax, %ecx"
if (dest == shift_amount)
m_assembler.shrl_CLr(X86Registers::ecx);
// E.g. transform "shrl %eax, %ecx" -> "xchgl %eax, %ecx; shrl %ecx, %eax; xchgl %eax, %ecx"
else if (dest == X86Registers::ecx)
m_assembler.shrl_CLr(shift_amount);
// E.g. transform "shrl %eax, %ebx" -> "xchgl %eax, %ecx; shrl %ecx, %ebx; xchgl %eax, %ecx"
else
m_assembler.shrl_CLr(dest);
swap(shift_amount, X86Registers::ecx);
} else
m_assembler.shrl_CLr(dest);
}
void urshift32(Imm32 imm, RegisterID dest)
{
m_assembler.shrl_i8r(imm.m_value, dest);
}
void sub32(RegisterID src, RegisterID dest)
{
@ -310,6 +337,10 @@ public:
m_assembler.xorl_mr(src.offset, src.base, dest);
}
void sqrtDouble(FPRegisterID src, FPRegisterID dst)
{
m_assembler.sqrtsd_rr(src, dst);
}
// Memory access operations:
//
@ -339,15 +370,15 @@ public:
return DataLabel32(this);
}
void load16(Address address, RegisterID dest)
{
m_assembler.movzwl_mr(address.offset, address.base, dest);
}
void load16(BaseIndex address, RegisterID dest)
{
m_assembler.movzwl_mr(address.offset, address.base, address.index, address.scale, dest);
}
void load16(Address address, RegisterID dest)
{
m_assembler.movzwl_mr(address.offset, address.base, dest);
}
DataLabel32 store32WithAddressOffsetPatch(RegisterID src, Address address)
{
@ -637,6 +668,12 @@ public:
// an optional second operand of a mask under which to perform the test.
public:
Jump branch8(Condition cond, Address left, Imm32 right)
{
m_assembler.cmpb_im(right.m_value, left.offset, left.base);
return Jump(m_assembler.jCC(x86Condition(cond)));
}
Jump branch32(Condition cond, RegisterID left, RegisterID right)
{
m_assembler.cmpl_rr(right, left);
@ -742,6 +779,26 @@ public:
m_assembler.testl_i32m(mask.m_value, address.offset, address.base, address.index, address.scale);
return Jump(m_assembler.jCC(x86Condition(cond)));
}
Jump branchTest8(Condition cond, Address address, Imm32 mask = Imm32(-1))
{
ASSERT((cond == Zero) || (cond == NonZero));
if (mask.m_value == -1)
m_assembler.cmpb_im(0, address.offset, address.base);
else
m_assembler.testb_im(mask.m_value, address.offset, address.base);
return Jump(m_assembler.jCC(x86Condition(cond)));
}
Jump branchTest8(Condition cond, BaseIndex address, Imm32 mask = Imm32(-1))
{
ASSERT((cond == Zero) || (cond == NonZero));
if (mask.m_value == -1)
m_assembler.cmpb_im(0, address.offset, address.base, address.index, address.scale);
else
m_assembler.testb_im(mask.m_value, address.offset, address.base, address.index, address.scale);
return Jump(m_assembler.jCC(x86Condition(cond)));
}
Jump jump()
{
@ -861,6 +918,13 @@ public:
return Jump(m_assembler.jCC(x86Condition(cond)));
}
Jump branchNeg32(Condition cond, RegisterID srcDest)
{
ASSERT((cond == Overflow) || (cond == Zero) || (cond == NonZero));
neg32(srcDest);
return Jump(m_assembler.jCC(x86Condition(cond)));
}
Jump branchOr32(Condition cond, RegisterID src, RegisterID dest)
{
ASSERT((cond == Signed) || (cond == Zero) || (cond == NonZero));
@ -956,10 +1020,11 @@ public:
void setTest8(Condition cond, Address address, Imm32 mask, RegisterID dest)
{
if (mask.m_value == -1)
m_assembler.cmpl_im(0, address.offset, address.base);
m_assembler.cmpb_im(0, address.offset, address.base);
else
m_assembler.testl_i32m(mask.m_value, address.offset, address.base);
m_assembler.testb_im(mask.m_value, address.offset, address.base);
m_assembler.setCC_r(x86Condition(cond), dest);
m_assembler.movzbl_rr(dest, dest);
}
void setTest32(Condition cond, Address address, Imm32 mask, RegisterID dest)

View File

@ -91,7 +91,7 @@ public:
}
}
void loadDouble(void* address, FPRegisterID dest)
void loadDouble(const void* address, FPRegisterID dest)
{
move(ImmPtr(address), scratchRegister);
loadDouble(scratchRegister, dest);
@ -490,6 +490,14 @@ public:
return label;
}
using MacroAssemblerX86Common::branchTest8;
Jump branchTest8(Condition cond, ExtendedAddress address, Imm32 mask = Imm32(-1))
{
ImmPtr addr(reinterpret_cast<void*>(address.offset));
MacroAssemblerX86Common::move(addr, scratchRegister);
return MacroAssemblerX86Common::branchTest8(cond, BaseIndex(scratchRegister, address.base, TimesOne), mask);
}
Label loadPtrWithPatchToLEA(Address address, RegisterID dest)
{
Label label(this);
@ -500,6 +508,7 @@ public:
bool supportsFloatingPoint() const { return true; }
// See comment on MacroAssemblerARMv7::supportsFloatingPointTruncate()
bool supportsFloatingPointTruncate() const { return true; }
bool supportsFloatingPointSqrt() const { return true; }
private:
friend class LinkBuffer;

View File

@ -62,7 +62,7 @@ namespace X86Registers {
esp,
ebp,
esi,
edi
edi,
#if WTF_CPU_X86_64
,r8,
@ -72,7 +72,7 @@ namespace X86Registers {
r12,
r13,
r14,
r15
r15,
#endif
} RegisterID;
@ -84,7 +84,7 @@ namespace X86Registers {
xmm4,
xmm5,
xmm6,
xmm7
xmm7,
} XMMRegisterID;
static const char* nameIReg(int szB, RegisterID reg)
@ -148,7 +148,7 @@ public:
ConditionG,
ConditionC = ConditionB,
ConditionNC = ConditionAE
ConditionNC = ConditionAE,
} Condition;
static const char* nameCC(Condition cc)
@ -188,6 +188,7 @@ private:
PRE_SSE_66 = 0x66,
OP_PUSH_Iz = 0x68,
OP_IMUL_GvEvIz = 0x69,
OP_GROUP1_EbIb = 0x80,
OP_GROUP1_EvIz = 0x81,
OP_GROUP1_EvIb = 0x83,
OP_TEST_EvGv = 0x85,
@ -213,7 +214,7 @@ private:
OP_GROUP3_EbIb = 0xF6,
OP_GROUP3_Ev = 0xF7,
OP_GROUP3_EvIz = 0xF7, // OP_GROUP3_Ev has an immediate, when instruction is a test.
OP_GROUP5_Ev = 0xFF
OP_GROUP5_Ev = 0xFF,
} OneByteOpcodeID;
typedef enum {
@ -226,6 +227,7 @@ private:
OP2_MULSD_VsdWsd = 0x59,
OP2_SUBSD_VsdWsd = 0x5C,
OP2_DIVSD_VsdWsd = 0x5E,
OP2_SQRTSD_VsdWsd = 0x51,
OP2_XORPD_VpdWpd = 0x57,
OP2_MOVD_VdEd = 0x6E,
OP2_MOVD_EdVd = 0x7E,
@ -234,7 +236,7 @@ private:
OP2_IMUL_GvEv = 0xAF,
OP2_MOVZX_GvEb = 0xB6,
OP2_MOVZX_GvEw = 0xB7,
OP2_PEXTRW_GdUdIb = 0xC5
OP2_PEXTRW_GdUdIb = 0xC5,
} TwoByteOpcodeID;
TwoByteOpcodeID jccRel32(Condition cond)
@ -259,6 +261,7 @@ private:
GROUP1A_OP_POP = 0,
GROUP2_OP_SHL = 4,
GROUP2_OP_SHR = 5,
GROUP2_OP_SAR = 7,
GROUP3_OP_TEST = 0,
@ -270,7 +273,7 @@ private:
GROUP5_OP_JMPN = 4,
GROUP5_OP_PUSH = 6,
GROUP11_MOV = 0
GROUP11_MOV = 0,
} GroupOpcodeID;
class X86InstructionFormatter;
@ -841,6 +844,21 @@ public:
IPFX "sarl %%cl, %s\n", nameIReg(4, dst));
m_formatter.oneByteOp(OP_GROUP2_EvCL, GROUP2_OP_SAR, dst);
}
void shrl_i8r(int imm, RegisterID dst)
{
if (imm == 1)
m_formatter.oneByteOp(OP_GROUP2_Ev1, GROUP2_OP_SHR, dst);
else {
m_formatter.oneByteOp(OP_GROUP2_EvIb, GROUP2_OP_SHR, dst);
m_formatter.immediate8(imm);
}
}
void shrl_CLr(RegisterID dst)
{
m_formatter.oneByteOp(OP_GROUP2_EvCL, GROUP2_OP_SHR, dst);
}
void shll_i8r(int imm, RegisterID dst)
{
@ -964,7 +982,7 @@ public:
m_formatter.oneByteOp(OP_GROUP1_EvIz, GROUP1_OP_CMP, dst);
m_formatter.immediate32(imm);
}
void cmpl_im(int imm, int offset, RegisterID base)
{
js::JaegerSpew(js::JSpew_Insns,
@ -978,6 +996,18 @@ public:
m_formatter.immediate32(imm);
}
}
void cmpb_im(int imm, int offset, RegisterID base)
{
m_formatter.oneByteOp(OP_GROUP1_EbIb, GROUP1_OP_CMP, base, offset);
m_formatter.immediate8(imm);
}
void cmpb_im(int imm, int offset, RegisterID base, RegisterID index, int scale)
{
m_formatter.oneByteOp(OP_GROUP1_EbIb, GROUP1_OP_CMP, base, index, scale, offset);
m_formatter.immediate8(imm);
}
void cmpl_im(int imm, int offset, RegisterID base, RegisterID index, int scale)
{
@ -1122,6 +1152,18 @@ public:
m_formatter.oneByteOp(OP_GROUP3_EvIz, GROUP3_OP_TEST, base, offset);
m_formatter.immediate32(imm);
}
void testb_im(int imm, int offset, RegisterID base)
{
m_formatter.oneByteOp(OP_GROUP3_EbIb, GROUP3_OP_TEST, base, offset);
m_formatter.immediate8(imm);
}
void testb_im(int imm, int offset, RegisterID base, RegisterID index, int scale)
{
m_formatter.oneByteOp(OP_GROUP3_EbIb, GROUP3_OP_TEST, base, index, scale, offset);
m_formatter.immediate8(imm);
}
void testl_i32m(int imm, int offset, RegisterID base, RegisterID index, int scale)
{
@ -1776,7 +1818,7 @@ public:
}
#if !WTF_CPU_X86_64
void movsd_mr(void* address, XMMRegisterID dst)
void movsd_mr(const void* address, XMMRegisterID dst)
{
FIXME_INSN_PRINTING;
m_formatter.prefix(PRE_SSE_F2);
@ -1855,6 +1897,12 @@ public:
m_formatter.twoByteOp(OP2_XORPD_VpdWpd, (RegisterID)dst, (RegisterID)src);
}
void sqrtsd_rr(XMMRegisterID src, XMMRegisterID dst)
{
m_formatter.prefix(PRE_SSE_F2);
m_formatter.twoByteOp(OP2_SQRTSD_VsdWsd, (RegisterID)dst, (RegisterID)src);
}
// Misc instructions:
void int3()
@ -2178,7 +2226,7 @@ private:
}
#if !WTF_CPU_X86_64
void twoByteOp(TwoByteOpcodeID opcode, int reg, void* address)
void twoByteOp(TwoByteOpcodeID opcode, int reg, const void* address)
{
m_buffer.ensureSpace(maxInstructionSize);
m_buffer.putByteUnchecked(OP_2BYTE_ESCAPE);
@ -2399,7 +2447,7 @@ private:
ModRmMemoryNoDisp,
ModRmMemoryDisp8,
ModRmMemoryDisp32,
ModRmRegister
ModRmRegister,
};
void putModRm(ModRmMode mode, int reg, RegisterID rm)
@ -2490,7 +2538,7 @@ private:
}
#if !WTF_CPU_X86_64
void memoryModRM(int reg, void* address)
void memoryModRM(int reg, const void* address)
{
// noBase + ModRmMemoryNoDisp means noBase + ModRmMemoryDisp32!
putModRm(ModRmMemoryNoDisp, reg, noBase);

View File

@ -42,6 +42,10 @@
#include <e32std.h>
#endif
#if WTF_CPU_MIPS && WTF_PLATFORM_LINUX
#include <sys/cachectl.h>
#endif
#if WTF_PLATFORM_WINCE
// From pkfuncs.h (private header file from the Platform Builder)
#define CACHE_SYNC_ALL 0x07F
@ -88,7 +92,6 @@ namespace JSC {
// These are reference-counted. A new one (from the constructor or create)
// starts with a count of 1.
class ExecutablePool {
//: public RefCounted<ExecutablePool> {
private:
struct Allocation {
char* pages;
@ -116,7 +119,6 @@ public:
//static PassRefPtr<ExecutablePool> create(size_t n)
static ExecutablePool* create(size_t n)
{
//return adoptRef(new ExecutablePool(n));
return new ExecutablePool(n);
}
@ -141,14 +143,11 @@ public:
~ExecutablePool()
{
//AllocationList::const_iterator end = m_pools.end();
Allocation* end = m_pools.end();
//for (AllocationList::const_iterator ptr = m_pools.begin(); ptr != end; ++ptr)
for (Allocation* ptr = m_pools.begin(); ptr != end; ++ptr)
ExecutablePool::systemRelease(*ptr);
}
//size_t available() const { return (m_pools.size() > 1) ? 0 : m_end - m_freePtr; }
size_t available() const { return (m_pools.length() > 1) ? 0 : m_end - m_freePtr; }
private:
@ -163,7 +162,7 @@ private:
char* m_end;
AllocationList m_pools;
};
class ExecutableAllocator {
enum ProtectionSeting { Writable, Executable };
@ -182,7 +181,6 @@ public:
// to the object; i.e., poolForSize increments the count before returning the
// object.
//PassRefPtr<ExecutablePool> poolForSize(size_t n)
ExecutablePool* poolForSize(size_t n)
{
// Try to fit in the existing small allocator
@ -196,20 +194,18 @@ public:
return ExecutablePool::create(n);
// Create a new allocator
//RefPtr<ExecutablePool> pool = ExecutablePool::create(JIT_ALLOCATOR_LARGE_ALLOC_SIZE);
ExecutablePool* pool = ExecutablePool::create(JIT_ALLOCATOR_LARGE_ALLOC_SIZE);
// At this point, local |pool| is the owner.
// At this point, local |pool| is the owner.
// If the new allocator will result in more free space than in
// the current small allocator, then we will use it instead
if ((pool->available() - n) > m_smallAllocationPool->available()) {
m_smallAllocationPool->release();
m_smallAllocationPool->release();
m_smallAllocationPool = pool;
pool->addRef();
}
//return pool.release();
pool->addRef();
}
// Pass ownership to the caller.
// Pass ownership to the caller.
return pool;
}
@ -233,7 +229,33 @@ public:
static void cacheFlush(void*, size_t)
{
}
#elif WTF_CPU_ARM_THUMB2 && WTF_PLATFORM_IPHONE
#elif WTF_CPU_MIPS
static void cacheFlush(void* code, size_t size)
{
#if WTF_COMPILER_GCC && (GCC_VERSION >= 40300)
#if WTF_MIPS_ISA_REV(2) && (GCC_VERSION < 40403)
int lineSize;
asm("rdhwr %0, $1" : "=r" (lineSize));
//
// Modify "start" and "end" to avoid GCC 4.3.0-4.4.2 bug in
// mips_expand_synci_loop that may execute synci one more time.
// "start" points to the fisrt byte of the cache line.
// "end" points to the last byte of the line before the last cache line.
// Because size is always a multiple of 4, this is safe to set
// "end" to the last byte.
//
intptr_t start = reinterpret_cast<intptr_t>(code) & (-lineSize);
intptr_t end = ((reinterpret_cast<intptr_t>(code) + size - 1) & (-lineSize)) - 1;
__builtin___clear_cache(reinterpret_cast<char*>(start), reinterpret_cast<char*>(end));
#else
intptr_t end = reinterpret_cast<intptr_t>(code) + size;
__builtin___clear_cache(reinterpret_cast<char*>(code), reinterpret_cast<char*>(end));
#endif
#else
_flush_cache(reinterpret_cast<char*>(code), size, BCACHE);
#endif
}
#elif WTF_CPU_ARM_THUMB2 && WTF_PLATFORM_IPHONE
static void cacheFlush(void* code, size_t size)
{
sys_dcache_flush(code, size);
@ -260,7 +282,9 @@ public:
{
User::IMB_Range(code, static_cast<char*>(code) + size);
}
#elif WTF_CPU_ARM_TRADITIONAL && WTF_PLATFORM_LINUX
#elif WTF_CPU_ARM_TRADITIONAL && WTF_PLATFORM_LINUX && WTF_COMPILER_RVCT
static __asm void cacheFlush(void* code, size_t size);
#elif WTF_CPU_ARM_TRADITIONAL && WTF_PLATFORM_LINUX && WTF_COMPILER_GCC
static void cacheFlush(void* code, size_t size)
{
asm volatile (
@ -291,7 +315,6 @@ private:
static void reprotectRegion(void*, size_t, ProtectionSeting);
#endif
//RefPtr<ExecutablePool> m_smallAllocationPool;
ExecutablePool* m_smallAllocationPool;
static void intializePageSize();
};

View File

@ -29,9 +29,12 @@
#include <sys/mman.h>
#include <unistd.h>
#include <wtf/VMTags.h>
namespace JSC {
#if !(WTF_PLATFORM_DARWIN && WTF_CPU_X86_64)
void ExecutableAllocator::intializePageSize()
{
ExecutableAllocator::pageSize = getpagesize();
@ -39,7 +42,7 @@ void ExecutableAllocator::intializePageSize()
ExecutablePool::Allocation ExecutablePool::systemAlloc(size_t n)
{
void* allocation = mmap(NULL, n, INITIAL_PROTECTION_FLAGS, MAP_PRIVATE | MAP_ANON, -1/*VM_TAG_FOR_EXECUTABLEALLOCATOR_MEMORY*/, 0);
void* allocation = mmap(NULL, n, INITIAL_PROTECTION_FLAGS, MAP_PRIVATE | MAP_ANON, VM_TAG_FOR_EXECUTABLEALLOCATOR_MEMORY, 0);
if (allocation == MAP_FAILED)
CRASH();
ExecutablePool::Allocation alloc = { reinterpret_cast<char*>(allocation), n };
@ -52,6 +55,8 @@ void ExecutablePool::systemRelease(const ExecutablePool::Allocation& alloc)
ASSERT_UNUSED(result, !result);
}
#endif // !(OS(DARWIN) && CPU(X86_64))
#if WTF_ENABLE_ASSEMBLER_WX_EXCLUSIVE
void ExecutableAllocator::reprotectRegion(void* start, size_t size, ProtectionSeting setting)
{
@ -73,6 +78,21 @@ void ExecutableAllocator::reprotectRegion(void* start, size_t size, ProtectionSe
}
#endif
#if WTF_CPU_ARM_TRADITIONAL && WTF_PLATFORM_LINUX && WTF_COMPILER_RVCT
__asm void ExecutableAllocator::cacheFlush(void* code, size_t size)
{
ARM
push {r7}
add r1, r1, r0
mov r7, #0xf0000
add r7, r7, #0x2
mov r2, #0x0
svc #0x0
pop {r7}
bx lr
}
#endif
}
#endif // HAVE(ASSEMBLER)

View File

@ -0,0 +1,75 @@
/*
* Copyright (C) 2009 Nokia Corporation and/or its subsidiary(-ies)
*
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2 of the License, or (at your option) any later version.
*
* This library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with this library; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor,
* Boston, MA 02110-1301 USA
*
*/
#include "config.h"
#include "ExecutableAllocator.h"
#if ENABLE_ASSEMBLER && WTF_PLATFORM_SYMBIAN
#include <e32hal.h>
#include <e32std.h>
// Set the page size to 256 KB to compensate for the moving memory model limitation
const size_t MOVING_MEM_PAGE_SIZE = 256 * 1024;
namespace JSC {
void ExecutableAllocator::intializePageSize()
{
#if WTF_CPU_ARMV5_OR_LOWER
// The moving memory model (as used in ARMv5 and earlier platforms)
// on Symbian OS limits the number of chunks for each process to 16.
// To mitigate this limitation, increase the page size to
// allocate fewer, larger chunks.
ExecutableAllocator::pageSize = MOVING_MEM_PAGE_SIZE;
#else
TInt page_size;
UserHal::PageSizeInBytes(page_size);
ExecutableAllocator::pageSize = page_size;
#endif
}
ExecutablePool::Allocation ExecutablePool::systemAlloc(size_t n)
{
RChunk* codeChunk = new RChunk();
TInt errorCode = codeChunk->CreateLocalCode(n, n);
char* allocation = reinterpret_cast<char*>(codeChunk->Base());
if (!allocation)
CRASH();
ExecutablePool::Allocation alloc = { allocation, n, codeChunk };
return alloc;
}
void ExecutablePool::systemRelease(const ExecutablePool::Allocation& alloc)
{
alloc.chunk->Close();
delete alloc.chunk;
}
#if ENABLE_ASSEMBLER_WX_EXCLUSIVE
#error "ASSEMBLER_WX_EXCLUSIVE not yet suported on this platform."
#endif
}
#endif // HAVE(ASSEMBLER)

View File

@ -23,7 +23,6 @@
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
//#include "config.h"
#include "ExecutableAllocator.h"

View File

@ -0,0 +1,90 @@
/*
* Copyright (C) 2009 Apple Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
* EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
* CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
* PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
* OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef VMTags_h
#define VMTags_h
// On Mac OS X, the VM subsystem allows tagging memory requested from mmap and vm_map
// in order to aid tools that inspect system memory use.
#if WTF_PLATFORM_DARWIN
#include <mach/vm_statistics.h>
#if !defined(TARGETING_TIGER)
#if defined(VM_MEMORY_TCMALLOC)
#define VM_TAG_FOR_TCMALLOC_MEMORY VM_MAKE_TAG(VM_MEMORY_TCMALLOC)
#else
#define VM_TAG_FOR_TCMALLOC_MEMORY VM_MAKE_TAG(53)
#endif // defined(VM_MEMORY_TCMALLOC)
#if defined(VM_MEMORY_JAVASCRIPT_JIT_EXECUTABLE_ALLOCATOR)
#define VM_TAG_FOR_EXECUTABLEALLOCATOR_MEMORY VM_MAKE_TAG(VM_MEMORY_JAVASCRIPT_JIT_EXECUTABLE_ALLOCATOR)
#else
#define VM_TAG_FOR_EXECUTABLEALLOCATOR_MEMORY VM_MAKE_TAG(64)
#endif // defined(VM_MEMORY_JAVASCRIPT_JIT_EXECUTABLE_ALLOCATOR)
#if defined(VM_MEMORY_JAVASCRIPT_JIT_REGISTER_FILE)
#define VM_TAG_FOR_REGISTERFILE_MEMORY VM_MAKE_TAG(VM_MEMORY_JAVASCRIPT_JIT_REGISTER_FILE)
#else
#define VM_TAG_FOR_REGISTERFILE_MEMORY VM_MAKE_TAG(65)
#endif // defined(VM_MEMORY_JAVASCRIPT_JIT_REGISTER_FILE)
#else // !defined(TARGETING_TIGER)
// mmap on Tiger fails with tags that work on Leopard, so fall
// back to Tiger-compatible tags (that also work on Leopard)
// when targeting Tiger.
#define VM_TAG_FOR_TCMALLOC_MEMORY -1
#define VM_TAG_FOR_EXECUTABLEALLOCATOR_MEMORY -1
#define VM_TAG_FOR_REGISTERFILE_MEMORY -1
#endif // !defined(TARGETING_TIGER)
// Tags for vm_map and vm_allocate work on both Tiger and Leopard.
#if defined(VM_MEMORY_JAVASCRIPT_CORE)
#define VM_TAG_FOR_COLLECTOR_MEMORY VM_MAKE_TAG(VM_MEMORY_JAVASCRIPT_CORE)
#else
#define VM_TAG_FOR_COLLECTOR_MEMORY VM_MAKE_TAG(63)
#endif // defined(VM_MEMORY_JAVASCRIPT_CORE)
#if defined(VM_MEMORY_WEBCORE_PURGEABLE_BUFFERS)
#define VM_TAG_FOR_WEBCORE_PURGEABLE_MEMORY VM_MAKE_TAG(VM_MEMORY_WEBCORE_PURGEABLE_BUFFERS)
#else
#define VM_TAG_FOR_WEBCORE_PURGEABLE_MEMORY VM_MAKE_TAG(69)
#endif // defined(VM_MEMORY_WEBCORE_PURGEABLE_BUFFERS)
#else // OS(DARWIN)
#define VM_TAG_FOR_TCMALLOC_MEMORY -1
#define VM_TAG_FOR_COLLECTOR_MEMORY -1
#define VM_TAG_FOR_EXECUTABLEALLOCATOR_MEMORY -1
#define VM_TAG_FOR_REGISTERFILE_MEMORY -1
#define VM_TAG_FOR_WEBCORE_PURGEABLE_MEMORY -1
#endif // OS(DARWIN)
#endif // VMTags_h