Bug 1028064 - Remove lots of dead code in js/src/assembler/. r=jandem.

--HG--
extra : rebase_source : e5696eb5818871e8214ad5e5e4b54805260e6c35
Nicholas Nethercote 2014-06-26 17:31:45 -07:00
parent 5414ff6193
commit 1d574b1c28
29 changed files with 19 additions and 18915 deletions


@@ -482,9 +482,9 @@ def do_file(filename, inclname, file_kind, f, all_inclnames, included_h_inclname
error(filename, str(include1.linenum) + ':' + str(include2.linenum),
include1.quote() + ' should be included after ' + include2.quote())
-# The #include statements in the files in assembler/ and yarr/ have all manner of implicit
+# The #include statements in the files in assembler/ have all manner of implicit
 # ordering requirements. Boo. Ignore them.
-skip_order_checking = inclname.startswith(('assembler/', 'yarr/'))
+skip_order_checking = inclname.startswith('assembler/')
# Check the extracted #include statements, both individually, and the ordering of
# adjacent pairs that live in the same block.


@@ -1,933 +0,0 @@
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
// A short test program with which to experiment with the assembler.
//satisfies CPU(X86_64)
//#define WTF_CPU_X86_64
// satisfies ENABLE(ASSEMBLER)
#define ENABLE_ASSEMBLER 1
// satisfies ENABLE(JIT)
#define ENABLE_JIT 1
#define USE_SYSTEM_MALLOC 1
// leads to FORCE_SYSTEM_MALLOC in wtf/FastMalloc.cpp
#include "assembler/jit/ExecutableAllocator.h"
#include "assembler/assembler/LinkBuffer.h"
#include "assembler/assembler/CodeLocation.h"
#include "assembler/assembler/RepatchBuffer.h"
#include "assembler/assembler/MacroAssembler.h"
#include <stdio.h>
/////////////////////////////////////////////////////////////////
// Temporary scaffolding for selecting the arch
#undef ARCH_x86
#undef ARCH_amd64
#undef ARCH_arm
#if defined(__APPLE__) && defined(__i386__)
# define ARCH_x86 1
#elif defined(__APPLE__) && defined(__x86_64__)
# define ARCH_amd64 1
#elif defined(__linux__) && defined(__i386__)
# define ARCH_x86 1
#elif defined(__linux__) && defined(__x86_64__)
# define ARCH_amd64 1
#elif defined(__linux__) && defined(__arm__)
# define ARCH_arm 1
#elif defined(_MSC_VER) && defined(_M_IX86)
# define ARCH_x86 1
#endif
/////////////////////////////////////////////////////////////////
// just somewhere convenient to put a breakpoint, before
// running gdb
#if WTF_COMPILER_GCC
__attribute__((noinline))
#endif
void pre_run ( void ) { }
/////////////////////////////////////////////////////////////////
//// test1 (simple straight line code)
#if WTF_COMPILER_GCC
void test1 ( void )
{
printf("\n------------ Test 1 (straight line code) ------------\n\n" );
// Create new assembler
JSC::MacroAssembler* am = new JSC::MacroAssembler();
#if defined(ARCH_amd64)
JSC::X86Registers::RegisterID areg = JSC::X86Registers::r15;
// dump some instructions into it
// xor %r15,%r15
// add $0x7b,%r15
// add $0x141,%r15
// retq
am->xorPtr(areg,areg);
am->addPtr(JSC::MacroAssembler::Imm32(123), areg);
am->addPtr(JSC::MacroAssembler::Imm32(321), areg);
am->ret();
#endif
#if defined(ARCH_x86)
JSC::X86Registers::RegisterID areg = JSC::X86Registers::edi;
// dump some instructions into it
// xor %edi,%edi
// add $0x7b,%edi
// add $0x141,%edi
// ret
am->xorPtr(areg,areg);
am->addPtr(JSC::MacroAssembler::Imm32(123), areg);
am->addPtr(JSC::MacroAssembler::Imm32(321), areg);
am->ret();
#endif
#if defined(ARCH_arm)
JSC::ARMRegisters::RegisterID areg = JSC::ARMRegisters::r8;
// eors r8, r8, r8
// adds r8, r8, #123 ; 0x7b
// mov r3, #256 ; 0x100
// orr r3, r3, #65 ; 0x41
// adds r8, r8, r3
// mov pc, lr
am->xorPtr(areg,areg);
am->addPtr(JSC::MacroAssembler::Imm32(123), areg);
am->addPtr(JSC::MacroAssembler::Imm32(321), areg);
am->ret();
#endif
// prepare a link buffer, into which we can copy the completed insns
JSC::ExecutableAllocator* eal = new JSC::ExecutableAllocator();
// intermediate step .. get the pool suited for the size of code in 'am'
//WTF::PassRefPtr<JSC::ExecutablePool> ep = eal->poolForSize( am->size() );
JSC::ExecutablePool* ep = eal->poolForSize( am->size() );
// constructor for LinkBuffer asks ep to allocate r-x memory,
// then copies it there.
JSC::LinkBuffer patchBuffer(am, ep, JSC::METHOD_CODE);
// finalize
JSC::MacroAssemblerCodeRef cr = patchBuffer.finalizeCode();
// cr now holds a pointer to the final runnable code.
void* entry = cr.m_code.executableAddress();
printf("disas %p %p\n",
entry, (char*)entry + cr.m_size);
pre_run();
unsigned long result = 0x55555555;
#if defined(ARCH_amd64)
// call the generated piece of code. It puts its result in r15.
__asm__ __volatile__(
"callq *%1" "\n\t"
"movq %%r15, %0" "\n"
:/*out*/ "=r"(result)
:/*in*/ "r"(entry)
:/*trash*/ "r15","cc"
);
#endif
#if defined(ARCH_x86)
// call the generated piece of code. It puts its result in edi.
__asm__ __volatile__(
"calll *%1" "\n\t"
"movl %%edi, %0" "\n"
:/*out*/ "=r"(result)
:/*in*/ "r"(entry)
:/*trash*/ "edi","cc"
);
#endif
#if defined(ARCH_arm)
// call the generated piece of code. It puts its result in r8.
__asm__ __volatile__(
"blx %1" "\n\t"
"mov %0, %%r8" "\n"
:/*out*/ "=r"(result)
:/*in*/ "r"(entry)
:/*trash*/ "r8","cc"
);
#endif
printf("\n");
printf("value computed is %lu (expected 444)\n", result);
printf("\n");
delete eal;
delete am;
}
#endif /* WTF_COMPILER_GCC */
/////////////////////////////////////////////////////////////////
//// test2 (a simple counting-down loop)
#if WTF_COMPILER_GCC
void test2 ( void )
{
printf("\n------------ Test 2 (mini loop) ------------\n\n" );
// Create new assembler
JSC::MacroAssembler* am = new JSC::MacroAssembler();
#if defined(ARCH_amd64)
JSC::X86Registers::RegisterID areg = JSC::X86Registers::r15;
// xor %r15,%r15
// add $0x7b,%r15
// add $0x141,%r15
// sub $0x1,%r15
// mov $0x0,%r11
// cmp %r11,%r15
// jne 0x7ff6d3e6a00e
// retq
// so r15 always winds up being zero
am->xorPtr(areg,areg);
am->addPtr(JSC::MacroAssembler::Imm32(123), areg);
am->addPtr(JSC::MacroAssembler::Imm32(321), areg);
JSC::MacroAssembler::Label loopHeadLabel(am);
am->subPtr(JSC::MacroAssembler::Imm32(1), areg);
JSC::MacroAssembler::Jump j
= am->branchPtr(JSC::MacroAssembler::NotEqual,
areg, JSC::MacroAssembler::ImmPtr(0));
j.linkTo(loopHeadLabel, am);
am->ret();
#endif
#if defined(ARCH_x86)
JSC::X86Registers::RegisterID areg = JSC::X86Registers::edi;
// xor %edi,%edi
// add $0x7b,%edi
// add $0x141,%edi
// sub $0x1,%edi
// test %edi,%edi
// jne 0xf7f9700b
// ret
// so edi always winds up being zero
am->xorPtr(areg,areg);
am->addPtr(JSC::MacroAssembler::Imm32(123), areg);
am->addPtr(JSC::MacroAssembler::Imm32(321), areg);
JSC::MacroAssembler::Label loopHeadLabel(am);
am->subPtr(JSC::MacroAssembler::Imm32(1), areg);
JSC::MacroAssembler::Jump j
= am->branchPtr(JSC::MacroAssembler::NotEqual,
areg, JSC::MacroAssembler::ImmPtr(0));
j.linkTo(loopHeadLabel, am);
am->ret();
#endif
#if defined(ARCH_arm)
JSC::ARMRegisters::RegisterID areg = JSC::ARMRegisters::r8;
// eors r8, r8, r8
// adds r8, r8, #123 ; 0x7b
// mov r3, #256 ; 0x100
// orr r3, r3, #65 ; 0x41
// adds r8, r8, r3
// subs r8, r8, #1 ; 0x1
// ldr r3, [pc, #8] ; 0x40026028
// cmp r8, r3
// bne 0x40026014
// mov pc, lr
// andeq r0, r0, r0 // DATA (0)
// andeq r0, r0, r4, lsl r0 // DATA (?? what's this for?)
// so r8 always winds up being zero
am->xorPtr(areg,areg);
am->addPtr(JSC::MacroAssembler::Imm32(123), areg);
am->addPtr(JSC::MacroAssembler::Imm32(321), areg);
JSC::MacroAssembler::Label loopHeadLabel(am);
am->subPtr(JSC::MacroAssembler::Imm32(1), areg);
JSC::MacroAssembler::Jump j
= am->branchPtr(JSC::MacroAssembler::NotEqual,
areg, JSC::MacroAssembler::ImmPtr(0));
j.linkTo(loopHeadLabel, am);
am->ret();
#endif
// prepare a link buffer, into which we can copy the completed insns
JSC::ExecutableAllocator* eal = new JSC::ExecutableAllocator();
// intermediate step .. get the pool suited for the size of code in 'am'
//WTF::PassRefPtr<JSC::ExecutablePool> ep = eal->poolForSize( am->size() );
JSC::ExecutablePool* ep = eal->poolForSize( am->size() );
// constructor for LinkBuffer asks ep to allocate r-x memory,
// then copies it there.
JSC::LinkBuffer patchBuffer(am, ep, JSC::METHOD_CODE);
// finalize
JSC::MacroAssemblerCodeRef cr = patchBuffer.finalizeCode();
// cr now holds a pointer to the final runnable code.
void* entry = cr.m_code.executableAddress();
printf("disas %p %p\n",
entry, (char*)entry + cr.m_size);
pre_run();
unsigned long result = 0x55555555;
#if defined(ARCH_amd64)
// call the generated piece of code. It puts its result in r15.
__asm__ __volatile__(
"callq *%1" "\n\t"
"movq %%r15, %0" "\n"
:/*out*/ "=r"(result)
:/*in*/ "r"(entry)
:/*trash*/ "r15","cc"
);
#endif
#if defined(ARCH_x86)
// call the generated piece of code. It puts its result in edi.
__asm__ __volatile__(
"calll *%1" "\n\t"
"movl %%edi, %0" "\n"
:/*out*/ "=r"(result)
:/*in*/ "r"(entry)
:/*trash*/ "edi","cc"
);
#endif
#if defined(ARCH_arm)
// call the generated piece of code. It puts its result in r8.
__asm__ __volatile__(
"blx %1" "\n\t"
"mov %0, %%r8" "\n"
:/*out*/ "=r"(result)
:/*in*/ "r"(entry)
:/*trash*/ "r8","cc"
);
#endif
printf("\n");
printf("value computed is %lu (expected 0)\n", result);
printf("\n");
delete eal;
delete am;
}
#endif /* WTF_COMPILER_GCC */
/////////////////////////////////////////////////////////////////
//// test3 (if-then-else)
#if WTF_COMPILER_GCC
void test3 ( void )
{
printf("\n------------ Test 3 (if-then-else) ------------\n\n" );
// Create new assembler
JSC::MacroAssembler* am = new JSC::MacroAssembler();
#if defined(ARCH_amd64)
JSC::X86Registers::RegisterID areg = JSC::X86Registers::r15;
// mov $0x64,%r15d
// mov $0x0,%r11
// cmp %r11,%r15
// jne 0x7ff6d3e6a024
// mov $0x40,%r15d
// jmpq 0x7ff6d3e6a02a
// mov $0x4,%r15d
// retq
// so r15 ends up being 4
// put a value in reg
am->move(JSC::MacroAssembler::Imm32(100), areg);
// test, and conditionally jump to 'else' branch
JSC::MacroAssembler::Jump jToElse
= am->branchPtr(JSC::MacroAssembler::NotEqual,
areg, JSC::MacroAssembler::ImmPtr(0));
// 'then' branch
am->move(JSC::MacroAssembler::Imm32(64), areg);
JSC::MacroAssembler::Jump jToAfter
= am->jump();
// 'else' branch
JSC::MacroAssembler::Label elseLbl(am);
am->move(JSC::MacroAssembler::Imm32(4), areg);
// after
JSC::MacroAssembler::Label afterLbl(am);
am->ret();
#endif
#if defined(ARCH_x86)
JSC::X86Registers::RegisterID areg = JSC::X86Registers::edi;
// mov $0x64,%edi
// test %edi,%edi
// jne 0xf7f22017
// mov $0x40,%edi
// jmp 0xf7f2201c
// mov $0x4,%edi
// ret
// so edi ends up being 4
// put a value in reg
am->move(JSC::MacroAssembler::Imm32(100), areg);
// test, and conditionally jump to 'else' branch
JSC::MacroAssembler::Jump jToElse
= am->branchPtr(JSC::MacroAssembler::NotEqual,
areg, JSC::MacroAssembler::ImmPtr(0));
// 'then' branch
am->move(JSC::MacroAssembler::Imm32(64), areg);
JSC::MacroAssembler::Jump jToAfter
= am->jump();
// 'else' branch
JSC::MacroAssembler::Label elseLbl(am);
am->move(JSC::MacroAssembler::Imm32(4), areg);
// after
JSC::MacroAssembler::Label afterLbl(am);
am->ret();
#endif
#if defined(ARCH_arm)
JSC::ARMRegisters::RegisterID areg = JSC::ARMRegisters::r8;
// mov r8, #100 ; 0x64
// ldr r3, [pc, #20] ; 0x40026020
// cmp r8, r3
// bne 0x40026018
// mov r8, #64 ; 0x40
// b 0x4002601c
// mov r8, #4 ; 0x4
// mov pc, lr
// andeq r0, r0, r0 // DATA
// andeq r0, r0, r8, lsl r0 // DATA
// andeq r0, r0, r12, lsl r0 // DATA
// ldr r3, [r3, -r3] // DATA
// so r8 ends up being 4
// put a value in reg
am->move(JSC::MacroAssembler::Imm32(100), areg);
// test, and conditionally jump to 'else' branch
JSC::MacroAssembler::Jump jToElse
= am->branchPtr(JSC::MacroAssembler::NotEqual,
areg, JSC::MacroAssembler::ImmPtr(0));
// 'then' branch
am->move(JSC::MacroAssembler::Imm32(64), areg);
JSC::MacroAssembler::Jump jToAfter
= am->jump();
// 'else' branch
JSC::MacroAssembler::Label elseLbl(am);
am->move(JSC::MacroAssembler::Imm32(4), areg);
// after
JSC::MacroAssembler::Label afterLbl(am);
am->ret();
#endif
// set branch targets appropriately
jToElse.linkTo(elseLbl, am);
jToAfter.linkTo(afterLbl, am);
// prepare a link buffer, into which we can copy the completed insns
JSC::ExecutableAllocator* eal = new JSC::ExecutableAllocator();
// intermediate step .. get the pool suited for the size of code in 'am'
//WTF::PassRefPtr<JSC::ExecutablePool> ep = eal->poolForSize( am->size() );
JSC::ExecutablePool* ep = eal->poolForSize( am->size() );
// constructor for LinkBuffer asks ep to allocate r-x memory,
// then copies it there.
JSC::LinkBuffer patchBuffer(am, ep, JSC::METHOD_CODE);
// finalize
JSC::MacroAssemblerCodeRef cr = patchBuffer.finalizeCode();
// cr now holds a pointer to the final runnable code.
void* entry = cr.m_code.executableAddress();
printf("disas %p %p\n",
entry, (char*)entry + cr.m_size);
pre_run();
unsigned long result = 0x55555555;
#if defined(ARCH_amd64)
// call the generated piece of code. It puts its result in r15.
__asm__ __volatile__(
"callq *%1" "\n\t"
"movq %%r15, %0" "\n"
:/*out*/ "=r"(result)
:/*in*/ "r"(entry)
:/*trash*/ "r15","cc"
);
#endif
#if defined(ARCH_x86)
// call the generated piece of code. It puts its result in edi.
__asm__ __volatile__(
"calll *%1" "\n\t"
"movl %%edi, %0" "\n"
:/*out*/ "=r"(result)
:/*in*/ "r"(entry)
:/*trash*/ "edi","cc"
);
#endif
#if defined(ARCH_arm)
// call the generated piece of code. It puts its result in r8.
__asm__ __volatile__(
"blx %1" "\n\t"
"mov %0, %%r8" "\n"
:/*out*/ "=r"(result)
:/*in*/ "r"(entry)
:/*trash*/ "r8","cc"
);
#endif
printf("\n");
printf("value computed is %lu (expected 4)\n", result);
printf("\n");
delete eal;
delete am;
}
#endif /* WTF_COMPILER_GCC */
/////////////////////////////////////////////////////////////////
//// test4 (callable function)
void test4 ( void )
{
printf("\n------------ Test 4 (callable fn) ------------\n\n" );
// Create new assembler
JSC::MacroAssembler* am = new JSC::MacroAssembler();
#if defined(ARCH_amd64)
// ADD FN PROLOGUE/EPILOGUE so as to make a mini-function
// push %rbp
// mov %rsp,%rbp
// push %rbx
// push %r12
// push %r13
// push %r14
// push %r15
// xor %rax,%rax
// add $0x7b,%rax
// add $0x141,%rax
// pop %r15
// pop %r14
// pop %r13
// pop %r12
// pop %rbx
// mov %rbp,%rsp
// pop %rbp
// retq
// callable as a normal function, returns 444
JSC::X86Registers::RegisterID rreg = JSC::X86Registers::eax;
am->push(JSC::X86Registers::ebp);
am->move(JSC::X86Registers::esp, JSC::X86Registers::ebp);
am->push(JSC::X86Registers::ebx);
am->push(JSC::X86Registers::r12);
am->push(JSC::X86Registers::r13);
am->push(JSC::X86Registers::r14);
am->push(JSC::X86Registers::r15);
am->xorPtr(rreg,rreg);
am->addPtr(JSC::MacroAssembler::Imm32(123), rreg);
am->addPtr(JSC::MacroAssembler::Imm32(321), rreg);
am->pop(JSC::X86Registers::r15);
am->pop(JSC::X86Registers::r14);
am->pop(JSC::X86Registers::r13);
am->pop(JSC::X86Registers::r12);
am->pop(JSC::X86Registers::ebx);
am->move(JSC::X86Registers::ebp, JSC::X86Registers::esp);
am->pop(JSC::X86Registers::ebp);
am->ret();
#endif
#if defined(ARCH_x86)
// ADD FN PROLOGUE/EPILOGUE so as to make a mini-function
// push %ebp
// mov %esp,%ebp
// push %ebx
// push %esi
// push %edi
// xor %eax,%eax
// add $0x7b,%eax
// add $0x141,%eax
// pop %edi
// pop %esi
// pop %ebx
// mov %ebp,%esp
// pop %ebp
// ret
// callable as a normal function, returns 444
JSC::X86Registers::RegisterID rreg = JSC::X86Registers::eax;
am->push(JSC::X86Registers::ebp);
am->move(JSC::X86Registers::esp, JSC::X86Registers::ebp);
am->push(JSC::X86Registers::ebx);
am->push(JSC::X86Registers::esi);
am->push(JSC::X86Registers::edi);
am->xorPtr(rreg,rreg);
am->addPtr(JSC::MacroAssembler::Imm32(123), rreg);
am->addPtr(JSC::MacroAssembler::Imm32(321), rreg);
am->pop(JSC::X86Registers::edi);
am->pop(JSC::X86Registers::esi);
am->pop(JSC::X86Registers::ebx);
am->move(JSC::X86Registers::ebp, JSC::X86Registers::esp);
am->pop(JSC::X86Registers::ebp);
am->ret();
#endif
#if defined(ARCH_arm)
// ADD FN PROLOGUE/EPILOGUE so as to make a mini-function
// push {r4} ; (str r4, [sp, #-4]!)
// push {r5} ; (str r5, [sp, #-4]!)
// push {r6} ; (str r6, [sp, #-4]!)
// push {r7} ; (str r7, [sp, #-4]!)
// push {r8} ; (str r8, [sp, #-4]!)
// push {r9} ; (str r9, [sp, #-4]!)
// push {r10} ; (str r10, [sp, #-4]!)
// push {r11} ; (str r11, [sp, #-4]!)
// eors r0, r0, r0
// adds r0, r0, #123 ; 0x7b
// mov r3, #256 ; 0x100
// orr r3, r3, #65 ; 0x41
// adds r0, r0, r3
// pop {r11} ; (ldr r11, [sp], #4)
// pop {r10} ; (ldr r10, [sp], #4)
// pop {r9} ; (ldr r9, [sp], #4)
// pop {r8} ; (ldr r8, [sp], #4)
// pop {r7} ; (ldr r7, [sp], #4)
// pop {r6} ; (ldr r6, [sp], #4)
// pop {r5} ; (ldr r5, [sp], #4)
// pop {r4} ; (ldr r4, [sp], #4)
// mov pc, lr
// callable as a normal function, returns 444
JSC::ARMRegisters::RegisterID rreg = JSC::ARMRegisters::r0;
am->push(JSC::ARMRegisters::r4);
am->push(JSC::ARMRegisters::r5);
am->push(JSC::ARMRegisters::r6);
am->push(JSC::ARMRegisters::r7);
am->push(JSC::ARMRegisters::r8);
am->push(JSC::ARMRegisters::r9);
am->push(JSC::ARMRegisters::r10);
am->push(JSC::ARMRegisters::r11);
am->xorPtr(rreg,rreg);
am->addPtr(JSC::MacroAssembler::Imm32(123), rreg);
am->addPtr(JSC::MacroAssembler::Imm32(321), rreg);
am->pop(JSC::ARMRegisters::r11);
am->pop(JSC::ARMRegisters::r10);
am->pop(JSC::ARMRegisters::r9);
am->pop(JSC::ARMRegisters::r8);
am->pop(JSC::ARMRegisters::r7);
am->pop(JSC::ARMRegisters::r6);
am->pop(JSC::ARMRegisters::r5);
am->pop(JSC::ARMRegisters::r4);
am->ret();
#endif
// prepare a link buffer, into which we can copy the completed insns
JSC::ExecutableAllocator* eal = new JSC::ExecutableAllocator();
// intermediate step .. get the pool suited for the size of code in 'am'
//WTF::PassRefPtr<JSC::ExecutablePool> ep = eal->poolForSize( am->size() );
JSC::ExecutablePool* ep = eal->poolForSize( am->size() );
// constructor for LinkBuffer asks ep to allocate r-x memory,
// then copies it there.
JSC::LinkBuffer patchBuffer(am, ep, JSC::METHOD_CODE);
// now fix up any branches/calls
//JSC::FunctionPtr target = JSC::FunctionPtr::FunctionPtr( &cube );
// finalize
JSC::MacroAssemblerCodeRef cr = patchBuffer.finalizeCode();
// cr now holds a pointer to the final runnable code.
void* entry = cr.m_code.executableAddress();
printf("disas %p %p\n",
entry, (char*)entry + cr.m_size);
pre_run();
// call the function
unsigned long (*fn)(void) = (unsigned long (*)())entry;
unsigned long result = fn();
printf("\n");
printf("value computed is %lu (expected 444)\n", result);
printf("\n");
delete eal;
delete am;
}
/////////////////////////////////////////////////////////////////
//// test5 (call in, out, repatch)
// a function which we will call from the JIT generated code
unsigned long cube ( unsigned long x ) { return x * x * x; }
unsigned long square ( unsigned long x ) { return x * x; }
void test5 ( void )
{
printf("\n--------- Test 5 (call in, out, repatch) ---------\n\n" );
// Create new assembler
JSC::MacroAssembler* am = new JSC::MacroAssembler();
JSC::MacroAssembler::Call cl;
ptrdiff_t offset_of_call_insn;
#if defined(ARCH_amd64)
// ADD FN PROLOGUE/EPILOGUE so as to make a mini-function
// and then call a non-JIT-generated helper from within
// this code
// push %rbp
// mov %rsp,%rbp
// push %rbx
// push %r12
// push %r13
// push %r14
// push %r15
// mov $0x9,%edi
// mov $0x40187e,%r11
// callq *%r11
// pop %r15
// pop %r14
// pop %r13
// pop %r12
// pop %rbx
// mov %rbp,%rsp
// pop %rbp
// retq
JSC::MacroAssembler::Label startOfFnLbl(am);
am->push(JSC::X86Registers::ebp);
am->move(JSC::X86Registers::esp, JSC::X86Registers::ebp);
am->push(JSC::X86Registers::ebx);
am->push(JSC::X86Registers::r12);
am->push(JSC::X86Registers::r13);
am->push(JSC::X86Registers::r14);
am->push(JSC::X86Registers::r15);
// let's compute cube(9). Move $9 to the first arg reg.
am->move(JSC::MacroAssembler::Imm32(9), JSC::X86Registers::edi);
cl = am->JSC::MacroAssembler::call();
// result is now in %rax. Leave it there and just return.
am->pop(JSC::X86Registers::r15);
am->pop(JSC::X86Registers::r14);
am->pop(JSC::X86Registers::r13);
am->pop(JSC::X86Registers::r12);
am->pop(JSC::X86Registers::ebx);
am->move(JSC::X86Registers::ebp, JSC::X86Registers::esp);
am->pop(JSC::X86Registers::ebp);
am->ret();
offset_of_call_insn
= am->JSC::MacroAssembler::differenceBetween(startOfFnLbl, cl);
if (0) printf("XXXXXXXX offset = %lu\n", offset_of_call_insn);
#endif
#if defined(ARCH_x86)
// ADD FN PROLOGUE/EPILOGUE so as to make a mini-function
// and then call a non-JIT-generated helper from within
// this code
// push %ebp
// mov %esp,%ebp
// push %ebx
// push %esi
// push %edi
// push $0x9
// call 0x80490e9 <_Z4cubem>
// add $0x4,%esp
// pop %edi
// pop %esi
// pop %ebx
// mov %ebp,%esp
// pop %ebp
// ret
JSC::MacroAssembler::Label startOfFnLbl(am);
am->push(JSC::X86Registers::ebp);
am->move(JSC::X86Registers::esp, JSC::X86Registers::ebp);
am->push(JSC::X86Registers::ebx);
am->push(JSC::X86Registers::esi);
am->push(JSC::X86Registers::edi);
// let's compute cube(9). Push $9 on the stack.
am->push(JSC::MacroAssembler::Imm32(9));
cl = am->JSC::MacroAssembler::call();
am->addPtr(JSC::MacroAssembler::Imm32(4), JSC::X86Registers::esp);
// result is now in %eax. Leave it there and just return.
am->pop(JSC::X86Registers::edi);
am->pop(JSC::X86Registers::esi);
am->pop(JSC::X86Registers::ebx);
am->move(JSC::X86Registers::ebp, JSC::X86Registers::esp);
am->pop(JSC::X86Registers::ebp);
am->ret();
offset_of_call_insn
= am->JSC::MacroAssembler::differenceBetween(startOfFnLbl, cl);
if (0) printf("XXXXXXXX offset = %lu\n",
(unsigned long)offset_of_call_insn);
#endif
#if defined(ARCH_arm)
// ADD FN PROLOGUE/EPILOGUE so as to make a mini-function
// push {r4} ; (str r4, [sp, #-4]!)
// push {r5} ; (str r5, [sp, #-4]!)
// push {r6} ; (str r6, [sp, #-4]!)
// push {r7} ; (str r7, [sp, #-4]!)
// push {r8} ; (str r8, [sp, #-4]!)
// push {r9} ; (str r9, [sp, #-4]!)
// push {r10} ; (str r10, [sp, #-4]!)
// push {r11} ; (str r11, [sp, #-4]!)
// eors r0, r0, r0
// adds r0, r0, #123 ; 0x7b
// mov r3, #256 ; 0x100
// orr r3, r3, #65 ; 0x41
// adds r0, r0, r3
// pop {r11} ; (ldr r11, [sp], #4)
// pop {r10} ; (ldr r10, [sp], #4)
// pop {r9} ; (ldr r9, [sp], #4)
// pop {r8} ; (ldr r8, [sp], #4)
// pop {r7} ; (ldr r7, [sp], #4)
// pop {r6} ; (ldr r6, [sp], #4)
// pop {r5} ; (ldr r5, [sp], #4)
// pop {r4} ; (ldr r4, [sp], #4)
// mov pc, lr
// callable as a normal function, returns 444
JSC::MacroAssembler::Label startOfFnLbl(am);
am->push(JSC::ARMRegisters::r4);
am->push(JSC::ARMRegisters::r5);
am->push(JSC::ARMRegisters::r6);
am->push(JSC::ARMRegisters::r7);
am->push(JSC::ARMRegisters::r8);
am->push(JSC::ARMRegisters::r9);
am->push(JSC::ARMRegisters::r10);
am->push(JSC::ARMRegisters::r11);
am->push(JSC::ARMRegisters::lr);
// let's compute cube(9). Get $9 into r0.
am->move(JSC::MacroAssembler::Imm32(9), JSC::ARMRegisters::r0);
cl = am->JSC::MacroAssembler::call();
// result is now in r0. Leave it there and just return.
am->pop(JSC::ARMRegisters::lr);
am->pop(JSC::ARMRegisters::r11);
am->pop(JSC::ARMRegisters::r10);
am->pop(JSC::ARMRegisters::r9);
am->pop(JSC::ARMRegisters::r8);
am->pop(JSC::ARMRegisters::r7);
am->pop(JSC::ARMRegisters::r6);
am->pop(JSC::ARMRegisters::r5);
am->pop(JSC::ARMRegisters::r4);
am->ret();
offset_of_call_insn
= am->JSC::MacroAssembler::differenceBetween(startOfFnLbl, cl);
if (0) printf("XXXXXXXX offset = %lu\n",
(unsigned long)offset_of_call_insn);
#endif
// prepare a link buffer, into which we can copy the completed insns
JSC::ExecutableAllocator* eal = new JSC::ExecutableAllocator();
// intermediate step .. get the pool suited for the size of code in 'am'
//WTF::PassRefPtr<JSC::ExecutablePool> ep = eal->poolForSize( am->size() );
JSC::ExecutablePool* ep = eal->poolForSize( am->size() );
// constructor for LinkBuffer asks ep to allocate r-x memory,
// then copies it there.
JSC::LinkBuffer patchBuffer(am, ep, JSC::METHOD_CODE);
// now fix up any branches/calls
JSC::FunctionPtr target = JSC::FunctionPtr::FunctionPtr( &cube );
patchBuffer.link(cl, target);
JSC::MacroAssemblerCodeRef cr = patchBuffer.finalizeCode();
// cr now holds a pointer to the final runnable code.
void* entry = cr.m_code.executableAddress();
printf("disas %p %p\n",
entry, (char*)entry + cr.m_size);
pre_run();
printf("\n");
unsigned long (*fn)() = (unsigned long(*)())entry;
unsigned long result = fn();
printf("value computed is %lu (expected 729)\n", result);
printf("\n");
// now repatch the call in the JITted code to go elsewhere
JSC::JITCode jc = JSC::JITCode::JITCode(entry, cr.m_size);
JSC::CodeBlock cb = JSC::CodeBlock::CodeBlock(jc);
// the address of the call insn, that we want to prod
JSC::MacroAssemblerCodePtr cp
= JSC::MacroAssemblerCodePtr( ((char*)entry) + offset_of_call_insn );
JSC::RepatchBuffer repatchBuffer(&cb);
repatchBuffer.relink( JSC::CodeLocationCall(cp),
JSC::FunctionPtr::FunctionPtr( &square ));
result = fn();
printf("value computed is %lu (expected 81)\n", result);
printf("\n\n");
delete eal;
delete am;
}
/////////////////////////////////////////////////////////////////
int main ( void )
{
#if WTF_COMPILER_GCC
test1();
test2();
test3();
#endif
test4();
test5();
return 0;
}
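
The tests above all end with the same finalize-and-run sequence. A minimal sketch of that pattern follows, assuming the same JSC API exercised in this file; the JitFn typedef and helper name are illustrative additions, not part of the original source.

typedef unsigned long (*JitFn)(void);

// Copy the assembled instructions into executable (r-x) memory and return a
// callable entry point, mirroring the eal/ep/LinkBuffer steps used above.
static JitFn finalizeToFunction(JSC::MacroAssembler* masm, JSC::ExecutableAllocator* allocator)
{
    JSC::ExecutablePool* pool = allocator->poolForSize(masm->size());
    JSC::LinkBuffer linkBuffer(masm, pool, JSC::METHOD_CODE);
    JSC::MacroAssemblerCodeRef code = linkBuffer.finalizeCode();
    return reinterpret_cast<JitFn>(code.m_code.executableAddress());
}

// Only code emitted with a full prologue/epilogue (as in test4) is safe to call
// through a plain function pointer like this; test1-test3 instead use inline
// asm wrappers because their code clobbers callee-saved registers.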


@@ -1,669 +0,0 @@
/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
* vim: set ts=8 sts=4 et sw=4 tw=99:
*
* ***** BEGIN LICENSE BLOCK *****
* Copyright (C) 2009 University of Szeged
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY UNIVERSITY OF SZEGED ``AS IS'' AND ANY
* EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL UNIVERSITY OF SZEGED OR
* CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
* PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
* OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
* ***** END LICENSE BLOCK ***** */
#include "assembler/wtf/Platform.h"
#if ENABLE_ASSEMBLER && WTF_CPU_ARM_TRADITIONAL
#include "assembler/assembler/ARMAssembler.h"
namespace JSC {
// Patching helpers
void ARMAssembler::patchConstantPoolLoad(void* loadAddr, void* constPoolAddr)
{
ARMWord *ldr = reinterpret_cast<ARMWord*>(loadAddr);
ARMWord diff = reinterpret_cast<ARMWord*>(constPoolAddr) - ldr;
ARMWord index = (*ldr & 0xfff) >> 1;
ASSERT(diff >= 1);
if (diff >= 2 || index > 0) {
diff = (diff + index - 2) * sizeof(ARMWord);
ASSERT(diff <= 0xfff);
*ldr = (*ldr & ~0xfff) | diff;
} else
*ldr = (*ldr & ~(0xfff | ARMAssembler::DT_UP)) | sizeof(ARMWord);
}
// Handle immediates
ARMWord ARMAssembler::getOp2(ARMWord imm)
{
int rol;
if (imm <= 0xff)
return OP2_IMM | imm;
if ((imm & 0xff000000) == 0) {
imm <<= 8;
rol = 8;
}
else {
imm = (imm << 24) | (imm >> 8);
rol = 0;
}
if ((imm & 0xff000000) == 0) {
imm <<= 8;
rol += 4;
}
if ((imm & 0xf0000000) == 0) {
imm <<= 4;
rol += 2;
}
if ((imm & 0xc0000000) == 0) {
imm <<= 2;
rol += 1;
}
if ((imm & 0x00ffffff) == 0)
return OP2_IMM | (imm >> 24) | (rol << 8);
return INVALID_IMM;
}
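// Illustrative sketch, not part of the original file: getOp2() succeeds when
// 'imm' fits the ARM operand-2 immediate format, i.e. an 8-bit value rotated
// right by an even amount. Bits [7:0] of the returned field hold the 8-bit
// payload and bits [11:8] hold half the rotation, so the encoded constant can
// be recovered as follows (decodeOp2Imm is a hypothetical helper).
static inline ARMWord decodeOp2Imm(ARMWord op2)
{
    ARMWord imm8 = op2 & 0xff;               // 8-bit payload
    ARMWord rot  = ((op2 >> 8) & 0xf) << 1;  // rotation amount, always even
    return rot ? ((imm8 >> rot) | (imm8 << (32 - rot))) : imm8;
}
// For example, getOp2(0x3fc) yields payload 0xff with rotation field 0xf
// (rotate right by 30), which decodes back to 0x3fc.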
ARMWord ARMAssembler::getOp2RegScale(RegisterID reg, ARMWord scale)
{
// The field that this method constructs looks like this:
// [11:7] Shift immediate.
// [ 6:5] Shift type. Only LSL ("00") is used here.
// [ 4:4] 0.
// [ 3:0] The register to shift.
ARMWord shift; // Shift field. This is log2(scale).
ARMWord lz; // Leading zeroes.
// Calculate shift=log2(scale).
#if WTF_ARM_ARCH_AT_LEAST_5
asm (
" clz %[lz], %[scale]\n"
: [lz] "=r" (lz)
: [scale] "r" (scale)
: // No clobbers.
);
#else
lz = 0; // Accumulate leading zeroes.
for (ARMWord s = 16; s > 0; s /= 2) {
ARMWord mask = 0xffffffff << (32-lz-s);
if ((scale & mask) == 0) {
lz += s;
}
}
#endif
if (lz >= 32) {
return INVALID_IMM;
}
shift = 31-lz;
// Check that scale was a power of 2.
if ((1u<<shift) != scale) {
return INVALID_IMM;
}
return (shift << 7) | (reg);
}
int ARMAssembler::genInt(int reg, ARMWord imm, bool positive)
{
// Step 1: Search for a non-immediate part
ARMWord mask;
ARMWord imm1;
ARMWord imm2;
int rol;
mask = 0xff000000;
rol = 8;
while(1) {
if ((imm & mask) == 0) {
imm = (imm << rol) | (imm >> (32 - rol));
rol = 4 + (rol >> 1);
break;
}
rol += 2;
mask >>= 2;
if (mask & 0x3) {
// rol 8
imm = (imm << 8) | (imm >> 24);
mask = 0xff00;
rol = 24;
while (1) {
if ((imm & mask) == 0) {
imm = (imm << rol) | (imm >> (32 - rol));
rol = (rol >> 1) - 8;
break;
}
rol += 2;
mask >>= 2;
if (mask & 0x3)
return 0;
}
break;
}
}
ASSERT((imm & 0xff) == 0);
if ((imm & 0xff000000) == 0) {
imm1 = OP2_IMM | ((imm >> 16) & 0xff) | (((rol + 4) & 0xf) << 8);
imm2 = OP2_IMM | ((imm >> 8) & 0xff) | (((rol + 8) & 0xf) << 8);
} else if (imm & 0xc0000000) {
imm1 = OP2_IMM | ((imm >> 24) & 0xff) | ((rol & 0xf) << 8);
imm <<= 8;
rol += 4;
if ((imm & 0xff000000) == 0) {
imm <<= 8;
rol += 4;
}
if ((imm & 0xf0000000) == 0) {
imm <<= 4;
rol += 2;
}
if ((imm & 0xc0000000) == 0) {
imm <<= 2;
rol += 1;
}
if ((imm & 0x00ffffff) == 0)
imm2 = OP2_IMM | (imm >> 24) | ((rol & 0xf) << 8);
else
return 0;
} else {
if ((imm & 0xf0000000) == 0) {
imm <<= 4;
rol += 2;
}
if ((imm & 0xc0000000) == 0) {
imm <<= 2;
rol += 1;
}
imm1 = OP2_IMM | ((imm >> 24) & 0xff) | ((rol & 0xf) << 8);
imm <<= 8;
rol += 4;
if ((imm & 0xf0000000) == 0) {
imm <<= 4;
rol += 2;
}
if ((imm & 0xc0000000) == 0) {
imm <<= 2;
rol += 1;
}
if ((imm & 0x00ffffff) == 0)
imm2 = OP2_IMM | (imm >> 24) | ((rol & 0xf) << 8);
else
return 0;
}
if (positive) {
mov_r(reg, imm1);
orr_r(reg, reg, imm2);
} else {
mvn_r(reg, imm1);
bic_r(reg, reg, imm2);
}
return 1;
}
#ifdef __GNUC__
// If the result of this function isn't used, the caller should probably be
// using movImm.
__attribute__((warn_unused_result))
#endif
ARMWord ARMAssembler::getImm(ARMWord imm, int tmpReg, bool invert)
{
ARMWord tmp;
// Do it by 1 instruction
tmp = getOp2(imm);
if (tmp != INVALID_IMM)
return tmp;
tmp = getOp2(~imm);
if (tmp != INVALID_IMM) {
if (invert)
return tmp | OP2_INV_IMM;
mvn_r(tmpReg, tmp);
return tmpReg;
}
return encodeComplexImm(imm, tmpReg);
}
void ARMAssembler::moveImm(ARMWord imm, int dest)
{
ARMWord tmp;
// Do it by 1 instruction
tmp = getOp2(imm);
if (tmp != INVALID_IMM) {
mov_r(dest, tmp);
return;
}
tmp = getOp2(~imm);
if (tmp != INVALID_IMM) {
mvn_r(dest, tmp);
return;
}
encodeComplexImm(imm, dest);
}
ARMWord ARMAssembler::encodeComplexImm(ARMWord imm, int dest)
{
#if WTF_ARM_ARCH_VERSION >= 7
ARMWord tmp = getImm16Op2(imm);
if (tmp != INVALID_IMM) {
movw_r(dest, tmp);
return dest;
}
movw_r(dest, getImm16Op2(imm & 0xffff));
movt_r(dest, getImm16Op2(imm >> 16));
return dest;
#else
// Do it with 2 instructions
if (genInt(dest, imm, true))
return dest;
if (genInt(dest, ~imm, false))
return dest;
ldr_imm(dest, imm);
return dest;
#endif
}
// Memory load/store helpers
// TODO: this does not take advantage of all of ARMv7's instruction encodings, it should.
void ARMAssembler::dataTransferN(bool isLoad, bool isSigned, int size, RegisterID rt, RegisterID base, int32_t offset)
{
bool posOffset = true;
// There may be more elegant ways of handling this, but this one works.
if (offset == int32_t(0x80000000)) {
// For even bigger offsets, load the entire offset into a register, then do an
// indexed load using the base register and the index register.
moveImm(offset, ARMRegisters::S0);
mem_reg_off(isLoad, isSigned, size, posOffset, rt, base, ARMRegisters::S0);
return;
}
if (offset < 0) {
offset = - offset;
posOffset = false;
}
// max_ldr is also a mask.
int max_ldr = 0xfff;
int ldr_bits = 12;
if (size == 16 || (size == 8 && isSigned)) {
max_ldr = 0xff;
ldr_bits = 8;
}
if (offset <= max_ldr) {
// LDR rd, [rb, #+offset]
mem_imm_off(isLoad, isSigned, size, posOffset, rt, base, offset);
} else if (offset <= ((max_ldr << 8) | 0xff)) {
// Add upper bits of offset to the base, and store the result into the temp register.
if (posOffset) {
add_r(ARMRegisters::S0, base, OP2_IMM | (offset >> ldr_bits) | getOp2RotLSL(ldr_bits));
} else {
sub_r(ARMRegisters::S0, base, OP2_IMM | (offset >> ldr_bits) | getOp2RotLSL(ldr_bits));
}
// Load using the lower bits of the offset, using max_ldr as a mask.
mem_imm_off(isLoad, isSigned, size, posOffset, rt,
ARMRegisters::S0, (offset & max_ldr));
} else {
// For even bigger offsets, load the entire offset into a register, then do an
// indexed load using the base register and the index register.
moveImm(offset, ARMRegisters::S0);
mem_reg_off(isLoad, isSigned, size, posOffset, rt, base, ARMRegisters::S0);
}
}
void ARMAssembler::dataTransfer32(bool isLoad, RegisterID srcDst, RegisterID base, int32_t offset)
{
if (offset >= 0) {
if (offset <= 0xfff) {
// LDR rd, [rb, +offset]
dtr_u(isLoad, srcDst, base, offset);
} else if (offset <= 0xfffff) {
// Add upper bits of offset to the base, and store the result into the temp register.
add_r(ARMRegisters::S0, base, OP2_IMM | (offset >> 12) | getOp2RotLSL(12));
// Load using the lower bits of the register.
dtr_u(isLoad, srcDst, ARMRegisters::S0, (offset & 0xfff));
} else {
// For even bigger offsets, load the entire offset into a register, then do an
// indexed load using the base register and the index register.
moveImm(offset, ARMRegisters::S0);
dtr_ur(isLoad, srcDst, base, ARMRegisters::S0);
}
} else {
// Negative offsets.
if (offset >= -0xfff) {
dtr_d(isLoad, srcDst, base, -offset);
} else if (offset >= -0xfffff) {
sub_r(ARMRegisters::S0, base, OP2_IMM | (-offset >> 12) | getOp2RotLSL(12));
dtr_d(isLoad, srcDst, ARMRegisters::S0, (-offset & 0xfff));
} else {
moveImm(offset, ARMRegisters::S0);
dtr_ur(isLoad, srcDst, base, ARMRegisters::S0);
}
}
}
/* this is large, ugly and obsolete. dataTransferN is superior.*/
void ARMAssembler::dataTransfer8(bool isLoad, RegisterID srcDst, RegisterID base, int32_t offset, bool isSigned)
{
if (offset >= 0) {
if (offset <= 0xfff) {
if (isSigned)
mem_imm_off(isLoad, true, 8, true, srcDst, base, offset);
else
dtrb_u(isLoad, srcDst, base, offset);
} else if (offset <= 0xfffff) {
add_r(ARMRegisters::S0, base, OP2_IMM | (offset >> 12) | getOp2RotLSL(12));
if (isSigned)
mem_imm_off(isLoad, true, 8, true, srcDst, ARMRegisters::S0, (offset & 0xfff));
else
dtrb_u(isLoad, srcDst, ARMRegisters::S0, (offset & 0xfff));
} else {
moveImm(offset, ARMRegisters::S0);
if (isSigned)
mem_reg_off(isLoad, true, 8, true, srcDst, base, ARMRegisters::S0);
else
dtrb_ur(isLoad, srcDst, base, ARMRegisters::S0);
}
} else {
if (offset >= -0xfff) {
if (isSigned)
mem_imm_off(isLoad, true, 8, false, srcDst, base, -offset);
else
dtrb_d(isLoad, srcDst, base, -offset);
} else if (offset >= -0xfffff) {
sub_r(ARMRegisters::S0, base, OP2_IMM | (-offset >> 12) | getOp2RotLSL(12));
if (isSigned)
mem_imm_off(isLoad, true, 8, false, srcDst, ARMRegisters::S0, (-offset & 0xfff));
else
dtrb_d(isLoad, srcDst, ARMRegisters::S0, (-offset & 0xfff));
} else {
moveImm(offset, ARMRegisters::S0);
if (isSigned)
mem_reg_off(isLoad, true, 8, true, srcDst, base, ARMRegisters::S0);
else
dtrb_ur(isLoad, srcDst, base, ARMRegisters::S0);
}
}
}
// rather X86-like, implements dest <- [base, index * shift + offset]
void ARMAssembler::baseIndexTransfer32(bool isLoad, RegisterID srcDst, RegisterID base, RegisterID index, int scale, int32_t offset)
{
ARMWord op2;
ASSERT(scale >= 0 && scale <= 3);
op2 = lsl(index, scale);
if (offset >= 0 && offset <= 0xfff) {
add_r(ARMRegisters::S0, base, op2);
dtr_u(isLoad, srcDst, ARMRegisters::S0, offset);
return;
}
if (offset <= 0 && offset >= -0xfff) {
add_r(ARMRegisters::S0, base, op2);
dtr_d(isLoad, srcDst, ARMRegisters::S0, -offset);
return;
}
ldr_un_imm(ARMRegisters::S0, offset);
add_r(ARMRegisters::S0, ARMRegisters::S0, op2);
dtr_ur(isLoad, srcDst, base, ARMRegisters::S0);
}
void ARMAssembler::baseIndexTransferN(bool isLoad, bool isSigned, int size, RegisterID srcDst, RegisterID base, RegisterID index, int scale, int32_t offset)
{
ARMWord op2;
ASSERT(scale >= 0 && scale <= 3);
op2 = lsl(index, scale);
if (offset >= -0xfff && offset <= 0xfff) {
add_r(ARMRegisters::S0, base, op2);
bool posOffset = true;
if (offset < 0) {
posOffset = false;
offset = -offset;
}
mem_imm_off(isLoad, isSigned, size, posOffset, srcDst, ARMRegisters::S0, offset);
return;
}
ldr_un_imm(ARMRegisters::S0, offset);
add_r(ARMRegisters::S0, ARMRegisters::S0, op2);
mem_reg_off(isLoad, isSigned, size, true, srcDst, base, ARMRegisters::S0);
}
void ARMAssembler::doubleTransfer(bool isLoad, FPRegisterID srcDst, RegisterID base, int32_t offset)
{
// VFP cannot directly access memory that is not four-byte-aligned, so
// special-case support will be required for such cases. However, we don't
// currently use any unaligned floating-point memory accesses and probably
// never will, so for now just assert that the offset is aligned.
//
// Note that we cannot assert that the base register is aligned, but in
// that case, an alignment fault will be raised at run-time.
ASSERT((offset & 0x3) == 0);
// Try to use a single load/store instruction, or at least a simple address
// calculation.
if (offset >= 0) {
if (offset <= 0x3ff) {
fmem_imm_off(isLoad, true, true, srcDst, base, offset >> 2);
return;
}
if (offset <= 0x3ffff) {
add_r(ARMRegisters::S0, base, OP2_IMM | (offset >> 10) | getOp2RotLSL(10));
fmem_imm_off(isLoad, true, true, srcDst, ARMRegisters::S0, (offset >> 2) & 0xff);
return;
}
} else {
if (offset >= -0x3ff) {
fmem_imm_off(isLoad, true, false, srcDst, base, -offset >> 2);
return;
}
if (offset >= -0x3ffff) {
sub_r(ARMRegisters::S0, base, OP2_IMM | (-offset >> 10) | getOp2RotLSL(10));
fmem_imm_off(isLoad, true, false, srcDst, ARMRegisters::S0, (-offset >> 2) & 0xff);
return;
}
}
// Slow case for long-range accesses.
ldr_un_imm(ARMRegisters::S0, offset);
add_r(ARMRegisters::S0, ARMRegisters::S0, base);
fmem_imm_off(isLoad, true, true, srcDst, ARMRegisters::S0, 0);
}
void ARMAssembler::doubleTransfer(bool isLoad, FPRegisterID srcDst, RegisterID base, int32_t offset, RegisterID index, int32_t scale)
{
// This variant accesses memory at base+offset+(index*scale). VLDR and VSTR
// don't have such an addressing mode, so this access will require some
// arithmetic instructions.
// This method does not support accesses that are not four-byte-aligned.
ASSERT((offset & 0x3) == 0);
// Catch the trivial case, where scale is 0.
if (scale == 0) {
doubleTransfer(isLoad, srcDst, base, offset);
return;
}
// Calculate the address, excluding the non-scaled offset. This is
// efficient for scale factors that are powers of two.
ARMWord op2_index = getOp2RegScale(index, scale);
if (op2_index == INVALID_IMM) {
// Use MUL to calculate scale factors that are not powers of two.
moveImm(scale, ARMRegisters::S0);
mul_r(ARMRegisters::S0, index, ARMRegisters::S0);
op2_index = ARMRegisters::S0;
}
add_r(ARMRegisters::S0, base, op2_index);
doubleTransfer(isLoad, srcDst, ARMRegisters::S0, offset);
}
void ARMAssembler::floatTransfer(bool isLoad, FPRegisterID srcDst, RegisterID base, int32_t offset)
{
// Assert that the access is aligned, as in doubleTransfer.
ASSERT((offset & 0x3) == 0);
// Try to use a single load/store instruction, or at least a simple address
// calculation.
if (offset >= 0) {
if (offset <= 0x3ff) {
fmem_imm_off(isLoad, false, true, srcDst, base, offset >> 2);
return;
}
if (offset <= 0x3ffff) {
add_r(ARMRegisters::S0, base, OP2_IMM | (offset >> 10) | getOp2RotLSL(10));
fmem_imm_off(isLoad, false, true, srcDst, ARMRegisters::S0, (offset >> 2) & 0xff);
return;
}
} else {
if (offset >= -0x3ff) {
fmem_imm_off(isLoad, false, false, srcDst, base, -offset >> 2);
return;
}
if (offset >= -0x3ffff) {
sub_r(ARMRegisters::S0, base, OP2_IMM | (-offset >> 10) | getOp2RotLSL(10));
fmem_imm_off(isLoad, false, false, srcDst, ARMRegisters::S0, (-offset >> 2) & 0xff);
return;
}
}
// Slow case for long-range accesses.
ldr_un_imm(ARMRegisters::S0, offset);
add_r(ARMRegisters::S0, ARMRegisters::S0, base);
fmem_imm_off(isLoad, false, true, srcDst, ARMRegisters::S0, 0);
}
void ARMAssembler::baseIndexFloatTransfer(bool isLoad, bool isDouble, FPRegisterID srcDst, RegisterID base, RegisterID index, int scale, int32_t offset)
{
ARMWord op2;
ASSERT(scale >= 0 && scale <= 3);
op2 = lsl(index, scale);
// vldr/vstr have a more restricted range than your standard ldr.
// they get 8 bits that are implicitly shifted left by 2.
if (offset >= -(0xff<<2) && offset <= (0xff<<2)) {
add_r(ARMRegisters::S0, base, op2);
bool posOffset = true;
if (offset < 0) {
posOffset = false;
offset = -offset;
}
fmem_imm_off(isLoad, isDouble, posOffset, srcDst, ARMRegisters::S0, offset >> 2);
return;
}
ldr_un_imm(ARMRegisters::S0, offset);
// vldr/vstr do not allow register-indexed operations, so we get to do this *manually*.
add_r(ARMRegisters::S0, ARMRegisters::S0, op2);
add_r(ARMRegisters::S0, ARMRegisters::S0, base);
fmem_imm_off(isLoad, isDouble, true, srcDst, ARMRegisters::S0, 0);
}
// Fix up the offsets and literal-pool loads in buffer. The buffer should
// already contain the code from m_buffer.
inline void ARMAssembler::fixUpOffsets(void * buffer)
{
char * data = reinterpret_cast<char *>(buffer);
for (Jumps::Iterator iter = m_jumps.begin(); iter != m_jumps.end(); ++iter) {
// The last bit is set if the constant must be placed in the constant pool.
int pos = (*iter) & (~0x1);
ARMWord* ldrAddr = reinterpret_cast<ARMWord*>(data + pos);
ARMWord* addr = getLdrImmAddress(ldrAddr);
if (*addr != InvalidBranchTarget) {
// The following is disabled for JM because we patch some branches after
// calling fixUpOffset, and the branch patcher doesn't know how to handle 'B'
// instructions.
#if 0
if (!(*iter & 1)) {
int diff = reinterpret_cast<ARMWord*>(data + *addr) - (ldrAddr + DefaultPrefetching);
if ((diff <= BOFFSET_MAX && diff >= BOFFSET_MIN)) {
*ldrAddr = B | getConditionalField(*ldrAddr) | (diff & BRANCH_MASK);
continue;
}
}
#endif
*addr = reinterpret_cast<ARMWord>(data + *addr);
}
}
}
void* ARMAssembler::executableAllocAndCopy(ExecutableAllocator* allocator, ExecutablePool **poolp, CodeKind kind)
{
// 64-bit alignment is required for next constant pool and JIT code as well
m_buffer.flushWithoutBarrier(true);
if (m_buffer.uncheckedSize() & 0x7)
bkpt(0);
void * data = m_buffer.executableAllocAndCopy(allocator, poolp, kind);
if (data)
fixUpOffsets(data);
return data;
}
// This just dumps the code into the specified buffer, fixing up absolute
// offsets and literal pool loads as it goes. The buffer is assumed to be large
// enough to hold the code, and any pre-existing literal pool is assumed to
// have been flushed.
void ARMAssembler::executableCopy(void * buffer)
{
ASSERT(m_buffer.sizeOfConstantPool() == 0);
memcpy(buffer, m_buffer.data(), m_buffer.size());
fixUpOffsets(buffer);
}
} // namespace JSC
#endif // ENABLE(ASSEMBLER) && CPU(ARM_TRADITIONAL)
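
The load/store helpers above repeatedly use the same trick for offsets that do not fit an instruction's immediate field: fold the upper bits into the base with an ADD (or SUB) into the scratch register S0, then use the remaining low bits as the instruction's immediate offset. A standalone sketch of that arithmetic for the 12-bit LDR/STR case handled by dataTransfer32 (splitOffset is a hypothetical helper, not part of the removed file):

#include <cassert>
#include <cstdint>

// Split a non-negative offset <= 0xfffff the way dataTransfer32() does: the
// top bits (an 8-bit value shifted left by 12, so encodable as an operand-2
// immediate) are added to the base register, and the bottom 12 bits go into
// the LDR/STR immediate field.
static void splitOffset(uint32_t offset, uint32_t* addImm, uint32_t* ldrImm)
{
    assert(offset <= 0xfffffu);
    *addImm = offset & ~0xfffu;
    *ldrImm = offset & 0xfffu;
}

int main()
{
    uint32_t addImm, ldrImm;
    splitOffset(0x12345, &addImm, &ldrImm);
    assert(addImm == 0x12000 && ldrImm == 0x345);  // recombines to 0x12345
    return 0;
}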

File diff suppressed because it is too large

File diff suppressed because it is too large


@@ -1,659 +0,0 @@
/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
* vim: set ts=8 sts=4 et sw=4 tw=99:
*
* ***** BEGIN LICENSE BLOCK *****
* Copyright (C) 2008 Apple Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
* EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
* CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
* PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
* OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
* ***** END LICENSE BLOCK ***** */
#ifndef assembler_assembler_AbstractMacroAssembler_h
#define assembler_assembler_AbstractMacroAssembler_h
#include "assembler/wtf/Platform.h"
#include "assembler/assembler/MacroAssemblerCodeRef.h"
#include "assembler/assembler/CodeLocation.h"
#if ENABLE_ASSEMBLER
namespace JSC {
class LinkBuffer;
class RepatchBuffer;
template <class AssemblerType>
class AbstractMacroAssembler {
public:
typedef AssemblerType AssemblerType_T;
typedef MacroAssemblerCodePtr CodePtr;
typedef MacroAssemblerCodeRef CodeRef;
class Jump;
typedef typename AssemblerType::RegisterID RegisterID;
typedef typename AssemblerType::FPRegisterID FPRegisterID;
typedef typename AssemblerType::JmpSrc JmpSrc;
typedef typename AssemblerType::JmpDst JmpDst;
#ifdef DEBUG
void setSpewPath(bool isOOLPath)
{
m_assembler.isOOLPath = isOOLPath;
}
#endif
// Section 1: MacroAssembler operand types
//
// The following types are used as operands to MacroAssembler operations,
// describing immediate and memory operands to the instructions to be planted.
enum Scale {
TimesOne,
TimesTwo,
TimesFour,
TimesEight
};
// Address:
//
// Describes a simple base-offset address.
struct Address {
explicit Address() {}
explicit Address(RegisterID base, int32_t offset = 0)
: base(base)
, offset(offset)
{
}
RegisterID base;
int32_t offset;
};
struct ExtendedAddress {
explicit ExtendedAddress(RegisterID base, intptr_t offset = 0)
: base(base)
, offset(offset)
{
}
RegisterID base;
intptr_t offset;
};
// ImplicitAddress:
//
// This class is used for explicit 'load' and 'store' operations
// (as opposed to situations in which a memory operand is provided
// to a generic operation, such as an integer arithmetic instruction).
//
// In the case of a load (or store) operation we want to permit
// addresses to be implicitly constructed, e.g. the two calls:
//
// load32(Address(addrReg), destReg);
// load32(addrReg, destReg);
//
// Are equivalent, and the explicit wrapping of the Address in the former
// is unnecessary.
struct ImplicitAddress {
explicit ImplicitAddress(RegisterID base)
: base(base)
, offset(0)
{
}
MOZ_IMPLICIT ImplicitAddress(Address address)
: base(address.base)
, offset(address.offset)
{
}
RegisterID base;
int32_t offset;
};
// BaseIndex:
//
// Describes a complex addressing mode.
struct BaseIndex {
BaseIndex(RegisterID base, RegisterID index, Scale scale, int32_t offset = 0)
: base(base)
, index(index)
, scale(scale)
, offset(offset)
{
}
RegisterID base;
RegisterID index;
Scale scale;
int32_t offset;
};
// AbsoluteAddress:
//
// Describes a memory operand given by a pointer. For regular load & store
// operations an unwrapped void* will be used, rather than using this.
struct AbsoluteAddress {
explicit AbsoluteAddress(const void* ptr)
: m_ptr(ptr)
{
}
const void* m_ptr;
};
// TrustedImmPtr:
//
// A pointer sized immediate operand to an instruction - this is wrapped
// in a class requiring explicit construction in order to differentiate
// from pointers used as absolute addresses to memory operations
struct TrustedImmPtr {
explicit TrustedImmPtr(const void* value)
: m_value(value)
{
}
intptr_t asIntptr()
{
return reinterpret_cast<intptr_t>(m_value);
}
const void* m_value;
};
struct ImmPtr : public TrustedImmPtr {
explicit ImmPtr(const void* value)
: TrustedImmPtr(value)
{
}
};
// TrustedImm32:
//
// A 32bit immediate operand to an instruction - this is wrapped in a
// class requiring explicit construction in order to prevent RegisterIDs
// (which are implemented as an enum) from accidentally being passed as
// immediate values.
struct TrustedImm32 {
explicit TrustedImm32(int32_t value)
: m_value(value)
#if WTF_CPU_ARM || WTF_CPU_MIPS
, m_isPointer(false)
#endif
{
}
#if !WTF_CPU_X86_64
explicit TrustedImm32(TrustedImmPtr ptr)
: m_value(ptr.asIntptr())
#if WTF_CPU_ARM || WTF_CPU_MIPS
, m_isPointer(true)
#endif
{
}
#endif
int32_t m_value;
#if WTF_CPU_ARM || WTF_CPU_MIPS
// We rely on being able to regenerate code to recover exception handling
// information. Since ARMv7 supports 16-bit immediates there is a danger
// that if pointer values change the layout of the generated code will change.
// To avoid this problem, always generate pointers (and thus Imm32s constructed
// from ImmPtrs) with a code sequence that is able to represent any pointer
// value - don't use a more compact form in these cases.
// Same for MIPS.
bool m_isPointer;
#endif
};
struct Imm32 : public TrustedImm32 {
explicit Imm32(int32_t value)
: TrustedImm32(value)
{
}
#if !WTF_CPU_X86_64
explicit Imm32(TrustedImmPtr ptr)
: TrustedImm32(ptr)
{
}
#endif
};
struct ImmDouble {
union {
struct {
#if WTF_CPU_BIG_ENDIAN || WTF_CPU_MIDDLE_ENDIAN
uint32_t msb, lsb;
#else
uint32_t lsb, msb;
#endif
} s;
uint64_t u64;
double d;
} u;
explicit ImmDouble(double d) {
u.d = d;
}
};
// Section 2: MacroAssembler code buffer handles
//
// The following types are used to reference items in the code buffer
// during JIT code generation. For example, the type Jump is used to
// track the location of a jump instruction so that it may later be
// linked to a label marking its destination.
// Label:
//
// A Label records a point in the generated instruction stream, typically such that
// it may be used as a destination for a jump.
class Label {
template<class TemplateAssemblerType>
friend class AbstractMacroAssembler;
friend class Jump;
friend class MacroAssemblerCodeRef;
friend class LinkBuffer;
public:
Label()
{
}
explicit Label(AbstractMacroAssembler<AssemblerType>* masm)
: m_label(masm->m_assembler.label())
{
}
bool isUsed() const { return m_label.isUsed(); }
void used() { m_label.used(); }
bool isSet() const { return m_label.isValid(); }
private:
JmpDst m_label;
};
// DataLabelPtr:
//
// A DataLabelPtr is used to refer to a location in the code containing a pointer to be
// patched after the code has been generated.
class DataLabelPtr {
template<class TemplateAssemblerType>
friend class AbstractMacroAssembler;
friend class LinkBuffer;
public:
DataLabelPtr()
{
}
explicit DataLabelPtr(AbstractMacroAssembler<AssemblerType>* masm)
: m_label(masm->m_assembler.label())
{
}
bool isSet() const { return m_label.isValid(); }
private:
JmpDst m_label;
};
// DataLabel32:
//
// A DataLabel32 is used to refer to a location in the code containing a
// 32-bit constant to be patched after the code has been generated.
class DataLabel32 {
template<class TemplateAssemblerType>
friend class AbstractMacroAssembler;
friend class LinkBuffer;
public:
DataLabel32()
{
}
explicit DataLabel32(AbstractMacroAssembler<AssemblerType>* masm)
: m_label(masm->m_assembler.label())
{
}
private:
JmpDst m_label;
};
// Call:
//
// A Call object is a reference to a call instruction that has been planted
// into the code buffer - it is typically used to link the call, setting the
// relative offset such that when executed it will call to the desired
// destination.
class Call {
template<class TemplateAssemblerType>
friend class AbstractMacroAssembler;
public:
enum Flags {
None = 0x0,
Linkable = 0x1,
Near = 0x2,
LinkableNear = 0x3
};
Call()
: m_flags(None)
{
}
Call(JmpSrc jmp, Flags flags)
: m_jmp(jmp)
, m_flags(flags)
{
}
bool isFlagSet(Flags flag)
{
return !!(m_flags & flag);
}
static Call fromTailJump(Jump jump)
{
return Call(jump.m_jmp, Linkable);
}
JmpSrc m_jmp;
private:
Flags m_flags;
};
// Jump:
//
// A jump object is a reference to a jump instruction that has been planted
// into the code buffer - it is typically used to link the jump, setting the
// relative offset such that when executed it will jump to the desired
// destination.
class Jump {
template<class TemplateAssemblerType>
friend class AbstractMacroAssembler;
friend class Call;
friend class LinkBuffer;
public:
Jump()
{
}
explicit Jump(JmpSrc jmp)
: m_jmp(jmp)
{
}
void link(AbstractMacroAssembler<AssemblerType>* masm) const
{
masm->m_assembler.linkJump(m_jmp, masm->m_assembler.label());
}
void linkTo(Label label, AbstractMacroAssembler<AssemblerType>* masm) const
{
masm->m_assembler.linkJump(m_jmp, label.m_label);
}
bool isSet() const { return m_jmp.isSet(); }
private:
JmpSrc m_jmp;
};
// JumpList:
//
// A JumpList is a set of Jump objects.
// All jumps in the set will be linked to the same destination.
class JumpList {
friend class LinkBuffer;
public:
typedef js::Vector<Jump, 16 ,js::SystemAllocPolicy > JumpVector;
JumpList() {}
JumpList(const JumpList &other)
{
m_jumps.appendAll(other.m_jumps);
}
JumpList &operator=(const JumpList &other)
{
m_jumps.clear();
m_jumps.append(other.m_jumps);
return *this;
}
void link(AbstractMacroAssembler<AssemblerType>* masm)
{
size_t size = m_jumps.length();
for (size_t i = 0; i < size; ++i)
m_jumps[i].link(masm);
m_jumps.clear();
}
void linkTo(Label label, AbstractMacroAssembler<AssemblerType>* masm)
{
size_t size = m_jumps.length();
for (size_t i = 0; i < size; ++i)
m_jumps[i].linkTo(label, masm);
m_jumps.clear();
}
void append(Jump jump)
{
m_jumps.append(jump);
}
void append(const JumpList& other)
{
m_jumps.append(other.m_jumps.begin(), other.m_jumps.length());
}
void clear()
{
m_jumps.clear();
}
bool empty()
{
return !m_jumps.length();
}
const JumpVector& jumps() const { return m_jumps; }
private:
JumpVector m_jumps;
};
// Section 3: Misc admin methods
static CodePtr trampolineAt(CodeRef ref, Label label)
{
return CodePtr(AssemblerType::getRelocatedAddress(ref.m_code.dataLocation(), label.m_label));
}
size_t size()
{
return m_assembler.size();
}
unsigned char *buffer()
{
return m_assembler.buffer();
}
bool oom()
{
return m_assembler.oom();
}
void executableCopy(void* buffer)
{
ASSERT(!oom());
m_assembler.executableCopy(buffer);
}
Label label()
{
return Label(this);
}
DataLabel32 dataLabel32()
{
return DataLabel32(this);
}
Label align()
{
m_assembler.align(16);
return Label(this);
}
ptrdiff_t differenceBetween(Label from, Jump to)
{
return AssemblerType::getDifferenceBetweenLabels(from.m_label, to.m_jmp);
}
ptrdiff_t differenceBetween(Label from, Call to)
{
return AssemblerType::getDifferenceBetweenLabels(from.m_label, to.m_jmp);
}
ptrdiff_t differenceBetween(Label from, Label to)
{
return AssemblerType::getDifferenceBetweenLabels(from.m_label, to.m_label);
}
ptrdiff_t differenceBetween(Label from, DataLabelPtr to)
{
return AssemblerType::getDifferenceBetweenLabels(from.m_label, to.m_label);
}
ptrdiff_t differenceBetween(Label from, DataLabel32 to)
{
return AssemblerType::getDifferenceBetweenLabels(from.m_label, to.m_label);
}
ptrdiff_t differenceBetween(DataLabel32 from, Label to)
{
return AssemblerType::getDifferenceBetweenLabels(from.m_label, to.m_label);
}
ptrdiff_t differenceBetween(DataLabelPtr from, Label to)
{
return AssemblerType::getDifferenceBetweenLabels(from.m_label, to.m_label);
}
ptrdiff_t differenceBetween(DataLabelPtr from, Jump to)
{
return AssemblerType::getDifferenceBetweenLabels(from.m_label, to.m_jmp);
}
ptrdiff_t differenceBetween(DataLabelPtr from, DataLabelPtr to)
{
return AssemblerType::getDifferenceBetweenLabels(from.m_label, to.m_label);
}
ptrdiff_t differenceBetween(DataLabelPtr from, Call to)
{
return AssemblerType::getDifferenceBetweenLabels(from.m_label, to.m_jmp);
}
protected:
AssemblerType m_assembler;
friend class LinkBuffer;
friend class RepatchBuffer;
static void linkJump(void* code, Jump jump, CodeLocationLabel target)
{
AssemblerType::linkJump(code, jump.m_jmp, target.dataLocation());
}
static void linkPointer(void* code, typename AssemblerType::JmpDst label, void* value)
{
AssemblerType::linkPointer(code, label, value);
}
static void* getLinkerAddress(void* code, typename AssemblerType::JmpSrc label)
{
return AssemblerType::getRelocatedAddress(code, label);
}
static void* getLinkerAddress(void* code, typename AssemblerType::JmpDst label)
{
return AssemblerType::getRelocatedAddress(code, label);
}
static unsigned getLinkerCallReturnOffset(Call call)
{
return AssemblerType::getCallReturnOffset(call.m_jmp);
}
static void repatchJump(CodeLocationJump jump, CodeLocationLabel destination)
{
AssemblerType::relinkJump(jump.dataLocation(), destination.dataLocation());
}
static bool canRepatchJump(CodeLocationJump jump, CodeLocationLabel destination)
{
return AssemblerType::canRelinkJump(jump.dataLocation(), destination.dataLocation());
}
static void repatchNearCall(CodeLocationNearCall nearCall, CodeLocationLabel destination)
{
AssemblerType::relinkCall(nearCall.dataLocation(), destination.executableAddress());
}
static void repatchInt32(CodeLocationDataLabel32 dataLabel32, int32_t value)
{
AssemblerType::repatchInt32(dataLabel32.dataLocation(), value);
}
static void repatchPointer(CodeLocationDataLabelPtr dataLabelPtr, void* value)
{
AssemblerType::repatchPointer(dataLabelPtr.dataLocation(), value);
}
static void repatchLoadPtrToLEA(CodeLocationInstruction instruction)
{
AssemblerType::repatchLoadPtrToLEA(instruction.dataLocation());
}
static void repatchLEAToLoadPtr(CodeLocationInstruction instruction)
{
AssemblerType::repatchLEAToLoadPtr(instruction.dataLocation());
}
};
} // namespace JSC
#endif // ENABLE(ASSEMBLER)
#endif /* assembler_assembler_AbstractMacroAssembler_h */

View File

@ -1,386 +0,0 @@
/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
* vim: set ts=8 sts=4 et sw=4 tw=99:
*
* ***** BEGIN LICENSE BLOCK *****
* Copyright (C) 2009 University of Szeged
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY UNIVERSITY OF SZEGED ``AS IS'' AND ANY
* EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL UNIVERSITY OF SZEGED OR
* CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
* PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
* OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
* ***** END LICENSE BLOCK ***** */
#ifndef assembler_assembler_AssemblerBufferWithConstantPool_h
#define assembler_assembler_AssemblerBufferWithConstantPool_h
#include "assembler/wtf/Platform.h"
#if ENABLE_ASSEMBLER
#include "assembler/assembler/AssemblerBuffer.h"
#include "assembler/wtf/SegmentedVector.h"
#include "assembler/wtf/Assertions.h"
#define ASSEMBLER_HAS_CONSTANT_POOL 1
namespace JSC {
/*
A constant pool can store 4- or 8-byte data items. The values can be
constants or addresses. The addresses should be 32 or 64 bits wide. The
constants should be double-precision floats or integers that are hard to
encode in a few machine instructions.
TODO: The pool is designed to handle both 32- and 64-bit values, but
currently only the 4-byte constants are implemented and tested.
The AssemblerBuffer can contain multiple constant pools. Each pool is inserted
into the instruction stream, protected from the execution flow by a jump
instruction.
The flush mechanism is called when no space remains to insert the next instruction
into the pool. Three values are used to determine when the constant pool itself
has to be inserted into the instruction stream (Assembler Buffer):
- maxPoolSize: size of the constant pool in bytes, this value cannot be
larger than the maximum offset of a PC relative memory load
- barrierSize: size of jump instruction in bytes which protects the
constant pool from execution
- maxInstructionSize: maximum length of a machine instruction in bytes
There are some callbacks which solve the target architecture specific
address handling:
- TYPE patchConstantPoolLoad(TYPE load, int value):
patch the 'load' instruction with the index of the constant in the
constant pool and return the patched instruction.
- void patchConstantPoolLoad(void* loadAddr, void* constPoolAddr):
patch the PC-relative load instruction at address 'loadAddr' with the
final relative offset. The offset can be computed with the help of
'constPoolAddr' (the address of the constant pool) and the index of the
constant (which was previously stored in the load instruction itself).
- TYPE placeConstantPoolBarrier(int size):
return a constant pool barrier instruction which jumps over the
constant pool.
The 'put*WithConstant*' functions should be used to place data into the
constant pool.
*/
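// Illustrative sketch (not part of the original source; not compiled): the shape
// of the architecture-specific hooks described above, plus a client-side call.
// 'SketchAssembler' and 'ldrLiteralTemplate' are hypothetical names; only the
// hook signatures mirror the contract documented above.
#if 0
struct SketchAssembler {
    // Encode the pool index of the constant into the load-instruction template.
    static uint32_t patchConstantPoolLoad(uint32_t load, int index);
    // Rewrite the load at 'loadAddr' with its final PC-relative offset, computed
    // from 'constPoolAddr' and the index previously stored in the load itself.
    static void patchConstantPoolLoad(void* loadAddr, void* constPoolAddr);
    // Return a branch instruction that jumps 'size' bytes over the pool.
    static uint32_t placeConstantPoolBarrier(int size);
};
// Client side: emit a literal load whose constant lives in the pool; reusable
// constants may be shared between loads of the same value.
// buffer.putIntWithConstantInt(ldrLiteralTemplate, 0xdeadbeef, /* isReusable = */ true);
#endif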
template <int maxPoolSize, int barrierSize, int maxInstructionSize, class AssemblerType>
class AssemblerBufferWithConstantPool: public AssemblerBuffer {
typedef SegmentedVector<uint32_t, 512> LoadOffsets;
public:
enum {
UniqueConst,
ReusableConst,
UnusedEntry
};
AssemblerBufferWithConstantPool()
: AssemblerBuffer()
, m_numConsts(0)
, m_maxDistance(maxPoolSize)
, m_lastConstDelta(0)
, m_flushCount(0)
{
m_pool = static_cast<uint32_t*>(js_malloc(maxPoolSize));
m_mask = static_cast<char*>(js_malloc(maxPoolSize / sizeof(uint32_t)));
}
~AssemblerBufferWithConstantPool()
{
js_free(m_mask);
js_free(m_pool);
}
void ensureSpace(int space)
{
flushIfNoSpaceFor(space);
AssemblerBuffer::ensureSpace(space);
}
void ensureSpace(int insnSpace, int constSpace)
{
flushIfNoSpaceFor(insnSpace, constSpace);
AssemblerBuffer::ensureSpace(insnSpace);
}
bool isAligned(int alignment)
{
flushIfNoSpaceFor(alignment);
return AssemblerBuffer::isAligned(alignment);
}
void putByteUnchecked(int value)
{
AssemblerBuffer::putByteUnchecked(value);
correctDeltas(1);
}
void putByte(int value)
{
flushIfNoSpaceFor(1);
AssemblerBuffer::putByte(value);
correctDeltas(1);
}
void putShortUnchecked(int value)
{
AssemblerBuffer::putShortUnchecked(value);
correctDeltas(2);
}
void putShort(int value)
{
flushIfNoSpaceFor(2);
AssemblerBuffer::putShort(value);
correctDeltas(2);
}
// Puts 1 word worth of data into the instruction stream
void putIntUnchecked(int value)
{
AssemblerBuffer::putIntUnchecked(value);
correctDeltas(4);
}
// Puts one word worth of data into the instruction stream, and makes sure
// there is enough space to place it, dumping the constant pool if there isn't
void putInt(int value)
{
flushIfNoSpaceFor(4);
AssemblerBuffer::putInt(value);
correctDeltas(4);
}
// puts 64 bits worth of data into the instruction stream
void putInt64Unchecked(int64_t value)
{
AssemblerBuffer::putInt64Unchecked(value);
correctDeltas(8);
}
int size()
{
flushIfNoSpaceFor(maxInstructionSize, sizeof(uint64_t));
return AssemblerBuffer::size();
}
int uncheckedSize() const
{
return AssemblerBuffer::size();
}
// copy all of our instructions and pools into their final location
void* executableAllocAndCopy(ExecutableAllocator* allocator, ExecutablePool** poolp, CodeKind kind)
{
flushConstantPool(false);
return AssemblerBuffer::executableAllocAndCopy(allocator, poolp, kind);
}
// Places one int worth of data into the pool, and mashes an instruction into
// place to hold its offset.
// The caller of putIntWithConstantInt passes in some token that represents an
// instruction, as well as the raw data that is to be placed in the pool.
// Traditionally this 'token' has been the instruction that we wish to encode
// in the end; however, I have started encoding it in a much simpler manner,
// using bitfields and a fairly flat representation.
void putIntWithConstantInt(uint32_t insn, uint32_t constant, bool isReusable = false)
{
flushIfNoSpaceFor(4, 4);
m_loadOffsets.append(AssemblerBuffer::size());
if (isReusable)
for (int i = 0; i < m_numConsts; ++i) {
if (m_mask[i] == ReusableConst && m_pool[i] == constant) {
AssemblerBuffer::putInt(AssemblerType::patchConstantPoolLoad(insn, i));
correctDeltas(4);
return;
}
}
m_pool[m_numConsts] = constant;
m_mask[m_numConsts] = static_cast<char>(isReusable ? ReusableConst : UniqueConst);
AssemblerBuffer::putInt(AssemblerType::patchConstantPoolLoad(insn, m_numConsts));
++m_numConsts;
correctDeltas(4, 4);
}
void putIntWithConstantDouble(uint32_t insn, double constant)
{
flushIfNoSpaceFor(4, 8);
m_loadOffsets.append(AssemblerBuffer::size());
bool isReusable = false;
union DoublePun {
struct {
#if defined(IS_LITTLE_ENDIAN)
uint32_t lo, hi;
#else
uint32_t hi, lo;
#endif
} s;
double d;
} dpun;
dpun.d = constant;
m_pool[m_numConsts] = dpun.s.lo;
m_pool[m_numConsts+1] = dpun.s.hi;
m_mask[m_numConsts] = static_cast<char>(isReusable ? ReusableConst : UniqueConst);
m_mask[m_numConsts+1] = static_cast<char>(isReusable ? ReusableConst : UniqueConst);
AssemblerBuffer::putInt(AssemblerType::patchConstantPoolLoad(insn, m_numConsts));
m_numConsts+=2;
correctDeltas(4, 8);
}
// This flushing mechanism can be called after any unconditional jumps.
void flushWithoutBarrier(bool isForced = false)
{
// Flush if the constant pool is more than 60% full, to avoid overuse of this function
// (5 * poolBytes > 3 * maxPoolSize is equivalent to poolBytes > 0.6 * maxPoolSize).
if (isForced || (5 * m_numConsts * sizeof(uint32_t)) > (3 * maxPoolSize))
flushConstantPool(false);
}
// return the address of the pool; we really shouldn't be using this.
uint32_t* poolAddress()
{
return m_pool;
}
// How many constants have been placed into the pool thus far?
int sizeOfConstantPool()
{
return m_numConsts;
}
int flushCount()
{
return m_flushCount;
}
private:
void correctDeltas(int insnSize)
{
m_maxDistance -= insnSize;
ASSERT(m_maxDistance >= 0);
m_lastConstDelta -= insnSize;
if (m_lastConstDelta < 0)
m_lastConstDelta = 0;
}
void correctDeltas(int insnSize, int constSize)
{
correctDeltas(insnSize);
m_maxDistance -= m_lastConstDelta;
ASSERT(m_maxDistance >= 0);
m_lastConstDelta = constSize;
}
// place a constant pool after the last instruction placed, and
// optionally place a jump to ensure we don't start executing the pool.
void flushConstantPool(bool useBarrier = true)
{
GenericAssembler::staticSpew(" -- FLUSHING CONSTANT POOL WITH %d CONSTANTS --\n",
m_numConsts);
if (m_numConsts == 0)
return;
m_flushCount++;
int alignPool = (AssemblerBuffer::size() + (useBarrier ? barrierSize : 0)) & (sizeof(uint64_t) - 1);
if (alignPool)
alignPool = sizeof(uint64_t) - alignPool;
// Callback to protect the constant pool from execution
if (useBarrier)
AssemblerBuffer::putInt(AssemblerType::placeConstantPoolBarrier(m_numConsts * sizeof(uint32_t) + alignPool));
if (alignPool) {
if (alignPool & 1)
AssemblerBuffer::putByte(AssemblerType::padForAlign8);
if (alignPool & 2)
AssemblerBuffer::putShort(AssemblerType::padForAlign16);
if (alignPool & 4)
AssemblerBuffer::putInt(AssemblerType::padForAlign32);
}
int constPoolOffset = AssemblerBuffer::size();
append(reinterpret_cast<char*>(m_pool), m_numConsts * sizeof(uint32_t));
// Patch each PC relative load
for (LoadOffsets::Iterator iter = m_loadOffsets.begin(); iter != m_loadOffsets.end(); ++iter) {
void* loadAddr = reinterpret_cast<void*>(m_buffer + *iter);
AssemblerType::patchConstantPoolLoad(loadAddr, reinterpret_cast<void*>(m_buffer + constPoolOffset));
}
m_loadOffsets.clear();
m_numConsts = 0;
m_maxDistance = maxPoolSize;
ASSERT(m_maxDistance >= 0);
}
void flushIfNoSpaceFor(int nextInsnSize)
{
if (m_numConsts == 0) {
m_maxDistance = maxPoolSize;
return;
}
int lastConstDelta = m_lastConstDelta > nextInsnSize ? m_lastConstDelta - nextInsnSize : 0;
if ((m_maxDistance < nextInsnSize + lastConstDelta + barrierSize + (int)sizeof(uint32_t)))
flushConstantPool();
}
void flushIfNoSpaceFor(int nextInsnSize, int nextConstSize)
{
if (m_numConsts == 0) {
m_maxDistance = maxPoolSize;
return;
}
if ((m_maxDistance < nextInsnSize + m_lastConstDelta + nextConstSize + barrierSize + (int)sizeof(uint32_t)) ||
(m_numConsts * sizeof(uint32_t) + nextConstSize >= maxPoolSize))
flushConstantPool();
}
uint32_t* m_pool;
char* m_mask;
LoadOffsets m_loadOffsets;
int m_numConsts;
int m_maxDistance;
int m_lastConstDelta;
int m_flushCount;
};
} // namespace JSC
#endif // ENABLE(ASSEMBLER)
#endif /* assembler_assembler_AssemblerBufferWithConstantPool_h */

View File

@ -1,189 +0,0 @@
/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
* vim: set ts=8 sts=4 et sw=4 tw=99:
*
* ***** BEGIN LICENSE BLOCK *****
* Copyright (C) 2009 Apple Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
* EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
* CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
* PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
* OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
* ***** END LICENSE BLOCK ***** */
#ifndef assembler_assembler_CodeLocation_h
#define assembler_assembler_CodeLocation_h
#include "assembler/wtf/Platform.h"
#include "assembler/assembler/MacroAssemblerCodeRef.h"
#if ENABLE_ASSEMBLER
namespace JSC {
class CodeLocationInstruction;
class CodeLocationLabel;
class CodeLocationJump;
class CodeLocationCall;
class CodeLocationNearCall;
class CodeLocationDataLabel32;
class CodeLocationDataLabelPtr;
// The CodeLocation* types are all pretty much do-nothing wrappers around
// CodePtr (or MacroAssemblerCodePtr, to give it its full name). These
// classes only exist to provide type-safety when linking and patching code.
//
// The one new piece of functionality introduced by these classes is the
// ability to create (or put another way, to re-discover) another CodeLocation
// at an offset from one you already know. When patching code to optimize it
// we often want to patch a number of instructions that are short, fixed
// offsets apart. To reduce memory overhead we will only retain a pointer to
// one of the instructions, and we will use the *AtOffset methods provided by
// CodeLocationCommon to find the other points in the code to modify.
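// Illustrative sketch (not part of the original source; not compiled): how a
// single retained location is used to re-discover nearby ones. The function
// name and the offsets are hypothetical; real callers derive the offsets from
// differenceBetween() measurements made at code-generation time.
#if 0
void repatchInlineCache(CodeLocationCall call)
{
    // Only 'call' is retained; the other locations are recomputed from it.
    CodeLocationDataLabelPtr cachedPtr = call.dataLabelPtrAtOffset(-12);
    CodeLocationJump slowPathJump = call.jumpAtOffset(8);
    // ... repatch cachedPtr / slowPathJump via a RepatchBuffer ...
}
#endif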
class CodeLocationCommon : public MacroAssemblerCodePtr {
public:
CodeLocationInstruction instructionAtOffset(int offset);
CodeLocationLabel labelAtOffset(int offset);
CodeLocationJump jumpAtOffset(int offset);
CodeLocationCall callAtOffset(int offset);
CodeLocationNearCall nearCallAtOffset(int offset);
CodeLocationDataLabelPtr dataLabelPtrAtOffset(int offset);
CodeLocationDataLabel32 dataLabel32AtOffset(int offset);
protected:
CodeLocationCommon()
{
}
explicit CodeLocationCommon(MacroAssemblerCodePtr location)
: MacroAssemblerCodePtr(location)
{
}
};
class CodeLocationInstruction : public CodeLocationCommon {
public:
CodeLocationInstruction() {}
explicit CodeLocationInstruction(MacroAssemblerCodePtr location)
: CodeLocationCommon(location) {}
explicit CodeLocationInstruction(void* location)
: CodeLocationCommon(MacroAssemblerCodePtr(location)) {}
};
class CodeLocationLabel : public CodeLocationCommon {
public:
CodeLocationLabel() {}
explicit CodeLocationLabel(MacroAssemblerCodePtr location)
: CodeLocationCommon(location) {}
explicit CodeLocationLabel(void* location)
: CodeLocationCommon(MacroAssemblerCodePtr(location)) {}
};
class CodeLocationJump : public CodeLocationCommon {
public:
CodeLocationJump() {}
explicit CodeLocationJump(MacroAssemblerCodePtr location)
: CodeLocationCommon(location) {}
explicit CodeLocationJump(void* location)
: CodeLocationCommon(MacroAssemblerCodePtr(location)) {}
};
class CodeLocationCall : public CodeLocationCommon {
public:
CodeLocationCall() {}
explicit CodeLocationCall(MacroAssemblerCodePtr location)
: CodeLocationCommon(location) {}
explicit CodeLocationCall(void* location)
: CodeLocationCommon(MacroAssemblerCodePtr(location)) {}
};
class CodeLocationNearCall : public CodeLocationCommon {
public:
CodeLocationNearCall() {}
explicit CodeLocationNearCall(MacroAssemblerCodePtr location)
: CodeLocationCommon(location) {}
explicit CodeLocationNearCall(void* location)
: CodeLocationCommon(MacroAssemblerCodePtr(location)) {}
};
class CodeLocationDataLabel32 : public CodeLocationCommon {
public:
CodeLocationDataLabel32() {}
explicit CodeLocationDataLabel32(MacroAssemblerCodePtr location)
: CodeLocationCommon(location) {}
explicit CodeLocationDataLabel32(void* location)
: CodeLocationCommon(MacroAssemblerCodePtr(location)) {}
};
class CodeLocationDataLabelPtr : public CodeLocationCommon {
public:
CodeLocationDataLabelPtr() {}
explicit CodeLocationDataLabelPtr(MacroAssemblerCodePtr location)
: CodeLocationCommon(location) {}
explicit CodeLocationDataLabelPtr(void* location)
: CodeLocationCommon(MacroAssemblerCodePtr(location)) {}
};
inline CodeLocationInstruction CodeLocationCommon::instructionAtOffset(int offset)
{
ASSERT_VALID_CODE_OFFSET(offset);
return CodeLocationInstruction(reinterpret_cast<char*>(dataLocation()) + offset);
}
inline CodeLocationLabel CodeLocationCommon::labelAtOffset(int offset)
{
ASSERT_VALID_CODE_OFFSET(offset);
return CodeLocationLabel(reinterpret_cast<char*>(dataLocation()) + offset);
}
inline CodeLocationJump CodeLocationCommon::jumpAtOffset(int offset)
{
ASSERT_VALID_CODE_OFFSET(offset);
return CodeLocationJump(reinterpret_cast<char*>(dataLocation()) + offset);
}
inline CodeLocationCall CodeLocationCommon::callAtOffset(int offset)
{
ASSERT_VALID_CODE_OFFSET(offset);
return CodeLocationCall(reinterpret_cast<char*>(dataLocation()) + offset);
}
inline CodeLocationNearCall CodeLocationCommon::nearCallAtOffset(int offset)
{
ASSERT_VALID_CODE_OFFSET(offset);
return CodeLocationNearCall(reinterpret_cast<char*>(dataLocation()) + offset);
}
inline CodeLocationDataLabelPtr CodeLocationCommon::dataLabelPtrAtOffset(int offset)
{
ASSERT_VALID_CODE_OFFSET(offset);
return CodeLocationDataLabelPtr(reinterpret_cast<char*>(dataLocation()) + offset);
}
inline CodeLocationDataLabel32 CodeLocationCommon::dataLabel32AtOffset(int offset)
{
ASSERT_VALID_CODE_OFFSET(offset);
return CodeLocationDataLabel32(reinterpret_cast<char*>(dataLocation()) + offset);
}
} // namespace JSC
#endif // ENABLE(ASSEMBLER)
#endif /* assembler_assembler_CodeLocation_h */

View File

@ -1,221 +0,0 @@
/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
* vim: set ts=8 sts=4 et sw=4 tw=99:
*
* ***** BEGIN LICENSE BLOCK *****
* Copyright (C) 2009 Apple Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
* EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
* CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
* PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
* OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
* ***** END LICENSE BLOCK ***** */
#ifndef assembler_assembler_LinkBuffer_h
#define assembler_assembler_LinkBuffer_h
#include "assembler/wtf/Platform.h"
#if ENABLE_ASSEMBLER
#include "assembler/assembler/MacroAssembler.h"
namespace JSC {
// LinkBuffer:
//
// This class assists in linking code generated by the macro assembler, once code generation
// has been completed, and the code has been copied to its final location in memory. At this
// time pointers to labels within the code may be resolved, and relative offsets to external
// addresses may be fixed.
//
// Specifically:
// * Jump objects may be linked to external targets,
// * The address of a Jump object may be taken, such that it can later be relinked.
// * The return address of a Call may be acquired.
// * The address of a Label pointing into the code may be resolved.
// * The value referenced by a DataLabel may be set.
//
class LinkBuffer {
typedef MacroAssemblerCodeRef CodeRef;
typedef MacroAssembler::Label Label;
typedef MacroAssembler::Jump Jump;
typedef MacroAssembler::JumpList JumpList;
typedef MacroAssembler::Call Call;
typedef MacroAssembler::DataLabel32 DataLabel32;
typedef MacroAssembler::DataLabelPtr DataLabelPtr;
public:
// 'ok' should be checked after this constructor is called; it's false if OOM occurred.
LinkBuffer(MacroAssembler* masm, ExecutableAllocator* executableAllocator,
ExecutablePool** poolp, bool* ok, CodeKind codeKind)
{
// LinkBuffer is only used by Yarr. MacroAssemblerCodeRef::release relies on this.
MOZ_ASSERT(codeKind == REGEXP_CODE);
m_codeKind = codeKind;
m_code = executableAllocAndCopy(*masm, executableAllocator, poolp);
m_executablePool = *poolp;
m_size = masm->m_assembler.size(); // must come after call to executableAllocAndCopy()!
m_allocSize = masm->m_assembler.allocSize();
#ifndef NDEBUG
m_completed = false;
#endif
*ok = !!m_code;
}
~LinkBuffer()
{
ASSERT(!m_executablePool || m_completed);
}
// These methods are used to link or set values at code generation time.
void link(Call call, FunctionPtr function)
{
ASSERT(call.isFlagSet(Call::Linkable));
MacroAssembler::linkCall(code(), call, function);
}
void link(Jump jump, CodeLocationLabel label)
{
MacroAssembler::linkJump(code(), jump, label);
}
void link(JumpList list, CodeLocationLabel label)
{
for (unsigned i = 0; i < list.m_jumps.length(); ++i)
MacroAssembler::linkJump(code(), list.m_jumps[i], label);
}
void patch(DataLabelPtr label, void* value)
{
MacroAssembler::linkPointer(code(), label.m_label, value);
}
void patch(DataLabelPtr label, CodeLocationLabel value)
{
MacroAssembler::linkPointer(code(), label.m_label, value.executableAddress());
}
// These methods are used to obtain handles to allow the code to be relinked / repatched later.
CodeLocationCall locationOf(Call call)
{
ASSERT(call.isFlagSet(Call::Linkable));
ASSERT(!call.isFlagSet(Call::Near));
return CodeLocationCall(MacroAssembler::getLinkerAddress(code(), call.m_jmp));
}
CodeLocationJump locationOf(Jump j)
{
return CodeLocationJump(MacroAssembler::getLinkerAddress(code(), j.m_jmp));
}
CodeLocationNearCall locationOfNearCall(Call call)
{
ASSERT(call.isFlagSet(Call::Linkable));
ASSERT(call.isFlagSet(Call::Near));
return CodeLocationNearCall(MacroAssembler::getLinkerAddress(code(), call.m_jmp));
}
CodeLocationLabel locationOf(Label label)
{
return CodeLocationLabel(MacroAssembler::getLinkerAddress(code(), label.m_label));
}
CodeLocationDataLabelPtr locationOf(DataLabelPtr label)
{
return CodeLocationDataLabelPtr(MacroAssembler::getLinkerAddress(code(), label.m_label));
}
CodeLocationDataLabel32 locationOf(DataLabel32 label)
{
return CodeLocationDataLabel32(MacroAssembler::getLinkerAddress(code(), label.m_label));
}
// This method obtains the return address of the call, given as an offset from
// the start of the code.
unsigned returnAddressOffset(Call call)
{
return MacroAssembler::getLinkerCallReturnOffset(call);
}
// Upon completion of all patching either 'finalizeCode()' or 'finalizeCodeAddendum()' should be called
// once to complete generation of the code. 'finalizeCode()' is suited to situations
// where the executable pool must also be retained; the lighter-weight 'finalizeCodeAddendum()' is
// suited to adding to an existing allocation.
CodeRef finalizeCode()
{
performFinalization();
MOZ_ASSERT(m_allocSize >= m_size);
return CodeRef(m_code, m_executablePool, m_allocSize);
}
CodeLocationLabel finalizeCodeAddendum()
{
performFinalization();
return CodeLocationLabel(code());
}
// Useful as a proxy to detect OOM.
void* unsafeCode() {
return code();
}
protected:
// Keep this private! - the underlying code should only be obtained externally via
// finalizeCode() or finalizeCodeAddendum().
void* code()
{
return m_code;
}
void *executableAllocAndCopy(MacroAssembler &masm, ExecutableAllocator *allocator,
ExecutablePool **poolp)
{
return masm.m_assembler.executableAllocAndCopy(allocator, poolp, m_codeKind);
}
void performFinalization()
{
#ifndef NDEBUG
ASSERT(!m_completed);
m_completed = true;
#endif
ExecutableAllocator::makeExecutable(code(), m_size);
ExecutableAllocator::cacheFlush(code(), m_size);
}
ExecutablePool* m_executablePool;
void* m_code;
size_t m_size;
size_t m_allocSize;
CodeKind m_codeKind;
#ifndef NDEBUG
bool m_completed;
#endif
};
} // namespace JSC
#endif // ENABLE(ASSEMBLER)
#endif /* assembler_assembler_LinkBuffer_h */

File diff suppressed because it is too large

View File

@ -36,361 +36,32 @@
#if WTF_CPU_ARM_THUMB2
#include "assembler/assembler/MacroAssemblerARMv7.h"
namespace JSC { typedef MacroAssemblerARMv7 MacroAssemblerBase; }
namespace JSC { typedef MacroAssemblerARMv7 MacroAssembler; }
#elif WTF_CPU_ARM_TRADITIONAL
#include "assembler/assembler/MacroAssemblerARM.h"
namespace JSC { typedef MacroAssemblerARM MacroAssemblerBase; }
namespace JSC { typedef MacroAssemblerARM MacroAssembler; }
#elif WTF_CPU_MIPS
#include "assembler/assembler/MacroAssemblerMIPS.h"
namespace JSC { typedef MacroAssemblerMIPS MacroAssemblerBase; }
namespace JSC { typedef MacroAssemblerMIPS MacroAssembler; }
#elif WTF_CPU_X86
#include "assembler/assembler/MacroAssemblerX86.h"
namespace JSC { typedef MacroAssemblerX86 MacroAssemblerBase; }
namespace JSC { typedef MacroAssemblerX86 MacroAssembler; }
#elif WTF_CPU_X86_64
#include "assembler/assembler/MacroAssemblerX86_64.h"
namespace JSC { typedef MacroAssemblerX86_64 MacroAssemblerBase; }
namespace JSC { typedef MacroAssemblerX86_64 MacroAssembler; }
#elif WTF_CPU_SPARC
#include "assembler/assembler/MacroAssemblerSparc.h"
namespace JSC { typedef MacroAssemblerSparc MacroAssemblerBase; }
namespace JSC { typedef MacroAssemblerSparc MacroAssembler; }
#else
#error "The MacroAssembler is not supported on this platform."
#endif
namespace JSC {
class MacroAssembler : public MacroAssemblerBase {
public:
using MacroAssemblerBase::pop;
using MacroAssemblerBase::jump;
using MacroAssemblerBase::branch32;
using MacroAssemblerBase::branch16;
#if WTF_CPU_X86_64
using MacroAssemblerBase::branchPtr;
using MacroAssemblerBase::branchTestPtr;
#endif
// Platform-agnostic convenience functions,
// described in terms of other macro assembly methods.
void pop()
{
addPtr(Imm32(sizeof(void*)), stackPointerRegister);
}
void peek(RegisterID dest, int index = 0)
{
loadPtr(Address(stackPointerRegister, (index * sizeof(void*))), dest);
}
void poke(RegisterID src, int index = 0)
{
storePtr(src, Address(stackPointerRegister, (index * sizeof(void*))));
}
void poke(TrustedImm32 value, int index = 0)
{
store32(value, Address(stackPointerRegister, (index * sizeof(void*))));
}
void poke(TrustedImmPtr imm, int index = 0)
{
storePtr(imm, Address(stackPointerRegister, (index * sizeof(void*))));
}
// Backwards branches; these are currently all implemented using existing forwards branch mechanisms.
void branchPtr(Condition cond, RegisterID op1, ImmPtr imm, Label target)
{
branchPtr(cond, op1, imm).linkTo(target, this);
}
void branch32(Condition cond, RegisterID op1, RegisterID op2, Label target)
{
branch32(cond, op1, op2).linkTo(target, this);
}
void branch32(Condition cond, RegisterID op1, TrustedImm32 imm, Label target)
{
branch32(cond, op1, imm).linkTo(target, this);
}
void branch32(Condition cond, RegisterID left, Address right, Label target)
{
branch32(cond, left, right).linkTo(target, this);
}
void branch16(Condition cond, BaseIndex left, RegisterID right, Label target)
{
branch16(cond, left, right).linkTo(target, this);
}
void branchTestPtr(Condition cond, RegisterID reg, Label target)
{
branchTestPtr(cond, reg).linkTo(target, this);
}
void jump(Label target)
{
jump().linkTo(target, this);
}
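// Illustrative sketch (not part of the original source; not compiled): using the
// backwards-branch helpers above to close a simple counted loop. 'masm' and
// 'counterReg' are hypothetical.
#if 0
MacroAssembler::Label loopHead = masm.label();      // top of the loop
/* ... loop body ... */
masm.sub32(MacroAssembler::Imm32(1), counterReg);
masm.branch32(MacroAssembler::NotEqual, counterReg,
              MacroAssembler::Imm32(0), loopHead);  // backwards branch to the label
#endif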
// Ptr methods
// On 32-bit platforms (i.e. x86), these methods directly map onto their 32-bit equivalents.
// FIXME: should this use a test for 32-bitness instead of this specific exception?
#if !WTF_CPU_X86_64
void addPtr(RegisterID src, RegisterID dest)
{
add32(src, dest);
}
void addPtr(Imm32 imm32, Address address)
{
add32(imm32, address);
}
void addPtr(Imm32 imm, RegisterID srcDest)
{
add32(imm, srcDest);
}
void addPtr(ImmPtr imm, RegisterID dest)
{
add32(Imm32(imm), dest);
}
void addPtr(Imm32 imm, RegisterID src, RegisterID dest)
{
add32(imm, src, dest);
}
void andPtr(RegisterID src, RegisterID dest)
{
and32(src, dest);
}
void andPtr(Imm32 imm, RegisterID srcDest)
{
and32(imm, srcDest);
}
void andPtr(ImmPtr ptr, RegisterID srcDest)
{
and32(Imm32(ptr), srcDest);
}
void negPtr(RegisterID srcDest)
{
neg32(srcDest);
}
void notPtr(RegisterID srcDest)
{
not32(srcDest);
}
void orPtr(RegisterID src, RegisterID dest)
{
or32(src, dest);
}
void orPtr(ImmPtr imm, RegisterID dest)
{
or32(Imm32(imm), dest);
}
void orPtr(Imm32 imm, RegisterID dest)
{
or32(imm, dest);
}
void subPtr(RegisterID src, RegisterID dest)
{
sub32(src, dest);
}
void subPtr(Imm32 imm, RegisterID dest)
{
sub32(imm, dest);
}
void subPtr(ImmPtr imm, RegisterID dest)
{
sub32(Imm32(imm), dest);
}
void subPtr(ImmPtr imm, Address address)
{
sub32(Imm32(imm), address);
}
void xorPtr(RegisterID src, RegisterID dest)
{
xor32(src, dest);
}
void xorPtr(Imm32 imm, RegisterID srcDest)
{
xor32(imm, srcDest);
}
void loadPtr(ImplicitAddress address, RegisterID dest)
{
load32(address, dest);
}
void loadPtr(BaseIndex address, RegisterID dest)
{
load32(address, dest);
}
void loadPtr(void* address, RegisterID dest)
{
load32(address, dest);
}
DataLabel32 loadPtrWithAddressOffsetPatch(Address address, RegisterID dest)
{
return load32WithAddressOffsetPatch(address, dest);
}
void setPtr(Condition cond, RegisterID left, Imm32 right, RegisterID dest)
{
set32(cond, left, right, dest);
}
void storePtr(RegisterID src, ImplicitAddress address)
{
store32(src, address);
}
void storePtr(RegisterID src, void* address)
{
store32(src, address);
}
void storePtr(TrustedImmPtr imm, ImplicitAddress address)
{
store32(Imm32(imm), address);
}
void storePtr(TrustedImmPtr imm, BaseIndex address)
{
store32(Imm32(imm), address);
}
void storePtr(TrustedImmPtr imm, void* address)
{
store32(Imm32(imm), address);
}
DataLabel32 storePtrWithAddressOffsetPatch(RegisterID src, Address address)
{
return store32WithAddressOffsetPatch(src, address);
}
Jump branchPtr(Condition cond, RegisterID left, RegisterID right)
{
return branch32(cond, left, right);
}
Jump branchPtr(Condition cond, RegisterID left, ImmPtr right)
{
return branch32(cond, left, Imm32(right));
}
Jump branchPtr(Condition cond, RegisterID left, Imm32 right)
{
return branch32(cond, left, right);
}
Jump branchPtr(Condition cond, RegisterID left, Address right)
{
return branch32(cond, left, right);
}
Jump branchPtr(Condition cond, Address left, RegisterID right)
{
return branch32(cond, left, right);
}
Jump branchPtr(Condition cond, AbsoluteAddress left, RegisterID right)
{
return branch32(cond, left, right);
}
Jump branchPtr(Condition cond, Address left, ImmPtr right)
{
return branch32(cond, left, Imm32(right));
}
Jump branchPtr(Condition cond, AbsoluteAddress left, ImmPtr right, RegisterID scratch)
{
return branch32(cond, left, Imm32(right));
}
Jump branchTestPtr(Condition cond, RegisterID reg, RegisterID mask)
{
return branchTest32(cond, reg, mask);
}
Jump branchTestPtr(Condition cond, RegisterID reg, Imm32 mask = Imm32(-1))
{
return branchTest32(cond, reg, mask);
}
Jump branchTestPtr(Condition cond, Address address, Imm32 mask = Imm32(-1))
{
return branchTest32(cond, address, mask);
}
Jump branchTestPtr(Condition cond, BaseIndex address, Imm32 mask = Imm32(-1))
{
return branchTest32(cond, address, mask);
}
Jump branchAddPtr(Condition cond, RegisterID src, RegisterID dest)
{
return branchAdd32(cond, src, dest);
}
Jump branchSubPtr(Condition cond, Imm32 imm, RegisterID dest)
{
return branchSub32(cond, imm, dest);
}
using MacroAssemblerBase::branchTest8;
Jump branchTest8(Condition cond, ExtendedAddress address, Imm32 mask = Imm32(-1))
{
return MacroAssemblerBase::branchTest8(cond, Address(address.base, address.offset), mask);
}
void rshiftPtr(Imm32 imm, RegisterID dest)
{
rshift32(imm, dest);
}
void lshiftPtr(Imm32 imm, RegisterID dest)
{
lshift32(imm, dest);
}
#endif
};
} // namespace JSC
#endif // ENABLE(ASSEMBLER)
#endif /* assembler_assembler_MacroAssembler_h */

View File

@ -40,6 +40,7 @@
#include <fcntl.h>
#include <unistd.h>
#include <elf.h>
#include <stdio.h>
// lame check for kernel version
// see bug 586550
@ -94,34 +95,6 @@ static bool isVFPPresent()
const bool MacroAssemblerARM::s_isVFPPresent = isVFPPresent();
#if WTF_CPU_ARMV5_OR_LOWER
/* On ARMv5 and below, natural alignment is required. */
void MacroAssemblerARM::load32WithUnalignedHalfWords(BaseIndex address, RegisterID dest)
{
ARMWord op2;
ASSERT(address.scale >= 0 && address.scale <= 3);
op2 = m_assembler.lsl(address.index, static_cast<int>(address.scale));
if (address.offset >= 0 && address.offset + 0x2 <= 0xff) {
m_assembler.add_r(ARMRegisters::S0, address.base, op2);
m_assembler.ldrh_u(dest, ARMRegisters::S0, ARMAssembler::getOp2Byte(address.offset));
m_assembler.ldrh_u(ARMRegisters::S0, ARMRegisters::S0, ARMAssembler::getOp2Byte(address.offset + 0x2));
} else if (address.offset < 0 && address.offset >= -0xff) {
m_assembler.add_r(ARMRegisters::S0, address.base, op2);
m_assembler.ldrh_d(dest, ARMRegisters::S0, ARMAssembler::getOp2Byte(-address.offset));
m_assembler.ldrh_d(ARMRegisters::S0, ARMRegisters::S0, ARMAssembler::getOp2Byte(-address.offset - 0x2));
} else {
m_assembler.ldr_un_imm(ARMRegisters::S0, address.offset);
m_assembler.add_r(ARMRegisters::S0, ARMRegisters::S0, op2);
m_assembler.ldrh_r(dest, address.base, ARMRegisters::S0);
m_assembler.add_r(ARMRegisters::S0, ARMRegisters::S0, ARMAssembler::OP2_IMM | 0x2);
m_assembler.ldrh_r(ARMRegisters::S0, address.base, ARMRegisters::S0);
}
m_assembler.orr_r(dest, dest, m_assembler.lsl(ARMRegisters::S0, 16));
}
#endif
}
#endif // ENABLE(ASSEMBLER) && CPU(ARM_TRADITIONAL)

File diff suppressed because it is too large

File diff suppressed because it is too large

View File

@ -1,229 +0,0 @@
/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
* vim: set ts=8 sts=4 et sw=4 tw=99:
*
* ***** BEGIN LICENSE BLOCK *****
* Copyright (C) 2009 Apple Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
* EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
* CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
* PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
* OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
* ***** END LICENSE BLOCK ***** */
#ifndef assembler_assembler_MacroAssemblerCodeRef_h
#define assembler_assembler_MacroAssemblerCodeRef_h
#include "assembler/wtf/Assertions.h"
#include "assembler/wtf/Platform.h"
#include "assembler/jit/ExecutableAllocator.h"
#if ENABLE_ASSEMBLER
#include "jsutil.h"
// ASSERT_VALID_CODE_POINTER checks that ptr is a non-null pointer, and that it is a valid
// instruction address on the platform (for example, check any alignment requirements).
#if WTF_CPU_ARM_THUMB2
// ARM/thumb instructions must be 16-bit aligned, but all code pointers to be loaded
// into the processor are decorated with the bottom bit set, indicating that this is
// thumb code (as opposed to 32-bit traditional ARM). The first test checks for both
// decorated and undecorated null, and the second test ensures that the pointer is
// decorated.
#define ASSERT_VALID_CODE_POINTER(ptr) \
ASSERT(reinterpret_cast<intptr_t>(ptr) & ~1); \
ASSERT(reinterpret_cast<intptr_t>(ptr) & 1)
#define ASSERT_VALID_CODE_OFFSET(offset) \
ASSERT(!(offset & 1)) // Must be multiple of 2.
#else
#define ASSERT_VALID_CODE_POINTER(ptr) \
ASSERT(ptr)
#define ASSERT_VALID_CODE_OFFSET(offset) // Anything goes!
#endif
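// Illustrative sketch (not part of the original source; not compiled): what the
// Thumb "decoration" amounts to on an ARM/Thumb-2 build. The address is hypothetical.
#if 0
void* code = reinterpret_cast<void*>(0x2000);   // 16-bit aligned Thumb code
MacroAssemblerCodePtr ptr(code);                // stored decorated, as 0x2001
void* branchTarget = ptr.executableAddress();   // 0x2001: what the CPU branches to
void* patchTarget = ptr.dataLocation();         // 0x2000: what patching code writes to
#endif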
namespace JSC {
// FunctionPtr:
//
// FunctionPtr should be used to wrap pointers to C/C++ functions in JSC
// (particularly, the stub functions).
class FunctionPtr {
public:
FunctionPtr()
: m_value(0)
{
}
template<typename FunctionType>
explicit FunctionPtr(FunctionType* value)
#if WTF_COMPILER_RVCT
// RVCT compiler needs C-style cast as it fails with the following error
// Error: #694: reinterpret_cast cannot cast away const or other type qualifiers
: m_value((void*)(value))
#else
: m_value(reinterpret_cast<void*>(value))
#endif
{
ASSERT_VALID_CODE_POINTER(m_value);
}
void* value() const { return m_value; }
void* executableAddress() const { return m_value; }
private:
void* m_value;
};
// ReturnAddressPtr:
//
// ReturnAddressPtr should be used to wrap return addresses generated by processor
// 'call' instructions executed in JIT code. We use return addresses to look up
// exception and optimization information, and to repatch the call instruction
// that is the source of the return address.
class ReturnAddressPtr {
public:
ReturnAddressPtr()
: m_value(0)
{
}
explicit ReturnAddressPtr(void* value)
: m_value(value)
{
ASSERT_VALID_CODE_POINTER(m_value);
}
explicit ReturnAddressPtr(FunctionPtr function)
: m_value(function.value())
{
ASSERT_VALID_CODE_POINTER(m_value);
}
void* value() const { return m_value; }
private:
void* m_value;
};
// MacroAssemblerCodePtr:
//
// MacroAssemblerCodePtr should be used to wrap pointers to JIT generated code.
class MacroAssemblerCodePtr {
public:
MacroAssemblerCodePtr()
: m_value(0)
{
}
explicit MacroAssemblerCodePtr(void* value)
#if WTF_CPU_ARM_THUMB2
// Decorate the pointer as a thumb code pointer.
: m_value(reinterpret_cast<char*>(value) + 1)
#else
: m_value(value)
#endif
{
ASSERT_VALID_CODE_POINTER(m_value);
}
explicit MacroAssemblerCodePtr(ReturnAddressPtr ra)
: m_value(ra.value())
{
ASSERT_VALID_CODE_POINTER(m_value);
}
void* executableAddress() const {
return m_value;
}
#if WTF_CPU_ARM_THUMB2
// To use this pointer as a data address remove the decoration.
void* dataLocation() const { ASSERT_VALID_CODE_POINTER(m_value); return reinterpret_cast<char*>(m_value) - 1; }
#else
void* dataLocation() const { ASSERT_VALID_CODE_POINTER(m_value); return m_value; }
#endif
bool operator!() const
{
return !m_value;
}
ptrdiff_t operator -(const MacroAssemblerCodePtr &other) const
{
JS_ASSERT(m_value);
return reinterpret_cast<uint8_t *>(m_value) -
reinterpret_cast<uint8_t *>(other.m_value);
}
private:
void* m_value;
};
// MacroAssemblerCodeRef:
//
// A reference to a section of JIT generated code. A CodeRef consists of a
// pointer to the code, and a ref pointer to the pool from within which it
// was allocated.
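// Illustrative sketch (not part of the original source; not compiled): the life
// cycle of a CodeRef handed out by LinkBuffer::finalizeCode(). 'linkBuffer' is
// hypothetical.
#if 0
MacroAssemblerCodeRef compiled = linkBuffer.finalizeCode();
void* entry = compiled.code().executableAddress();  // address to call/jump into
// ... later, when the generated code is thrown away:
compiled.release();                                 // poison the code and return the pool space
#endif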
class MacroAssemblerCodeRef {
public:
MacroAssemblerCodeRef()
: m_executablePool(NULL),
m_allocSize(0)
{
}
MacroAssemblerCodeRef(void* code, ExecutablePool* executablePool, size_t allocSize)
: m_code(code)
, m_executablePool(executablePool)
, m_allocSize(allocSize)
{
}
// Release the code memory in this code ref.
void release()
{
if (!m_executablePool)
return;
JS_POISON(m_code.executableAddress(), JS_SWEPT_CODE_PATTERN, m_allocSize);
m_code = MacroAssemblerCodePtr();
// MacroAssemblerCodeRef is only used by Yarr.
m_executablePool->release(m_allocSize, REGEXP_CODE);
m_executablePool = nullptr;
}
MacroAssemblerCodePtr code() const {
return m_code;
}
size_t allocSize() const {
return m_allocSize;
}
MacroAssemblerCodePtr m_code;
ExecutablePool* m_executablePool;
size_t m_allocSize;
};
} // namespace JSC
#endif // ENABLE(ASSEMBLER)
#endif /* assembler_assembler_MacroAssemblerCodeRef_h */

File diff suppressed because it is too large

File diff suppressed because it is too large

View File

@ -40,216 +40,7 @@ namespace JSC {
class MacroAssemblerX86 : public MacroAssemblerX86Common {
public:
MacroAssemblerX86()
{ }
static const Scale ScalePtr = TimesFour;
static const unsigned int TotalRegisters = 8;
using MacroAssemblerX86Common::add32;
using MacroAssemblerX86Common::and32;
using MacroAssemblerX86Common::sub32;
using MacroAssemblerX86Common::or32;
using MacroAssemblerX86Common::load32;
using MacroAssemblerX86Common::store32;
using MacroAssemblerX86Common::branch32;
using MacroAssemblerX86Common::call;
using MacroAssemblerX86Common::loadDouble;
using MacroAssemblerX86Common::storeDouble;
using MacroAssemblerX86Common::convertInt32ToDouble;
void add32(TrustedImm32 imm, RegisterID src, RegisterID dest)
{
m_assembler.leal_mr(imm.m_value, src, dest);
}
void lea(Address address, RegisterID dest)
{
m_assembler.leal_mr(address.offset, address.base, dest);
}
void lea(BaseIndex address, RegisterID dest)
{
m_assembler.leal_mr(address.offset, address.base, address.index, address.scale, dest);
}
void add32(Imm32 imm, AbsoluteAddress address)
{
m_assembler.addl_im(imm.m_value, address.m_ptr);
}
void addWithCarry32(Imm32 imm, AbsoluteAddress address)
{
m_assembler.adcl_im(imm.m_value, address.m_ptr);
}
void and32(Imm32 imm, AbsoluteAddress address)
{
m_assembler.andl_im(imm.m_value, address.m_ptr);
}
void or32(TrustedImm32 imm, AbsoluteAddress address)
{
m_assembler.orl_im(imm.m_value, address.m_ptr);
}
void sub32(TrustedImm32 imm, AbsoluteAddress address)
{
m_assembler.subl_im(imm.m_value, address.m_ptr);
}
void load32(void* address, RegisterID dest)
{
m_assembler.movl_mr(address, dest);
}
void storeDouble(ImmDouble imm, Address address)
{
store32(Imm32(imm.u.s.lsb), address);
store32(Imm32(imm.u.s.msb), Address(address.base, address.offset + 4));
}
void storeDouble(ImmDouble imm, BaseIndex address)
{
store32(Imm32(imm.u.s.lsb), address);
store32(Imm32(imm.u.s.msb),
BaseIndex(address.base, address.index, address.scale, address.offset + 4));
}
DataLabelPtr loadDouble(const void* address, FPRegisterID dest)
{
ASSERT(isSSE2Present());
m_assembler.movsd_mr(address, dest);
return DataLabelPtr(this);
}
void convertInt32ToDouble(AbsoluteAddress src, FPRegisterID dest)
{
m_assembler.cvtsi2sd_mr(src.m_ptr, dest);
}
void convertUInt32ToDouble(RegisterID srcDest, FPRegisterID dest)
{
// Trick is from nanojit/Nativei386.cpp, asm_ui2d.
static const double NegativeOne = 2147483648.0;
// src is [0, 2^32-1]
sub32(Imm32(0x80000000), srcDest);
// Now src is [-2^31, 2^31-1] - int range, but not the same value.
zeroDouble(dest);
convertInt32ToDouble(srcDest, dest);
// dest is now a double with the int range.
// correct the double value by adding (0x80000000).
move(ImmPtr(&NegativeOne), srcDest);
addDouble(Address(srcDest), dest);
}
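// Worked example of the trick above (illustrative note, not in the original):
// for srcDest = 0xFFFFFFFF (4294967295 unsigned):
//   after sub32(0x80000000):        bits are 0x7FFFFFFF, i.e. 2147483647 as signed
//   after convertInt32ToDouble:     dest = 2147483647.0
//   after addDouble(2147483648.0):  dest = 4294967295.0, the original unsigned value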
void store32(TrustedImm32 imm, void* address)
{
m_assembler.movl_i32m(imm.m_value, address);
}
void store32(RegisterID src, void* address)
{
m_assembler.movl_rm(src, address);
}
Jump branch32(Condition cond, AbsoluteAddress left, RegisterID right)
{
m_assembler.cmpl_rm(right, left.m_ptr);
return Jump(m_assembler.jCC(x86Condition(cond)));
}
Jump branch32(Condition cond, AbsoluteAddress left, TrustedImm32 right)
{
m_assembler.cmpl_im(right.m_value, left.m_ptr);
return Jump(m_assembler.jCC(x86Condition(cond)));
}
Call call()
{
return Call(m_assembler.call(), Call::Linkable);
}
Call tailRecursiveCall()
{
return Call::fromTailJump(jump());
}
Call makeTailRecursiveCall(Jump oldJump)
{
return Call::fromTailJump(oldJump);
}
DataLabelPtr moveWithPatch(TrustedImmPtr initialValue, RegisterID dest)
{
m_assembler.movl_i32r(initialValue.asIntptr(), dest);
return DataLabelPtr(this);
}
Jump branchPtrWithPatch(Condition cond, RegisterID left, DataLabelPtr& dataLabel, ImmPtr initialRightValue = ImmPtr(0))
{
m_assembler.cmpl_ir_force32(initialRightValue.asIntptr(), left);
dataLabel = DataLabelPtr(this);
return Jump(m_assembler.jCC(x86Condition(cond)));
}
Jump branchPtrWithPatch(Condition cond, Address left, DataLabelPtr& dataLabel, ImmPtr initialRightValue = ImmPtr(0))
{
m_assembler.cmpl_im_force32(initialRightValue.asIntptr(), left.offset, left.base);
dataLabel = DataLabelPtr(this);
return Jump(m_assembler.jCC(x86Condition(cond)));
}
DataLabelPtr storePtrWithPatch(TrustedImmPtr initialValue, ImplicitAddress address)
{
m_assembler.movl_i32m(initialValue.asIntptr(), address.offset, address.base);
return DataLabelPtr(this);
}
Label loadPtrWithPatchToLEA(Address address, RegisterID dest)
{
Label label(this);
load32(address, dest);
return label;
}
void pushAllRegs()
{
m_assembler.pusha();
}
void popAllRegs()
{
m_assembler.popa();
}
static bool supportsFloatingPoint() { return isSSE2Present(); }
// See comment on MacroAssemblerARMv7::supportsFloatingPointTruncate()
static bool supportsFloatingPointTruncate() { return isSSE2Present(); }
static bool supportsFloatingPointSqrt() { return isSSE2Present(); }
private:
friend class LinkBuffer;
friend class RepatchBuffer;
static void linkCall(void* code, Call call, FunctionPtr function)
{
X86Assembler::linkCall(code, call.m_jmp, function.value());
}
static void repatchCall(CodeLocationCall call, CodeLocationLabel destination)
{
X86Assembler::relinkCall(call.dataLocation(), destination.executableAddress());
}
static void repatchCall(CodeLocationCall call, FunctionPtr destination)
{
X86Assembler::relinkCall(call.dataLocation(), destination.executableAddress());
}
};
} // namespace JSC

File diff suppressed because it is too large

View File

@ -30,571 +30,17 @@
#ifndef assembler_assembler_MacroAssemblerX86_64_h
#define assembler_assembler_MacroAssemblerX86_64_h
#include "mozilla/DebugOnly.h"
#include "assembler/wtf/Platform.h"
#if ENABLE_ASSEMBLER && WTF_CPU_X86_64
#include "assembler/assembler/MacroAssemblerX86Common.h"
#define REPTACH_OFFSET_CALL_R11 3
namespace JSC {
class MacroAssemblerX86_64 : public MacroAssemblerX86Common {
protected:
static const intptr_t MinInt32 = 0xFFFFFFFF80000000;
static const intptr_t MaxInt32 = 0x000000007FFFFFFF;
public:
static const Scale ScalePtr = TimesEight;
static const unsigned int TotalRegisters = 16;
using MacroAssemblerX86Common::add32;
using MacroAssemblerX86Common::and32;
using MacroAssemblerX86Common::or32;
using MacroAssemblerX86Common::sub32;
using MacroAssemblerX86Common::load32;
using MacroAssemblerX86Common::store32;
using MacroAssemblerX86Common::call;
using MacroAssemblerX86Common::loadDouble;
using MacroAssemblerX86Common::storeDouble;
using MacroAssemblerX86Common::convertInt32ToDouble;
void add32(TrustedImm32 imm, AbsoluteAddress address)
{
move(ImmPtr(address.m_ptr), scratchRegister);
add32(imm, Address(scratchRegister));
}
void and32(Imm32 imm, AbsoluteAddress address)
{
move(ImmPtr(address.m_ptr), scratchRegister);
and32(imm, Address(scratchRegister));
}
void or32(TrustedImm32 imm, AbsoluteAddress address)
{
move(ImmPtr(address.m_ptr), scratchRegister);
or32(imm, Address(scratchRegister));
}
void sub32(TrustedImm32 imm, AbsoluteAddress address)
{
move(ImmPtr(address.m_ptr), scratchRegister);
sub32(imm, Address(scratchRegister));
}
void load32(const void* address, RegisterID dest)
{
if (dest == X86Registers::eax)
m_assembler.movl_mEAX(address);
else {
move(ImmPtr(address), scratchRegister);
load32(ImplicitAddress(scratchRegister), dest);
}
}
DataLabelPtr loadDouble(const void* address, FPRegisterID dest)
{
DataLabelPtr label = moveWithPatch(ImmPtr(address), scratchRegister);
loadDouble(ImplicitAddress(scratchRegister), dest);
return label;
}
void convertInt32ToDouble(AbsoluteAddress src, FPRegisterID dest)
{
move(Imm32(*static_cast<const int32_t*>(src.m_ptr)), scratchRegister);
m_assembler.cvtsi2sd_rr(scratchRegister, dest);
}
void convertUInt32ToDouble(RegisterID srcDest, FPRegisterID dest)
{
zeroExtend32ToPtr(srcDest, srcDest);
zeroDouble(dest); // break dependency chains
m_assembler.cvtsq2sd_rr(srcDest, dest);
}
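// Illustrative note (not in the original): after zeroExtend32ToPtr the value is a
// non-negative 64-bit integer in [0, 2^32-1], so the signed 64-bit-to-double
// conversion (cvtsq2sd) reproduces the unsigned 32-bit value exactly.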
void store32(TrustedImm32 imm, void* address)
{
move(X86Registers::eax, scratchRegister);
move(imm, X86Registers::eax);
m_assembler.movl_EAXm(address);
move(scratchRegister, X86Registers::eax);
}
Call call()
{
mozilla::DebugOnly<DataLabelPtr> label = moveWithPatch(ImmPtr(0), scratchRegister);
Call result = Call(m_assembler.call(scratchRegister), Call::Linkable);
ASSERT(differenceBetween(label, result) == REPTACH_OFFSET_CALL_R11);
return result;
}
Call tailRecursiveCall()
{
mozilla::DebugOnly<DataLabelPtr> label = moveWithPatch(ImmPtr(0), scratchRegister);
Jump newJump = Jump(m_assembler.jmp_r(scratchRegister));
ASSERT(differenceBetween(label, newJump) == REPTACH_OFFSET_CALL_R11);
return Call::fromTailJump(newJump);
}
Call makeTailRecursiveCall(Jump oldJump)
{
oldJump.link(this);
mozilla::DebugOnly<DataLabelPtr> label = moveWithPatch(ImmPtr(0), scratchRegister);
Jump newJump = Jump(m_assembler.jmp_r(scratchRegister));
ASSERT(differenceBetween(label, newJump) == REPTACH_OFFSET_CALL_R11);
return Call::fromTailJump(newJump);
}
void addPtr(RegisterID src, RegisterID dest)
{
m_assembler.addq_rr(src, dest);
}
void lea(BaseIndex address, RegisterID dest)
{
m_assembler.leaq_mr(address.offset, address.base, address.index, address.scale, dest);
}
void lea(Address address, RegisterID dest)
{
m_assembler.leaq_mr(address.offset, address.base, dest);
}
void addPtr(Imm32 imm, RegisterID srcDest)
{
m_assembler.addq_ir(imm.m_value, srcDest);
}
void addPtr(ImmPtr imm, RegisterID dest)
{
move(imm, scratchRegister);
m_assembler.addq_rr(scratchRegister, dest);
}
void addPtr(Imm32 imm, RegisterID src, RegisterID dest)
{
m_assembler.leaq_mr(imm.m_value, src, dest);
}
void addPtr(Imm32 imm, Address address)
{
m_assembler.addq_im(imm.m_value, address.offset, address.base);
}
void addPtr(Imm32 imm, AbsoluteAddress address)
{
move(ImmPtr(address.m_ptr), scratchRegister);
addPtr(imm, Address(scratchRegister));
}
void andPtr(RegisterID src, RegisterID dest)
{
m_assembler.andq_rr(src, dest);
}
void andPtr(Address src, RegisterID dest)
{
m_assembler.andq_mr(src.offset, src.base, dest);
}
void andPtr(Imm32 imm, RegisterID srcDest)
{
m_assembler.andq_ir(imm.m_value, srcDest);
}
void andPtr(ImmPtr imm, RegisterID srcDest)
{
intptr_t value = intptr_t(imm.m_value);
// 32-bit immediates in 64-bit ALU ops are sign-extended.
if (value >= MinInt32 && value <= MaxInt32) {
andPtr(Imm32(int(value)), srcDest);
} else {
move(imm, scratchRegister);
m_assembler.andq_rr(scratchRegister, srcDest);
}
}
void negPtr(RegisterID srcDest)
{
m_assembler.negq_r(srcDest);
}
void notPtr(RegisterID srcDest)
{
m_assembler.notq_r(srcDest);
}
void orPtr(Address src, RegisterID dest)
{
m_assembler.orq_mr(src.offset, src.base, dest);
}
void orPtr(RegisterID src, RegisterID dest)
{
m_assembler.orq_rr(src, dest);
}
void orPtr(ImmPtr imm, RegisterID dest)
{
move(imm, scratchRegister);
m_assembler.orq_rr(scratchRegister, dest);
}
void orPtr(Imm32 imm, RegisterID dest)
{
m_assembler.orq_ir(imm.m_value, dest);
}
void subPtr(RegisterID src, RegisterID dest)
{
m_assembler.subq_rr(src, dest);
}
void subPtr(Imm32 imm, RegisterID dest)
{
m_assembler.subq_ir(imm.m_value, dest);
}
void subPtr(ImmPtr imm, RegisterID dest)
{
move(imm, scratchRegister);
m_assembler.subq_rr(scratchRegister, dest);
}
void xorPtr(RegisterID src, RegisterID dest)
{
m_assembler.xorq_rr(src, dest);
}
void xorPtr(Imm32 imm, RegisterID srcDest)
{
m_assembler.xorq_ir(imm.m_value, srcDest);
}
void rshiftPtr(Imm32 imm, RegisterID srcDest)
{
m_assembler.sarq_i8r(imm.m_value, srcDest);
}
void lshiftPtr(Imm32 imm, RegisterID srcDest)
{
m_assembler.shlq_i8r(imm.m_value, srcDest);
}
void loadPtr(ImplicitAddress address, RegisterID dest)
{
m_assembler.movq_mr(address.offset, address.base, dest);
}
void loadPtr(BaseIndex address, RegisterID dest)
{
m_assembler.movq_mr(address.offset, address.base, address.index, address.scale, dest);
}
void loadPtr(const void* address, RegisterID dest)
{
if (dest == X86Registers::eax)
m_assembler.movq_mEAX(address);
else {
move(ImmPtr(address), scratchRegister);
loadPtr(ImplicitAddress(scratchRegister), dest);
}
}
DataLabel32 loadPtrWithAddressOffsetPatch(Address address, RegisterID dest)
{
m_assembler.movq_mr_disp32(address.offset, address.base, dest);
return DataLabel32(this);
}
void storePtr(RegisterID src, ImplicitAddress address)
{
m_assembler.movq_rm(src, address.offset, address.base);
}
void storePtr(TrustedImmPtr imm, BaseIndex address)
{
intptr_t value = intptr_t(imm.m_value);
// The 32-bit immediate in a 64-bit store is sign-extended, so check that the
// value is non-negative and small enough to be stored unchanged.
if (value >= 0 && value < intptr_t(0x7FFFFFFF)) {
m_assembler.movq_i32m(int32_t(value), address.offset, address.base, address.index,
address.scale);
} else {
move(imm, scratchRegister);
storePtr(scratchRegister, address);
}
}
void storePtr(RegisterID src, BaseIndex address)
{
m_assembler.movq_rm(src, address.offset, address.base, address.index, address.scale);
}
void storePtr(RegisterID src, void* address)
{
if (src == X86Registers::eax)
m_assembler.movq_EAXm(address);
else {
move(ImmPtr(address), scratchRegister);
storePtr(src, ImplicitAddress(scratchRegister));
}
}
void storePtr(TrustedImmPtr imm, ImplicitAddress address)
{
intptr_t value = intptr_t(imm.m_value);
// The 32-bit immediate in a 64-bit store is sign-extended, so check that the
// value is non-negative and small enough to be stored unchanged.
if (value >= 0 && value < intptr_t(0x7FFFFFFF)) {
m_assembler.movq_i32m(int32_t(value), address.offset, address.base);
} else {
move(imm, scratchRegister);
storePtr(scratchRegister, address);
}
}
DataLabel32 storePtrWithAddressOffsetPatch(RegisterID src, Address address)
{
m_assembler.movq_rm_disp32(src, address.offset, address.base);
return DataLabel32(this);
}
void move32(RegisterID src, RegisterID dest)
{
// the upper 32 bits will be 0
m_assembler.movl_rr(src, dest);
}
void movePtrToDouble(RegisterID src, FPRegisterID dest)
{
m_assembler.movq_rr(src, dest);
}
void moveDoubleToPtr(FPRegisterID src, RegisterID dest)
{
m_assembler.movq_rr(src, dest);
}
void setPtr(Condition cond, RegisterID left, Imm32 right, RegisterID dest)
{
if (((cond == Equal) || (cond == NotEqual)) && !right.m_value)
m_assembler.testq_rr(left, left);
else
m_assembler.cmpq_ir(right.m_value, left);
m_assembler.setCC_r(x86Condition(cond), dest);
m_assembler.movzbl_rr(dest, dest);
}
void setPtr(Condition cond, RegisterID left, RegisterID right, RegisterID dest)
{
m_assembler.cmpq_rr(right, left);
m_assembler.setCC_r(x86Condition(cond), dest);
m_assembler.movzbl_rr(dest, dest);
}
void setPtr(Condition cond, RegisterID left, ImmPtr right, RegisterID dest)
{
move(right, scratchRegister);
setPtr(cond, left, scratchRegister, dest);
}
Jump branchPtr(Condition cond, RegisterID left, RegisterID right)
{
m_assembler.cmpq_rr(right, left);
return Jump(m_assembler.jCC(x86Condition(cond)));
}
Jump branchPtr(Condition cond, RegisterID left, Imm32 right)
{
m_assembler.cmpq_ir(right.m_value, left);
return Jump(m_assembler.jCC(x86Condition(cond)));
}
Jump branchPtr(Condition cond, RegisterID left, ImmPtr right)
{
move(right, scratchRegister);
return branchPtr(cond, left, scratchRegister);
}
Jump branchPtr(Condition cond, RegisterID left, Address right)
{
m_assembler.cmpq_mr(right.offset, right.base, left);
return Jump(m_assembler.jCC(x86Condition(cond)));
}
Jump branchPtr(Condition cond, AbsoluteAddress left, RegisterID right)
{
move(ImmPtr(left.m_ptr), scratchRegister);
return branchPtr(cond, Address(scratchRegister), right);
}
Jump branchPtr(Condition cond, AbsoluteAddress left, ImmPtr right, RegisterID scratch)
{
move(ImmPtr(left.m_ptr), scratch);
return branchPtr(cond, Address(scratch), right);
}
Jump branchPtr(Condition cond, Address left, RegisterID right)
{
m_assembler.cmpq_rm(right, left.offset, left.base);
return Jump(m_assembler.jCC(x86Condition(cond)));
}
Jump branchPtr(Condition cond, Address left, ImmPtr right)
{
move(right, scratchRegister);
return branchPtr(cond, left, scratchRegister);
}
Jump branchTestPtr(Condition cond, RegisterID reg, RegisterID mask)
{
m_assembler.testq_rr(reg, mask);
return Jump(m_assembler.jCC(x86Condition(cond)));
}
Jump branchTestPtr(Condition cond, RegisterID reg, Imm32 mask = Imm32(-1))
{
// if we are only interested in the low seven bits, this can be tested with a testb
if (mask.m_value == -1)
m_assembler.testq_rr(reg, reg);
else if ((mask.m_value & ~0x7f) == 0)
m_assembler.testb_i8r(mask.m_value, reg);
else
m_assembler.testq_i32r(mask.m_value, reg);
return Jump(m_assembler.jCC(x86Condition(cond)));
}
Jump branchTestPtr(Condition cond, Address address, Imm32 mask = Imm32(-1))
{
if (mask.m_value == -1)
m_assembler.cmpq_im(0, address.offset, address.base);
else
m_assembler.testq_i32m(mask.m_value, address.offset, address.base);
return Jump(m_assembler.jCC(x86Condition(cond)));
}
Jump branchTestPtr(Condition cond, BaseIndex address, Imm32 mask = Imm32(-1))
{
if (mask.m_value == -1)
m_assembler.cmpq_im(0, address.offset, address.base, address.index, address.scale);
else
m_assembler.testq_i32m(mask.m_value, address.offset, address.base, address.index, address.scale);
return Jump(m_assembler.jCC(x86Condition(cond)));
}
Jump branchAddPtr(Condition cond, RegisterID src, RegisterID dest)
{
ASSERT((cond == Overflow) || (cond == Zero) || (cond == NonZero));
addPtr(src, dest);
return Jump(m_assembler.jCC(x86Condition(cond)));
}
Jump branchSubPtr(Condition cond, Imm32 imm, RegisterID dest)
{
ASSERT((cond == Overflow) || (cond == Zero) || (cond == NonZero));
subPtr(imm, dest);
return Jump(m_assembler.jCC(x86Condition(cond)));
}
DataLabelPtr moveWithPatch(TrustedImmPtr initialValue, RegisterID dest)
{
m_assembler.movq_i64r(initialValue.asIntptr(), dest);
return DataLabelPtr(this);
}
Jump branchPtrWithPatch(Condition cond, RegisterID left, DataLabelPtr& dataLabel, ImmPtr initialRightValue = ImmPtr(0))
{
dataLabel = moveWithPatch(initialRightValue, scratchRegister);
return branchPtr(cond, left, scratchRegister);
}
Jump branchPtrWithPatch(Condition cond, Address left, DataLabelPtr& dataLabel, ImmPtr initialRightValue = ImmPtr(0))
{
dataLabel = moveWithPatch(initialRightValue, scratchRegister);
return branchPtr(cond, left, scratchRegister);
}
DataLabelPtr storePtrWithPatch(TrustedImmPtr initialValue, ImplicitAddress address)
{
DataLabelPtr label = moveWithPatch(initialValue, scratchRegister);
storePtr(scratchRegister, address);
return label;
}
using MacroAssemblerX86Common::branchTest8;
Jump branchTest8(Condition cond, ExtendedAddress address, Imm32 mask = Imm32(-1))
{
ImmPtr addr(reinterpret_cast<void*>(address.offset));
MacroAssemblerX86Common::move(addr, scratchRegister);
return MacroAssemblerX86Common::branchTest8(cond, BaseIndex(scratchRegister, address.base, TimesOne), mask);
}
Label loadPtrWithPatchToLEA(Address address, RegisterID dest)
{
Label label(this);
loadPtr(address, dest);
return label;
}
void pushAllRegs()
{
for (int i = X86Registers::eax; i <= X86Registers::r15; i++)
m_assembler.push_r((RegisterID)i);
}
void popAllRegs()
{
for (int i = X86Registers::r15; i >= X86Registers::eax; i--)
m_assembler.pop_r((RegisterID)i);
}
void storeDouble(ImmDouble imm, Address address)
{
storePtr(ImmPtr(reinterpret_cast<void *>(imm.u.u64)), address);
}
void storeDouble(ImmDouble imm, BaseIndex address)
{
storePtr(ImmPtr(reinterpret_cast<void *>(imm.u.u64)), address);
}
static bool supportsFloatingPoint() { return true; }
// See comment on MacroAssemblerARMv7::supportsFloatingPointTruncate()
static bool supportsFloatingPointTruncate() { return true; }
static bool supportsFloatingPointSqrt() { return true; }
private:
friend class LinkBuffer;
friend class RepatchBuffer;
static void linkCall(void* code, Call call, FunctionPtr function)
{
if (!call.isFlagSet(Call::Near))
X86Assembler::linkPointer(code, X86Assembler::labelFor(call.m_jmp, -REPTACH_OFFSET_CALL_R11), function.value());
else
X86Assembler::linkCall(code, call.m_jmp, function.value());
}
static void repatchCall(CodeLocationCall call, CodeLocationLabel destination)
{
X86Assembler::repatchPointer(call.dataLabelPtrAtOffset(-REPTACH_OFFSET_CALL_R11).dataLocation(), destination.executableAddress());
}
static void repatchCall(CodeLocationCall call, FunctionPtr destination)
{
X86Assembler::repatchPointer(call.dataLabelPtrAtOffset(-REPTACH_OFFSET_CALL_R11).dataLocation(), destination.executableAddress());
}
};
} // namespace JSC
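For reference, a minimal sketch (not from the removed sources) of how the pointer-sized helpers above are driven; the function name, register choices and constants are illustrative only, and the only header assumed is the one this patch deletes:
#include "assembler/assembler/MacroAssembler.h"
// Hypothetical driver exercising the storePtr/loadPtr/branchTestPtr paths above.
static void sketchPtrOps(JSC::MacroAssembler &masm)
{
    using namespace JSC;
    MacroAssembler::RegisterID base = X86Registers::ebx;
    // Small non-negative constant: fits the direct movq_i32m form.
    masm.storePtr(MacroAssembler::TrustedImmPtr((void *)0x1000),
                  MacroAssembler::ImplicitAddress(base));
    // Large constant: staged through scratchRegister (r11) and then stored.
    masm.storePtr(MacroAssembler::TrustedImmPtr((void *)0x123456789abcULL),
                  MacroAssembler::ImplicitAddress(base));
    // Reload and test only low bits: a mask below 0x80 takes the short testb form.
    masm.loadPtr(MacroAssembler::ImplicitAddress(base), X86Registers::eax);
    MacroAssembler::Jump lowBitsSet =
        masm.branchTestPtr(MacroAssembler::NonZero, X86Registers::eax,
                           MacroAssembler::Imm32(0x40));
    // A real caller would link 'lowBitsSet' to a label; omitted in this sketch.
    masm.ret();
}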

View File

@ -1,163 +0,0 @@
/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
* vim: set ts=8 sts=4 et sw=4 tw=99:
*
* ***** BEGIN LICENSE BLOCK *****
* Copyright (C) 2009 Apple Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
* EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
* CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
* PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
* OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
* ***** END LICENSE BLOCK ***** */
#ifndef assembler_assembler_RepatchBuffer_h
#define assembler_assembler_RepatchBuffer_h
#include "assembler/wtf/Platform.h"
#if ENABLE_ASSEMBLER
#include "assembler/assembler/MacroAssembler.h"
#include "assembler/moco/MocoStubs.h" //MOCO
namespace JSC {
// RepatchBuffer:
//
// This class is used to modify code after code generation has been completed,
// and after the code has potentially already been executed. This mechanism is
// used to apply optimizations to the code.
//
class RepatchBuffer {
typedef MacroAssemblerCodePtr CodePtr;
public:
RepatchBuffer(const MacroAssemblerCodeRef &ref)
{
m_start = ref.m_code.executableAddress();
m_size = ref.m_size;
mprot = true;
if (mprot)
ExecutableAllocator::makeWritable(m_start, m_size);
}
RepatchBuffer(const JITCode &code)
{
m_start = code.start();
m_size = code.size();
mprot = true;
if (mprot)
ExecutableAllocator::makeWritable(m_start, m_size);
}
~RepatchBuffer()
{
if (mprot)
ExecutableAllocator::makeExecutable(m_start, m_size);
}
void relink(CodeLocationJump jump, CodeLocationLabel destination)
{
MacroAssembler::repatchJump(jump, destination);
}
bool canRelink(CodeLocationJump jump, CodeLocationLabel destination)
{
return MacroAssembler::canRepatchJump(jump, destination);
}
void relink(CodeLocationCall call, CodeLocationLabel destination)
{
MacroAssembler::repatchCall(call, destination);
}
void relink(CodeLocationCall call, FunctionPtr destination)
{
MacroAssembler::repatchCall(call, destination);
}
void relink(CodeLocationNearCall nearCall, CodePtr destination)
{
MacroAssembler::repatchNearCall(nearCall, CodeLocationLabel(destination));
}
void relink(CodeLocationNearCall nearCall, CodeLocationLabel destination)
{
MacroAssembler::repatchNearCall(nearCall, destination);
}
void repatch(CodeLocationDataLabel32 dataLabel32, int32_t value)
{
MacroAssembler::repatchInt32(dataLabel32, value);
}
void repatch(CodeLocationDataLabelPtr dataLabelPtr, const void* value)
{
MacroAssembler::repatchPointer(dataLabelPtr, (void*) value);
}
void repatchLoadPtrToLEA(CodeLocationInstruction instruction)
{
MacroAssembler::repatchLoadPtrToLEA(instruction);
}
void repatchLEAToLoadPtr(CodeLocationInstruction instruction)
{
MacroAssembler::repatchLEAToLoadPtr(instruction);
}
void relinkCallerToTrampoline(ReturnAddressPtr returnAddress, CodeLocationLabel label)
{
relink(CodeLocationCall(CodePtr(returnAddress)), label);
}
void relinkCallerToTrampoline(ReturnAddressPtr returnAddress, CodePtr newCalleeFunction)
{
relinkCallerToTrampoline(returnAddress, CodeLocationLabel(newCalleeFunction));
}
void relinkCallerToFunction(ReturnAddressPtr returnAddress, FunctionPtr function)
{
relink(CodeLocationCall(CodePtr(returnAddress)), function);
}
void relinkNearCallerToTrampoline(ReturnAddressPtr returnAddress, CodeLocationLabel label)
{
relink(CodeLocationNearCall(CodePtr(returnAddress)), label);
}
void relinkNearCallerToTrampoline(ReturnAddressPtr returnAddress, CodePtr newCalleeFunction)
{
relinkNearCallerToTrampoline(returnAddress, CodeLocationLabel(newCalleeFunction));
}
protected:
void* m_start;
size_t m_size;
bool mprot;
};
} // namespace JSC
#endif // ENABLE(ASSEMBLER)
#endif /* assembler_assembler_RepatchBuffer_h */
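A minimal sketch of how the class above is meant to be used, assuming the code region and the call site were recorded at generation time; the helper name and its parameters are hypothetical:
#include "assembler/assembler/RepatchBuffer.h"
// Hypothetical helper: re-point an already-linked call site at a new target.
static void repointCall(const JSC::MacroAssemblerCodeRef &ref,
                        JSC::CodeLocationCall call,
                        JSC::FunctionPtr newTarget)
{
    // The constructor makes the region writable; the destructor flips it back
    // to executable, so the patch is finished when 'repatch' goes out of scope.
    JSC::RepatchBuffer repatch(ref);
    repatch.relink(call, newTarget);
}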

File diff suppressed because it is too large

View File

@ -1,38 +0,0 @@
/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
* vim: set ts=8 sts=4 et sw=4 tw=99:
* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
#ifndef assembler_moco_MocoStubs_h
#define assembler_moco_MocoStubs_h
namespace JSC {
class JITCode {
public:
JITCode(void* start, size_t size)
: m_start(start), m_size(size)
{ }
JITCode() { }
void* start() const { return m_start; }
size_t size() const { return m_size; }
private:
void* m_start;
size_t m_size;
};
class CodeBlock {
public:
CodeBlock(JITCode& jc)
: m_jitcode(jc)
{
}
JITCode& getJITCode() { return m_jitcode; }
private:
JITCode& m_jitcode;
};
} // namespace JSC
#endif /* assembler_moco_MocoStubs_h */

View File

@ -1,264 +0,0 @@
/*
* Copyright (C) 2008 Apple Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. Neither the name of Apple Computer, Inc. ("Apple") nor the names of
* its contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY APPLE AND ITS CONTRIBUTORS "AS IS" AND ANY
* EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL APPLE OR ITS CONTRIBUTORS BE LIABLE FOR ANY
* DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef assembler_wtf_SegmentedVector_h
#define assembler_wtf_SegmentedVector_h
#include "js/Utility.h"
#include "js/Vector.h"
namespace WTF {
// An iterator for SegmentedVector. It supports only the prefix ++ operator.
template <typename T, size_t SegmentSize> class SegmentedVector;
template <typename T, size_t SegmentSize> class SegmentedVectorIterator {
private:
friend class SegmentedVector<T, SegmentSize>;
public:
typedef SegmentedVectorIterator<T, SegmentSize> Iterator;
~SegmentedVectorIterator() { }
T& operator*() const { return (*m_vector.m_segments[m_segment])[m_index]; }
T* operator->() const { return &(*m_vector.m_segments[m_segment])[m_index]; }
// Only prefix ++ operator supported
Iterator& operator++()
{
ASSERT(m_index != SegmentSize);
++m_index;
//if (m_index >= m_vector.m_segments.at(m_segment)->size()) {
if (m_index >= m_vector.m_segments[m_segment]->length()) {
//if (m_segment + 1 < m_vector.m_segments.size()) {
if (m_segment + 1 < m_vector.m_segments.length()) {
//ASSERT(m_vector.m_segments.at(m_segment)->size() > 0);
ASSERT(m_vector.m_segments[m_segment]->length() > 0);
++m_segment;
m_index = 0;
} else {
// Points to the "end" symbol
m_segment = 0;
m_index = SegmentSize;
}
}
return *this;
}
bool operator==(const Iterator& other) const
{
return (m_index == other.m_index && m_segment == other.m_segment && &m_vector == &other.m_vector);
}
bool operator!=(const Iterator& other) const
{
return (m_index != other.m_index || m_segment != other.m_segment || &m_vector != &other.m_vector);
}
SegmentedVectorIterator& operator=(const SegmentedVectorIterator<T, SegmentSize>& other)
{
m_vector = other.m_vector;
m_segment = other.m_segment;
m_index = other.m_index;
return *this;
}
private:
SegmentedVectorIterator(SegmentedVector<T, SegmentSize>& vector, size_t segment, size_t index)
: m_vector(vector)
, m_segment(segment)
, m_index(index)
{
}
SegmentedVector<T, SegmentSize>& m_vector;
size_t m_segment;
size_t m_index;
};
// SegmentedVector is just like Vector, but it doesn't move the values
// stored in its buffer when it grows. Therefore, it is safe to keep
// pointers into a SegmentedVector.
template <typename T, size_t SegmentSize> class SegmentedVector {
friend class SegmentedVectorIterator<T, SegmentSize>;
public:
typedef SegmentedVectorIterator<T, SegmentSize> Iterator;
SegmentedVector()
: m_size(0)
{
m_segments.append(&m_inlineSegment);
}
~SegmentedVector()
{
deleteAllSegments();
}
size_t size() const { return m_size; }
bool isEmpty() const { return !size(); }
T& at(size_t index)
{
if (index < SegmentSize)
return m_inlineSegment[index];
return segmentFor(index)->at(subscriptFor(index));
}
T& operator[](size_t index)
{
return at(index);
}
T& last()
{
return at(size() - 1);
}
template <typename U> void append(const U& value)
{
++m_size;
if (m_size <= SegmentSize) {
//m_inlineSegment.uncheckedAppend(value);
m_inlineSegment.append(value);
return;
}
if (!segmentExistsFor(m_size - 1))
m_segments.append(js_new<Segment>());
//segmentFor(m_size - 1)->uncheckedAppend(value);
segmentFor(m_size - 1)->append(value);
}
T& alloc()
{
append<T>(T());
return last();
}
void removeLast()
{
if (m_size <= SegmentSize)
m_inlineSegment.removeLast();
else
segmentFor(m_size - 1)->removeLast();
--m_size;
}
void grow(size_t size)
{
ASSERT(size > m_size);
ensureSegmentsFor(size);
m_size = size;
}
void clear()
{
deleteAllSegments();
m_segments.resize(1);
m_inlineSegment.clear();
m_size = 0;
}
Iterator begin()
{
return Iterator(*this, 0, m_size ? 0 : SegmentSize);
}
Iterator end()
{
return Iterator(*this, 0, SegmentSize);
}
private:
typedef js::Vector<T, SegmentSize, js::SystemAllocPolicy> Segment;
void deleteAllSegments()
{
// Skip the first segment, because it's our inline segment, which was
// not created by new.
//for (size_t i = 1; i < m_segments.size(); i++)
for (size_t i = 1; i < m_segments.length(); i++)
js_delete<Segment>(m_segments[i]);
}
bool segmentExistsFor(size_t index)
{
//return index / SegmentSize < m_segments.size();
return index / SegmentSize < m_segments.length();
}
Segment* segmentFor(size_t index)
{
return m_segments[index / SegmentSize];
}
size_t subscriptFor(size_t index)
{
return index % SegmentSize;
}
void ensureSegmentsFor(size_t size)
{
size_t segmentCount = m_size / SegmentSize;
if (m_size % SegmentSize)
++segmentCount;
//segmentCount = std::max<size_t>(segmentCount, 1); // We always have at least our inline segment.
segmentCount = segmentCount > 1 ? segmentCount : 1; // We always have at least our inline segment.
size_t neededSegmentCount = size / SegmentSize;
if (size % SegmentSize)
++neededSegmentCount;
// Fill up to N - 1 segments.
size_t end = neededSegmentCount - 1;
for (size_t i = segmentCount - 1; i < end; ++i)
ensureSegment(i, SegmentSize);
// Grow segment N to accommodate the remainder.
ensureSegment(end, subscriptFor(size - 1) + 1);
}
void ensureSegment(size_t segmentIndex, size_t size)
{
ASSERT(segmentIndex <= m_segments.length());
if (segmentIndex == m_segments.length())
m_segments.append(js_new<Segment>());
m_segments[segmentIndex]->grow(size);
}
size_t m_size;
Segment m_inlineSegment;
js::Vector<Segment*, 32, js::SystemAllocPolicy> m_segments;
};
} // namespace WTF
using WTF::SegmentedVector;
#endif /* assembler_wtf_SegmentedVector_h */
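A minimal usage sketch of the container above (element type, segment size and counts are arbitrary): because existing segments never move as the vector grows, a pointer taken early stays valid, and the prefix-increment iterator visits every element:
#include "assembler/wtf/SegmentedVector.h"
// Hypothetical example exercising append/last/iteration.
static size_t sketchSegmentedVector()
{
    WTF::SegmentedVector<int, 8> vec;     // 8 elements per segment
    vec.append(1);
    int *stable = &vec.last();            // points into the inline segment
    for (int i = 0; i < 100; i++)         // forces extra heap-allocated segments
        vec.append(i);
    size_t sum = size_t(*stable);         // still valid: elements never moved
    for (WTF::SegmentedVector<int, 8>::Iterator it = vec.begin(); it != vec.end(); ++it)
        sum += size_t(*it);
    return sum;
}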

View File

@ -11,7 +11,6 @@
#include "mozilla/Attributes.h"
#include "mozilla/MathAlgorithms.h"
#include "assembler/assembler/AssemblerBufferWithConstantPool.h"
#include "jit/arm/Architecture-arm.h"
#include "jit/CompactBuffer.h"
#include "jit/IonCode.h"

View File

@ -675,7 +675,6 @@ class AssemblerX86Shared : public AssemblerShared
}
void cmpEAX(Label *label) { cmpSrc(label); }
void bind(Label *label) {
JSC::MacroAssembler::Label jsclabel;
JSC::X86Assembler::JmpDst dst(masm.label());
if (label->used()) {
bool more;
@ -690,7 +689,6 @@ class AssemblerX86Shared : public AssemblerShared
label->bind(dst.offset());
}
void bind(RepatchLabel *label) {
JSC::MacroAssembler::Label jsclabel;
JSC::X86Assembler::JmpDst dst(masm.label());
if (label->used()) {
JSC::X86Assembler::JmpSrc jmp(label->offset());
@ -704,7 +702,6 @@ class AssemblerX86Shared : public AssemblerShared
// Re-routes pending jumps to a new label.
void retarget(Label *label, Label *target) {
JSC::MacroAssembler::Label jsclabel;
if (label->used()) {
bool more;
JSC::X86Assembler::JmpSrc jmp(label->offset());

View File

@ -9,7 +9,6 @@
#include "mozilla/DebugOnly.h"
#include "assembler/wtf/SegmentedVector.h"
#include "jit/IonSpewer.h"
#include "jit/shared/IonAssemblerBuffer.h"

View File

@ -408,7 +408,6 @@ class Assembler : public AssemblerX86Shared
// Re-routes pending jumps to an external target, flushing the label in the
// process.
void retarget(Label *label, ImmPtr target, Relocation::Kind reloc) {
JSC::MacroAssembler::Label jsclabel;
if (label->used()) {
bool more;
JSC::X86Assembler::JmpSrc jmp(label->offset());

View File

@ -373,7 +373,6 @@ if CONFIG['ENABLE_ION']:
]
elif CONFIG['JS_CODEGEN_ARM']:
SOURCES += [
'assembler/assembler/ARMAssembler.cpp',
'assembler/assembler/MacroAssemblerARM.cpp',
]