mirror of https://gitlab.winehq.org/wine/wine-gecko.git
synced 2024-09-13 09:24:08 -07:00

Bug 1166527 - Import ARM64 Architecture and Assembler. r=nbp

This commit is contained in:
parent 3651473f9f
commit 6816932cd0
75	js/src/jit/arm64/Architecture-arm64.cpp	Normal file
@@ -0,0 +1,75 @@
/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
 * vim: set ts=8 sts=4 et sw=4 tw=99:
 * This Source Code Form is subject to the terms of the Mozilla Public
 * License, v. 2.0. If a copy of the MPL was not distributed with this
 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */

#include "jit/arm64/Architecture-arm64.h"

#include <cstring>

#include "jit/RegisterSets.h"

namespace js {
namespace jit {

Registers::Code
Registers::FromName(const char* name)
{
    // Check for some register aliases first.
    if (strcmp(name, "ip0") == 0)
        return ip0;
    if (strcmp(name, "ip1") == 0)
        return ip1;
    if (strcmp(name, "fp") == 0)
        return fp;

    for (uint32_t i = 0; i < Total; i++) {
        if (strcmp(GetName(Code(i)), name) == 0)
            return Code(i);
    }

    return invalid_reg;
}
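// For example, FromName("x3") returns Code(3) via the table scan above, while the
// alias checks map "ip0", "ip1", and "fp" to x16, x17, and x29, since the GetName()
// table spells those registers "x16", "x17", and "x29".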

FloatRegisters::Code
FloatRegisters::FromName(const char* name)
{
    for (size_t i = 0; i < Total; i++) {
        if (strcmp(GetName(Code(i)), name) == 0)
            return Code(i);
    }

    return invalid_fpreg;
}

FloatRegisterSet
FloatRegister::ReduceSetForPush(const FloatRegisterSet& s)
{
    LiveFloatRegisterSet ret;
    for (FloatRegisterIterator iter(s); iter.more(); ++iter)
        ret.addUnchecked(FromCode((*iter).encoding()));
    return ret.set();
}

uint32_t
FloatRegister::GetSizeInBytes(const FloatRegisterSet& s)
{
    return s.size() * sizeof(double);
}

uint32_t
FloatRegister::GetPushSizeInBytes(const FloatRegisterSet& s)
{
    return s.size() * sizeof(double);
}

uint32_t
FloatRegister::getRegisterDumpOffsetInBytes()
{
    // Although registers are 128 bits wide, only the first 64 bits need saving per the ABI.
    return encoding() * sizeof(double);
}
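// For example, d3 (encoding 3) is dumped at byte offset 3 * sizeof(double) == 24.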

} // namespace jit
} // namespace js
462	js/src/jit/arm64/Architecture-arm64.h	Normal file
@@ -0,0 +1,462 @@
/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
 * vim: set ts=8 sts=4 et sw=4 tw=99:
 * This Source Code Form is subject to the terms of the Mozilla Public
 * License, v. 2.0. If a copy of the MPL was not distributed with this
 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */

#ifndef jit_arm64_Architecture_arm64_h
#define jit_arm64_Architecture_arm64_h

#include "mozilla/Assertions.h"
#include "mozilla/MathAlgorithms.h"

#include "js/Utility.h"

namespace js {
namespace jit {

// AArch64 has 32 64-bit integer registers, x0 through x31.
// x31 is special and functions as both the stack pointer and a zero register.
// The bottom 32 bits of each of the X registers are accessible as w0 through w31.
// The program counter is no longer accessible as a register.
// SIMD and scalar floating-point registers share a register bank.
// 32-bit float registers are s0 through s31.
// 64-bit double registers are d0 through d31.
// 128-bit SIMD registers are v0 through v31.
// e.g., s0 is the bottom 32 bits of d0, which is the bottom 64 bits of v0.

// AArch64 Calling Convention:
// x0 - x7: arguments and return value
// x8: indirect result (struct) location
// x9 - x15: temporary registers
// x16 - x17: intra-call-use registers (PLT, linker)
// x18: platform specific use (TLS)
// x19 - x28: callee-saved registers
// x29: frame pointer
// x30: link register

// AArch64 Calling Convention for Floats:
// d0 - d7: arguments and return value
// d8 - d15: callee-saved registers
// Bits 64:128 are not saved for v8-v15.
// d16 - d31: temporary registers

// AArch64 does not have soft float.
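// For example, for a native call
//     double f(int a, void* b, double c, float d);
// a is passed in w0, b in x1, c in d0, d in s1, and the result is returned in d0.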

class Registers {
  public:
    enum RegisterID {
        w0 = 0, x0 = 0,
        w1 = 1, x1 = 1,
        w2 = 2, x2 = 2,
        w3 = 3, x3 = 3,
        w4 = 4, x4 = 4,
        w5 = 5, x5 = 5,
        w6 = 6, x6 = 6,
        w7 = 7, x7 = 7,
        w8 = 8, x8 = 8,
        w9 = 9, x9 = 9,
        w10 = 10, x10 = 10,
        w11 = 11, x11 = 11,
        w12 = 12, x12 = 12,
        w13 = 13, x13 = 13,
        w14 = 14, x14 = 14,
        w15 = 15, x15 = 15,
        w16 = 16, x16 = 16, ip0 = 16, // MacroAssembler scratch register 1.
        w17 = 17, x17 = 17, ip1 = 17, // MacroAssembler scratch register 2.
        w18 = 18, x18 = 18, tls = 18, // Platform-specific use (TLS).
        w19 = 19, x19 = 19,
        w20 = 20, x20 = 20,
        w21 = 21, x21 = 21,
        w22 = 22, x22 = 22,
        w23 = 23, x23 = 23,
        w24 = 24, x24 = 24,
        w25 = 25, x25 = 25,
        w26 = 26, x26 = 26,
        w27 = 27, x27 = 27,
        w28 = 28, x28 = 28,
        w29 = 29, x29 = 29, fp = 29,
        w30 = 30, x30 = 30, lr = 30,
        w31 = 31, x31 = 31, wzr = 31, xzr = 31, sp = 31, // Special: both stack pointer and a zero register.
        invalid_reg
    };
    typedef uint8_t Code;
    typedef uint32_t Encoding;
    typedef uint32_t SetType;

    union RegisterContent {
        uintptr_t r;
    };

    static uint32_t SetSize(SetType x) {
        static_assert(sizeof(SetType) == 4, "SetType must be 32 bits");
        return mozilla::CountPopulation32(x);
    }
    static uint32_t FirstBit(SetType x) {
        return mozilla::CountTrailingZeroes32(x);
    }
    static uint32_t LastBit(SetType x) {
        return 31 - mozilla::CountLeadingZeroes32(x);
    }
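    // For example, with x == (1 << x0) | (1 << x3) | (1 << x30):
    // SetSize(x) == 3, FirstBit(x) == 0, and LastBit(x) == 30.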

    static const char* GetName(Code code) {
        static const char* const Names[] =
            { "x0", "x1", "x2", "x3", "x4", "x5", "x6", "x7", "x8", "x9",
              "x10", "x11", "x12", "x13", "x14", "x15", "x16", "x17", "x18", "x19",
              "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "x29",
              "lr", "sp", "invalid" };
        return Names[code];
    }
    static const char* GetName(uint32_t i) {
        MOZ_ASSERT(i < Total);
        return GetName(Code(i));
    }

    static Code FromName(const char* name);

    // If SP is used as the base register for a memory load or store, then the value
    // of the stack pointer prior to adding any offset must be quadword (16 byte) aligned,
    // or else a stack alignment exception will be generated.
    static const Code StackPointer = sp;

    static const Code Invalid = invalid_reg;

    static const uint32_t Total = 32;
    static const uint32_t TotalPhys = 32;
    static const uint32_t Allocatable = 27; // No named special-function registers.

    static const SetType AllMask = 0xFFFFFFFF;

    static const SetType ArgRegMask =
        (1 << Registers::x0) | (1 << Registers::x1) |
        (1 << Registers::x2) | (1 << Registers::x3) |
        (1 << Registers::x4) | (1 << Registers::x5) |
        (1 << Registers::x6) | (1 << Registers::x7) |
        (1 << Registers::x8);

    static const SetType VolatileMask =
        (1 << Registers::x0) | (1 << Registers::x1) |
        (1 << Registers::x2) | (1 << Registers::x3) |
        (1 << Registers::x4) | (1 << Registers::x5) |
        (1 << Registers::x6) | (1 << Registers::x7) |
        (1 << Registers::x8) | (1 << Registers::x9) |
        (1 << Registers::x10) | (1 << Registers::x11) |
        (1 << Registers::x11) | (1 << Registers::x12) |
        (1 << Registers::x13) | (1 << Registers::x14) |
        (1 << Registers::x14) | (1 << Registers::x15) |
        (1 << Registers::x16) | (1 << Registers::x17) |
        (1 << Registers::x18);

    static const SetType NonVolatileMask =
        (1 << Registers::x19) | (1 << Registers::x20) |
        (1 << Registers::x21) | (1 << Registers::x22) |
        (1 << Registers::x23) | (1 << Registers::x24) |
        (1 << Registers::x25) | (1 << Registers::x26) |
        (1 << Registers::x27) | (1 << Registers::x28) |
        (1 << Registers::x29) | (1 << Registers::x30);

    static const SetType SingleByteRegs = VolatileMask | NonVolatileMask;

    static const SetType NonAllocatableMask =
        (1 << Registers::x28) | // PseudoStackPointer.
        (1 << Registers::ip0) | // First scratch register.
        (1 << Registers::ip1) | // Second scratch register.
        (1 << Registers::tls) |
        (1 << Registers::lr) |
        (1 << Registers::sp);

    // Registers that can be allocated without being saved, generally.
    static const SetType TempMask = VolatileMask & ~NonAllocatableMask;

    static const SetType WrapperMask = VolatileMask;

    // Registers returned from a JS -> JS call.
    static const SetType JSCallMask = (1 << Registers::x2);

    // Registers returned from a JS -> C call.
    static const SetType CallMask = (1 << Registers::x0);

    static const SetType AllocatableMask = AllMask & ~NonAllocatableMask;
};

// Smallest integer type that can hold a register bitmask.
typedef uint32_t PackedRegisterMask;

template <typename T>
class TypedRegisterSet;

class FloatRegisters
{
  public:
    enum FPRegisterID {
        s0 = 0, d0 = 0, v0 = 0,
        s1 = 1, d1 = 1, v1 = 1,
        s2 = 2, d2 = 2, v2 = 2,
        s3 = 3, d3 = 3, v3 = 3,
        s4 = 4, d4 = 4, v4 = 4,
        s5 = 5, d5 = 5, v5 = 5,
        s6 = 6, d6 = 6, v6 = 6,
        s7 = 7, d7 = 7, v7 = 7,
        s8 = 8, d8 = 8, v8 = 8,
        s9 = 9, d9 = 9, v9 = 9,
        s10 = 10, d10 = 10, v10 = 10,
        s11 = 11, d11 = 11, v11 = 11,
        s12 = 12, d12 = 12, v12 = 12,
        s13 = 13, d13 = 13, v13 = 13,
        s14 = 14, d14 = 14, v14 = 14,
        s15 = 15, d15 = 15, v15 = 15,
        s16 = 16, d16 = 16, v16 = 16,
        s17 = 17, d17 = 17, v17 = 17,
        s18 = 18, d18 = 18, v18 = 18,
        s19 = 19, d19 = 19, v19 = 19,
        s20 = 20, d20 = 20, v20 = 20,
        s21 = 21, d21 = 21, v21 = 21,
        s22 = 22, d22 = 22, v22 = 22,
        s23 = 23, d23 = 23, v23 = 23,
        s24 = 24, d24 = 24, v24 = 24,
        s25 = 25, d25 = 25, v25 = 25,
        s26 = 26, d26 = 26, v26 = 26,
        s27 = 27, d27 = 27, v27 = 27,
        s28 = 28, d28 = 28, v28 = 28,
        s29 = 29, d29 = 29, v29 = 29,
        s30 = 30, d30 = 30, v30 = 30,
        s31 = 31, d31 = 31, v31 = 31, // Scratch register.
        invalid_fpreg
    };
    typedef uint8_t Code;
    typedef FPRegisterID Encoding;
    typedef uint64_t SetType;

    static const char* GetName(Code code) {
        static const char* const Names[] =
            { "d0", "d1", "d2", "d3", "d4", "d5", "d6", "d7", "d8", "d9",
              "d10", "d11", "d12", "d13", "d14", "d15", "d16", "d17", "d18", "d19",
              "d20", "d21", "d22", "d23", "d24", "d25", "d26", "d27", "d28", "d29",
              "d30", "d31", "invalid" };
        return Names[code];
    }

    static const char* GetName(uint32_t i) {
        MOZ_ASSERT(i < TotalPhys);
        return GetName(Code(i));
    }

    static Code FromName(const char* name);

    static const Code Invalid = invalid_fpreg;

    static const uint32_t Total = 64;
    static const uint32_t TotalPhys = 32;
    static const SetType AllMask = 0xFFFFFFFFFFFFFFFFULL;
    static const SetType AllPhysMask = 0xFFFFFFFFULL;
    static const SetType SpreadCoefficient = 0x100000001ULL;
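    // Multiplying a 32-bit physical-register mask by SpreadCoefficient replicates
    // it into both halves of the 64-bit SetType, setting the Double alias (bits
    // 0-31) and the Single alias (bits 32-63) of each register at once. For
    // example, (SetType(1) << d0) * SpreadCoefficient sets bit 0 (d0) and bit 32 (s0).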

    static const uint32_t Allocatable = 31; // Without d31, the scratch register.

    // d31 is the ScratchFloatReg.
    static const SetType NonVolatileMask =
        SetType((1 << FloatRegisters::d8) | (1 << FloatRegisters::d9) |
                (1 << FloatRegisters::d10) | (1 << FloatRegisters::d11) |
                (1 << FloatRegisters::d12) | (1 << FloatRegisters::d13) |
                (1 << FloatRegisters::d14) | (1 << FloatRegisters::d15) |
                (1 << FloatRegisters::d16) | (1 << FloatRegisters::d17) |
                (1 << FloatRegisters::d18) | (1 << FloatRegisters::d19) |
                (1 << FloatRegisters::d20) | (1 << FloatRegisters::d21) |
                (1 << FloatRegisters::d22) | (1 << FloatRegisters::d23) |
                (1 << FloatRegisters::d24) | (1 << FloatRegisters::d25) |
                (1 << FloatRegisters::d26) | (1 << FloatRegisters::d27) |
                (1 << FloatRegisters::d28) | (1 << FloatRegisters::d29) |
                (1 << FloatRegisters::d30)) * SpreadCoefficient;

    static const SetType VolatileMask = AllMask & ~NonVolatileMask;
    static const SetType AllDoubleMask = AllMask;

    static const SetType WrapperMask = VolatileMask;

    // d31 is the ScratchFloatReg.
    static const SetType NonAllocatableMask = (SetType(1) << FloatRegisters::d31) * SpreadCoefficient;

    // Registers that can be allocated without being saved, generally.
    static const SetType TempMask = VolatileMask & ~NonAllocatableMask;

    static const SetType AllocatableMask = AllMask & ~NonAllocatableMask;
    union RegisterContent {
        float s;
        double d;
    };
    enum Kind {
        Double,
        Single
    };
};

// In bytes: slots needed for potential memory->memory move spills.
// +8 for cycles
// +8 for gpr spills
// +8 for double spills
static const uint32_t ION_FRAME_SLACK_SIZE = 24;

static const uint32_t ShadowStackSpace = 0;

static const uint32_t ABIStackAlignment = 16;
static const uint32_t CodeAlignment = 16;
static const bool StackKeptAligned = false;

// Although sp is only usable if 16-byte alignment is kept,
// the Pseudo-StackPointer enables use of 8-byte alignment.
static const uint32_t StackAlignment = 8;
static const uint32_t NativeFrameSize = 8;

struct FloatRegister
{
    typedef FloatRegisters Codes;
    typedef Codes::Code Code;
    typedef Codes::Encoding Encoding;
    typedef Codes::SetType SetType;

    union RegisterContent {
        float s;
        double d;
    };

    constexpr FloatRegister(uint32_t code, FloatRegisters::Kind k)
      : code_(FloatRegisters::Code(code & 31)),
        k_(k)
    { }

    constexpr FloatRegister(uint32_t code)
      : code_(FloatRegisters::Code(code & 31)),
        k_(FloatRegisters::Kind(code >> 5))
    { }

    constexpr FloatRegister()
      : code_(FloatRegisters::Code(-1)),
        k_(FloatRegisters::Double)
    { }

    static uint32_t SetSize(SetType x) {
        static_assert(sizeof(SetType) == 8, "SetType must be 64 bits");
        x |= x >> FloatRegisters::TotalPhys;
        x &= FloatRegisters::AllPhysMask;
        return mozilla::CountPopulation32(x);
    }

    static FloatRegister FromCode(uint32_t i) {
        MOZ_ASSERT(i < FloatRegisters::Total);
        FloatRegister r(i);
        return r;
    }
    Code code() const {
        MOZ_ASSERT((uint32_t)code_ < FloatRegisters::Total);
        return Code(code_ | (k_ << 5));
    }
    Encoding encoding() const {
        return Encoding(code_);
    }
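    // For example, d3 has code() == 3 while s3 has code() == 3 | (Single << 5) == 35;
    // both share encoding() == 3, the underlying physical register number.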

    const char* name() const {
        return FloatRegisters::GetName(code());
    }
    bool volatile_() const {
        return !!((SetType(1) << code()) & FloatRegisters::VolatileMask);
    }
    bool operator!=(FloatRegister other) const {
        return other.code_ != code_ || other.k_ != k_;
    }
    bool operator==(FloatRegister other) const {
        return other.code_ == code_ && other.k_ == k_;
    }
    bool aliases(FloatRegister other) const {
        return other.code_ == code_;
    }
    uint32_t numAliased() const {
        return 2;
    }
    static FloatRegisters::Kind otherkind(FloatRegisters::Kind k) {
        if (k == FloatRegisters::Double)
            return FloatRegisters::Single;
        return FloatRegisters::Double;
    }
    void aliased(uint32_t aliasIdx, FloatRegister* ret) {
        if (aliasIdx == 0)
            *ret = *this;
        else
            *ret = FloatRegister(code_, otherkind(k_));
    }
    // This function mostly exists for the ARM backend. It is to ensure that two
    // floating point registers' types are equivalent. e.g. S0 is not equivalent
    // to D16, since S0 holds a float32, and D16 holds a Double.
    // Since all floating point registers on x86 and x64 are equivalent, it is
    // reasonable for this function to do the same.
    bool equiv(FloatRegister other) const {
        return k_ == other.k_;
    }
    MOZ_CONSTEXPR uint32_t size() const {
        return k_ == FloatRegisters::Double ? sizeof(double) : sizeof(float);
    }
    uint32_t numAlignedAliased() {
        return numAliased();
    }
    void alignedAliased(uint32_t aliasIdx, FloatRegister* ret) {
        MOZ_ASSERT(aliasIdx == 0);
        aliased(aliasIdx, ret);
    }
    SetType alignedOrDominatedAliasedSet() const {
        return Codes::SpreadCoefficient << code_;
    }

    bool isSingle() const {
        return k_ == FloatRegisters::Single;
    }
    bool isDouble() const {
        return k_ == FloatRegisters::Double;
    }
    bool isInt32x4() const {
        return false;
    }
    bool isFloat32x4() const {
        return false;
    }

    static uint32_t FirstBit(SetType x) {
        JS_STATIC_ASSERT(sizeof(SetType) == 8);
        return mozilla::CountTrailingZeroes64(x);
    }
    static uint32_t LastBit(SetType x) {
        JS_STATIC_ASSERT(sizeof(SetType) == 8);
        return 63 - mozilla::CountLeadingZeroes64(x);
    }

    static TypedRegisterSet<FloatRegister> ReduceSetForPush(const TypedRegisterSet<FloatRegister>& s);
    static uint32_t GetSizeInBytes(const TypedRegisterSet<FloatRegister>& s);
    static uint32_t GetPushSizeInBytes(const TypedRegisterSet<FloatRegister>& s);
    uint32_t getRegisterDumpOffsetInBytes();

  public:
    Code code_ : 8;
    FloatRegisters::Kind k_ : 1;
};

// ARM/D32 has double registers that cannot be treated as float32.
// Luckily, ARMv8 doesn't have the same misfortune.
inline bool
hasUnaliasedDouble()
{
    return false;
}

// ARM prior to ARMv8 also has doubles that alias multiple floats.
// Again, ARMv8 is in the clear.
inline bool
hasMultiAlias()
{
    return false;
}

static const size_t AsmJSCheckedImmediateRange = 0;
static const size_t AsmJSImmediateRange = 0;

} // namespace jit
} // namespace js

#endif // jit_arm64_Architecture_arm64_h
626	js/src/jit/arm64/Assembler-arm64.cpp	Normal file
@@ -0,0 +1,626 @@
/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
 * vim: set ts=8 sts=4 et sw=4 tw=99:
 * This Source Code Form is subject to the terms of the Mozilla Public
 * License, v. 2.0. If a copy of the MPL was not distributed with this
 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */

#include "jit/arm64/Assembler-arm64.h"

#include "mozilla/DebugOnly.h"
#include "mozilla/MathAlgorithms.h"

#include "jscompartment.h"
#include "jsutil.h"

#include "gc/Marking.h"

#include "jit/arm64/MacroAssembler-arm64.h"
#include "jit/ExecutableAllocator.h"
#include "jit/JitCompartment.h"

using namespace js;
using namespace js::jit;

using mozilla::CountLeadingZeroes32;
using mozilla::DebugOnly;

// Note this is used for inter-AsmJS calls and may pass arguments and results
// in floating point registers even if the system ABI does not.

ABIArg
ABIArgGenerator::next(MIRType type)
{
    switch (type) {
      case MIRType_Int32:
      case MIRType_Pointer:
        if (intRegIndex_ == NumIntArgRegs) {
            current_ = ABIArg(stackOffset_);
            stackOffset_ += sizeof(uintptr_t);
            break;
        }
        current_ = ABIArg(Register::FromCode(intRegIndex_));
        intRegIndex_++;
        break;

      case MIRType_Float32:
      case MIRType_Double:
        if (floatRegIndex_ == NumFloatArgRegs) {
            current_ = ABIArg(stackOffset_);
            stackOffset_ += sizeof(double);
            break;
        }
        current_ = ABIArg(FloatRegister(floatRegIndex_,
                                        type == MIRType_Double ? FloatRegisters::Double
                                                               : FloatRegisters::Single));
        floatRegIndex_++;
        break;

      default:
        MOZ_CRASH("Unexpected argument type");
    }
    return current_;
}
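// For example, an argument sequence (Int32, Double, Int32, Double) is assigned
// x0, d0, x1, d1 in turn; once the eight integer or eight float argument
// registers are used up, further arguments of that class fall back to 8-byte
// stack slots.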

const Register ABIArgGenerator::NonArgReturnReg0 = r8;
const Register ABIArgGenerator::NonArgReturnReg1 = r9;
const Register ABIArgGenerator::NonVolatileReg = r1;
const Register ABIArgGenerator::NonArg_VolatileReg = r13;
const Register ABIArgGenerator::NonReturn_VolatileReg0 = r2;
const Register ABIArgGenerator::NonReturn_VolatileReg1 = r3;

namespace js {
namespace jit {

void
Assembler::finish()
{
    armbuffer_.flushPool();

    // The extended jump table is part of the code buffer.
    ExtendedJumpTable_ = emitExtendedJumpTable();
    Assembler::FinalizeCode();

    // The jump relocation table starts with a fixed-width integer pointing
    // to the start of the extended jump table.
    if (tmpJumpRelocations_.length())
        jumpRelocations_.writeFixedUint32_t(toFinalOffset(ExtendedJumpTable_));

    for (unsigned int i = 0; i < tmpJumpRelocations_.length(); i++) {
        JumpRelocation& reloc = tmpJumpRelocations_[i];

        // Each entry in the relocations table is an (offset, extendedTableIndex) pair.
        jumpRelocations_.writeUnsigned(toFinalOffset(reloc.jump));
        jumpRelocations_.writeUnsigned(reloc.extendedTableIndex);
    }

    for (unsigned int i = 0; i < tmpDataRelocations_.length(); i++)
        dataRelocations_.writeUnsigned(toFinalOffset(tmpDataRelocations_[i]));

    for (unsigned int i = 0; i < tmpPreBarriers_.length(); i++)
        preBarriers_.writeUnsigned(toFinalOffset(tmpPreBarriers_[i]));
}

BufferOffset
Assembler::emitExtendedJumpTable()
{
    if (!pendingJumps_.length() || oom())
        return BufferOffset();

    armbuffer_.flushPool();
    armbuffer_.align(SizeOfJumpTableEntry);

    BufferOffset tableOffset = armbuffer_.nextOffset();

    for (size_t i = 0; i < pendingJumps_.length(); i++) {
        // Each JumpTableEntry is of the form:
        //   LDR ip0 [PC, 8]
        //   BR ip0
        //   [Patchable 8-byte constant low bits]
        //   [Patchable 8-byte constant high bits]
        DebugOnly<size_t> preOffset = size_t(armbuffer_.nextOffset().getOffset());

        ldr(vixl::ip0, ptrdiff_t(8 / vixl::kInstructionSize));
        br(vixl::ip0);

        DebugOnly<size_t> prePointer = size_t(armbuffer_.nextOffset().getOffset());
        MOZ_ASSERT(prePointer - preOffset == OffsetOfJumpTableEntryPointer);

        brk(0x0);
        brk(0x0);

        DebugOnly<size_t> postOffset = size_t(armbuffer_.nextOffset().getOffset());

        MOZ_ASSERT(postOffset - preOffset == SizeOfJumpTableEntry);
    }

    return tableOffset;
}
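// Each entry above occupies four instruction slots (LDR, BR, and two BRK
// placeholders that are later overwritten with the 8-byte target pointer), which
// is why the assertions expect the pointer at OffsetOfJumpTableEntryPointer (two
// instructions in) and a total footprint of SizeOfJumpTableEntry.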

void
Assembler::executableCopy(uint8_t* buffer)
{
    // Copy the code and all constant pools into the output buffer.
    armbuffer_.executableCopy(buffer);

    // Patch any relative jumps that target code outside the buffer.
    // The extended jump table may be used for distant jumps.
    for (size_t i = 0; i < pendingJumps_.length(); i++) {
        RelativePatch& rp = pendingJumps_[i];

        if (!rp.target) {
            // The patch target is nullptr for jumps that have been linked to
            // a label within the same code block, but may be repatched later
            // to jump to a different code block.
            continue;
        }

        Instruction* target = (Instruction*)rp.target;
        Instruction* branch = (Instruction*)(buffer + toFinalOffset(rp.offset));
        JumpTableEntry* extendedJumpTable =
            reinterpret_cast<JumpTableEntry*>(buffer + toFinalOffset(ExtendedJumpTable_));
        if (branch->BranchType() != vixl::UnknownBranchType) {
            if (branch->IsTargetReachable(target)) {
                branch->SetImmPCOffsetTarget(target);
            } else {
                JumpTableEntry* entry = &extendedJumpTable[i];
                branch->SetImmPCOffsetTarget(entry->getLdr());
                entry->data = target;
            }
        } else {
            // This is currently a two-instruction call; it should be possible to optimize
            // it into a single-instruction call plus a nop in some instances, but this will work.
        }
    }
}

BufferOffset
Assembler::immPool(ARMRegister dest, uint8_t* value, vixl::LoadLiteralOp op, ARMBuffer::PoolEntry* pe)
{
    uint32_t inst = op | Rt(dest);
    const size_t numInst = 1;
    const unsigned sizeOfPoolEntryInBytes = 4;
    const unsigned numPoolEntries = sizeof(value) / sizeOfPoolEntryInBytes;
    return armbuffer_.allocEntry(numInst, numPoolEntries, (uint8_t*)&inst, value, pe);
}
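// Note that `value` is a pointer parameter, so sizeof(value) is 8: each 64-bit
// literal occupies two consecutive 4-byte pool entries next to the single
// LDR-literal instruction allocated here.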

BufferOffset
Assembler::immPool64(ARMRegister dest, uint64_t value, ARMBuffer::PoolEntry* pe)
{
    return immPool(dest, (uint8_t*)&value, vixl::LDR_x_lit, pe);
}

BufferOffset
Assembler::immPool64Branch(RepatchLabel* label, ARMBuffer::PoolEntry* pe, Condition c)
{
    MOZ_CRASH("immPool64Branch");
}

BufferOffset
Assembler::fImmPool(ARMFPRegister dest, uint8_t* value, vixl::LoadLiteralOp op)
{
    uint32_t inst = op | Rt(dest);
    const size_t numInst = 1;
    const unsigned sizeOfPoolEntryInBits = 32;
    const unsigned numPoolEntries = dest.size() / sizeOfPoolEntryInBits;
    return armbuffer_.allocEntry(numInst, numPoolEntries, (uint8_t*)&inst, value);
}

BufferOffset
Assembler::fImmPool64(ARMFPRegister dest, double value)
{
    return fImmPool(dest, (uint8_t*)&value, vixl::LDR_d_lit);
}
BufferOffset
Assembler::fImmPool32(ARMFPRegister dest, float value)
{
    return fImmPool(dest, (uint8_t*)&value, vixl::LDR_s_lit);
}

void
Assembler::bind(Label* label, BufferOffset targetOffset)
{
    // Nothing has seen the label yet: just mark the location.
    if (!label->used()) {
        label->bind(targetOffset.getOffset());
        return;
    }

    // Get the most recent instruction that used the label, as stored in the label.
    // This instruction is the head of an implicit linked list of label uses.
    uint32_t branchOffset = label->offset();

    while ((int32_t)branchOffset != LabelBase::INVALID_OFFSET) {
        Instruction* link = getInstructionAt(BufferOffset(branchOffset));

        // Before overwriting the offset in this instruction, get the offset of
        // the next link in the implicit branch list.
        uint32_t nextLinkOffset = uint32_t(link->ImmPCRawOffset());
        if (nextLinkOffset != uint32_t(LabelBase::INVALID_OFFSET))
            nextLinkOffset += branchOffset;
        // Linking against the actual (Instruction*) would be invalid,
        // since that Instruction could be anywhere in memory.
        // Instead, just link against the correct relative offset, assuming
        // no constant pools, which will be taken into consideration
        // during finalization.
        ptrdiff_t relativeByteOffset = targetOffset.getOffset() - branchOffset;
        Instruction* target = (Instruction*)(((uint8_t*)link) + relativeByteOffset);

        // Write a new relative offset into the instruction.
        link->SetImmPCOffsetTarget(target);
        branchOffset = nextLinkOffset;
    }

    // Bind the label, so that future uses may encode the offset immediately.
    label->bind(targetOffset.getOffset());
}

void
Assembler::bind(RepatchLabel* label)
{
    // Nothing has seen the label yet: just mark the location.
    if (!label->used()) {
        label->bind(nextOffset().getOffset());
        return;
    }
    int branchOffset = label->offset();
    Instruction* inst = getInstructionAt(BufferOffset(branchOffset));
    inst->SetImmPCOffsetTarget(inst + nextOffset().getOffset() - branchOffset);
}

void
Assembler::trace(JSTracer* trc)
{
    for (size_t i = 0; i < pendingJumps_.length(); i++) {
        RelativePatch& rp = pendingJumps_[i];
        if (rp.kind == Relocation::JITCODE) {
            JitCode* code = JitCode::FromExecutable((uint8_t*)rp.target);
            TraceManuallyBarrieredEdge(trc, &code, "masmrel32");
            MOZ_ASSERT(code == JitCode::FromExecutable((uint8_t*)rp.target));
        }
    }

    // TODO: Trace.
#if 0
    if (tmpDataRelocations_.length())
        ::TraceDataRelocations(trc, &armbuffer_, &tmpDataRelocations_);
#endif
}

void
Assembler::addJumpRelocation(BufferOffset src, Relocation::Kind reloc)
{
    // Only JITCODE relocations are patchable at runtime.
    MOZ_ASSERT(reloc == Relocation::JITCODE);

    // Each relocation requires an entry in the extended jump table.
    tmpJumpRelocations_.append(JumpRelocation(src, pendingJumps_.length()));
}

void
Assembler::addPendingJump(BufferOffset src, ImmPtr target, Relocation::Kind reloc)
{
    MOZ_ASSERT(target.value != nullptr);

    if (reloc == Relocation::JITCODE)
        addJumpRelocation(src, reloc);

    // This jump is not patchable at runtime. Extended jump table entry requirements
    // cannot be known until finalization, so to be safe, give each jump an entry.
    // This also causes GC tracing of the target.
    enoughMemory_ &= pendingJumps_.append(RelativePatch(src, target.value, reloc));
}

size_t
Assembler::addPatchableJump(BufferOffset src, Relocation::Kind reloc)
{
    MOZ_CRASH("TODO: This is currently unused (and untested)");
    if (reloc == Relocation::JITCODE)
        addJumpRelocation(src, reloc);

    size_t extendedTableIndex = pendingJumps_.length();
    enoughMemory_ &= pendingJumps_.append(RelativePatch(src, nullptr, reloc));
    return extendedTableIndex;
}

void
PatchJump(CodeLocationJump& jump_, CodeLocationLabel label)
{
    MOZ_CRASH("PatchJump");
}

void
Assembler::PatchDataWithValueCheck(CodeLocationLabel label, PatchedImmPtr newValue,
                                   PatchedImmPtr expected)
{
    Instruction* i = (Instruction*)label.raw();
    void** pValue = i->LiteralAddress<void**>();
    MOZ_ASSERT(*pValue == expected.value);
    *pValue = newValue.value;
}

void
Assembler::PatchDataWithValueCheck(CodeLocationLabel label, ImmPtr newValue, ImmPtr expected)
{
    PatchDataWithValueCheck(label, PatchedImmPtr(newValue.value), PatchedImmPtr(expected.value));
}

void
Assembler::ToggleToJmp(CodeLocationLabel inst_)
{
    Instruction* i = (Instruction*)inst_.raw();
    MOZ_ASSERT(i->IsAddSubImmediate());

    // Refer to instruction layout in ToggleToCmp().
    int imm19 = (int)i->Bits(23, 5);
    MOZ_ASSERT(vixl::is_int19(imm19));

    b(i, imm19, Always);
}

void
Assembler::ToggleToCmp(CodeLocationLabel inst_)
{
    Instruction* i = (Instruction*)inst_.raw();
    MOZ_ASSERT(i->IsCondB());

    int imm19 = i->ImmCondBranch();
    // Bit 23 is reserved, and the simulator throws an assertion when this happens.
    // It'll be messy to decode, but we can steal bit 30 or bit 31.
    MOZ_ASSERT(vixl::is_int18(imm19));

    // 31 - 64-bit if set, 32-bit if unset. (OK!)
    // 30 - sub if set, add if unset. (OK!)
    // 29 - SetFlagsBit. Must be set.
    // 22:23 - ShiftAddSub. (OK!)
    // 10:21 - ImmAddSub. (OK!)
    // 5:9 - First source register (Rn). (OK!)
    // 0:4 - Destination Register. Must be xzr.

    // From the above, there is a safe 19-bit contiguous region from 5:23.
    Emit(i, vixl::ThirtyTwoBits | vixl::AddSubImmediateFixed | vixl::SUB | Flags(vixl::SetFlags) |
            Rd(vixl::xzr) | (imm19 << vixl::Rn_offset));
}

void
Assembler::ToggleCall(CodeLocationLabel inst_, bool enabled)
{
    Instruction* first = (Instruction*)inst_.raw();
    Instruction* load;
    Instruction* call;

    if (first->InstructionBits() == 0x9100039f) {
        load = (Instruction*)NextInstruction(first);
        call = NextInstruction(load);
    } else {
        load = first;
        call = NextInstruction(first);
    }

    if (call->IsBLR() == enabled)
        return;

    if (call->IsBLR()) {
        // If the second instruction is blr(), then we have:
        //   ldr x17, [pc, offset]
        //   blr x17
        // We want to transform this to:
        //   adr xzr, [pc, offset]
        //   nop
        int32_t offset = load->ImmLLiteral();
        adr(load, xzr, int32_t(offset));
        nop(call);
    } else {
        // We have:
        //   adr xzr, [pc, offset]
        //   nop
        // Transform this to:
        //   ldr x17, [pc, offset]
        //   blr x17

        int32_t offset = (int)load->ImmPCRawOffset();
        MOZ_ASSERT(vixl::is_int19(offset));
        ldr(load, ScratchReg2_64, int32_t(offset));
        blr(call, ScratchReg2_64);
    }
}

class RelocationIterator
{
    CompactBufferReader reader_;
    uint32_t tableStart_;
    uint32_t offset_;
    uint32_t extOffset_;

  public:
    explicit RelocationIterator(CompactBufferReader& reader)
      : reader_(reader)
    {
        // The first uint32_t stores the extended table offset.
        tableStart_ = reader_.readFixedUint32_t();
    }

    bool read() {
        if (!reader_.more())
            return false;
        offset_ = reader_.readUnsigned();
        extOffset_ = reader_.readUnsigned();
        return true;
    }

    uint32_t offset() const {
        return offset_;
    }
    uint32_t extendedOffset() const {
        return extOffset_;
    }
};

static JitCode*
CodeFromJump(JitCode* code, uint8_t* jump)
{
    Instruction* branch = (Instruction*)jump;
    uint8_t* target;
    // If this is a toggled branch, and is currently off, then we have some 'splainin
    if (branch->BranchType() == vixl::UnknownBranchType)
        target = (uint8_t*)branch->Literal64();
    else
        target = (uint8_t*)branch->ImmPCOffsetTarget();

    // If the jump is within the code buffer, it uses the extended jump table.
    if (target >= code->raw() && target < code->raw() + code->instructionsSize()) {
        MOZ_ASSERT(target + Assembler::SizeOfJumpTableEntry <= code->raw() + code->instructionsSize());

        uint8_t** patchablePtr = (uint8_t**)(target + Assembler::OffsetOfJumpTableEntryPointer);
        target = *patchablePtr;
    }

    return JitCode::FromExecutable(target);
}

void
Assembler::TraceJumpRelocations(JSTracer* trc, JitCode* code, CompactBufferReader& reader)
{
    RelocationIterator iter(reader);
    while (iter.read()) {
        JitCode* child = CodeFromJump(code, code->raw() + iter.offset());
        TraceManuallyBarrieredEdge(trc, &child, "rel32");
        MOZ_ASSERT(child == CodeFromJump(code, code->raw() + iter.offset()));
    }
}

static void
TraceDataRelocations(JSTracer* trc, uint8_t* buffer, CompactBufferReader& reader)
{
    while (reader.more()) {
        size_t offset = reader.readUnsigned();
        Instruction* load = (Instruction*)&buffer[offset];

        // The only valid traceable operation is a 64-bit load to an ARMRegister.
        // Refer to movePatchablePtr() for generation.
        MOZ_ASSERT(load->Mask(vixl::LoadLiteralMask) == vixl::LDR_x_lit);

        uintptr_t* literalAddr = load->LiteralAddress<uintptr_t*>();
        uintptr_t literal = *literalAddr;

        // All pointers on AArch64 will have the top bits cleared.
        // If those bits are not cleared, this must be a Value.
        if (literal >> JSVAL_TAG_SHIFT) {
            jsval_layout layout;
            layout.asBits = literal;
            Value v = IMPL_TO_JSVAL(layout);
            TraceManuallyBarrieredEdge(trc, &v, "ion-masm-value");
            *literalAddr = JSVAL_TO_IMPL(v).asBits;

            // TODO: When we can, flush caches here if a pointer was moved.
            continue;
        }

        // No barriers needed since the pointers are constants.
        TraceManuallyBarrieredGenericPointerEdge(trc, reinterpret_cast<gc::Cell**>(literalAddr),
                                                 "ion-masm-ptr");

        // TODO: Flush caches at end?
    }
}

void
Assembler::TraceDataRelocations(JSTracer* trc, JitCode* code, CompactBufferReader& reader)
{
    ::TraceDataRelocations(trc, code->raw(), reader);
}

void
Assembler::FixupNurseryObjects(JSContext* cx, JitCode* code, CompactBufferReader& reader,
                               const ObjectVector& nurseryObjects)
{
    MOZ_ASSERT(!nurseryObjects.empty());

    uint8_t* buffer = code->raw();
    bool hasNurseryPointers = false;

    while (reader.more()) {
        size_t offset = reader.readUnsigned();
        Instruction* ins = (Instruction*)&buffer[offset];

        uintptr_t* literalAddr = ins->LiteralAddress<uintptr_t*>();
        uintptr_t literal = *literalAddr;

        if (literal >> JSVAL_TAG_SHIFT)
            continue; // This is a Value.

        if (!(literal & 0x1))
            continue;

        uint32_t index = literal >> 1;
        JSObject* obj = nurseryObjects[index];
        *literalAddr = uintptr_t(obj);

        // Either all objects are still in the nursery, or all objects are tenured.
        MOZ_ASSERT_IF(hasNurseryPointers, IsInsideNursery(obj));

        if (!hasNurseryPointers && IsInsideNursery(obj))
            hasNurseryPointers = true;
    }

    if (hasNurseryPointers)
        cx->runtime()->gc.storeBuffer.putWholeCellFromMainThread(code);
}
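// Nursery-object placeholders are recognized above by their encoding as
// (index << 1) | 1: a genuine object pointer is at least word-aligned, so its
// low bit is never set, and anything with bits above JSVAL_TAG_SHIFT is treated
// as a boxed Value rather than a pointer.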

int32_t
Assembler::ExtractCodeLabelOffset(uint8_t* code)
{
    return *(int32_t*)code;
}

void
Assembler::PatchInstructionImmediate(uint8_t* code, PatchedImmPtr imm)
{
    MOZ_CRASH("PatchInstructionImmediate()");
}

void
Assembler::UpdateBoundsCheck(uint32_t heapSize, Instruction* inst)
{
    int32_t mask = ~(heapSize - 1);
    unsigned n, imm_s, imm_r;
    if (!IsImmLogical(mask, 32, &n, &imm_s, &imm_r))
        MOZ_CRASH("Could not encode immediate!?");

    inst->SetImmR(imm_r);
    inst->SetImmS(imm_s);
    inst->SetBitN(n);
}

void
Assembler::retarget(Label* label, Label* target)
{
    if (label->used()) {
        if (target->bound()) {
            bind(label, BufferOffset(target));
        } else if (target->used()) {
            // The target is not bound but used. Prepend label's branch list
            // onto target's.
            BufferOffset labelBranchOffset(label);
            BufferOffset next;

            // Find the head of the use chain for label.
            while (nextLink(labelBranchOffset, &next))
                labelBranchOffset = next;

            // Then patch the head of label's use chain to the tail of target's
            // use chain, prepending the entire use chain of target.
            Instruction* branch = getInstructionAt(labelBranchOffset);
            target->use(label->offset());
            branch->SetImmPCOffsetTarget(branch - labelBranchOffset.getOffset());
        } else {
            // The target is unbound and unused. We can just take the head of
            // the list hanging off of label, and dump that into target.
            DebugOnly<uint32_t> prev = target->use(label->offset());
            MOZ_ASSERT((int32_t)prev == Label::INVALID_OFFSET);
        }
    }
    label->reset();
}

} // namespace jit
} // namespace js
587	js/src/jit/arm64/Assembler-arm64.h	Normal file
@@ -0,0 +1,587 @@
/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
 * vim: set ts=8 sts=4 et sw=4 tw=99:
 * This Source Code Form is subject to the terms of the Mozilla Public
 * License, v. 2.0. If a copy of the MPL was not distributed with this
 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */

#ifndef A64_ASSEMBLER_A64_H_
#define A64_ASSEMBLER_A64_H_

#include "jit/arm64/vixl/Assembler-vixl.h"

#include "jit/JitCompartment.h"

namespace js {
namespace jit {

// VIXL imports.
typedef vixl::Register ARMRegister;
typedef vixl::FPRegister ARMFPRegister;
using vixl::ARMBuffer;
using vixl::Instruction;

static const uint32_t AlignmentAtPrologue = 0;
static const uint32_t AlignmentMidPrologue = 8;
static const Scale ScalePointer = TimesEight;
static const uint32_t AlignmentAtAsmJSPrologue = sizeof(void*);

// The MacroAssembler uses scratch registers extensively and unexpectedly.
// For safety, scratch registers should always be acquired using
// vixl::UseScratchRegisterScope.
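// A typical (hypothetical) use from within MacroAssembler code looks like:
//     vixl::UseScratchRegisterScope temps(this);
//     const ARMRegister scratch64 = temps.AcquireX();
// which reserves one of ip0/ip1 for the scope's lifetime and releases it on exit.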
static constexpr Register ScratchReg = { Registers::ip0 };
static constexpr ARMRegister ScratchReg64 = { ScratchReg, 64 };

static constexpr Register ScratchReg2 = { Registers::ip1 };
static constexpr ARMRegister ScratchReg2_64 = { ScratchReg2, 64 };

static constexpr FloatRegister ScratchDoubleReg = { FloatRegisters::d31 };
static constexpr FloatRegister ReturnDoubleReg = { FloatRegisters::d0 };

static constexpr FloatRegister ReturnFloat32Reg = { FloatRegisters::s0, FloatRegisters::Single };
static constexpr FloatRegister ScratchFloat32Reg = { FloatRegisters::s31, FloatRegisters::Single };

static constexpr Register InvalidReg = { Registers::invalid_reg };
static constexpr FloatRegister InvalidFloatReg = { FloatRegisters::invalid_fpreg };

static constexpr FloatRegister ReturnInt32x4Reg = InvalidFloatReg;
static constexpr FloatRegister ReturnFloat32x4Reg = InvalidFloatReg;

static constexpr Register OsrFrameReg = { Registers::x3 };
static constexpr Register ArgumentsRectifierReg = { Registers::x8 };
static constexpr Register CallTempReg0 = { Registers::x9 };
static constexpr Register CallTempReg1 = { Registers::x10 };
static constexpr Register CallTempReg2 = { Registers::x11 };
static constexpr Register CallTempReg3 = { Registers::x12 };
static constexpr Register CallTempReg4 = { Registers::x13 };
static constexpr Register CallTempReg5 = { Registers::x14 };

static constexpr Register PreBarrierReg = { Registers::x1 };

static constexpr Register ReturnReg = { Registers::x0 };
static constexpr Register JSReturnReg = { Registers::x2 };
static constexpr Register FramePointer = { Registers::fp };
static constexpr Register ZeroRegister = { Registers::sp };
static constexpr ARMRegister ZeroRegister64 = { Registers::sp, 64 };
static constexpr ARMRegister ZeroRegister32 = { Registers::sp, 32 };

static constexpr FloatRegister ReturnFloatReg = { FloatRegisters::d0 };
static constexpr FloatRegister ScratchFloatReg = { FloatRegisters::d31 };

static constexpr FloatRegister ReturnSimdReg = InvalidFloatReg;
static constexpr FloatRegister ScratchSimdReg = InvalidFloatReg;

// StackPointer is intentionally undefined on ARM64 to prevent misuse:
// using sp as a base register is only valid if sp % 16 == 0.
static constexpr Register RealStackPointer = { Registers::sp };
// TODO: We're not quite there yet.
static constexpr Register StackPointer = { Registers::sp };

static constexpr Register PseudoStackPointer = { Registers::x28 };
static constexpr ARMRegister PseudoStackPointer64 = { Registers::x28, 64 };
static constexpr ARMRegister PseudoStackPointer32 = { Registers::x28, 32 };

// StackPointer for use by irregexp.
static constexpr Register RegExpStackPointer = PseudoStackPointer;

static constexpr Register IntArgReg0 = { Registers::x0 };
static constexpr Register IntArgReg1 = { Registers::x1 };
static constexpr Register IntArgReg2 = { Registers::x2 };
static constexpr Register IntArgReg3 = { Registers::x3 };
static constexpr Register IntArgReg4 = { Registers::x4 };
static constexpr Register IntArgReg5 = { Registers::x5 };
static constexpr Register IntArgReg6 = { Registers::x6 };
static constexpr Register IntArgReg7 = { Registers::x7 };
static constexpr Register GlobalReg = { Registers::x20 };
static constexpr Register HeapReg = { Registers::x21 };
static constexpr Register HeapLenReg = { Registers::x22 };

// Define unsized Registers.
#define DEFINE_UNSIZED_REGISTERS(N) \
    static constexpr Register r##N = { Registers::x##N };
REGISTER_CODE_LIST(DEFINE_UNSIZED_REGISTERS)
#undef DEFINE_UNSIZED_REGISTERS
static constexpr Register ip0 = { Registers::x16 };
static constexpr Register ip1 = { Registers::x16 };
static constexpr Register fp = { Registers::x30 };
static constexpr Register lr = { Registers::x30 };
static constexpr Register rzr = { Registers::xzr };

// Import VIXL registers into the js::jit namespace.
#define IMPORT_VIXL_REGISTERS(N) \
    static constexpr ARMRegister w##N = vixl::w##N; \
    static constexpr ARMRegister x##N = vixl::x##N;
REGISTER_CODE_LIST(IMPORT_VIXL_REGISTERS)
#undef IMPORT_VIXL_REGISTERS
static constexpr ARMRegister wzr = vixl::wzr;
static constexpr ARMRegister xzr = vixl::xzr;
static constexpr ARMRegister wsp = vixl::wsp;
static constexpr ARMRegister sp = vixl::sp;

// Import VIXL VRegisters into the js::jit namespace.
#define IMPORT_VIXL_VREGISTERS(N) \
    static constexpr ARMFPRegister s##N = vixl::s##N; \
    static constexpr ARMFPRegister d##N = vixl::d##N;
REGISTER_CODE_LIST(IMPORT_VIXL_VREGISTERS)
#undef IMPORT_VIXL_VREGISTERS

static constexpr ValueOperand JSReturnOperand = ValueOperand(JSReturnReg);

// Registers used in the GenerateFFIIonExit Enable Activation block.
static constexpr Register AsmJSIonExitRegCallee = r8;
static constexpr Register AsmJSIonExitRegE0 = r0;
static constexpr Register AsmJSIonExitRegE1 = r1;
static constexpr Register AsmJSIonExitRegE2 = r2;
static constexpr Register AsmJSIonExitRegE3 = r3;

// Registers used in the GenerateFFIIonExit Disable Activation block.
// None of these may be the second scratch register.
static constexpr Register AsmJSIonExitRegReturnData = r2;
static constexpr Register AsmJSIonExitRegReturnType = r3;
static constexpr Register AsmJSIonExitRegD0 = r0;
static constexpr Register AsmJSIonExitRegD1 = r1;
static constexpr Register AsmJSIonExitRegD2 = r4;

static constexpr Register JSReturnReg_Type = r3;
static constexpr Register JSReturnReg_Data = r2;

static constexpr FloatRegister NANReg = { FloatRegisters::d14 };
// N.B. r8 isn't listed as an aapcs temp register, but we can use it as such because we never
// use return-structs.
static constexpr Register CallTempNonArgRegs[] = { r8, r9, r10, r11, r12, r13, r14, r15 };
static const uint32_t NumCallTempNonArgRegs =
    mozilla::ArrayLength(CallTempNonArgRegs);

static constexpr uint32_t JitStackAlignment = 16;

static constexpr uint32_t JitStackValueAlignment = JitStackAlignment / sizeof(Value);
static_assert(JitStackAlignment % sizeof(Value) == 0 && JitStackValueAlignment >= 1,
              "Stack alignment should be a non-zero multiple of sizeof(Value)");

// This boolean indicates whether we support SIMD instructions flavoured for
// this architecture or not. Rather than a method in the LIRGenerator, it is
// here such that it is accessible from the entire codebase. Once full support
// for SIMD is reached on all tier-1 platforms, this constant can be deleted.
static constexpr bool SupportsSimd = false;
static constexpr uint32_t SimdMemoryAlignment = 16;

static_assert(CodeAlignment % SimdMemoryAlignment == 0,
              "Code alignment should be larger than any of the alignments which are used for "
              "the constant sections of the code buffer. Thus it should be larger than the "
              "alignment for SIMD constants.");

static const uint32_t AsmJSStackAlignment = SimdMemoryAlignment;
static const int32_t AsmJSGlobalRegBias = 1024;

class Assembler : public vixl::Assembler
{
  public:
    Assembler()
      : vixl::Assembler()
    { }

    typedef vixl::Condition Condition;

    void finish();
    void trace(JSTracer* trc);

    // Emit the jump table, returning the BufferOffset to the first entry in the table.
    BufferOffset emitExtendedJumpTable();
    BufferOffset ExtendedJumpTable_;
    void executableCopy(uint8_t* buffer);

    BufferOffset immPool(ARMRegister dest, uint8_t* value, vixl::LoadLiteralOp op,
                         ARMBuffer::PoolEntry* pe = nullptr);
    BufferOffset immPool64(ARMRegister dest, uint64_t value, ARMBuffer::PoolEntry* pe = nullptr);
    BufferOffset immPool64Branch(RepatchLabel* label, ARMBuffer::PoolEntry* pe, vixl::Condition c);
    BufferOffset fImmPool(ARMFPRegister dest, uint8_t* value, vixl::LoadLiteralOp op);
    BufferOffset fImmPool64(ARMFPRegister dest, double value);
    BufferOffset fImmPool32(ARMFPRegister dest, float value);

    void bind(Label* label) { bind(label, nextOffset()); }
    void bind(Label* label, BufferOffset boff);
    void bind(RepatchLabel* label);

    bool oom() const {
        return AssemblerShared::oom() ||
               armbuffer_.oom() ||
               jumpRelocations_.oom() ||
               dataRelocations_.oom() ||
               preBarriers_.oom();
    }

    void copyJumpRelocationTable(uint8_t* dest) const {
        if (jumpRelocations_.length())
            memcpy(dest, jumpRelocations_.buffer(), jumpRelocations_.length());
    }
    void copyDataRelocationTable(uint8_t* dest) const {
        if (dataRelocations_.length())
            memcpy(dest, dataRelocations_.buffer(), dataRelocations_.length());
    }
    void copyPreBarrierTable(uint8_t* dest) const {
        if (preBarriers_.length())
            memcpy(dest, preBarriers_.buffer(), preBarriers_.length());
    }

    size_t jumpRelocationTableBytes() const {
        return jumpRelocations_.length();
    }
    size_t dataRelocationTableBytes() const {
        return dataRelocations_.length();
    }
    size_t preBarrierTableBytes() const {
        return preBarriers_.length();
    }
    size_t bytesNeeded() const {
        return SizeOfCodeGenerated() +
               jumpRelocationTableBytes() +
               dataRelocationTableBytes() +
               preBarrierTableBytes();
    }

    BufferOffset nextOffset() const {
        return armbuffer_.nextOffset();
    }

    void addCodeLabel(CodeLabel label) {
        propagateOOM(codeLabels_.append(label));
    }
    size_t numCodeLabels() const {
        return codeLabels_.length();
    }
    CodeLabel codeLabel(size_t i) {
        return codeLabels_[i];
    }
    void processCodeLabels(uint8_t* rawCode) {
        for (size_t i = 0; i < codeLabels_.length(); i++) {
            CodeLabel label = codeLabels_[i];
            Bind(rawCode, label.dest(), rawCode + actualOffset(label.src()->offset()));
        }
    }

    void Bind(uint8_t* rawCode, AbsoluteLabel* label, const void* address) {
        uint32_t off = actualOffset(label->offset());
        *reinterpret_cast<const void**>(rawCode + off) = address;
    }
    bool nextLink(BufferOffset cur, BufferOffset* next) {
        Instruction* link = getInstructionAt(cur);
        uint32_t nextLinkOffset = uint32_t(link->ImmPCRawOffset());
        if (nextLinkOffset == uint32_t(LabelBase::INVALID_OFFSET))
            return false;
        *next = BufferOffset(nextLinkOffset + cur.getOffset());
        return true;
    }
    void retarget(Label* cur, Label* next);

    // The buffer is about to be linked. Ensure any constant pools or
    // excess bookkeeping has been flushed to the instruction stream.
    void flush() {
        armbuffer_.flushPool();
    }

    int actualOffset(int curOffset) {
        return curOffset + armbuffer_.poolSizeBefore(curOffset);
    }
    int actualIndex(int curOffset) {
        ARMBuffer::PoolEntry pe(curOffset);
        return armbuffer_.poolEntryOffset(pe);
|
||||||
|
}
|
||||||
|
int labelOffsetToPatchOffset(int labelOff) {
|
||||||
|
return actualOffset(labelOff);
|
||||||
|
}
|
||||||
|
static uint8_t* PatchableJumpAddress(JitCode* code, uint32_t index) {
|
||||||
|
return code->raw() + index;
|
||||||
|
}
|
||||||
|
void setPrinter(Sprinter* sp) {
|
||||||
|
}
|
||||||
|
|
||||||
|
static bool SupportsFloatingPoint() { return true; }
|
||||||
|
static bool SupportsSimd() { return js::jit::SupportsSimd; }
|
||||||
|
|
||||||
|
// Tracks a jump that is patchable after finalization.
|
||||||
|
void addJumpRelocation(BufferOffset src, Relocation::Kind reloc);
|
||||||
|
|
||||||
|
protected:
|
||||||
|
// Add a jump whose target is unknown until finalization.
|
||||||
|
// The jump may not be patched at runtime.
|
||||||
|
void addPendingJump(BufferOffset src, ImmPtr target, Relocation::Kind kind);
|
||||||
|
|
||||||
|
// Add a jump whose target is unknown until finalization, and may change
|
||||||
|
// thereafter. The jump is patchable at runtime.
|
||||||
|
size_t addPatchableJump(BufferOffset src, Relocation::Kind kind);
|
||||||
|
|
||||||
|
public:
|
||||||
|
static uint32_t PatchWrite_NearCallSize() {
|
||||||
|
return 4;
|
||||||
|
}
|
||||||
|
|
||||||
|
static uint32_t NopSize() {
|
||||||
|
return 4;
|
||||||
|
}
|
||||||
|
|
||||||
|
static void PatchWrite_NearCall(CodeLocationLabel start, CodeLocationLabel toCall) {
|
||||||
|
Instruction* dest = (Instruction*)start.raw();
|
||||||
|
//printf("patching %p with call to %p\n", start.raw(), toCall.raw());
|
||||||
|
bl(dest, ((Instruction*)toCall.raw() - dest)>>2);
|
||||||
|
|
||||||
|
}
|
||||||
|
static void PatchDataWithValueCheck(CodeLocationLabel label,
|
||||||
|
PatchedImmPtr newValue,
|
||||||
|
PatchedImmPtr expected);
|
||||||
|
|
||||||
|
static void PatchDataWithValueCheck(CodeLocationLabel label,
|
||||||
|
ImmPtr newValue,
|
||||||
|
ImmPtr expected);
|
||||||
|
|
||||||
|
static void PatchWrite_Imm32(CodeLocationLabel label, Imm32 imm) {
|
||||||
|
// Raw is going to be the return address.
|
||||||
|
uint32_t* raw = (uint32_t*)label.raw();
|
||||||
|
// Overwrite the 4 bytes before the return address, which will end up being
|
||||||
|
// the call instruction.
|
||||||
|
*(raw - 1) = imm.value;
|
||||||
|
}
|
||||||
|
static uint32_t AlignDoubleArg(uint32_t offset) {
|
||||||
|
MOZ_CRASH("AlignDoubleArg()");
|
||||||
|
}
|
||||||
|
static Instruction* NextInstruction(Instruction* instruction, uint32_t* count = nullptr) {
|
||||||
|
if (count != nullptr)
|
||||||
|
*count += 4;
|
||||||
|
Instruction* cur = instruction;
|
||||||
|
Instruction* next = cur + 4;
|
||||||
|
// Artificial pool guards can only be B (rather than BR)
|
||||||
|
if (next->IsUncondB()) {
|
||||||
|
uint32_t* snd = (uint32_t*)(instruction + 8);
|
||||||
|
// test both the upper 16 bits, but also bit 15, which should be unset
|
||||||
|
// for an artificial branch guard.
|
||||||
|
if ((*snd & 0xffff8000) == 0xffff0000) {
|
||||||
|
// that was a guard before a pool, step over the pool.
|
||||||
|
int poolSize = (*snd & 0x7fff);
|
||||||
|
return (Instruction*)(snd + poolSize);
|
||||||
|
}
|
||||||
|
} else if (cur->IsBR() || cur->IsUncondB()) {
|
||||||
|
// natural pool guards can be anything
|
||||||
|
// but they need to have bit 15 set.
|
||||||
|
if ((next->InstructionBits() & 0xffff0000) == 0xffff0000) {
|
||||||
|
int poolSize = (next->InstructionBits() & 0x7fff);
|
||||||
|
Instruction* ret = (next + (poolSize << 2));
|
||||||
|
return ret;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return (instruction + 4);
|
||||||
|
|
||||||
|
}
|
||||||
|
static uint8_t* NextInstruction(uint8_t* instruction, uint32_t* count = nullptr) {
|
||||||
|
return (uint8_t*)NextInstruction((Instruction*)instruction, count);
|
||||||
|
}
|
||||||
|
static uintptr_t GetPointer(uint8_t* ptr) {
|
||||||
|
Instruction* i = reinterpret_cast<Instruction*>(ptr);
|
||||||
|
uint64_t ret = i->Literal64();
|
||||||
|
return ret;
|
||||||
|
}
|
||||||
|
|
||||||
|
// Toggle a jmp or cmp emitted by toggledJump().
|
||||||
|
static void ToggleToJmp(CodeLocationLabel inst_);
|
||||||
|
static void ToggleToCmp(CodeLocationLabel inst_);
|
||||||
|
static void ToggleCall(CodeLocationLabel inst_, bool enabled);
|
||||||
|
|
||||||
|
static void TraceJumpRelocations(JSTracer* trc, JitCode* code, CompactBufferReader& reader);
|
||||||
|
static void TraceDataRelocations(JSTracer* trc, JitCode* code, CompactBufferReader& reader);
|
||||||
|
|
||||||
|
static int32_t ExtractCodeLabelOffset(uint8_t* code);
|
||||||
|
static void PatchInstructionImmediate(uint8_t* code, PatchedImmPtr imm);
|
||||||
|
|
||||||
|
static void FixupNurseryObjects(JSContext* cx, JitCode* code, CompactBufferReader& reader,
|
||||||
|
const ObjectVector& nurseryObjects);
|
||||||
|
|
||||||
|
// Convert a BufferOffset to a final byte offset from the start of the code buffer.
|
||||||
|
size_t toFinalOffset(BufferOffset offset) {
|
||||||
|
return size_t(offset.getOffset() + armbuffer_.poolSizeBefore(offset.getOffset()));
|
||||||
|
}
|
||||||
|
|
||||||
|
public:
|
||||||
|
// A Jump table entry is 2 instructions, with 8 bytes of raw data
|
||||||
|
static const size_t SizeOfJumpTableEntry = 16;
|
||||||
|
|
||||||
|
struct JumpTableEntry
|
||||||
|
{
|
||||||
|
uint32_t ldr;
|
||||||
|
uint32_t br;
|
||||||
|
void* data;
|
||||||
|
|
||||||
|
Instruction* getLdr() {
|
||||||
|
return reinterpret_cast<Instruction*>(&ldr);
|
||||||
|
}
|
||||||
|
};
|
||||||
|
|
||||||
|
// Offset of the patchable target for the given entry.
|
||||||
|
static const size_t OffsetOfJumpTableEntryPointer = 8;
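
    // For orientation (an illustrative sketch, not emitted verbatim here):
    // each extended jump table entry is laid out roughly as
    //     ldr  x16, #8      ; load the 64-bit target stored just below
    //     br   x16          ; jump to it
    //     .quad <target>    ; 8 bytes of raw data, patched at finalization
    // which is why SizeOfJumpTableEntry is 16 and the patchable pointer lives
    // at OffsetOfJumpTableEntryPointer == 8. The exact scratch register used
    // by emitExtendedJumpTable() is an assumption of this sketch.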

  public:
    static void UpdateBoundsCheck(uint32_t logHeapSize, Instruction* inst);

    void writeCodePointer(AbsoluteLabel* absoluteLabel) {
        MOZ_ASSERT(!absoluteLabel->bound());
        uintptr_t x = LabelBase::INVALID_OFFSET;
        BufferOffset off = EmitData(&x, sizeof(uintptr_t));

        // The x86/x64 makes general use of AbsoluteLabel and weaves a linked list
        // of uses of an AbsoluteLabel through the assembly. ARM only uses labels
        // for the case statements of switch jump tables. Thus, for simplicity, we
        // simply treat the AbsoluteLabel as a label and bind it to the offset of
        // the jump table entry that needs to be patched.
        LabelBase* label = absoluteLabel;
        label->bind(off.getOffset());
    }

    void verifyHeapAccessDisassembly(uint32_t begin, uint32_t end,
                                     const Disassembler::HeapAccess& heapAccess)
    {
        MOZ_CRASH("verifyHeapAccessDisassembly");
    }

  protected:
    // Because jumps may be relocated to a target inaccessible by a short jump,
    // each relocatable jump must have a unique entry in the extended jump table.
    // Valid relocatable targets are of type Relocation::JITCODE.
    struct JumpRelocation
    {
        BufferOffset jump; // Offset to the short jump, from the start of the code buffer.
        uint32_t extendedTableIndex; // Unique index within the extended jump table.

        JumpRelocation(BufferOffset jump, uint32_t extendedTableIndex)
          : jump(jump), extendedTableIndex(extendedTableIndex)
        { }
    };

    // Because ARM and A64 use a code buffer that allows for constant pool insertion,
    // the actual offset of each jump cannot be known until finalization.
    // These vectors store the WIP offsets.
    js::Vector<BufferOffset, 0, SystemAllocPolicy> tmpDataRelocations_;
    js::Vector<BufferOffset, 0, SystemAllocPolicy> tmpPreBarriers_;
    js::Vector<JumpRelocation, 0, SystemAllocPolicy> tmpJumpRelocations_;

    // Structure for fixing up pc-relative loads/jumps when the machine
    // code gets moved (executable copy, gc, etc.).
    struct RelativePatch
    {
        BufferOffset offset;
        void* target;
        Relocation::Kind kind;

        RelativePatch(BufferOffset offset, void* target, Relocation::Kind kind)
          : offset(offset), target(target), kind(kind)
        { }
    };

    js::Vector<CodeLabel, 0, SystemAllocPolicy> codeLabels_;

    // List of jumps for which the target is either unknown until finalization,
    // or cannot be known due to GC. Each entry here requires a unique entry
    // in the extended jump table, and is patched at finalization.
    js::Vector<RelativePatch, 8, SystemAllocPolicy> pendingJumps_;

    // Final output formatters.
    CompactBufferWriter jumpRelocations_;
    CompactBufferWriter dataRelocations_;
    CompactBufferWriter preBarriers_;
};

static const uint32_t NumIntArgRegs = 8;
static const uint32_t NumFloatArgRegs = 8;

class ABIArgGenerator
{
  public:
    ABIArgGenerator()
      : intRegIndex_(0),
        floatRegIndex_(0),
        stackOffset_(0),
        current_()
    { }

    ABIArg next(MIRType argType);
    ABIArg& current() { return current_; }
    uint32_t stackBytesConsumedSoFar() const { return stackOffset_; }

  public:
    static const Register NonArgReturnReg0;
    static const Register NonArgReturnReg1;
    static const Register NonVolatileReg;
    static const Register NonArg_VolatileReg;
    static const Register NonReturn_VolatileReg0;
    static const Register NonReturn_VolatileReg1;

  protected:
    unsigned intRegIndex_;
    unsigned floatRegIndex_;
    uint32_t stackOffset_;
    ABIArg current_;
};

static inline bool
GetIntArgReg(uint32_t usedIntArgs, uint32_t usedFloatArgs, Register* out)
{
    if (usedIntArgs >= NumIntArgRegs)
        return false;
    *out = Register::FromCode(usedIntArgs);
    return true;
}

static inline bool
GetFloatArgReg(uint32_t usedIntArgs, uint32_t usedFloatArgs, FloatRegister* out)
{
    if (usedFloatArgs >= NumFloatArgRegs)
        return false;
    *out = FloatRegister::FromCode(usedFloatArgs);
    return true;
}

// Get a register in which we plan to put a quantity that will be used as an
// integer argument. This differs from GetIntArgReg in that if we have no more
// actual argument registers to use we will fall back on using whatever
// CallTempReg* don't overlap the argument registers, and only fail once those
// run out too.
static inline bool
GetTempRegForIntArg(uint32_t usedIntArgs, uint32_t usedFloatArgs, Register* out)
{
    if (GetIntArgReg(usedIntArgs, usedFloatArgs, out))
        return true;
    // Unfortunately, we have to assume things about the point at which
    // GetIntArgReg returns false, because we need to know how many registers it
    // can allocate.
    usedIntArgs -= NumIntArgRegs;
    if (usedIntArgs >= NumCallTempNonArgRegs)
        return false;
    *out = CallTempNonArgRegs[usedIntArgs];
    return true;
}
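
// For example (following the code above): once all eight integer argument
// registers r0-r7 are in use, GetTempRegForIntArg(8, 0, &out) hands out
// CallTempNonArgRegs[0] (r8), then r9, and so on, and only reports failure
// once those eight temporaries are exhausted as well.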

void PatchJump(CodeLocationJump& jump_, CodeLocationLabel label);

static inline void
PatchBackedge(CodeLocationJump& jump_, CodeLocationLabel label, JitRuntime::BackedgeTarget target)
{
    PatchJump(jump_, label);
}

// Forbids pool generation during a specified interval. Not nestable.
class AutoForbidPools
{
    Assembler* asm_;

  public:
    AutoForbidPools(Assembler* asm_, size_t maxInst)
      : asm_(asm_)
    {
        asm_->enterNoPool(maxInst);
    }

    ~AutoForbidPools() {
        asm_->leaveNoPool();
    }
};
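
// Typical use (a sketch only, analogous to the enterNoPool()/leaveNoPool()
// pairing in buildFakeExitFrame() in MacroAssembler-arm64.cpp): scope an
// AutoForbidPools over a short, fixed-length sequence that a constant pool
// dump must not split, e.g.
//     {
//         AutoForbidPools afp(&masm, /* maxInst = */ 2);
//         // ...emit at most maxInst instructions here...
//     }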

} // namespace jit
} // namespace js

#endif // A64_ASSEMBLER_A64_H_
104
js/src/jit/arm64/AtomicOperations-arm64.h
Normal file
@@ -0,0 +1,104 @@
/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
 * vim: set ts=8 sts=4 et sw=4 tw=99:
 * This Source Code Form is subject to the terms of the Mozilla Public
 * License, v. 2.0. If a copy of the MPL was not distributed with this
 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */

/* For documentation, see jit/AtomicOperations.h */

#ifndef jit_arm64_AtomicOperations_arm64_h
#define jit_arm64_AtomicOperations_arm64_h

#include "jit/arm64/Architecture-arm64.h"
#include "jit/AtomicOperations.h"

inline bool
js::jit::AtomicOperations::isLockfree8()
{
    MOZ_CRASH("isLockfree8()");
}

inline void
js::jit::AtomicOperations::fenceSeqCst()
{
    MOZ_CRASH("fenceSeqCst()");
}

template<typename T>
inline T
js::jit::AtomicOperations::loadSeqCst(T* addr)
{
    MOZ_CRASH("loadSeqCst()");
}

template<typename T>
inline void
js::jit::AtomicOperations::storeSeqCst(T* addr, T val)
{
    MOZ_CRASH("storeSeqCst()");
}

template<typename T>
inline T
js::jit::AtomicOperations::exchangeSeqCst(T* addr, T val)
{
    MOZ_CRASH("exchangeSeqCst()");
}

template<typename T>
inline T
js::jit::AtomicOperations::compareExchangeSeqCst(T* addr, T oldval, T newval)
{
    MOZ_CRASH("compareExchangeSeqCst()");
}

template<typename T>
inline T
js::jit::AtomicOperations::fetchAddSeqCst(T* addr, T val)
{
    MOZ_CRASH("fetchAddSeqCst()");
}

template<typename T>
inline T
js::jit::AtomicOperations::fetchSubSeqCst(T* addr, T val)
{
    MOZ_CRASH("fetchSubSeqCst()");
}

template<typename T>
inline T
js::jit::AtomicOperations::fetchAndSeqCst(T* addr, T val)
{
    MOZ_CRASH("fetchAndSeqCst()");
}

template<typename T>
inline T
js::jit::AtomicOperations::fetchOrSeqCst(T* addr, T val)
{
    MOZ_CRASH("fetchOrSeqCst()");
}

template<typename T>
inline T
js::jit::AtomicOperations::fetchXorSeqCst(T* addr, T val)
{
    MOZ_CRASH("fetchXorSeqCst()");
}

template<size_t nbytes>
inline void
js::jit::RegionLock::acquire(void* addr)
{
    MOZ_CRASH("acquire()");
}

template<size_t nbytes>
inline void
js::jit::RegionLock::release(void* addr)
{
    MOZ_CRASH("release()");
}

#endif // jit_arm64_AtomicOperations_arm64_h
@@ -8,7 +8,7 @@
 #include "jit/SharedICHelpers.h"

 #ifdef JS_ARM64_SIMULATOR
-// TODO #include "jit/arm64/Assembler-arm64.h"
+#include "jit/arm64/Assembler-arm64.h"
 #include "jit/arm64/BaselineCompiler-arm64.h"
 #include "jit/arm64/vixl/Debugger-vixl.h"
 #endif
688
js/src/jit/arm64/MacroAssembler-arm64.cpp
Normal file
@@ -0,0 +1,688 @@
/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
 * vim: set ts=8 sts=4 et sw=4 tw=99:
 * This Source Code Form is subject to the terms of the Mozilla Public
 * License, v. 2.0. If a copy of the MPL was not distributed with this
 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */

#include "jit/arm64/MacroAssembler-arm64.h"

// TODO #include "jit/arm64/MoveEmitter-arm64.h"
#include "jit/arm64/SharedICRegisters-arm64.h"
#include "jit/Bailouts.h"
#include "jit/BaselineFrame.h"
#include "jit/MacroAssembler.h"

namespace js {
namespace jit {

void
MacroAssembler::clampDoubleToUint8(FloatRegister input, Register output)
{
    ARMRegister dest(output, 32);
    Fcvtns(dest, ARMFPRegister(input, 64));

    {
        vixl::UseScratchRegisterScope temps(this);
        const ARMRegister scratch32 = temps.AcquireW();

        Mov(scratch32, Operand(0xff));
        Cmp(dest, scratch32);
        Csel(dest, dest, scratch32, LessThan);
    }

    Cmp(dest, Operand(0));
    Csel(dest, wzr, dest, LessThan);
}
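
// In short: Fcvtns rounds the double to the nearest signed 32-bit integer,
// the first Csel clamps results above 0xff down to 255, and the final
// Cmp/Csel pair clamps negative results up to 0 (wzr), leaving a value in
// the [0, 255] range expected of uint8 clamping.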

void
MacroAssemblerCompat::buildFakeExitFrame(Register scratch, uint32_t* offset)
{
    mozilla::DebugOnly<uint32_t> initialDepth = framePushed();
    uint32_t descriptor = MakeFrameDescriptor(framePushed(), JitFrame_IonJS);

    asMasm().Push(Imm32(descriptor)); // descriptor_

    enterNoPool(3);
    Label fakeCallsite;
    Adr(ARMRegister(scratch, 64), &fakeCallsite);
    asMasm().Push(scratch);
    bind(&fakeCallsite);
    uint32_t pseudoReturnOffset = currentOffset();
    leaveNoPool();

    MOZ_ASSERT(framePushed() == initialDepth + ExitFrameLayout::Size());

    *offset = pseudoReturnOffset;
}

void
MacroAssemblerCompat::callWithExitFrame(JitCode* target)
{
    uint32_t descriptor = MakeFrameDescriptor(framePushed(), JitFrame_IonJS);
    asMasm().Push(Imm32(descriptor));
    call(target);
}

void
MacroAssembler::alignFrameForICArguments(MacroAssembler::AfterICSaveLive& aic)
{
    // Exists for MIPS compatibility.
}

void
MacroAssembler::restoreFrameAlignmentForICArguments(MacroAssembler::AfterICSaveLive& aic)
{
    // Exists for MIPS compatibility.
}

js::jit::MacroAssembler&
MacroAssemblerCompat::asMasm()
{
    return *static_cast<js::jit::MacroAssembler*>(this);
}

const js::jit::MacroAssembler&
MacroAssemblerCompat::asMasm() const
{
    return *static_cast<const js::jit::MacroAssembler*>(this);
}

vixl::MacroAssembler&
MacroAssemblerCompat::asVIXL()
{
    return *static_cast<vixl::MacroAssembler*>(this);
}

const vixl::MacroAssembler&
MacroAssemblerCompat::asVIXL() const
{
    return *static_cast<const vixl::MacroAssembler*>(this);
}

BufferOffset
MacroAssemblerCompat::movePatchablePtr(ImmPtr ptr, Register dest)
{
    const size_t numInst = 1; // Inserting one load instruction.
    const unsigned numPoolEntries = 2; // Every pool entry is 4 bytes.
    uint8_t* literalAddr = (uint8_t*)(&ptr.value); // TODO: Should be const.

    // Scratch space for generating the load instruction.
    //
    // allocEntry() will use InsertIndexIntoTag() to store a temporary
    // index to the corresponding PoolEntry in the instruction itself.
    //
    // That index will be fixed up later when finishPool()
    // walks over all marked loads and calls PatchConstantPoolLoad().
    uint32_t instructionScratch = 0;

    // Emit the instruction mask in the scratch space.
    // The offset doesn't matter: it will be fixed up later.
    vixl::Assembler::ldr((Instruction*)&instructionScratch, ARMRegister(dest, 64), 0);

    // Add the entry to the pool, fix up the LDR imm19 offset,
    // and add the completed instruction to the buffer.
    return armbuffer_.allocEntry(numInst, numPoolEntries,
                                 (uint8_t*)&instructionScratch, literalAddr);
}

BufferOffset
MacroAssemblerCompat::movePatchablePtr(ImmWord ptr, Register dest)
{
    const size_t numInst = 1; // Inserting one load instruction.
    const unsigned numPoolEntries = 2; // Every pool entry is 4 bytes.
    uint8_t* literalAddr = (uint8_t*)(&ptr.value);

    // Scratch space for generating the load instruction.
    //
    // allocEntry() will use InsertIndexIntoTag() to store a temporary
    // index to the corresponding PoolEntry in the instruction itself.
    //
    // That index will be fixed up later when finishPool()
    // walks over all marked loads and calls PatchConstantPoolLoad().
    uint32_t instructionScratch = 0;

    // Emit the instruction mask in the scratch space.
    // The offset doesn't matter: it will be fixed up later.
    vixl::Assembler::ldr((Instruction*)&instructionScratch, ARMRegister(dest, 64), 0);

    // Add the entry to the pool, fix up the LDR imm19 offset,
    // and add the completed instruction to the buffer.
    return armbuffer_.allocEntry(numInst, numPoolEntries,
                                 (uint8_t*)&instructionScratch, literalAddr);
}

void
MacroAssemblerCompat::handleFailureWithHandlerTail(void* handler)
{
    // Reserve space for exception information.
    int64_t size = (sizeof(ResumeFromException) + 7) & ~7;
    Sub(GetStackPointer64(), GetStackPointer64(), Operand(size));
    if (!GetStackPointer64().Is(sp))
        Mov(sp, GetStackPointer64());

    Mov(x0, GetStackPointer64());

    // Call the handler.
    setupUnalignedABICall(1, r1);
    passABIArg(r0);
    callWithABI(handler);

    Label entryFrame;
    Label catch_;
    Label finally;
    Label return_;
    Label bailout;

    MOZ_ASSERT(GetStackPointer64().Is(x28)); // Lets the code below be a little cleaner.

    loadPtr(Address(r28, offsetof(ResumeFromException, kind)), r0);
    branch32(Assembler::Equal, r0, Imm32(ResumeFromException::RESUME_ENTRY_FRAME), &entryFrame);
    branch32(Assembler::Equal, r0, Imm32(ResumeFromException::RESUME_CATCH), &catch_);
    branch32(Assembler::Equal, r0, Imm32(ResumeFromException::RESUME_FINALLY), &finally);
    branch32(Assembler::Equal, r0, Imm32(ResumeFromException::RESUME_FORCED_RETURN), &return_);
    branch32(Assembler::Equal, r0, Imm32(ResumeFromException::RESUME_BAILOUT), &bailout);

    breakpoint(); // Invalid kind.

    // No exception handler. Load the error value, load the new stack pointer,
    // and return from the entry frame.
    bind(&entryFrame);
    moveValue(MagicValue(JS_ION_ERROR), JSReturnOperand);
    loadPtr(Address(r28, offsetof(ResumeFromException, stackPointer)), r28);
    retn(Imm32(1 * sizeof(void*))); // Pop from stack and return.

    // If we found a catch handler, this must be a baseline frame. Restore state
    // and jump to the catch block.
    bind(&catch_);
    loadPtr(Address(r28, offsetof(ResumeFromException, target)), r0);
    loadPtr(Address(r28, offsetof(ResumeFromException, framePointer)), BaselineFrameReg);
    loadPtr(Address(r28, offsetof(ResumeFromException, stackPointer)), r28);
    syncStackPtr();
    Br(x0);

    // If we found a finally block, this must be a baseline frame.
    // Push two values expected by JSOP_RETSUB: BooleanValue(true)
    // and the exception.
    bind(&finally);
    ARMRegister exception = x1;
    Ldr(exception, MemOperand(GetStackPointer64(), offsetof(ResumeFromException, exception)));
    Ldr(x0, MemOperand(GetStackPointer64(), offsetof(ResumeFromException, target)));
    Ldr(ARMRegister(BaselineFrameReg, 64),
        MemOperand(GetStackPointer64(), offsetof(ResumeFromException, framePointer)));
    Ldr(GetStackPointer64(), MemOperand(GetStackPointer64(), offsetof(ResumeFromException, stackPointer)));
    syncStackPtr();
    pushValue(BooleanValue(true));
    push(exception);
    Br(x0);

    // Only used in debug mode. Return BaselineFrame->returnValue() to the caller.
    bind(&return_);
    loadPtr(Address(r28, offsetof(ResumeFromException, framePointer)), BaselineFrameReg);
    loadPtr(Address(r28, offsetof(ResumeFromException, stackPointer)), r28);
    loadValue(Address(BaselineFrameReg, BaselineFrame::reverseOffsetOfReturnValue()),
              JSReturnOperand);
    movePtr(BaselineFrameReg, r28);
    vixl::MacroAssembler::Pop(ARMRegister(BaselineFrameReg, 64), vixl::lr);
    syncStackPtr();
    vixl::MacroAssembler::Ret(vixl::lr);

    // If we are bailing out to baseline to handle an exception,
    // jump to the bailout tail stub.
    bind(&bailout);
    Ldr(x2, MemOperand(GetStackPointer64(), offsetof(ResumeFromException, bailoutInfo)));
    Ldr(x1, MemOperand(GetStackPointer64(), offsetof(ResumeFromException, target)));
    Mov(x0, BAILOUT_RETURN_OK);
    Br(x1);
}

void
MacroAssemblerCompat::setupABICall(uint32_t args)
{
    MOZ_ASSERT(!inCall_);
    inCall_ = true;

    args_ = args;
    usedOutParam_ = false;
    passedIntArgs_ = 0;
    passedFloatArgs_ = 0;
    passedArgTypes_ = 0;
    stackForCall_ = ShadowStackSpace;
}

void
MacroAssemblerCompat::setupUnalignedABICall(uint32_t args, Register scratch)
{
    setupABICall(args);
    dynamicAlignment_ = true;

    int64_t alignment = ~(int64_t(ABIStackAlignment) - 1);
    ARMRegister scratch64(scratch, 64);

    // Always save LR -- Baseline ICs assume that LR isn't modified.
    push(lr);

    // Unhandled for sp -- needs slightly different logic.
    MOZ_ASSERT(!GetStackPointer64().Is(sp));

    // Remember the stack address on entry.
    Mov(scratch64, GetStackPointer64());

    // Make alignment, including the effective push of the previous sp.
    Sub(GetStackPointer64(), GetStackPointer64(), Operand(8));
    And(GetStackPointer64(), GetStackPointer64(), Operand(alignment));

    // If the PseudoStackPointer is used, sp must be <= psp before a write is valid.
    syncStackPtr();

    // Store previous sp to the top of the stack, aligned.
    Str(scratch64, MemOperand(GetStackPointer64(), 0));
}
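
// The net effect (a sketch of the frame this builds): after LR is pushed, the
// pseudo stack pointer is bumped by 8 and rounded down to ABIStackAlignment,
// and the caller's original stack pointer value is stored in that newly
// reserved slot so callWithABIPost() can restore it with a single load.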

void
MacroAssemblerCompat::passABIArg(const MoveOperand& from, MoveOp::Type type)
{
    if (!enoughMemory_)
        return;

    Register activeSP = Register::FromCode(GetStackPointer64().code());
    if (type == MoveOp::GENERAL) {
        Register dest;
        passedArgTypes_ = (passedArgTypes_ << ArgType_Shift) | ArgType_General;
        if (GetIntArgReg(passedIntArgs_++, passedFloatArgs_, &dest)) {
            if (!from.isGeneralReg() || from.reg() != dest)
                enoughMemory_ = moveResolver_.addMove(from, MoveOperand(dest), type);
            return;
        }

        enoughMemory_ = moveResolver_.addMove(from, MoveOperand(activeSP, stackForCall_), type);
        stackForCall_ += sizeof(int64_t);
        return;
    }

    MOZ_ASSERT(type == MoveOp::FLOAT32 || type == MoveOp::DOUBLE);
    if (type == MoveOp::FLOAT32)
        passedArgTypes_ = (passedArgTypes_ << ArgType_Shift) | ArgType_Float32;
    else
        passedArgTypes_ = (passedArgTypes_ << ArgType_Shift) | ArgType_Double;

    FloatRegister fdest;
    if (GetFloatArgReg(passedIntArgs_, passedFloatArgs_++, &fdest)) {
        if (!from.isFloatReg() || from.floatReg() != fdest)
            enoughMemory_ = moveResolver_.addMove(from, MoveOperand(fdest), type);
        return;
    }

    enoughMemory_ = moveResolver_.addMove(from, MoveOperand(activeSP, stackForCall_), type);
    switch (type) {
      case MoveOp::FLOAT32: stackForCall_ += sizeof(float); break;
      case MoveOp::DOUBLE: stackForCall_ += sizeof(double); break;
      default: MOZ_CRASH("Unexpected float register class argument type");
    }
}

void
MacroAssemblerCompat::passABIArg(Register reg)
{
    passABIArg(MoveOperand(reg), MoveOp::GENERAL);
}

void
MacroAssemblerCompat::passABIArg(FloatRegister reg, MoveOp::Type type)
{
    passABIArg(MoveOperand(reg), type);
}

void
MacroAssemblerCompat::passABIOutParam(Register reg)
{
    if (!enoughMemory_)
        return;
    MOZ_ASSERT(!usedOutParam_);
    usedOutParam_ = true;
    if (reg == r8)
        return;
    enoughMemory_ = moveResolver_.addMove(MoveOperand(reg), MoveOperand(r8), MoveOp::GENERAL);
}

void
MacroAssemblerCompat::callWithABIPre(uint32_t* stackAdjust)
{
    *stackAdjust = stackForCall_;
    // ARM64 /really/ wants the stack to always be aligned. Since we're already tracking it
    // getting it aligned for an abi call is pretty easy.
    *stackAdjust += ComputeByteAlignment(*stackAdjust, StackAlignment);
    asMasm().reserveStack(*stackAdjust);
    {
        moveResolver_.resolve();
        MoveEmitter emitter(asMasm());
        emitter.emit(moveResolver_);
        emitter.finish();
    }

    // Call boundaries communicate stack via sp.
    syncStackPtr();
}

void
MacroAssemblerCompat::callWithABIPost(uint32_t stackAdjust, MoveOp::Type result)
{
    // Call boundaries communicate stack via sp.
    if (!GetStackPointer64().Is(sp))
        Mov(GetStackPointer64(), sp);

    inCall_ = false;
    asMasm().freeStack(stackAdjust);

    // Restore the stack pointer from entry.
    if (dynamicAlignment_)
        Ldr(GetStackPointer64(), MemOperand(GetStackPointer64(), 0));

    // Restore LR.
    pop(lr);

    // TODO: This one shouldn't be necessary -- check that callers
    // aren't enforcing the ABI themselves!
    syncStackPtr();

    // If the ABI's return regs are where ION is expecting them, then
    // no other work needs to be done.
}

#if defined(DEBUG) && defined(JS_ARM64_SIMULATOR)
static void
AssertValidABIFunctionType(uint32_t passedArgTypes)
{
    switch (passedArgTypes) {
      case Args_General0:
      case Args_General1:
      case Args_General2:
      case Args_General3:
      case Args_General4:
      case Args_General5:
      case Args_General6:
      case Args_General7:
      case Args_General8:
      case Args_Double_None:
      case Args_Int_Double:
      case Args_Float32_Float32:
      case Args_Double_Double:
      case Args_Double_Int:
      case Args_Double_DoubleInt:
      case Args_Double_DoubleDouble:
      case Args_Double_DoubleDoubleDouble:
      case Args_Double_DoubleDoubleDoubleDouble:
      case Args_Double_IntDouble:
      case Args_Int_IntDouble:
        break;
      default:
        MOZ_CRASH("Unexpected type");
    }
}
#endif // DEBUG && JS_ARM64_SIMULATOR

void
MacroAssemblerCompat::callWithABI(void* fun, MoveOp::Type result)
{
#ifdef JS_ARM64_SIMULATOR
    MOZ_ASSERT(passedIntArgs_ + passedFloatArgs_ <= 15);
    passedArgTypes_ <<= ArgType_Shift;
    switch (result) {
      case MoveOp::GENERAL: passedArgTypes_ |= ArgType_General; break;
      case MoveOp::DOUBLE: passedArgTypes_ |= ArgType_Double; break;
      case MoveOp::FLOAT32: passedArgTypes_ |= ArgType_Float32; break;
      default: MOZ_CRASH("Invalid return type");
    }
# ifdef DEBUG
    AssertValidABIFunctionType(passedArgTypes_);
# endif
    ABIFunctionType type = ABIFunctionType(passedArgTypes_);
    fun = vixl::Simulator::RedirectNativeFunction(fun, type);
#endif // JS_ARM64_SIMULATOR

    uint32_t stackAdjust;
    callWithABIPre(&stackAdjust);
    call(ImmPtr(fun));
    callWithABIPost(stackAdjust, result);
}

void
MacroAssemblerCompat::callWithABI(Register fun, MoveOp::Type result)
{
    movePtr(fun, ip0);

    uint32_t stackAdjust;
    callWithABIPre(&stackAdjust);
    call(ip0);
    callWithABIPost(stackAdjust, result);
}

void
MacroAssemblerCompat::callWithABI(AsmJSImmPtr imm, MoveOp::Type result)
{
    uint32_t stackAdjust;
    callWithABIPre(&stackAdjust);
    call(imm);
    callWithABIPost(stackAdjust, result);
}

void
MacroAssemblerCompat::callWithABI(Address fun, MoveOp::Type result)
{
    loadPtr(fun, ip0);

    uint32_t stackAdjust;
    callWithABIPre(&stackAdjust);
    call(ip0);
    callWithABIPost(stackAdjust, result);
}

void
MacroAssemblerCompat::branchPtrInNurseryRange(Condition cond, Register ptr, Register temp,
                                              Label* label)
{
    MOZ_ASSERT(cond == Assembler::Equal || cond == Assembler::NotEqual);
    MOZ_ASSERT(ptr != temp);
    MOZ_ASSERT(ptr != ScratchReg && ptr != ScratchReg2); // Both may be used internally.
    MOZ_ASSERT(temp != ScratchReg && temp != ScratchReg2);

    const Nursery& nursery = GetJitContext()->runtime->gcNursery();
    movePtr(ImmWord(-ptrdiff_t(nursery.start())), temp);
    addPtr(ptr, temp);
    branchPtr(cond == Assembler::Equal ? Assembler::Below : Assembler::AboveOrEqual,
              temp, ImmWord(nursery.nurserySize()), label);
}
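
// This check (and branchValueIsNurseryObject below) uses the same trick:
// adding -nursery.start() to the pointer (or boxed Value bits) turns
// "start <= p < start + size" into a single unsigned comparison against
// nurserySize(), so Below selects pointers inside the nursery and
// AboveOrEqual selects those outside it.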

void
MacroAssemblerCompat::branchValueIsNurseryObject(Condition cond, ValueOperand value, Register temp,
                                                 Label* label)
{
    MOZ_ASSERT(cond == Assembler::Equal || cond == Assembler::NotEqual);
    MOZ_ASSERT(temp != ScratchReg && temp != ScratchReg2); // Both may be used internally.

    // 'Value' representing the start of the nursery tagged as a JSObject
    const Nursery& nursery = GetJitContext()->runtime->gcNursery();
    Value start = ObjectValue(*reinterpret_cast<JSObject*>(nursery.start()));

    movePtr(ImmWord(-ptrdiff_t(start.asRawBits())), temp);
    addPtr(value.valueReg(), temp);
    branchPtr(cond == Assembler::Equal ? Assembler::Below : Assembler::AboveOrEqual,
              temp, ImmWord(nursery.nurserySize()), label);
}

void
MacroAssemblerCompat::callAndPushReturnAddress(Label* label)
{
    // FIXME: Jandem said he would refactor the code to avoid making
    // this instruction required, but probably forgot about it.
    // Instead of implementing this function, we should make it unnecessary.
    Label ret;
    {
        vixl::UseScratchRegisterScope temps(this);
        const ARMRegister scratch64 = temps.AcquireX();

        Adr(scratch64, &ret);
        asMasm().Push(scratch64.asUnsized());
    }

    Bl(label);
    bind(&ret);
}

void
MacroAssemblerCompat::breakpoint()
{
    static int code = 0xA77;
    Brk((code++) & 0xffff);
}

// ===============================================================
// Stack manipulation functions.

void
MacroAssembler::reserveStack(uint32_t amount)
{
    // TODO: This bumps |sp| every time we reserve using a second register.
    // It would save some instructions if we had a fixed frame size.
    vixl::MacroAssembler::Claim(Operand(amount));
    adjustFrame(amount);
}

void
MacroAssembler::PushRegsInMask(LiveRegisterSet set)
{
    for (GeneralRegisterBackwardIterator iter(set.gprs()); iter.more(); ) {
        vixl::CPURegister src[4] = { vixl::NoCPUReg, vixl::NoCPUReg, vixl::NoCPUReg, vixl::NoCPUReg };

        for (size_t i = 0; i < 4 && iter.more(); i++) {
            src[i] = ARMRegister(*iter, 64);
            ++iter;
            adjustFrame(8);
        }
        vixl::MacroAssembler::Push(src[0], src[1], src[2], src[3]);
    }

    for (FloatRegisterBackwardIterator iter(set.fpus().reduceSetForPush()); iter.more(); ) {
        vixl::CPURegister src[4] = { vixl::NoCPUReg, vixl::NoCPUReg, vixl::NoCPUReg, vixl::NoCPUReg };

        for (size_t i = 0; i < 4 && iter.more(); i++) {
            src[i] = ARMFPRegister(*iter, 64);
            ++iter;
            adjustFrame(8);
        }
        vixl::MacroAssembler::Push(src[0], src[1], src[2], src[3]);
    }
}

void
MacroAssembler::PopRegsInMaskIgnore(LiveRegisterSet set, LiveRegisterSet ignore)
{
    // The offset of the data from the stack pointer.
    uint32_t offset = 0;

    for (FloatRegisterIterator iter(set.fpus().reduceSetForPush()); iter.more(); ) {
        vixl::CPURegister dest[2] = { vixl::NoCPUReg, vixl::NoCPUReg };
        uint32_t nextOffset = offset;

        for (size_t i = 0; i < 2 && iter.more(); i++) {
            if (!ignore.has(*iter))
                dest[i] = ARMFPRegister(*iter, 64);
            ++iter;
            nextOffset += sizeof(double);
        }

        if (!dest[0].IsNone() && !dest[1].IsNone())
            Ldp(dest[0], dest[1], MemOperand(GetStackPointer64(), offset));
        else if (!dest[0].IsNone())
            Ldr(dest[0], MemOperand(GetStackPointer64(), offset));
        else if (!dest[1].IsNone())
            Ldr(dest[1], MemOperand(GetStackPointer64(), offset + sizeof(double)));

        offset = nextOffset;
    }

    MOZ_ASSERT(offset == set.fpus().getPushSizeInBytes());

    for (GeneralRegisterIterator iter(set.gprs()); iter.more(); ) {
        vixl::CPURegister dest[2] = { vixl::NoCPUReg, vixl::NoCPUReg };
        uint32_t nextOffset = offset;

        for (size_t i = 0; i < 2 && iter.more(); i++) {
            if (!ignore.has(*iter))
                dest[i] = ARMRegister(*iter, 64);
            ++iter;
            nextOffset += sizeof(uint64_t);
        }

        if (!dest[0].IsNone() && !dest[1].IsNone())
            Ldp(dest[0], dest[1], MemOperand(GetStackPointer64(), offset));
        else if (!dest[0].IsNone())
            Ldr(dest[0], MemOperand(GetStackPointer64(), offset));
        else if (!dest[1].IsNone())
            Ldr(dest[1], MemOperand(GetStackPointer64(), offset + sizeof(uint64_t)));

        offset = nextOffset;
    }

    size_t bytesPushed = set.gprs().size() * sizeof(uint64_t) + set.fpus().getPushSizeInBytes();
    MOZ_ASSERT(offset == bytesPushed);
    freeStack(bytesPushed);
}
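
// Note on the two routines above: every register, integer or floating point,
// gets an 8-byte slot. PushRegsInMask stores in groups of up to four, while
// PopRegsInMaskIgnore reloads in pairs and still steps past the slots of
// ignored registers so that the running offset matches what was pushed.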

void
MacroAssembler::Push(Register reg)
{
    push(reg);
    adjustFrame(sizeof(intptr_t));
}

void
MacroAssembler::Push(const Imm32 imm)
{
    push(imm);
    adjustFrame(sizeof(intptr_t));
}

void
MacroAssembler::Push(const ImmWord imm)
{
    push(imm);
    adjustFrame(sizeof(intptr_t));
}

void
MacroAssembler::Push(const ImmPtr imm)
{
    push(imm);
    adjustFrame(sizeof(intptr_t));
}

void
MacroAssembler::Push(const ImmGCPtr ptr)
{
    push(ptr);
    adjustFrame(sizeof(intptr_t));
}

void
MacroAssembler::Push(FloatRegister f)
{
    push(f);
    adjustFrame(sizeof(double));
}

void
MacroAssembler::Pop(const Register reg)
{
    pop(reg);
    adjustFrame(-1 * int64_t(sizeof(int64_t)));
}

void
MacroAssembler::Pop(const ValueOperand& val)
{
    pop(val);
    adjustFrame(-1 * int64_t(sizeof(int64_t)));
}

} // namespace jit
} // namespace js
3317
js/src/jit/arm64/MacroAssembler-arm64.h
Normal file
File diff suppressed because it is too large
@@ -27,8 +27,7 @@
 #ifndef VIXL_A64_MACRO_ASSEMBLER_A64_H_
 #define VIXL_A64_MACRO_ASSEMBLER_A64_H_

-// TODO: Re-enable once landed.
-// #include "jit/arm64/Assembler-arm64.h"
+#include "jit/arm64/Assembler-arm64.h"

 #include "jit/arm64/vixl/Debugger-vixl.h"
 #include "jit/arm64/vixl/Globals-vixl.h"