Bug 1207827 - Remove ARM64 temporary offset buffers. r=nbp
The ARM64 assembler no longer needs to keep track of code offsets for later translation to 'final' offsets: the AssemblerBuffer offsets are now directly usable. Remove tmpDataRelocations_, tmpPreBarriers_, tmpJumpRelocations_, and the toFinalOffset() method.
commit b55878b885
parent ded97ec570
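In short, the patch collapses a two-pass scheme (append provisional BufferOffsets to temporary vectors, then translate them with toFinalOffset() at finalization) into direct single-pass writes. Below is a minimal standalone sketch of the two shapes, using a hypothetical byte-stream Writer in place of the real AssemblerBuffer/CompactBufferWriter types; nothing here is the actual SpiderMonkey code.

    #include <cstdint>
    #include <vector>

    // Hypothetical stand-in for a compact, append-only relocation stream.
    struct Writer {
        std::vector<uint8_t> bytes;
        void writeUnsigned(uint32_t v) {
            do {                              // 7 bits per byte, high bit = continue
                uint8_t b = v & 0x7f;
                v >>= 7;
                if (v)
                    b |= 0x80;
                bytes.push_back(b);
            } while (v);
        }
    };

    // Before: offsets were buffered, then translated and written in finish().
    struct TwoPass {
        std::vector<uint32_t> tmpOffsets_;    // analogous to tmpDataRelocations_
        Writer relocations_;
        void record(uint32_t offset) { tmpOffsets_.push_back(offset); }
        void finish() {
            for (uint32_t off : tmpOffsets_)  // toFinalOffset() had become an identity
                relocations_.writeUnsigned(off);
        }
    };

    // After: buffer offsets are already final, so write them immediately.
    struct OnePass {
        Writer relocations_;
        void record(uint32_t offset) { relocations_.writeUnsigned(offset); }
    };

The one-pass shape also drops the vectors' heap traffic, which is what lets finish() shrink to the single header patch in the first hunk below.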
@@ -83,22 +83,13 @@ Assembler::finish()

     // The jump relocation table starts with a fixed-width integer pointing
     // to the start of the extended jump table.
-    if (tmpJumpRelocations_.length())
-        jumpRelocations_.writeFixedUint32_t(toFinalOffset(ExtendedJumpTable_));
-
-    for (unsigned int i = 0; i < tmpJumpRelocations_.length(); i++) {
-        JumpRelocation& reloc = tmpJumpRelocations_[i];
-
-        // Each entry in the relocations table is an (offset, extendedTableIndex) pair.
-        jumpRelocations_.writeUnsigned(toFinalOffset(reloc.jump));
-        jumpRelocations_.writeUnsigned(reloc.extendedTableIndex);
+    // Space for this integer is allocated by Assembler::addJumpRelocation()
+    // before writing the first entry.
+    // Don't touch memory if we saw an OOM error.
+    if (jumpRelocations_.length() && !oom()) {
+        MOZ_ASSERT(jumpRelocations_.length() >= sizeof(uint32_t));
+        *(uint32_t*)jumpRelocations_.buffer() = ExtendedJumpTable_.getOffset();
     }
-
-    for (unsigned int i = 0; i < tmpDataRelocations_.length(); i++)
-        dataRelocations_.writeUnsigned(toFinalOffset(tmpDataRelocations_[i]));
-
-    for (unsigned int i = 0; i < tmpPreBarriers_.length(); i++)
-        preBarriers_.writeUnsigned(toFinalOffset(tmpPreBarriers_[i]));
 }

 BufferOffset
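The only work left in finish() is back-patching the table header. That works because the header is the one fixed-width value in an otherwise compactly encoded stream: a four-byte slot can be overwritten in place once ExtendedJumpTable_ is bound, whereas a variable-length encoding could not. A sketch of the reserve-and-patch idiom; the four-byte raw layout is an assumption, not the actual CompactBuffer implementation:

    #include <cassert>
    #include <cstdint>
    #include <cstring>
    #include <vector>

    struct JumpRelocWriter {
        std::vector<uint8_t> bytes;

        // Reserve a patchable fixed-width slot (addJumpRelocation() writes 0 here).
        void writeFixedUint32(uint32_t v) {
            uint8_t raw[sizeof v];
            std::memcpy(raw, &v, sizeof v);
            bytes.insert(bytes.end(), raw, raw + sizeof v);
        }

        // Overwrite the slot once the real offset is known (finish()'s job).
        void patchHeader(uint32_t extendedJumpTableOffset) {
            assert(bytes.size() >= sizeof(uint32_t));
            std::memcpy(bytes.data(), &extendedJumpTableOffset, sizeof(uint32_t));
        }
    };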
@@ -159,9 +150,9 @@ Assembler::executableCopy(uint8_t* buffer)
         }

         Instruction* target = (Instruction*)rp.target;
-        Instruction* branch = (Instruction*)(buffer + toFinalOffset(rp.offset));
+        Instruction* branch = (Instruction*)(buffer + rp.offset.getOffset());
         JumpTableEntry* extendedJumpTable =
-            reinterpret_cast<JumpTableEntry*>(buffer + toFinalOffset(ExtendedJumpTable_));
+            reinterpret_cast<JumpTableEntry*>(buffer + ExtendedJumpTable_.getOffset());
         if (branch->BranchType() != vixl::UnknownBranchType) {
             if (branch->IsTargetReachable(target)) {
                 branch->SetImmPCOffsetTarget(target);
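For context on the patching above: if the jump target fits in the branch instruction's immediate field, executableCopy() retargets the branch directly; otherwise the branch is pointed at its extended-jump-table entry, which can reach any address. A rough sketch of that decision; the ±128 MiB window is the range of an AArch64 B/BL 26-bit immediate, an assumption about which branch type is being patched:

    #include <cstddef>
    #include <cstdint>

    // Can a PC-relative branch at 'branch' encode the displacement to 'target'?
    bool targetReachable(const uint8_t* branch, const uint8_t* target) {
        std::ptrdiff_t delta = target - branch;      // byte displacement
        return delta >= -(std::ptrdiff_t(1) << 27) &&
               delta <   (std::ptrdiff_t(1) << 27);  // 26-bit imm * 4 bytes
    }

    // Pick the destination the branch will actually be patched to.
    const uint8_t* patchedDestination(const uint8_t* branch, const uint8_t* target,
                                      const uint8_t* extendedJumpTableEntry) {
        return targetReachable(branch, target) ? target : extendedJumpTableEntry;
    }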
@@ -301,8 +292,16 @@ Assembler::addJumpRelocation(BufferOffset src, Relocation::Kind reloc)
     // Only JITCODE relocations are patchable at runtime.
     MOZ_ASSERT(reloc == Relocation::JITCODE);

-    // Each relocation requires an entry in the extended jump table.
-    tmpJumpRelocations_.append(JumpRelocation(src, pendingJumps_.length()));
+    // The jump relocation table starts with a fixed-width integer pointing
+    // to the start of the extended jump table. But, we don't know the
+    // actual extended jump table offset yet, so write a 0 which we'll
+    // patch later in Assembler::finish().
+    if (!jumpRelocations_.length())
+        jumpRelocations_.writeFixedUint32_t(0);
+
+    // Each entry in the table is an (offset, extendedTableIndex) pair.
+    jumpRelocations_.writeUnsigned(src.getOffset());
+    jumpRelocations_.writeUnsigned(pendingJumps_.length());
 }

 void
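After this change the jump relocation stream has a fully explicit layout: one fixed-width uint32 holding the extended jump table's offset, followed by a sequence of (jumpOffset, extendedTableIndex) pairs. A hypothetical reader for that layout, assuming the same 7-bits-per-byte encoding as the writer sketch above (the real consumer is a CompactBufferReader):

    #include <cstdint>
    #include <cstdio>
    #include <cstring>
    #include <vector>

    struct Reader {
        const uint8_t* p;
        const uint8_t* end;

        uint32_t readFixedUint32() {
            uint32_t v;
            std::memcpy(&v, p, sizeof v);
            p += sizeof v;
            return v;
        }
        uint32_t readUnsigned() {            // mirrors the 7-bit writer encoding
            uint32_t v = 0;
            int shift = 0;
            uint8_t b;
            do {
                b = *p++;
                v |= uint32_t(b & 0x7f) << shift;
                shift += 7;
            } while (b & 0x80);
            return v;
        }
    };

    void dumpJumpRelocations(const std::vector<uint8_t>& stream) {
        Reader r{stream.data(), stream.data() + stream.size()};
        std::printf("extended jump table at +%u\n", r.readFixedUint32());
        while (r.p < r.end) {
            uint32_t jumpOffset = r.readUnsigned();
            uint32_t tableIndex = r.readUnsigned();
            std::printf("jump at +%u -> table entry %u\n", jumpOffset, tableIndex);
        }
    }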
@@ -342,11 +342,6 @@ class Assembler : public vixl::Assembler
     static void FixupNurseryObjects(JSContext* cx, JitCode* code, CompactBufferReader& reader,
                                     const ObjectVector& nurseryObjects);

-    // Convert a BufferOffset to a final byte offset from the start of the code buffer.
-    size_t toFinalOffset(BufferOffset offset) {
-        return size_t(offset.getOffset());
-    }
-
   public:
     // A Jump table entry is 2 instructions, with 8 bytes of raw data
     static const size_t SizeOfJumpTableEntry = 16;
@@ -402,13 +397,6 @@ class Assembler : public vixl::Assembler
         { }
     };

-    // Because ARM and A64 use a code buffer that allows for constant pool insertion,
-    // the actual offset of each jump cannot be known until finalization.
-    // These vectors store the WIP offsets.
-    js::Vector<BufferOffset, 0, SystemAllocPolicy> tmpDataRelocations_;
-    js::Vector<BufferOffset, 0, SystemAllocPolicy> tmpPreBarriers_;
-    js::Vector<JumpRelocation, 0, SystemAllocPolicy> tmpJumpRelocations_;
-
     // Structure for fixing up pc-relative loads/jumps when the machine
     // code gets moved (executable copy, gc, etc.).
     struct RelativePatch
@@ -2532,19 +2532,19 @@ class MacroAssemblerCompat : public vixl::MacroAssembler
     // load: offset to the load instruction obtained by movePatchablePtr().
     void writeDataRelocation(ImmGCPtr ptr, BufferOffset load) {
         if (ptr.value)
-            tmpDataRelocations_.append(load);
+            dataRelocations_.writeUnsigned(load.getOffset());
     }
     void writeDataRelocation(const Value& val, BufferOffset load) {
         if (val.isMarkable()) {
             gc::Cell* cell = reinterpret_cast<gc::Cell*>(val.toGCThing());
             if (cell && gc::IsInsideNursery(cell))
                 embedsNurseryPointers_ = true;
-            tmpDataRelocations_.append(load);
+            dataRelocations_.writeUnsigned(load.getOffset());
         }
     }

     void writePrebarrierOffset(CodeOffsetLabel label) {
-        tmpPreBarriers_.append(BufferOffset(label.offset()));
+        preBarriers_.writeUnsigned(label.offset());
     }

     void computeEffectiveAddress(const Address& address, Register dest) {
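On the consuming side, each data-relocation offset marks a patchable load whose literal holds a GC pointer; at GC time a tracer walks the offsets, traces each pointer, and re-patches the site if the cell moved. A schematic model of that consumer, which stores the pointer as raw inline bytes instead of decoding real AArch64 instructions:

    #include <cstdint>
    #include <cstring>
    #include <vector>

    // 'trace' stands in for the GC hook; it returns the (possibly moved) cell.
    void traceDataRelocations(uint8_t* code, const std::vector<uint32_t>& offsets,
                              void* (*trace)(void*)) {
        for (uint32_t offset : offsets) {
            void* thing;
            std::memcpy(&thing, code + offset, sizeof thing);
            void* moved = trace(thing);
            if (moved != thing)                                    // cell relocated:
                std::memcpy(code + offset, &moved, sizeof moved);  // re-patch the site
        }
    }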