Imported Upstream version 6.10.0.49

Former-commit-id: 1d6753294b2993e1fbf92de9366bb9544db4189b
Xamarin Public Jenkins (auto-signing)
2020-01-16 16:38:04 +00:00
parent d94e79959b
commit 468663ddbb
48518 changed files with 2789335 additions and 61176 deletions

File diff suppressed because it is too large

File diff suppressed because it is too large


@@ -0,0 +1,438 @@
//===-- AArch64ELFObjectWriter.cpp - AArch64 ELF Writer -------------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file handles ELF-specific object emission, converting LLVM's internal
// fixups into the appropriate relocations.
//
//===----------------------------------------------------------------------===//
#include "MCTargetDesc/AArch64FixupKinds.h"
#include "MCTargetDesc/AArch64MCExpr.h"
#include "MCTargetDesc/AArch64MCTargetDesc.h"
#include "llvm/BinaryFormat/ELF.h"
#include "llvm/MC/MCContext.h"
#include "llvm/MC/MCELFObjectWriter.h"
#include "llvm/MC/MCFixup.h"
#include "llvm/MC/MCObjectWriter.h"
#include "llvm/MC/MCValue.h"
#include "llvm/Support/ErrorHandling.h"
#include <cassert>
#include <cstdint>
using namespace llvm;
namespace {
class AArch64ELFObjectWriter : public MCELFObjectTargetWriter {
public:
AArch64ELFObjectWriter(uint8_t OSABI, bool IsLittleEndian, bool IsILP32);
~AArch64ELFObjectWriter() override = default;
protected:
unsigned getRelocType(MCContext &Ctx, const MCValue &Target,
const MCFixup &Fixup, bool IsPCRel) const override;
bool IsILP32;
};
} // end anonymous namespace
AArch64ELFObjectWriter::AArch64ELFObjectWriter(uint8_t OSABI,
bool IsLittleEndian,
bool IsILP32)
: MCELFObjectTargetWriter(/*Is64Bit*/ true, OSABI, ELF::EM_AARCH64,
/*HasRelocationAddend*/ true),
IsILP32(IsILP32) {}
#define R_CLS(rtype) \
IsILP32 ? ELF::R_AARCH64_P32_##rtype : ELF::R_AARCH64_##rtype
#define BAD_ILP32_MOV(lp64rtype) \
"ILP32 absolute MOV relocation not " \
"supported (LP64 eqv: " #lp64rtype ")"
// assumes IsILP32 is true
static bool isNonILP32reloc(const MCFixup &Fixup,
AArch64MCExpr::VariantKind RefKind,
MCContext &Ctx) {
if ((unsigned)Fixup.getKind() != AArch64::fixup_aarch64_movw)
return false;
switch (RefKind) {
case AArch64MCExpr::VK_ABS_G3:
Ctx.reportError(Fixup.getLoc(), BAD_ILP32_MOV(MOVW_UABS_G3));
return true;
case AArch64MCExpr::VK_ABS_G2:
Ctx.reportError(Fixup.getLoc(), BAD_ILP32_MOV(MOVW_UABS_G2));
return true;
case AArch64MCExpr::VK_ABS_G2_S:
Ctx.reportError(Fixup.getLoc(), BAD_ILP32_MOV(MOVW_SABS_G2));
return true;
case AArch64MCExpr::VK_ABS_G2_NC:
Ctx.reportError(Fixup.getLoc(), BAD_ILP32_MOV(MOVW_UABS_G2_NC));
return true;
case AArch64MCExpr::VK_ABS_G1_S:
Ctx.reportError(Fixup.getLoc(), BAD_ILP32_MOV(MOVW_SABS_G1));
return true;
case AArch64MCExpr::VK_ABS_G1_NC:
Ctx.reportError(Fixup.getLoc(), BAD_ILP32_MOV(MOVW_UABS_G1_NC));
return true;
case AArch64MCExpr::VK_DTPREL_G2:
Ctx.reportError(Fixup.getLoc(), BAD_ILP32_MOV(TLSLD_MOVW_DTPREL_G2));
return true;
case AArch64MCExpr::VK_DTPREL_G1_NC:
Ctx.reportError(Fixup.getLoc(), BAD_ILP32_MOV(TLSLD_MOVW_DTPREL_G1_NC));
return true;
case AArch64MCExpr::VK_TPREL_G2:
Ctx.reportError(Fixup.getLoc(), BAD_ILP32_MOV(TLSLE_MOVW_TPREL_G2));
return true;
case AArch64MCExpr::VK_TPREL_G1_NC:
Ctx.reportError(Fixup.getLoc(), BAD_ILP32_MOV(TLSLE_MOVW_TPREL_G1_NC));
return true;
case AArch64MCExpr::VK_GOTTPREL_G1:
Ctx.reportError(Fixup.getLoc(), BAD_ILP32_MOV(TLSIE_MOVW_GOTTPREL_G1));
return true;
case AArch64MCExpr::VK_GOTTPREL_G0_NC:
Ctx.reportError(Fixup.getLoc(), BAD_ILP32_MOV(TLSIE_MOVW_GOTTPREL_G0_NC));
return true;
default:
return false;
}
return false;
}
unsigned AArch64ELFObjectWriter::getRelocType(MCContext &Ctx,
const MCValue &Target,
const MCFixup &Fixup,
bool IsPCRel) const {
AArch64MCExpr::VariantKind RefKind =
static_cast<AArch64MCExpr::VariantKind>(Target.getRefKind());
AArch64MCExpr::VariantKind SymLoc = AArch64MCExpr::getSymbolLoc(RefKind);
bool IsNC = AArch64MCExpr::isNotChecked(RefKind);
assert((!Target.getSymA() ||
Target.getSymA()->getKind() == MCSymbolRefExpr::VK_None) &&
"Should only be expression-level modifiers here");
assert((!Target.getSymB() ||
Target.getSymB()->getKind() == MCSymbolRefExpr::VK_None) &&
"Should only be expression-level modifiers here");
if (IsPCRel) {
switch ((unsigned)Fixup.getKind()) {
case FK_Data_1:
Ctx.reportError(Fixup.getLoc(), "1-byte data relocations not supported");
return ELF::R_AARCH64_NONE;
case FK_Data_2:
return R_CLS(PREL16);
case FK_Data_4:
return R_CLS(PREL32);
case FK_Data_8:
if (IsILP32) {
Ctx.reportError(Fixup.getLoc(),
"ILP32 8 byte PC relative data "
"relocation not supported (LP64 eqv: PREL64)");
return ELF::R_AARCH64_NONE;
} else
return ELF::R_AARCH64_PREL64;
case AArch64::fixup_aarch64_pcrel_adr_imm21:
assert(SymLoc == AArch64MCExpr::VK_NONE && "unexpected ADR relocation");
return R_CLS(ADR_PREL_LO21);
case AArch64::fixup_aarch64_pcrel_adrp_imm21:
if (SymLoc == AArch64MCExpr::VK_ABS && !IsNC)
return R_CLS(ADR_PREL_PG_HI21);
if (SymLoc == AArch64MCExpr::VK_ABS && IsNC) {
if (IsILP32) {
Ctx.reportError(Fixup.getLoc(),
"invalid fixup for 32-bit pcrel ADRP instruction "
"VK_ABS VK_NC");
return ELF::R_AARCH64_NONE;
} else {
return ELF::R_AARCH64_ADR_PREL_PG_HI21_NC;
}
}
if (SymLoc == AArch64MCExpr::VK_GOT && !IsNC)
return R_CLS(ADR_GOT_PAGE);
if (SymLoc == AArch64MCExpr::VK_GOTTPREL && !IsNC)
return R_CLS(TLSIE_ADR_GOTTPREL_PAGE21);
if (SymLoc == AArch64MCExpr::VK_TLSDESC && !IsNC)
return R_CLS(TLSDESC_ADR_PAGE21);
Ctx.reportError(Fixup.getLoc(),
"invalid symbol kind for ADRP relocation");
return ELF::R_AARCH64_NONE;
case AArch64::fixup_aarch64_pcrel_branch26:
return R_CLS(JUMP26);
case AArch64::fixup_aarch64_pcrel_call26:
return R_CLS(CALL26);
case AArch64::fixup_aarch64_ldr_pcrel_imm19:
if (SymLoc == AArch64MCExpr::VK_GOTTPREL)
return R_CLS(TLSIE_LD_GOTTPREL_PREL19);
return R_CLS(LD_PREL_LO19);
case AArch64::fixup_aarch64_pcrel_branch14:
return R_CLS(TSTBR14);
case AArch64::fixup_aarch64_pcrel_branch19:
return R_CLS(CONDBR19);
default:
Ctx.reportError(Fixup.getLoc(), "Unsupported pc-relative fixup kind");
return ELF::R_AARCH64_NONE;
}
} else {
if (IsILP32 && isNonILP32reloc(Fixup, RefKind, Ctx))
return ELF::R_AARCH64_NONE;
switch ((unsigned)Fixup.getKind()) {
case FK_Data_1:
Ctx.reportError(Fixup.getLoc(), "1-byte data relocations not supported");
return ELF::R_AARCH64_NONE;
case FK_Data_2:
return R_CLS(ABS16);
case FK_Data_4:
return R_CLS(ABS32);
case FK_Data_8:
if (IsILP32) {
Ctx.reportError(Fixup.getLoc(),
"ILP32 8 byte absolute data "
"relocation not supported (LP64 eqv: ABS64)");
return ELF::R_AARCH64_NONE;
} else
return ELF::R_AARCH64_ABS64;
case AArch64::fixup_aarch64_add_imm12:
if (RefKind == AArch64MCExpr::VK_DTPREL_HI12)
return R_CLS(TLSLD_ADD_DTPREL_HI12);
if (RefKind == AArch64MCExpr::VK_TPREL_HI12)
return R_CLS(TLSLE_ADD_TPREL_HI12);
if (RefKind == AArch64MCExpr::VK_DTPREL_LO12_NC)
return R_CLS(TLSLD_ADD_DTPREL_LO12_NC);
if (RefKind == AArch64MCExpr::VK_DTPREL_LO12)
return R_CLS(TLSLD_ADD_DTPREL_LO12);
if (RefKind == AArch64MCExpr::VK_TPREL_LO12_NC)
return R_CLS(TLSLE_ADD_TPREL_LO12_NC);
if (RefKind == AArch64MCExpr::VK_TPREL_LO12)
return R_CLS(TLSLE_ADD_TPREL_LO12);
if (RefKind == AArch64MCExpr::VK_TLSDESC_LO12)
return R_CLS(TLSDESC_ADD_LO12);
if (SymLoc == AArch64MCExpr::VK_ABS && IsNC)
return R_CLS(ADD_ABS_LO12_NC);
Ctx.reportError(Fixup.getLoc(),
"invalid fixup for add (uimm12) instruction");
return ELF::R_AARCH64_NONE;
case AArch64::fixup_aarch64_ldst_imm12_scale1:
if (SymLoc == AArch64MCExpr::VK_ABS && IsNC)
return R_CLS(LDST8_ABS_LO12_NC);
if (SymLoc == AArch64MCExpr::VK_DTPREL && !IsNC)
return R_CLS(TLSLD_LDST8_DTPREL_LO12);
if (SymLoc == AArch64MCExpr::VK_DTPREL && IsNC)
return R_CLS(TLSLD_LDST8_DTPREL_LO12_NC);
if (SymLoc == AArch64MCExpr::VK_TPREL && !IsNC)
return R_CLS(TLSLE_LDST8_TPREL_LO12);
if (SymLoc == AArch64MCExpr::VK_TPREL && IsNC)
return R_CLS(TLSLE_LDST8_TPREL_LO12_NC);
Ctx.reportError(Fixup.getLoc(),
"invalid fixup for 8-bit load/store instruction");
return ELF::R_AARCH64_NONE;
case AArch64::fixup_aarch64_ldst_imm12_scale2:
if (SymLoc == AArch64MCExpr::VK_ABS && IsNC)
return R_CLS(LDST16_ABS_LO12_NC);
if (SymLoc == AArch64MCExpr::VK_DTPREL && !IsNC)
return R_CLS(TLSLD_LDST16_DTPREL_LO12);
if (SymLoc == AArch64MCExpr::VK_DTPREL && IsNC)
return R_CLS(TLSLD_LDST16_DTPREL_LO12_NC);
if (SymLoc == AArch64MCExpr::VK_TPREL && !IsNC)
return R_CLS(TLSLE_LDST16_TPREL_LO12);
if (SymLoc == AArch64MCExpr::VK_TPREL && IsNC)
return R_CLS(TLSLE_LDST16_TPREL_LO12_NC);
Ctx.reportError(Fixup.getLoc(),
"invalid fixup for 16-bit load/store instruction");
return ELF::R_AARCH64_NONE;
case AArch64::fixup_aarch64_ldst_imm12_scale4:
if (SymLoc == AArch64MCExpr::VK_ABS && IsNC)
return R_CLS(LDST32_ABS_LO12_NC);
if (SymLoc == AArch64MCExpr::VK_DTPREL && !IsNC)
return R_CLS(TLSLD_LDST32_DTPREL_LO12);
if (SymLoc == AArch64MCExpr::VK_DTPREL && IsNC)
return R_CLS(TLSLD_LDST32_DTPREL_LO12_NC);
if (SymLoc == AArch64MCExpr::VK_TPREL && !IsNC)
return R_CLS(TLSLE_LDST32_TPREL_LO12);
if (SymLoc == AArch64MCExpr::VK_TPREL && IsNC)
return R_CLS(TLSLE_LDST32_TPREL_LO12_NC);
if (SymLoc == AArch64MCExpr::VK_GOT && IsNC) {
if (IsILP32) {
return ELF::R_AARCH64_P32_LD32_GOT_LO12_NC;
} else {
Ctx.reportError(Fixup.getLoc(),
"LP64 4 byte unchecked GOT load/store relocation "
"not supported (ILP32 eqv: LD32_GOT_LO12_NC");
return ELF::R_AARCH64_NONE;
}
}
if (SymLoc == AArch64MCExpr::VK_GOT && !IsNC) {
if (IsILP32) {
Ctx.reportError(Fixup.getLoc(),
"ILP32 4 byte checked GOT load/store relocation "
"not supported (unchecked eqv: LD32_GOT_LO12_NC)");
} else {
Ctx.reportError(Fixup.getLoc(),
"LP64 4 byte checked GOT load/store relocation "
"not supported (unchecked/ILP32 eqv: "
"LD32_GOT_LO12_NC)");
}
return ELF::R_AARCH64_NONE;
}
if (SymLoc == AArch64MCExpr::VK_GOTTPREL && IsNC) {
if (IsILP32) {
return ELF::R_AARCH64_P32_TLSIE_LD32_GOTTPREL_LO12_NC;
} else {
Ctx.reportError(Fixup.getLoc(),
"LP64 32-bit load/store "
"relocation not supported (ILP32 eqv: "
"TLSIE_LD32_GOTTPREL_LO12_NC)");
return ELF::R_AARCH64_NONE;
}
}
if (SymLoc == AArch64MCExpr::VK_TLSDESC && !IsNC) {
if (IsILP32) {
return ELF::R_AARCH64_P32_TLSDESC_LD32_LO12;
} else {
Ctx.reportError(Fixup.getLoc(),
"LP64 4 byte TLSDESC load/store relocation "
"not supported (ILP32 eqv: TLSDESC_LD64_LO12)");
return ELF::R_AARCH64_NONE;
}
}
Ctx.reportError(Fixup.getLoc(),
"invalid fixup for 32-bit load/store instruction "
"fixup_aarch64_ldst_imm12_scale4");
return ELF::R_AARCH64_NONE;
case AArch64::fixup_aarch64_ldst_imm12_scale8:
if (SymLoc == AArch64MCExpr::VK_ABS && IsNC)
return R_CLS(LDST64_ABS_LO12_NC);
if (SymLoc == AArch64MCExpr::VK_GOT && IsNC) {
if (!IsILP32) {
return ELF::R_AARCH64_LD64_GOT_LO12_NC;
} else {
Ctx.reportError(Fixup.getLoc(), "ILP32 64-bit load/store "
"relocation not supported (LP64 eqv: "
"LD64_GOT_LO12_NC)");
return ELF::R_AARCH64_NONE;
}
}
if (SymLoc == AArch64MCExpr::VK_DTPREL && !IsNC)
return R_CLS(TLSLD_LDST64_DTPREL_LO12);
if (SymLoc == AArch64MCExpr::VK_DTPREL && IsNC)
return R_CLS(TLSLD_LDST64_DTPREL_LO12_NC);
if (SymLoc == AArch64MCExpr::VK_TPREL && !IsNC)
return R_CLS(TLSLE_LDST64_TPREL_LO12);
if (SymLoc == AArch64MCExpr::VK_TPREL && IsNC)
return R_CLS(TLSLE_LDST64_TPREL_LO12_NC);
if (SymLoc == AArch64MCExpr::VK_GOTTPREL && IsNC) {
if (!IsILP32) {
return ELF::R_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC;
} else {
Ctx.reportError(Fixup.getLoc(), "ILP32 64-bit load/store "
"relocation not supported (LP64 eqv: "
"TLSIE_LD64_GOTTPREL_LO12_NC)");
return ELF::R_AARCH64_NONE;
}
}
if (SymLoc == AArch64MCExpr::VK_TLSDESC) {
if (!IsILP32) {
return ELF::R_AARCH64_TLSDESC_LD64_LO12;
} else {
Ctx.reportError(Fixup.getLoc(), "ILP32 64-bit load/store "
"relocation not supported (LP64 eqv: "
"TLSDESC_LD64_LO12)");
return ELF::R_AARCH64_NONE;
}
}
Ctx.reportError(Fixup.getLoc(),
"invalid fixup for 64-bit load/store instruction");
return ELF::R_AARCH64_NONE;
case AArch64::fixup_aarch64_ldst_imm12_scale16:
if (SymLoc == AArch64MCExpr::VK_ABS && IsNC)
return R_CLS(LDST128_ABS_LO12_NC);
if (SymLoc == AArch64MCExpr::VK_DTPREL && !IsNC)
return R_CLS(TLSLD_LDST128_DTPREL_LO12);
if (SymLoc == AArch64MCExpr::VK_DTPREL && IsNC)
return R_CLS(TLSLD_LDST128_DTPREL_LO12_NC);
if (SymLoc == AArch64MCExpr::VK_TPREL && !IsNC)
return R_CLS(TLSLE_LDST128_TPREL_LO12);
if (SymLoc == AArch64MCExpr::VK_TPREL && IsNC)
return R_CLS(TLSLE_LDST128_TPREL_LO12_NC);
Ctx.reportError(Fixup.getLoc(),
"invalid fixup for 128-bit load/store instruction");
return ELF::R_AARCH64_NONE;
// The ILP32-unsupported MOVW cases never reach here; isNonILP32reloc has
// already rejected them above.
case AArch64::fixup_aarch64_movw:
if (RefKind == AArch64MCExpr::VK_ABS_G3)
return ELF::R_AARCH64_MOVW_UABS_G3;
if (RefKind == AArch64MCExpr::VK_ABS_G2)
return ELF::R_AARCH64_MOVW_UABS_G2;
if (RefKind == AArch64MCExpr::VK_ABS_G2_S)
return ELF::R_AARCH64_MOVW_SABS_G2;
if (RefKind == AArch64MCExpr::VK_ABS_G2_NC)
return ELF::R_AARCH64_MOVW_UABS_G2_NC;
if (RefKind == AArch64MCExpr::VK_ABS_G1)
return R_CLS(MOVW_UABS_G1);
if (RefKind == AArch64MCExpr::VK_ABS_G1_S)
return ELF::R_AARCH64_MOVW_SABS_G1;
if (RefKind == AArch64MCExpr::VK_ABS_G1_NC)
return ELF::R_AARCH64_MOVW_UABS_G1_NC;
if (RefKind == AArch64MCExpr::VK_ABS_G0)
return R_CLS(MOVW_UABS_G0);
if (RefKind == AArch64MCExpr::VK_ABS_G0_S)
return R_CLS(MOVW_SABS_G0);
if (RefKind == AArch64MCExpr::VK_ABS_G0_NC)
return R_CLS(MOVW_UABS_G0_NC);
if (RefKind == AArch64MCExpr::VK_DTPREL_G2)
return ELF::R_AARCH64_TLSLD_MOVW_DTPREL_G2;
if (RefKind == AArch64MCExpr::VK_DTPREL_G1)
return R_CLS(TLSLD_MOVW_DTPREL_G1);
if (RefKind == AArch64MCExpr::VK_DTPREL_G1_NC)
return ELF::R_AARCH64_TLSLD_MOVW_DTPREL_G1_NC;
if (RefKind == AArch64MCExpr::VK_DTPREL_G0)
return R_CLS(TLSLD_MOVW_DTPREL_G0);
if (RefKind == AArch64MCExpr::VK_DTPREL_G0_NC)
return R_CLS(TLSLD_MOVW_DTPREL_G0_NC);
if (RefKind == AArch64MCExpr::VK_TPREL_G2)
return ELF::R_AARCH64_TLSLE_MOVW_TPREL_G2;
if (RefKind == AArch64MCExpr::VK_TPREL_G1)
return R_CLS(TLSLE_MOVW_TPREL_G1);
if (RefKind == AArch64MCExpr::VK_TPREL_G1_NC)
return ELF::R_AARCH64_TLSLE_MOVW_TPREL_G1_NC;
if (RefKind == AArch64MCExpr::VK_TPREL_G0)
return R_CLS(TLSLE_MOVW_TPREL_G0);
if (RefKind == AArch64MCExpr::VK_TPREL_G0_NC)
return R_CLS(TLSLE_MOVW_TPREL_G0_NC);
if (RefKind == AArch64MCExpr::VK_GOTTPREL_G1)
return ELF::R_AARCH64_TLSIE_MOVW_GOTTPREL_G1;
if (RefKind == AArch64MCExpr::VK_GOTTPREL_G0_NC)
return ELF::R_AARCH64_TLSIE_MOVW_GOTTPREL_G0_NC;
Ctx.reportError(Fixup.getLoc(),
"invalid fixup for movz/movk instruction");
return ELF::R_AARCH64_NONE;
case AArch64::fixup_aarch64_tlsdesc_call:
return R_CLS(TLSDESC_CALL);
default:
Ctx.reportError(Fixup.getLoc(), "Unknown ELF relocation type");
return ELF::R_AARCH64_NONE;
}
}
llvm_unreachable("Unimplemented fixup -> relocation");
}
std::unique_ptr<MCObjectWriter>
llvm::createAArch64ELFObjectWriter(raw_pwrite_stream &OS, uint8_t OSABI,
bool IsLittleEndian, bool IsILP32) {
auto MOTW =
llvm::make_unique<AArch64ELFObjectWriter>(OSABI, IsLittleEndian, IsILP32);
return createELFObjectWriter(std::move(MOTW), OS, IsLittleEndian);
}


@@ -0,0 +1,232 @@
//===- lib/MC/AArch64ELFStreamer.cpp - ELF Object Output for AArch64 ------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file assembles .s files and emits AArch64 ELF .o object files. It
// differs from the generic ELF streamer in that it emits mapping symbols
// ($x and $d) to delimit regions of data and code.
//
//===----------------------------------------------------------------------===//
#include "AArch64TargetStreamer.h"
#include "AArch64WinCOFFStreamer.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/Triple.h"
#include "llvm/ADT/Twine.h"
#include "llvm/BinaryFormat/ELF.h"
#include "llvm/MC/MCAsmBackend.h"
#include "llvm/MC/MCAssembler.h"
#include "llvm/MC/MCCodeEmitter.h"
#include "llvm/MC/MCContext.h"
#include "llvm/MC/MCELFStreamer.h"
#include "llvm/MC/MCExpr.h"
#include "llvm/MC/MCInst.h"
#include "llvm/MC/MCSection.h"
#include "llvm/MC/MCStreamer.h"
#include "llvm/MC/MCSubtargetInfo.h"
#include "llvm/MC/MCSymbolELF.h"
#include "llvm/MC/MCWinCOFFStreamer.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/FormattedStream.h"
#include "llvm/Support/raw_ostream.h"
using namespace llvm;
namespace {
class AArch64ELFStreamer;
class AArch64TargetAsmStreamer : public AArch64TargetStreamer {
formatted_raw_ostream &OS;
void emitInst(uint32_t Inst) override;
public:
AArch64TargetAsmStreamer(MCStreamer &S, formatted_raw_ostream &OS);
};
AArch64TargetAsmStreamer::AArch64TargetAsmStreamer(MCStreamer &S,
formatted_raw_ostream &OS)
: AArch64TargetStreamer(S), OS(OS) {}
void AArch64TargetAsmStreamer::emitInst(uint32_t Inst) {
OS << "\t.inst\t0x" << Twine::utohexstr(Inst) << "\n";
}
class AArch64TargetELFStreamer : public AArch64TargetStreamer {
private:
AArch64ELFStreamer &getStreamer();
void emitInst(uint32_t Inst) override;
public:
AArch64TargetELFStreamer(MCStreamer &S) : AArch64TargetStreamer(S) {}
};
/// Extend the generic ELFStreamer class so that it can emit mapping symbols at
/// the appropriate points in the object files. These symbols are defined in the
/// AArch64 ELF ABI:
/// infocenter.arm.com/help/topic/com.arm.doc.ihi0056a/IHI0056A_aaelf64.pdf
///
/// In brief: $x or $d should be emitted at the start of each contiguous region
/// of A64 code or data in a section. In practice, this emission does not rely
/// on explicit assembler directives but on inherent properties of the
/// directives doing the emission (e.g. ".byte" is data, "add x0, x0, x0" an
/// instruction).
///
/// As a result this system is orthogonal to the DataRegion infrastructure used
/// by MachO. Beware!
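///
/// For illustration, assembling
///     add x0, x0, x0      // starts a code region  -> $x emitted
///     .byte 0x0           // starts a data region  -> $d emitted
///     add x1, x1, x1      // back to code          -> $x emitted again
/// yields mapping symbols named $x.0, $d.1, $x.2 (the numeric suffix comes
/// from MappingSymbolCounter below).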
class AArch64ELFStreamer : public MCELFStreamer {
public:
friend class AArch64TargetELFStreamer;
AArch64ELFStreamer(MCContext &Context, std::unique_ptr<MCAsmBackend> TAB,
raw_pwrite_stream &OS,
std::unique_ptr<MCCodeEmitter> Emitter)
: MCELFStreamer(Context, std::move(TAB), OS, std::move(Emitter)),
MappingSymbolCounter(0), LastEMS(EMS_None) {}
void ChangeSection(MCSection *Section, const MCExpr *Subsection) override {
// We have to keep track of the mapping symbol state of any sections we
// use. Each one should start off as EMS_None, which is provided as the
// default constructor by DenseMap::lookup.
LastMappingSymbols[getPreviousSection().first] = LastEMS;
LastEMS = LastMappingSymbols.lookup(Section);
MCELFStreamer::ChangeSection(Section, Subsection);
}
// Reset state between object emissions
void reset() override {
MappingSymbolCounter = 0;
MCELFStreamer::reset();
LastMappingSymbols.clear();
LastEMS = EMS_None;
}
/// This function is the one used to emit instruction data into the ELF
/// streamer. We override it to add the appropriate mapping symbol if
/// necessary.
void EmitInstruction(const MCInst &Inst, const MCSubtargetInfo &STI,
bool) override {
EmitA64MappingSymbol();
MCELFStreamer::EmitInstruction(Inst, STI);
}
/// Emit a 32-bit value as an instruction. This is only used for the .inst
/// directive, EmitInstruction should be used in other cases.
void emitInst(uint32_t Inst) {
char Buffer[4];
// We can't just use EmitIntValue here, as that will emit a data mapping
// symbol, and swap the endianness on big-endian systems (instructions are
// always little-endian).
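// For example, Inst = 0xd503201f (NOP) is emitted as the byte sequence
// 1f 20 03 d5.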
for (unsigned I = 0; I < 4; ++I) {
Buffer[I] = uint8_t(Inst);
Inst >>= 8;
}
EmitA64MappingSymbol();
MCELFStreamer::EmitBytes(StringRef(Buffer, 4));
}
/// This is one of the functions used to emit data into an ELF section, so the
/// AArch64 streamer overrides it to add the appropriate mapping symbol ($d)
/// if necessary.
void EmitBytes(StringRef Data) override {
EmitDataMappingSymbol();
MCELFStreamer::EmitBytes(Data);
}
/// This is one of the functions used to emit data into an ELF section, so the
/// AArch64 streamer overrides it to add the appropriate mapping symbol ($d)
/// if necessary.
void EmitValueImpl(const MCExpr *Value, unsigned Size, SMLoc Loc) override {
EmitDataMappingSymbol();
MCELFStreamer::EmitValueImpl(Value, Size, Loc);
}
private:
enum ElfMappingSymbol {
EMS_None,
EMS_A64,
EMS_Data
};
void EmitDataMappingSymbol() {
if (LastEMS == EMS_Data)
return;
EmitMappingSymbol("$d");
LastEMS = EMS_Data;
}
void EmitA64MappingSymbol() {
if (LastEMS == EMS_A64)
return;
EmitMappingSymbol("$x");
LastEMS = EMS_A64;
}
void EmitMappingSymbol(StringRef Name) {
auto *Symbol = cast<MCSymbolELF>(getContext().getOrCreateSymbol(
Name + "." + Twine(MappingSymbolCounter++)));
EmitLabel(Symbol);
Symbol->setType(ELF::STT_NOTYPE);
Symbol->setBinding(ELF::STB_LOCAL);
Symbol->setExternal(false);
}
int64_t MappingSymbolCounter;
DenseMap<const MCSection *, ElfMappingSymbol> LastMappingSymbols;
ElfMappingSymbol LastEMS;
};
} // end anonymous namespace
AArch64ELFStreamer &AArch64TargetELFStreamer::getStreamer() {
return static_cast<AArch64ELFStreamer &>(Streamer);
}
void AArch64TargetELFStreamer::emitInst(uint32_t Inst) {
getStreamer().emitInst(Inst);
}
namespace llvm {
MCTargetStreamer *createAArch64AsmTargetStreamer(MCStreamer &S,
formatted_raw_ostream &OS,
MCInstPrinter *InstPrint,
bool isVerboseAsm) {
return new AArch64TargetAsmStreamer(S, OS);
}
MCELFStreamer *createAArch64ELFStreamer(MCContext &Context,
std::unique_ptr<MCAsmBackend> TAB,
raw_pwrite_stream &OS,
std::unique_ptr<MCCodeEmitter> Emitter,
bool RelaxAll) {
AArch64ELFStreamer *S =
new AArch64ELFStreamer(Context, std::move(TAB), OS, std::move(Emitter));
if (RelaxAll)
S->getAssembler().setRelaxAll(true);
return S;
}
MCTargetStreamer *
createAArch64ObjectTargetStreamer(MCStreamer &S, const MCSubtargetInfo &STI) {
const Triple &TT = STI.getTargetTriple();
if (TT.isOSBinFormatELF())
return new AArch64TargetELFStreamer(S);
if (TT.isOSBinFormatCOFF())
return new AArch64TargetWinCOFFStreamer(S);
return nullptr;
}
} // end namespace llvm


@@ -0,0 +1,28 @@
//===-- AArch64ELFStreamer.h - ELF Streamer for AArch64 ---------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements ELF streamer information for the AArch64 backend.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_LIB_TARGET_AARCH64_MCTARGETDESC_AARCH64ELFSTREAMER_H
#define LLVM_LIB_TARGET_AARCH64_MCTARGETDESC_AARCH64ELFSTREAMER_H
#include "llvm/MC/MCELFStreamer.h"
namespace llvm {
MCELFStreamer *createAArch64ELFStreamer(MCContext &Context,
std::unique_ptr<MCAsmBackend> TAB,
raw_pwrite_stream &OS,
std::unique_ptr<MCCodeEmitter> Emitter,
bool RelaxAll);
}
#endif


@@ -0,0 +1,70 @@
//===-- AArch64FixupKinds.h - AArch64 Specific Fixup Entries ----*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_LIB_TARGET_AARCH64_MCTARGETDESC_AARCH64FIXUPKINDS_H
#define LLVM_LIB_TARGET_AARCH64_MCTARGETDESC_AARCH64FIXUPKINDS_H
#include "llvm/MC/MCFixup.h"
namespace llvm {
namespace AArch64 {
enum Fixups {
// A 21-bit pc-relative immediate inserted into an ADR instruction.
fixup_aarch64_pcrel_adr_imm21 = FirstTargetFixupKind,
// A 21-bit pc-relative immediate inserted into an ADRP instruction.
fixup_aarch64_pcrel_adrp_imm21,
// 12-bit fixup for add/sub instructions. No alignment adjustment. All value
// bits are encoded.
fixup_aarch64_add_imm12,
// unsigned 12-bit fixups for load and store instructions.
fixup_aarch64_ldst_imm12_scale1,
fixup_aarch64_ldst_imm12_scale2,
fixup_aarch64_ldst_imm12_scale4,
fixup_aarch64_ldst_imm12_scale8,
fixup_aarch64_ldst_imm12_scale16,
// The high 19 bits of a 21-bit pc-relative immediate. Same encoding as
// fixup_aarch64_pcrel_adrhi, except this is used by pc-relative loads and
// generates relocations directly when necessary.
fixup_aarch64_ldr_pcrel_imm19,
// A 16-bit immediate for a MOVZ/MOVK instruction; the relocation is chosen
// from the operand's :abs_gN:/:dtprel_gN:/:tprel_gN:/:gottprel_gN: modifier.
fixup_aarch64_movw,
// The high 14 bits of a 21-bit pc-relative immediate.
fixup_aarch64_pcrel_branch14,
// The high 19 bits of a 21-bit pc-relative immediate. Same encoding as
// fixup_aarch64_pcrel_adrhi, except this is used by b.cc and generates
// relocations directly when necessary.
fixup_aarch64_pcrel_branch19,
// The high 26 bits of a 28-bit pc-relative immediate.
fixup_aarch64_pcrel_branch26,
// The high 26 bits of a 28-bit pc-relative immediate. Distinguished from
// branch26 only on ELF.
fixup_aarch64_pcrel_call26,
// zero-space placeholder for the ELF R_AARCH64_TLSDESC_CALL relocation.
fixup_aarch64_tlsdesc_call,
// Marker
LastTargetFixupKind,
NumTargetFixupKinds = LastTargetFixupKind - FirstTargetFixupKind
};
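// For illustration: "bl foo" is emitted with fixup_aarch64_pcrel_call26 and,
// on ELF, AArch64ELFObjectWriter::getRelocType turns it into R_AARCH64_CALL26
// (or R_AARCH64_P32_CALL26 for ILP32).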
} // end namespace AArch64
} // end namespace llvm
#endif


@@ -0,0 +1,125 @@
//===-- AArch64MCAsmInfo.cpp - AArch64 asm properties ---------------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file contains the declarations of the AArch64MCAsmInfo properties.
//
//===----------------------------------------------------------------------===//
#include "AArch64MCAsmInfo.h"
#include "llvm/ADT/Triple.h"
#include "llvm/MC/MCContext.h"
#include "llvm/MC/MCExpr.h"
#include "llvm/MC/MCStreamer.h"
#include "llvm/Support/CommandLine.h"
using namespace llvm;
enum AsmWriterVariantTy {
Default = -1,
Generic = 0,
Apple = 1
};
static cl::opt<AsmWriterVariantTy> AsmWriterVariant(
"aarch64-neon-syntax", cl::init(Default),
cl::desc("Choose style of NEON code to emit from AArch64 backend:"),
cl::values(clEnumValN(Generic, "generic", "Emit generic NEON assembly"),
clEnumValN(Apple, "apple", "Emit Apple-style NEON assembly")));
AArch64MCAsmInfoDarwin::AArch64MCAsmInfoDarwin() {
// We prefer NEON instructions to be printed in the short, Apple-specific
// form when targeting Darwin.
AssemblerDialect = AsmWriterVariant == Default ? Apple : AsmWriterVariant;
PrivateGlobalPrefix = "L";
PrivateLabelPrefix = "L";
SeparatorString = "%%";
CommentString = ";";
CodePointerSize = CalleeSaveStackSlotSize = 8;
AlignmentIsInBytes = false;
UsesELFSectionDirectiveForBSS = true;
SupportsDebugInformation = true;
UseDataRegionDirectives = true;
ExceptionsType = ExceptionHandling::DwarfCFI;
}
const MCExpr *AArch64MCAsmInfoDarwin::getExprForPersonalitySymbol(
const MCSymbol *Sym, unsigned Encoding, MCStreamer &Streamer) const {
// On Darwin, we can reference dwarf symbols with foo@GOT-., which
// is an indirect pc-relative reference. The default implementation
// won't reference using the GOT, so we need this target-specific
// version.
MCContext &Context = Streamer.getContext();
const MCExpr *Res =
MCSymbolRefExpr::create(Sym, MCSymbolRefExpr::VK_GOT, Context);
MCSymbol *PCSym = Context.createTempSymbol();
Streamer.EmitLabel(PCSym);
const MCExpr *PC = MCSymbolRefExpr::create(PCSym, Context);
return MCBinaryExpr::createSub(Res, PC, Context);
}
AArch64MCAsmInfoELF::AArch64MCAsmInfoELF(const Triple &T) {
if (T.getArch() == Triple::aarch64_be)
IsLittleEndian = false;
// We prefer NEON instructions to be printed in the generic form when
// targeting ELF.
AssemblerDialect = AsmWriterVariant == Default ? Generic : AsmWriterVariant;
CodePointerSize = 8;
// ".comm align is in bytes but .align is pow-2."
AlignmentIsInBytes = false;
CommentString = "//";
PrivateGlobalPrefix = ".L";
PrivateLabelPrefix = ".L";
Code32Directive = ".code\t32";
Data16bitsDirective = "\t.hword\t";
Data32bitsDirective = "\t.word\t";
Data64bitsDirective = "\t.xword\t";
UseDataRegionDirectives = false;
WeakRefDirective = "\t.weak\t";
SupportsDebugInformation = true;
// Exceptions handling
ExceptionsType = ExceptionHandling::DwarfCFI;
UseIntegratedAssembler = true;
HasIdentDirective = true;
}
AArch64MCAsmInfoCOFF::AArch64MCAsmInfoCOFF() {
PrivateGlobalPrefix = ".L";
PrivateLabelPrefix = ".L";
Data16bitsDirective = "\t.hword\t";
Data32bitsDirective = "\t.word\t";
Data64bitsDirective = "\t.xword\t";
AlignmentIsInBytes = false;
SupportsDebugInformation = true;
CodePointerSize = 8;
}
AArch64MCAsmInfoMicrosoftCOFF::AArch64MCAsmInfoMicrosoftCOFF() {
CommentString = ";";
ExceptionsType = ExceptionHandling::WinEH;
}
AArch64MCAsmInfoGNUCOFF::AArch64MCAsmInfoGNUCOFF() {
CommentString = "//";
ExceptionsType = ExceptionHandling::DwarfCFI;
}


@@ -0,0 +1,51 @@
//=====-- AArch64MCAsmInfo.h - AArch64 asm properties ---------*- C++ -*--====//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file contains the declaration of the AArch64MCAsmInfo class.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_LIB_TARGET_AARCH64_MCTARGETDESC_AARCH64MCASMINFO_H
#define LLVM_LIB_TARGET_AARCH64_MCTARGETDESC_AARCH64MCASMINFO_H
#include "llvm/MC/MCAsmInfoCOFF.h"
#include "llvm/MC/MCAsmInfoDarwin.h"
#include "llvm/MC/MCAsmInfoELF.h"
namespace llvm {
class MCStreamer;
class Target;
class Triple;
struct AArch64MCAsmInfoDarwin : public MCAsmInfoDarwin {
explicit AArch64MCAsmInfoDarwin();
const MCExpr *
getExprForPersonalitySymbol(const MCSymbol *Sym, unsigned Encoding,
MCStreamer &Streamer) const override;
};
struct AArch64MCAsmInfoELF : public MCAsmInfoELF {
explicit AArch64MCAsmInfoELF(const Triple &T);
};
struct AArch64MCAsmInfoCOFF : public MCAsmInfoCOFF {
explicit AArch64MCAsmInfoCOFF();
};
struct AArch64MCAsmInfoMicrosoftCOFF : public AArch64MCAsmInfoCOFF {
explicit AArch64MCAsmInfoMicrosoftCOFF();
};
struct AArch64MCAsmInfoGNUCOFF : public AArch64MCAsmInfoCOFF {
explicit AArch64MCAsmInfoGNUCOFF();
};
} // namespace llvm
#endif

File diff suppressed because it is too large


@@ -0,0 +1,145 @@
//===-- AArch64MCExpr.cpp - AArch64 specific MC expression classes --------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file contains the implementation of the assembly expression modifiers
// accepted by the AArch64 architecture (e.g. ":lo12:", ":gottprel_g1:", ...).
//
//===----------------------------------------------------------------------===//
#include "AArch64MCExpr.h"
#include "llvm/MC/MCContext.h"
#include "llvm/MC/MCStreamer.h"
#include "llvm/MC/MCSymbolELF.h"
#include "llvm/MC/MCValue.h"
#include "llvm/Object/ELF.h"
#include "llvm/Support/ErrorHandling.h"
using namespace llvm;
#define DEBUG_TYPE "aarch64symbolrefexpr"
const AArch64MCExpr *AArch64MCExpr::create(const MCExpr *Expr, VariantKind Kind,
MCContext &Ctx) {
return new (Ctx) AArch64MCExpr(Expr, Kind);
}
StringRef AArch64MCExpr::getVariantKindName() const {
switch (static_cast<uint32_t>(getKind())) {
case VK_CALL: return "";
case VK_LO12: return ":lo12:";
case VK_ABS_G3: return ":abs_g3:";
case VK_ABS_G2: return ":abs_g2:";
case VK_ABS_G2_S: return ":abs_g2_s:";
case VK_ABS_G2_NC: return ":abs_g2_nc:";
case VK_ABS_G1: return ":abs_g1:";
case VK_ABS_G1_S: return ":abs_g1_s:";
case VK_ABS_G1_NC: return ":abs_g1_nc:";
case VK_ABS_G0: return ":abs_g0:";
case VK_ABS_G0_S: return ":abs_g0_s:";
case VK_ABS_G0_NC: return ":abs_g0_nc:";
case VK_DTPREL_G2: return ":dtprel_g2:";
case VK_DTPREL_G1: return ":dtprel_g1:";
case VK_DTPREL_G1_NC: return ":dtprel_g1_nc:";
case VK_DTPREL_G0: return ":dtprel_g0:";
case VK_DTPREL_G0_NC: return ":dtprel_g0_nc:";
case VK_DTPREL_HI12: return ":dtprel_hi12:";
case VK_DTPREL_LO12: return ":dtprel_lo12:";
case VK_DTPREL_LO12_NC: return ":dtprel_lo12_nc:";
case VK_TPREL_G2: return ":tprel_g2:";
case VK_TPREL_G1: return ":tprel_g1:";
case VK_TPREL_G1_NC: return ":tprel_g1_nc:";
case VK_TPREL_G0: return ":tprel_g0:";
case VK_TPREL_G0_NC: return ":tprel_g0_nc:";
case VK_TPREL_HI12: return ":tprel_hi12:";
case VK_TPREL_LO12: return ":tprel_lo12:";
case VK_TPREL_LO12_NC: return ":tprel_lo12_nc:";
case VK_TLSDESC_LO12: return ":tlsdesc_lo12:";
case VK_ABS_PAGE: return "";
case VK_ABS_PAGE_NC: return ":pg_hi21_nc:";
case VK_GOT_PAGE: return ":got:";
case VK_GOT_LO12: return ":got_lo12:";
case VK_GOTTPREL_PAGE: return ":gottprel:";
case VK_GOTTPREL_LO12_NC: return ":gottprel_lo12:";
case VK_GOTTPREL_G1: return ":gottprel_g1:";
case VK_GOTTPREL_G0_NC: return ":gottprel_g0_nc:";
case VK_TLSDESC: return "";
case VK_TLSDESC_PAGE: return ":tlsdesc:";
default:
llvm_unreachable("Invalid ELF symbol kind");
}
}
void AArch64MCExpr::printImpl(raw_ostream &OS, const MCAsmInfo *MAI) const {
if (getKind() != VK_NONE)
OS << getVariantKindName();
Expr->print(OS, MAI);
}
void AArch64MCExpr::visitUsedExpr(MCStreamer &Streamer) const {
Streamer.visitUsedExpr(*getSubExpr());
}
MCFragment *AArch64MCExpr::findAssociatedFragment() const {
llvm_unreachable("FIXME: what goes here?");
}
bool AArch64MCExpr::evaluateAsRelocatableImpl(MCValue &Res,
const MCAsmLayout *Layout,
const MCFixup *Fixup) const {
if (!getSubExpr()->evaluateAsRelocatable(Res, Layout, Fixup))
return false;
Res =
MCValue::get(Res.getSymA(), Res.getSymB(), Res.getConstant(), getKind());
return true;
}
static void fixELFSymbolsInTLSFixupsImpl(const MCExpr *Expr, MCAssembler &Asm) {
switch (Expr->getKind()) {
case MCExpr::Target:
llvm_unreachable("Can't handle nested target expression");
break;
case MCExpr::Constant:
break;
case MCExpr::Binary: {
const MCBinaryExpr *BE = cast<MCBinaryExpr>(Expr);
fixELFSymbolsInTLSFixupsImpl(BE->getLHS(), Asm);
fixELFSymbolsInTLSFixupsImpl(BE->getRHS(), Asm);
break;
}
case MCExpr::SymbolRef: {
// We're known to be under a TLS fixup, so any symbol should be
// modified. There should be only one.
const MCSymbolRefExpr &SymRef = *cast<MCSymbolRefExpr>(Expr);
cast<MCSymbolELF>(SymRef.getSymbol()).setType(ELF::STT_TLS);
break;
}
case MCExpr::Unary:
fixELFSymbolsInTLSFixupsImpl(cast<MCUnaryExpr>(Expr)->getSubExpr(), Asm);
break;
}
}
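// For example, in "movz x0, #:tprel_g1:tls_var" the reference to tls_var is
// TPREL-flavoured, so the symbol is marked STT_TLS below; plain :lo12: or
// :got: references are left untouched.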
void AArch64MCExpr::fixELFSymbolsInTLSFixups(MCAssembler &Asm) const {
switch (getSymbolLoc(Kind)) {
default:
return;
case VK_DTPREL:
case VK_GOTTPREL:
case VK_TPREL:
case VK_TLSDESC:
break;
}
fixELFSymbolsInTLSFixupsImpl(getSubExpr(), Asm);
}


@@ -0,0 +1,168 @@
//=--- AArch64MCExpr.h - AArch64 specific MC expression classes ---*- C++ -*-=//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file describes AArch64-specific MCExprs, used for modifiers like
// ":lo12:" or ":gottprel_g1:".
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_LIB_TARGET_AARCH64_MCTARGETDESC_AARCH64MCEXPR_H
#define LLVM_LIB_TARGET_AARCH64_MCTARGETDESC_AARCH64MCEXPR_H
#include "llvm/MC/MCExpr.h"
#include "llvm/Support/ErrorHandling.h"
namespace llvm {
class AArch64MCExpr : public MCTargetExpr {
public:
enum VariantKind {
VK_NONE = 0x000,
// Symbol locations specifying (roughly speaking) what calculation should be
// performed to construct the final address for the relocated
// symbol. E.g. direct, via the GOT, ...
VK_ABS = 0x001,
VK_SABS = 0x002,
VK_GOT = 0x003,
VK_DTPREL = 0x004,
VK_GOTTPREL = 0x005,
VK_TPREL = 0x006,
VK_TLSDESC = 0x007,
VK_SymLocBits = 0x00f,
// Variants specifying which part of the final address calculation is
// used. E.g. the low 12 bits for an ADD/LDR, the middle 16 bits for a
// MOVZ/MOVK.
VK_PAGE = 0x010,
VK_PAGEOFF = 0x020,
VK_HI12 = 0x030,
VK_G0 = 0x040,
VK_G1 = 0x050,
VK_G2 = 0x060,
VK_G3 = 0x070,
VK_AddressFragBits = 0x0f0,
// Whether the final relocation is a checked one (where a linker should
// perform a range-check on the final address) or not. Note that this field
// is unfortunately sometimes omitted from the assembly syntax. E.g. :lo12:
// on its own is a non-checked relocation. We side with ELF on being
// explicit about this!
VK_NC = 0x100,
// Convenience definitions for referring to specific textual representations
// of relocation specifiers. Note that this means the "_NC" is sometimes
// omitted in line with assembly syntax here (VK_LO12 rather than VK_LO12_NC
// since a user would write ":lo12:").
VK_CALL = VK_ABS,
VK_ABS_PAGE = VK_ABS | VK_PAGE,
VK_ABS_PAGE_NC = VK_ABS | VK_PAGE | VK_NC,
VK_ABS_G3 = VK_ABS | VK_G3,
VK_ABS_G2 = VK_ABS | VK_G2,
VK_ABS_G2_S = VK_SABS | VK_G2,
VK_ABS_G2_NC = VK_ABS | VK_G2 | VK_NC,
VK_ABS_G1 = VK_ABS | VK_G1,
VK_ABS_G1_S = VK_SABS | VK_G1,
VK_ABS_G1_NC = VK_ABS | VK_G1 | VK_NC,
VK_ABS_G0 = VK_ABS | VK_G0,
VK_ABS_G0_S = VK_SABS | VK_G0,
VK_ABS_G0_NC = VK_ABS | VK_G0 | VK_NC,
VK_LO12 = VK_ABS | VK_PAGEOFF | VK_NC,
VK_GOT_LO12 = VK_GOT | VK_PAGEOFF | VK_NC,
VK_GOT_PAGE = VK_GOT | VK_PAGE,
VK_DTPREL_G2 = VK_DTPREL | VK_G2,
VK_DTPREL_G1 = VK_DTPREL | VK_G1,
VK_DTPREL_G1_NC = VK_DTPREL | VK_G1 | VK_NC,
VK_DTPREL_G0 = VK_DTPREL | VK_G0,
VK_DTPREL_G0_NC = VK_DTPREL | VK_G0 | VK_NC,
VK_DTPREL_HI12 = VK_DTPREL | VK_HI12,
VK_DTPREL_LO12 = VK_DTPREL | VK_PAGEOFF,
VK_DTPREL_LO12_NC = VK_DTPREL | VK_PAGEOFF | VK_NC,
VK_GOTTPREL_PAGE = VK_GOTTPREL | VK_PAGE,
VK_GOTTPREL_LO12_NC = VK_GOTTPREL | VK_PAGEOFF | VK_NC,
VK_GOTTPREL_G1 = VK_GOTTPREL | VK_G1,
VK_GOTTPREL_G0_NC = VK_GOTTPREL | VK_G0 | VK_NC,
VK_TPREL_G2 = VK_TPREL | VK_G2,
VK_TPREL_G1 = VK_TPREL | VK_G1,
VK_TPREL_G1_NC = VK_TPREL | VK_G1 | VK_NC,
VK_TPREL_G0 = VK_TPREL | VK_G0,
VK_TPREL_G0_NC = VK_TPREL | VK_G0 | VK_NC,
VK_TPREL_HI12 = VK_TPREL | VK_HI12,
VK_TPREL_LO12 = VK_TPREL | VK_PAGEOFF,
VK_TPREL_LO12_NC = VK_TPREL | VK_PAGEOFF | VK_NC,
VK_TLSDESC_LO12 = VK_TLSDESC | VK_PAGEOFF,
VK_TLSDESC_PAGE = VK_TLSDESC | VK_PAGE,
VK_INVALID = 0xfff
};
private:
const MCExpr *Expr;
const VariantKind Kind;
explicit AArch64MCExpr(const MCExpr *Expr, VariantKind Kind)
: Expr(Expr), Kind(Kind) {}
public:
/// @name Construction
/// @{
static const AArch64MCExpr *create(const MCExpr *Expr, VariantKind Kind,
MCContext &Ctx);
/// @}
/// @name Accessors
/// @{
/// Get the kind of this expression.
VariantKind getKind() const { return Kind; }
/// Get the expression this modifier applies to.
const MCExpr *getSubExpr() const { return Expr; }
/// @}
/// @name VariantKind information extractors.
/// @{
static VariantKind getSymbolLoc(VariantKind Kind) {
return static_cast<VariantKind>(Kind & VK_SymLocBits);
}
static VariantKind getAddressFrag(VariantKind Kind) {
return static_cast<VariantKind>(Kind & VK_AddressFragBits);
}
static bool isNotChecked(VariantKind Kind) { return Kind & VK_NC; }
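/// For example, given VK_TPREL_LO12_NC (= VK_TPREL | VK_PAGEOFF | VK_NC),
/// getSymbolLoc() returns VK_TPREL, getAddressFrag() returns VK_PAGEOFF and
/// isNotChecked() returns true.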
/// @}
/// Convert the variant kind into an ELF-appropriate modifier
/// (e.g. ":got:", ":lo12:").
StringRef getVariantKindName() const;
void printImpl(raw_ostream &OS, const MCAsmInfo *MAI) const override;
void visitUsedExpr(MCStreamer &Streamer) const override;
MCFragment *findAssociatedFragment() const override;
bool evaluateAsRelocatableImpl(MCValue &Res, const MCAsmLayout *Layout,
const MCFixup *Fixup) const override;
void fixELFSymbolsInTLSFixups(MCAssembler &Asm) const override;
static bool classof(const MCExpr *E) {
return E->getKind() == MCExpr::Target;
}
static bool classof(const AArch64MCExpr *) { return true; }
};
} // end namespace llvm
#endif


@@ -0,0 +1,181 @@
//===-- AArch64MCTargetDesc.cpp - AArch64 Target Descriptions ---*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file provides AArch64 specific target descriptions.
//
//===----------------------------------------------------------------------===//
#include "AArch64MCTargetDesc.h"
#include "AArch64ELFStreamer.h"
#include "AArch64MCAsmInfo.h"
#include "AArch64WinCOFFStreamer.h"
#include "InstPrinter/AArch64InstPrinter.h"
#include "llvm/MC/MCAsmBackend.h"
#include "llvm/MC/MCCodeEmitter.h"
#include "llvm/MC/MCInstrAnalysis.h"
#include "llvm/MC/MCInstrInfo.h"
#include "llvm/MC/MCRegisterInfo.h"
#include "llvm/MC/MCStreamer.h"
#include "llvm/MC/MCSubtargetInfo.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/TargetRegistry.h"
using namespace llvm;
#define GET_INSTRINFO_MC_DESC
#include "AArch64GenInstrInfo.inc"
#define GET_SUBTARGETINFO_MC_DESC
#include "AArch64GenSubtargetInfo.inc"
#define GET_REGINFO_MC_DESC
#include "AArch64GenRegisterInfo.inc"
static MCInstrInfo *createAArch64MCInstrInfo() {
MCInstrInfo *X = new MCInstrInfo();
InitAArch64MCInstrInfo(X);
return X;
}
static MCSubtargetInfo *
createAArch64MCSubtargetInfo(const Triple &TT, StringRef CPU, StringRef FS) {
if (CPU.empty())
CPU = "generic";
return createAArch64MCSubtargetInfoImpl(TT, CPU, FS);
}
void AArch64_MC::initLLVMToCVRegMapping(MCRegisterInfo *MRI) {
for (unsigned Reg = AArch64::NoRegister + 1;
Reg < AArch64::NUM_TARGET_REGS; ++Reg) {
unsigned CV = MRI->getEncodingValue(Reg);
MRI->mapLLVMRegToCVReg(Reg, CV);
}
}
static MCRegisterInfo *createAArch64MCRegisterInfo(const Triple &Triple) {
MCRegisterInfo *X = new MCRegisterInfo();
InitAArch64MCRegisterInfo(X, AArch64::LR);
AArch64_MC::initLLVMToCVRegMapping(X);
return X;
}
static MCAsmInfo *createAArch64MCAsmInfo(const MCRegisterInfo &MRI,
const Triple &TheTriple) {
MCAsmInfo *MAI;
if (TheTriple.isOSBinFormatMachO())
MAI = new AArch64MCAsmInfoDarwin();
else if (TheTriple.isWindowsMSVCEnvironment())
MAI = new AArch64MCAsmInfoMicrosoftCOFF();
else if (TheTriple.isOSBinFormatCOFF())
MAI = new AArch64MCAsmInfoGNUCOFF();
else {
assert(TheTriple.isOSBinFormatELF() && "Invalid target");
MAI = new AArch64MCAsmInfoELF(TheTriple);
}
// Initial state of the frame pointer is SP.
unsigned Reg = MRI.getDwarfRegNum(AArch64::SP, true);
MCCFIInstruction Inst = MCCFIInstruction::createDefCfa(nullptr, Reg, 0);
MAI->addInitialFrameState(Inst);
return MAI;
}
static MCInstPrinter *createAArch64MCInstPrinter(const Triple &T,
unsigned SyntaxVariant,
const MCAsmInfo &MAI,
const MCInstrInfo &MII,
const MCRegisterInfo &MRI) {
if (SyntaxVariant == 0)
return new AArch64InstPrinter(MAI, MII, MRI);
if (SyntaxVariant == 1)
return new AArch64AppleInstPrinter(MAI, MII, MRI);
return nullptr;
}
static MCStreamer *createELFStreamer(const Triple &T, MCContext &Ctx,
std::unique_ptr<MCAsmBackend> &&TAB,
raw_pwrite_stream &OS,
std::unique_ptr<MCCodeEmitter> &&Emitter,
bool RelaxAll) {
return createAArch64ELFStreamer(Ctx, std::move(TAB), OS, std::move(Emitter),
RelaxAll);
}
static MCStreamer *createMachOStreamer(MCContext &Ctx,
std::unique_ptr<MCAsmBackend> &&TAB,
raw_pwrite_stream &OS,
std::unique_ptr<MCCodeEmitter> &&Emitter,
bool RelaxAll,
bool DWARFMustBeAtTheEnd) {
return createMachOStreamer(Ctx, std::move(TAB), OS, std::move(Emitter),
RelaxAll, DWARFMustBeAtTheEnd,
/*LabelSections*/ true);
}
static MCStreamer *
createWinCOFFStreamer(MCContext &Ctx, std::unique_ptr<MCAsmBackend> &&TAB,
raw_pwrite_stream &OS,
std::unique_ptr<MCCodeEmitter> &&Emitter, bool RelaxAll,
bool IncrementalLinkerCompatible) {
return createAArch64WinCOFFStreamer(Ctx, std::move(TAB), OS,
std::move(Emitter), RelaxAll,
IncrementalLinkerCompatible);
}
static MCInstrAnalysis *createAArch64InstrAnalysis(const MCInstrInfo *Info) {
return new MCInstrAnalysis(Info);
}
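// Everything registered below is retrieved later through the TargetRegistry;
// e.g. a client typically calls TargetRegistry::lookupTarget(...) for an
// "aarch64" triple and then uses the returned Target's create* hooks
// (createMCAsmInfo, createMCInstrInfo, ...) to obtain these objects.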
// Force static initialization.
extern "C" void LLVMInitializeAArch64TargetMC() {
for (Target *T : {&getTheAArch64leTarget(), &getTheAArch64beTarget(),
&getTheARM64Target()}) {
// Register the MC asm info.
RegisterMCAsmInfoFn X(*T, createAArch64MCAsmInfo);
// Register the MC instruction info.
TargetRegistry::RegisterMCInstrInfo(*T, createAArch64MCInstrInfo);
// Register the MC register info.
TargetRegistry::RegisterMCRegInfo(*T, createAArch64MCRegisterInfo);
// Register the MC subtarget info.
TargetRegistry::RegisterMCSubtargetInfo(*T, createAArch64MCSubtargetInfo);
// Register the MC instruction analyzer.
TargetRegistry::RegisterMCInstrAnalysis(*T, createAArch64InstrAnalysis);
// Register the MC Code Emitter
TargetRegistry::RegisterMCCodeEmitter(*T, createAArch64MCCodeEmitter);
// Register the obj streamers.
TargetRegistry::RegisterELFStreamer(*T, createELFStreamer);
TargetRegistry::RegisterMachOStreamer(*T, createMachOStreamer);
TargetRegistry::RegisterCOFFStreamer(*T, createWinCOFFStreamer);
// Register the obj target streamer.
TargetRegistry::RegisterObjectTargetStreamer(
*T, createAArch64ObjectTargetStreamer);
// Register the asm streamer.
TargetRegistry::RegisterAsmTargetStreamer(*T,
createAArch64AsmTargetStreamer);
// Register the MCInstPrinter.
TargetRegistry::RegisterMCInstPrinter(*T, createAArch64MCInstPrinter);
}
// Register the asm backend.
for (Target *T : {&getTheAArch64leTarget(), &getTheARM64Target()})
TargetRegistry::RegisterMCAsmBackend(*T, createAArch64leAsmBackend);
TargetRegistry::RegisterMCAsmBackend(getTheAArch64beTarget(),
createAArch64beAsmBackend);
}


@@ -0,0 +1,95 @@
//===-- AArch64MCTargetDesc.h - AArch64 Target Descriptions -----*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file provides AArch64 specific target descriptions.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_LIB_TARGET_AARCH64_MCTARGETDESC_AARCH64MCTARGETDESC_H
#define LLVM_LIB_TARGET_AARCH64_MCTARGETDESC_AARCH64MCTARGETDESC_H
#include "llvm/Support/DataTypes.h"
#include <memory>
namespace llvm {
class formatted_raw_ostream;
class MCAsmBackend;
class MCCodeEmitter;
class MCContext;
class MCInstrInfo;
class MCInstPrinter;
class MCRegisterInfo;
class MCObjectWriter;
class MCStreamer;
class MCSubtargetInfo;
class MCTargetOptions;
class MCTargetStreamer;
class StringRef;
class Target;
class Triple;
class raw_ostream;
class raw_pwrite_stream;
Target &getTheAArch64leTarget();
Target &getTheAArch64beTarget();
Target &getTheARM64Target();
MCCodeEmitter *createAArch64MCCodeEmitter(const MCInstrInfo &MCII,
const MCRegisterInfo &MRI,
MCContext &Ctx);
MCAsmBackend *createAArch64leAsmBackend(const Target &T,
const MCSubtargetInfo &STI,
const MCRegisterInfo &MRI,
const MCTargetOptions &Options);
MCAsmBackend *createAArch64beAsmBackend(const Target &T,
const MCSubtargetInfo &STI,
const MCRegisterInfo &MRI,
const MCTargetOptions &Options);
std::unique_ptr<MCObjectWriter>
createAArch64ELFObjectWriter(raw_pwrite_stream &OS, uint8_t OSABI,
bool IsLittleEndian, bool IsILP32);
std::unique_ptr<MCObjectWriter>
createAArch64MachObjectWriter(raw_pwrite_stream &OS, uint32_t CPUType,
uint32_t CPUSubtype);
std::unique_ptr<MCObjectWriter>
createAArch64WinCOFFObjectWriter(raw_pwrite_stream &OS);
MCTargetStreamer *createAArch64AsmTargetStreamer(MCStreamer &S,
formatted_raw_ostream &OS,
MCInstPrinter *InstPrint,
bool isVerboseAsm);
MCTargetStreamer *createAArch64ObjectTargetStreamer(MCStreamer &S,
const MCSubtargetInfo &STI);
namespace AArch64_MC {
void initLLVMToCVRegMapping(MCRegisterInfo *MRI);
}
} // End llvm namespace
// Defines symbolic names for AArch64 registers. This defines a mapping from
// register name to register number.
//
#define GET_REGINFO_ENUM
#include "AArch64GenRegisterInfo.inc"
// Defines symbolic names for the AArch64 instructions.
//
#define GET_INSTRINFO_ENUM
#include "AArch64GenInstrInfo.inc"
#define GET_SUBTARGETINFO_ENUM
#include "AArch64GenSubtargetInfo.inc"
#endif


@@ -0,0 +1,439 @@
//===-- AArch64MachObjectWriter.cpp - ARM Mach Object Writer --------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
#include "MCTargetDesc/AArch64FixupKinds.h"
#include "MCTargetDesc/AArch64MCTargetDesc.h"
#include "llvm/ADT/Twine.h"
#include "llvm/BinaryFormat/MachO.h"
#include "llvm/MC/MCAsmInfo.h"
#include "llvm/MC/MCAsmLayout.h"
#include "llvm/MC/MCAssembler.h"
#include "llvm/MC/MCContext.h"
#include "llvm/MC/MCExpr.h"
#include "llvm/MC/MCFixup.h"
#include "llvm/MC/MCFragment.h"
#include "llvm/MC/MCMachObjectWriter.h"
#include "llvm/MC/MCSection.h"
#include "llvm/MC/MCSectionMachO.h"
#include "llvm/MC/MCSymbol.h"
#include "llvm/MC/MCValue.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/MathExtras.h"
#include <cassert>
#include <cstdint>
using namespace llvm;
namespace {
class AArch64MachObjectWriter : public MCMachObjectTargetWriter {
bool getAArch64FixupKindMachOInfo(const MCFixup &Fixup, unsigned &RelocType,
const MCSymbolRefExpr *Sym,
unsigned &Log2Size, const MCAssembler &Asm);
public:
AArch64MachObjectWriter(uint32_t CPUType, uint32_t CPUSubtype)
: MCMachObjectTargetWriter(true /* is64Bit */, CPUType, CPUSubtype) {}
void recordRelocation(MachObjectWriter *Writer, MCAssembler &Asm,
const MCAsmLayout &Layout, const MCFragment *Fragment,
const MCFixup &Fixup, MCValue Target,
uint64_t &FixedValue) override;
};
} // end anonymous namespace
bool AArch64MachObjectWriter::getAArch64FixupKindMachOInfo(
const MCFixup &Fixup, unsigned &RelocType, const MCSymbolRefExpr *Sym,
unsigned &Log2Size, const MCAssembler &Asm) {
RelocType = unsigned(MachO::ARM64_RELOC_UNSIGNED);
Log2Size = ~0U;
switch ((unsigned)Fixup.getKind()) {
default:
return false;
case FK_Data_1:
Log2Size = Log2_32(1);
return true;
case FK_Data_2:
Log2Size = Log2_32(2);
return true;
case FK_Data_4:
Log2Size = Log2_32(4);
if (Sym->getKind() == MCSymbolRefExpr::VK_GOT)
RelocType = unsigned(MachO::ARM64_RELOC_POINTER_TO_GOT);
return true;
case FK_Data_8:
Log2Size = Log2_32(8);
if (Sym->getKind() == MCSymbolRefExpr::VK_GOT)
RelocType = unsigned(MachO::ARM64_RELOC_POINTER_TO_GOT);
return true;
case AArch64::fixup_aarch64_add_imm12:
case AArch64::fixup_aarch64_ldst_imm12_scale1:
case AArch64::fixup_aarch64_ldst_imm12_scale2:
case AArch64::fixup_aarch64_ldst_imm12_scale4:
case AArch64::fixup_aarch64_ldst_imm12_scale8:
case AArch64::fixup_aarch64_ldst_imm12_scale16:
Log2Size = Log2_32(4);
switch (Sym->getKind()) {
default:
return false;
case MCSymbolRefExpr::VK_PAGEOFF:
RelocType = unsigned(MachO::ARM64_RELOC_PAGEOFF12);
return true;
case MCSymbolRefExpr::VK_GOTPAGEOFF:
RelocType = unsigned(MachO::ARM64_RELOC_GOT_LOAD_PAGEOFF12);
return true;
case MCSymbolRefExpr::VK_TLVPPAGEOFF:
RelocType = unsigned(MachO::ARM64_RELOC_TLVP_LOAD_PAGEOFF12);
return true;
}
case AArch64::fixup_aarch64_pcrel_adrp_imm21:
Log2Size = Log2_32(4);
// This encompasses the relocation for the whole 21-bit value.
switch (Sym->getKind()) {
default:
Asm.getContext().reportError(Fixup.getLoc(),
"ADR/ADRP relocations must be GOT relative");
return false;
case MCSymbolRefExpr::VK_PAGE:
RelocType = unsigned(MachO::ARM64_RELOC_PAGE21);
return true;
case MCSymbolRefExpr::VK_GOTPAGE:
RelocType = unsigned(MachO::ARM64_RELOC_GOT_LOAD_PAGE21);
return true;
case MCSymbolRefExpr::VK_TLVPPAGE:
RelocType = unsigned(MachO::ARM64_RELOC_TLVP_LOAD_PAGE21);
return true;
}
return true;
case AArch64::fixup_aarch64_pcrel_branch26:
case AArch64::fixup_aarch64_pcrel_call26:
Log2Size = Log2_32(4);
RelocType = unsigned(MachO::ARM64_RELOC_BRANCH26);
return true;
}
}
static bool canUseLocalRelocation(const MCSectionMachO &Section,
const MCSymbol &Symbol, unsigned Log2Size) {
// Debug info sections can use local relocations.
if (Section.hasAttribute(MachO::S_ATTR_DEBUG))
return true;
// Otherwise, only pointer sized relocations are supported.
if (Log2Size != 3)
return false;
// But only if they don't point to a few forbidden sections.
if (!Symbol.isInSection())
return true;
const MCSectionMachO &RefSec = cast<MCSectionMachO>(Symbol.getSection());
if (RefSec.getType() == MachO::S_CSTRING_LITERALS)
return false;
if (RefSec.getSegmentName() == "__DATA" &&
RefSec.getSectionName() == "__objc_classrefs")
return false;
// FIXME: ld64 currently handles internal pointer-sized relocations
// incorrectly (applying the addend twice). We should be able to return true
// unconditionally by this point when that's fixed.
return false;
}
void AArch64MachObjectWriter::recordRelocation(
MachObjectWriter *Writer, MCAssembler &Asm, const MCAsmLayout &Layout,
const MCFragment *Fragment, const MCFixup &Fixup, MCValue Target,
uint64_t &FixedValue) {
unsigned IsPCRel = Writer->isFixupKindPCRel(Asm, Fixup.getKind());
// See <reloc.h>.
uint32_t FixupOffset = Layout.getFragmentOffset(Fragment);
unsigned Log2Size = 0;
int64_t Value = 0;
unsigned Index = 0;
unsigned Type = 0;
unsigned Kind = Fixup.getKind();
const MCSymbol *RelSymbol = nullptr;
FixupOffset += Fixup.getOffset();
// AArch64 pcrel relocation addends do not include the section offset.
if (IsPCRel)
FixedValue += FixupOffset;
// ADRP fixups use relocations for the whole symbol value and only
// put the addend in the instruction itself. Clear out any value the
// generic code figured out from the symbol definition.
if (Kind == AArch64::fixup_aarch64_pcrel_adrp_imm21)
FixedValue = 0;
// imm19 relocations are for conditional branches, which require
// assembler local symbols. If we got here, that's not what we have,
// so complain loudly.
if (Kind == AArch64::fixup_aarch64_pcrel_branch19) {
Asm.getContext().reportError(Fixup.getLoc(),
"conditional branch requires assembler-local"
" label. '" +
Target.getSymA()->getSymbol().getName() +
"' is external.");
return;
}
// 14-bit branch relocations should only target internal labels, and so
// should never get here.
if (Kind == AArch64::fixup_aarch64_pcrel_branch14) {
Asm.getContext().reportError(Fixup.getLoc(),
"Invalid relocation on conditional branch!");
return;
}
if (!getAArch64FixupKindMachOInfo(Fixup, Type, Target.getSymA(), Log2Size,
Asm)) {
Asm.getContext().reportError(Fixup.getLoc(), "unknown AArch64 fixup kind!");
return;
}
Value = Target.getConstant();
if (Target.isAbsolute()) { // constant
// FIXME: Should this always be extern?
// SymbolNum of 0 indicates the absolute section.
Type = MachO::ARM64_RELOC_UNSIGNED;
if (IsPCRel) {
Asm.getContext().reportError(Fixup.getLoc(),
"PC relative absolute relocation!");
return;
// FIXME: x86_64 sets the type to a branch reloc here. Should we do
// something similar?
}
} else if (Target.getSymB()) { // A - B + constant
const MCSymbol *A = &Target.getSymA()->getSymbol();
const MCSymbol *A_Base = Asm.getAtom(*A);
const MCSymbol *B = &Target.getSymB()->getSymbol();
const MCSymbol *B_Base = Asm.getAtom(*B);
// Check for "_foo@got - .", which comes through here as:
// Ltmp0:
// ... _foo@got - Ltmp0
if (Target.getSymA()->getKind() == MCSymbolRefExpr::VK_GOT &&
Target.getSymB()->getKind() == MCSymbolRefExpr::VK_None &&
Layout.getSymbolOffset(*B) ==
Layout.getFragmentOffset(Fragment) + Fixup.getOffset()) {
// SymB is the PC, so use a PC-rel pointer-to-GOT relocation.
Type = MachO::ARM64_RELOC_POINTER_TO_GOT;
IsPCRel = 1;
MachO::any_relocation_info MRE;
MRE.r_word0 = FixupOffset;
MRE.r_word1 = (IsPCRel << 24) | (Log2Size << 25) | (Type << 28);
Writer->addRelocation(A_Base, Fragment->getParent(), MRE);
return;
} else if (Target.getSymA()->getKind() != MCSymbolRefExpr::VK_None ||
Target.getSymB()->getKind() != MCSymbolRefExpr::VK_None) {
// Otherwise, neither symbol can be modified.
Asm.getContext().reportError(Fixup.getLoc(),
"unsupported relocation of modified symbol");
return;
}
// We don't support PCrel relocations of differences.
if (IsPCRel) {
Asm.getContext().reportError(Fixup.getLoc(),
"unsupported pc-relative relocation of "
"difference");
return;
}
// AArch64 always uses external relocations. If there is no symbol to use as
// a base address (a local symbol with no preceding non-local symbol),
// error out.
//
// FIXME: We should probably just synthesize an external symbol and use
// that.
if (!A_Base) {
Asm.getContext().reportError(
Fixup.getLoc(),
"unsupported relocation of local symbol '" + A->getName() +
"'. Must have non-local symbol earlier in section.");
return;
}
if (!B_Base) {
Asm.getContext().reportError(
Fixup.getLoc(),
"unsupported relocation of local symbol '" + B->getName() +
"'. Must have non-local symbol earlier in section.");
return;
}
if (A_Base == B_Base && A_Base) {
Asm.getContext().reportError(
Fixup.getLoc(), "unsupported relocation with identical base");
return;
}
Value += (!A->getFragment() ? 0 : Writer->getSymbolAddress(*A, Layout)) -
(!A_Base || !A_Base->getFragment() ? 0 : Writer->getSymbolAddress(
*A_Base, Layout));
Value -= (!B->getFragment() ? 0 : Writer->getSymbolAddress(*B, Layout)) -
(!B_Base || !B_Base->getFragment() ? 0 : Writer->getSymbolAddress(
*B_Base, Layout));
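// This path emits a pair of relocations: an ARM64_RELOC_UNSIGNED against
// A_Base here, followed by the matching ARM64_RELOC_SUBTRACTOR against
// B_Base from the common emission code below, from which the linker
// reconstructs A - B + constant.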
Type = MachO::ARM64_RELOC_UNSIGNED;
MachO::any_relocation_info MRE;
MRE.r_word0 = FixupOffset;
MRE.r_word1 = (IsPCRel << 24) | (Log2Size << 25) | (Type << 28);
Writer->addRelocation(A_Base, Fragment->getParent(), MRE);
RelSymbol = B_Base;
Type = MachO::ARM64_RELOC_SUBTRACTOR;
} else { // A + constant
const MCSymbol *Symbol = &Target.getSymA()->getSymbol();
const MCSectionMachO &Section =
static_cast<const MCSectionMachO &>(*Fragment->getParent());
bool CanUseLocalRelocation =
canUseLocalRelocation(Section, *Symbol, Log2Size);
if (Symbol->isTemporary() && (Value || !CanUseLocalRelocation)) {
const MCSection &Sec = Symbol->getSection();
if (!Asm.getContext().getAsmInfo()->isSectionAtomizableBySymbols(Sec))
Symbol->setUsedInReloc();
}
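// The atom is the nearest preceding non-temporary symbol in the section;
// Mach-O expresses external relocations against that atom rather than the
// local label itself.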
const MCSymbol *Base = Asm.getAtom(*Symbol);
// If the symbol is a variable and we weren't able to get a Base for it
// (i.e., it's not in the symbol table associated with a section),
// resolve the relocation based on its expansion instead.
if (Symbol->isVariable() && !Base) {
// If the evaluation is an absolute value, just use that directly
// to keep things easy.
int64_t Res;
if (Symbol->getVariableValue()->evaluateAsAbsolute(
Res, Layout, Writer->getSectionAddressMap())) {
FixedValue = Res;
return;
}
// FIXME: Will the Target we already have ever have any data in it
// we need to preserve and merge with the new Target? How about
// the FixedValue?
if (!Symbol->getVariableValue()->evaluateAsRelocatable(Target, &Layout,
&Fixup)) {
Asm.getContext().reportError(Fixup.getLoc(),
"unable to resolve variable '" +
Symbol->getName() + "'");
return;
}
return recordRelocation(Writer, Asm, Layout, Fragment, Fixup, Target,
FixedValue);
}
// Relocations inside debug sections always use local relocations when
// possible. This seems to be done because the debugger doesn't fully
// understand relocation entries and expects to find values that
// have already been fixed up.
if (Symbol->isInSection()) {
if (Section.hasAttribute(MachO::S_ATTR_DEBUG))
Base = nullptr;
}
// AArch64 uses external relocations as much as possible. For debug
// sections, and for pointer-sized relocations (.quad), we allow section
// relocations. It's code sections that run into trouble.
if (Base) {
RelSymbol = Base;
// Add the local offset, if needed.
if (Base != Symbol)
Value +=
Layout.getSymbolOffset(*Symbol) - Layout.getSymbolOffset(*Base);
} else if (Symbol->isInSection()) {
if (!CanUseLocalRelocation) {
Asm.getContext().reportError(
Fixup.getLoc(),
"unsupported relocation of local symbol '" + Symbol->getName() +
"'. Must have non-local symbol earlier in section.");
return;
}
// Adjust the relocation to be section-relative.
// The index is the section ordinal (1-based).
const MCSection &Sec = Symbol->getSection();
Index = Sec.getOrdinal() + 1;
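// Fold the symbol's address into the addend so the section-relative entry
// resolves to the intended location; for PC-relative fixups, also subtract
// the address just past the fixup field (fragment address + fixup offset +
// fixup size).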
Value += Writer->getSymbolAddress(*Symbol, Layout);
if (IsPCRel)
Value -= Writer->getFragmentAddress(Fragment, Layout) +
Fixup.getOffset() + (1ULL << Log2Size);
} else {
// Resolve constant variables.
if (Symbol->isVariable()) {
int64_t Res;
if (Symbol->getVariableValue()->evaluateAsAbsolute(
Res, Layout, Writer->getSectionAddressMap())) {
FixedValue = Res;
return;
}
}
Asm.getContext().reportError(Fixup.getLoc(),
"unsupported relocation of variable '" +
Symbol->getName() + "'");
return;
}
}
// If the relocation kind is Branch26, Page21, or Pageoff12, any addend
// is represented via an Addend relocation, not encoded directly into
// the instruction.
if ((Type == MachO::ARM64_RELOC_BRANCH26 ||
Type == MachO::ARM64_RELOC_PAGE21 ||
Type == MachO::ARM64_RELOC_PAGEOFF12) &&
Value) {
assert((Value & 0xff000000) == 0 && "Addend out of range for ADDEND relocation!");
MachO::any_relocation_info MRE;
MRE.r_word0 = FixupOffset;
MRE.r_word1 =
(Index << 0) | (IsPCRel << 24) | (Log2Size << 25) | (Type << 28);
Writer->addRelocation(RelSymbol, Fragment->getParent(), MRE);
// Now set up the Addend relocation.
Type = MachO::ARM64_RELOC_ADDEND;
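// ARM64_RELOC_ADDEND carries no symbol; the addend value itself lives in
// the relocation's symbol-index field, so stash it in Index.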
Index = Value;
RelSymbol = nullptr;
IsPCRel = 0;
Log2Size = 2;
// Put zero into the instruction itself. The addend is in the relocation.
Value = 0;
}
// If there's any addend left to handle, encode it in the instruction.
FixedValue = Value;
// struct relocation_info (8 bytes)
MachO::any_relocation_info MRE;
MRE.r_word0 = FixupOffset;
MRE.r_word1 =
(Index << 0) | (IsPCRel << 24) | (Log2Size << 25) | (Type << 28);
Writer->addRelocation(RelSymbol, Fragment->getParent(), MRE);
}
std::unique_ptr<MCObjectWriter>
llvm::createAArch64MachObjectWriter(raw_pwrite_stream &OS, uint32_t CPUType,
uint32_t CPUSubtype) {
return createMachObjectWriter(
llvm::make_unique<AArch64MachObjectWriter>(CPUType, CPUSubtype), OS,
/*IsLittleEndian=*/true);
}


@@ -0,0 +1,42 @@
//===- AArch64TargetStreamer.cpp - AArch64TargetStreamer class ------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements the AArch64TargetStreamer class.
//
//===----------------------------------------------------------------------===//
#include "AArch64TargetStreamer.h"
#include "llvm/MC/ConstantPools.h"
using namespace llvm;
//
// AArch64TargetStreamer Implementation
//
AArch64TargetStreamer::AArch64TargetStreamer(MCStreamer &S)
: MCTargetStreamer(S), ConstantPools(new AssemblerConstantPools()) {}
AArch64TargetStreamer::~AArch64TargetStreamer() = default;
// The constant pool handling is shared by all AArch64TargetStreamer
// implementations.
const MCExpr *AArch64TargetStreamer::addConstantPoolEntry(const MCExpr *Expr,
unsigned Size,
SMLoc Loc) {
return ConstantPools->addEntry(Streamer, Expr, Size, Loc);
}
void AArch64TargetStreamer::emitCurrentConstantPool() {
ConstantPools->emitForCurrentSection(Streamer);
}
// finish() - write out any non-empty assembler constant pools.
void AArch64TargetStreamer::finish() { ConstantPools->emitAll(Streamer); }
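// Default .inst handling is a no-op; object-emitting subclasses (e.g. the
// ELF target streamer) override this to encode the raw 32-bit instruction.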
void AArch64TargetStreamer::emitInst(uint32_t Inst) {}


@@ -0,0 +1,42 @@
//===-- AArch64TargetStreamer.h - AArch64 Target Streamer ------*- C++ -*--===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_LIB_TARGET_AARCH64_MCTARGETDESC_AARCH64TARGETSTREAMER_H
#define LLVM_LIB_TARGET_AARCH64_MCTARGETDESC_AARCH64TARGETSTREAMER_H
#include "llvm/MC/MCStreamer.h"
namespace llvm {
class AArch64TargetStreamer : public MCTargetStreamer {
public:
AArch64TargetStreamer(MCStreamer &S);
~AArch64TargetStreamer() override;
void finish() override;
/// Callback used to implement the ldr= pseudo.
/// Add a new entry to the constant pool for the current section and return an
/// MCExpr that can be used to refer to the constant pool location.
const MCExpr *addConstantPoolEntry(const MCExpr *, unsigned Size, SMLoc Loc);
/// Callback used to implement the .ltorg directive.
/// Emit contents of constant pool for the current section.
void emitCurrentConstantPool();
/// Callback used to implement the .inst directive.
virtual void emitInst(uint32_t Inst);
private:
std::unique_ptr<AssemblerConstantPools> ConstantPools;
};
} // end namespace llvm
#endif


@@ -0,0 +1,106 @@
//= AArch64WinCOFFObjectWriter.cpp - AArch64 Windows COFF Object Writer C++ =//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===---------------------------------------------------------------------===//
#include "MCTargetDesc/AArch64FixupKinds.h"
#include "llvm/ADT/Twine.h"
#include "llvm/BinaryFormat/COFF.h"
#include "llvm/MC/MCAsmBackend.h"
#include "llvm/MC/MCExpr.h"
#include "llvm/MC/MCFixup.h"
#include "llvm/MC/MCFixupKindInfo.h"
#include "llvm/MC/MCObjectWriter.h"
#include "llvm/MC/MCValue.h"
#include "llvm/MC/MCWinCOFFObjectWriter.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"
#include <cassert>
using namespace llvm;
namespace {
class AArch64WinCOFFObjectWriter : public MCWinCOFFObjectTargetWriter {
public:
AArch64WinCOFFObjectWriter()
: MCWinCOFFObjectTargetWriter(COFF::IMAGE_FILE_MACHINE_ARM64) {}
~AArch64WinCOFFObjectWriter() override = default;
unsigned getRelocType(MCContext &Ctx, const MCValue &Target,
const MCFixup &Fixup, bool IsCrossSection,
const MCAsmBackend &MAB) const override;
bool recordRelocation(const MCFixup &) const override;
};
} // end anonymous namespace
unsigned AArch64WinCOFFObjectWriter::getRelocType(
MCContext &Ctx, const MCValue &Target, const MCFixup &Fixup,
bool IsCrossSection, const MCAsmBackend &MAB) const {
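// Select the IMAGE_REL_ARM64_* type from the fixup kind, refining plain
// data fixups by the symbol modifier (VK_COFF_IMGREL32 -> ADDR32NB,
// VK_SECREL -> SECREL).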
auto Modifier = Target.isAbsolute() ? MCSymbolRefExpr::VK_None
: Target.getSymA()->getKind();
switch (static_cast<unsigned>(Fixup.getKind())) {
default: {
const MCFixupKindInfo &Info = MAB.getFixupKindInfo(Fixup.getKind());
report_fatal_error(Twine("unsupported relocation type: ") + Info.Name);
}
case FK_Data_4:
switch (Modifier) {
default:
return COFF::IMAGE_REL_ARM64_ADDR32;
case MCSymbolRefExpr::VK_COFF_IMGREL32:
return COFF::IMAGE_REL_ARM64_ADDR32NB;
case MCSymbolRefExpr::VK_SECREL:
return COFF::IMAGE_REL_ARM64_SECREL;
}
case FK_Data_8:
return COFF::IMAGE_REL_ARM64_ADDR64;
case FK_SecRel_2:
return COFF::IMAGE_REL_ARM64_SECTION;
case FK_SecRel_4:
return COFF::IMAGE_REL_ARM64_SECREL;
case AArch64::fixup_aarch64_add_imm12:
return COFF::IMAGE_REL_ARM64_PAGEOFFSET_12A;
case AArch64::fixup_aarch64_ldst_imm12_scale1:
case AArch64::fixup_aarch64_ldst_imm12_scale2:
case AArch64::fixup_aarch64_ldst_imm12_scale4:
case AArch64::fixup_aarch64_ldst_imm12_scale8:
case AArch64::fixup_aarch64_ldst_imm12_scale16:
return COFF::IMAGE_REL_ARM64_PAGEOFFSET_12L;
case AArch64::fixup_aarch64_pcrel_adrp_imm21:
return COFF::IMAGE_REL_ARM64_PAGEBASE_REL21;
case AArch64::fixup_aarch64_pcrel_branch26:
case AArch64::fixup_aarch64_pcrel_call26:
return COFF::IMAGE_REL_ARM64_BRANCH26;
}
}
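// Record every fixup as a relocation; the generic WinCOFF writer consults
// this hook, and a target could return false here to suppress particular
// fixup kinds.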
bool AArch64WinCOFFObjectWriter::recordRelocation(const MCFixup &Fixup) const {
return true;
}
namespace llvm {
std::unique_ptr<MCObjectWriter>
createAArch64WinCOFFObjectWriter(raw_pwrite_stream &OS) {
auto MOTW = llvm::make_unique<AArch64WinCOFFObjectWriter>();
return createWinCOFFObjectWriter(std::move(MOTW), OS);
}
} // end namespace llvm


@@ -0,0 +1,48 @@
//===-- AArch64WinCOFFStreamer.cpp - AArch64 Target WinCOFF Streamer -*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
#include "AArch64WinCOFFStreamer.h"
#include "llvm/MC/MCAsmBackend.h"
#include "llvm/MC/MCCodeEmitter.h"
using namespace llvm;
namespace {
class AArch64WinCOFFStreamer : public MCWinCOFFStreamer {
public:
friend class AArch64TargetWinCOFFStreamer;
AArch64WinCOFFStreamer(MCContext &C, std::unique_ptr<MCAsmBackend> AB,
std::unique_ptr<MCCodeEmitter> CE,
raw_pwrite_stream &OS)
: MCWinCOFFStreamer(C, std::move(AB), std::move(CE), OS) {}
void FinishImpl() override;
};
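// Flush any queued frame information before handing off to the generic
// WinCOFF finalization.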
void AArch64WinCOFFStreamer::FinishImpl() {
EmitFrames(nullptr);
MCWinCOFFStreamer::FinishImpl();
}
} // end anonymous namespace
namespace llvm {
MCWinCOFFStreamer *createAArch64WinCOFFStreamer(
MCContext &Context, std::unique_ptr<MCAsmBackend> MAB,
raw_pwrite_stream &OS, std::unique_ptr<MCCodeEmitter> Emitter,
bool RelaxAll, bool IncrementalLinkerCompatible) {
auto *S = new AArch64WinCOFFStreamer(Context, std::move(MAB),
std::move(Emitter), OS);
S->getAssembler().setIncrementalLinkerCompatible(IncrementalLinkerCompatible);
return S;
}
} // end llvm namespace


@@ -0,0 +1,42 @@
//===-- AArch64WinCOFFStreamer.h - WinCOFF Streamer for AArch64 -*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements WinCOFF streamer information for the AArch64 backend.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_LIB_TARGET_AARCH64_MCTARGETDESC_AARCH64WINCOFFSTREAMER_H
#define LLVM_LIB_TARGET_AARCH64_MCTARGETDESC_AARCH64WINCOFFSTREAMER_H
#include "AArch64TargetStreamer.h"
#include "llvm/MC/MCWinCOFFStreamer.h"
namespace {
class AArch64WinCOFFStreamer;
class AArch64TargetWinCOFFStreamer : public llvm::AArch64TargetStreamer {
private:
AArch64WinCOFFStreamer &getStreamer();
public:
AArch64TargetWinCOFFStreamer(llvm::MCStreamer &S)
: AArch64TargetStreamer(S) {}
};
} // end anonymous namespace
namespace llvm {
MCWinCOFFStreamer *createAArch64WinCOFFStreamer(
MCContext &Context, std::unique_ptr<MCAsmBackend> TAB,
raw_pwrite_stream &OS, std::unique_ptr<MCCodeEmitter> Emitter,
bool RelaxAll, bool IncrementalLinkerCompatible);
} // end llvm namespace
#endif


@@ -0,0 +1,17 @@
add_llvm_library(LLVMAArch64Desc
AArch64AsmBackend.cpp
AArch64ELFObjectWriter.cpp
AArch64ELFStreamer.cpp
AArch64MCAsmInfo.cpp
AArch64MCCodeEmitter.cpp
AArch64MCExpr.cpp
AArch64MCTargetDesc.cpp
AArch64MachObjectWriter.cpp
AArch64TargetStreamer.cpp
AArch64WinCOFFObjectWriter.cpp
AArch64WinCOFFStreamer.cpp
)
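# The AArch64Gen*.inc headers included by these sources come from the shared
# AArch64CommonTableGen target, so make sure it runs first.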
add_dependencies(LLVMAArch64Desc AArch64CommonTableGen)
# Hack: we need to include 'main' target directory to grab private headers
include_directories(${CMAKE_CURRENT_SOURCE_DIR}/.. ${CMAKE_CURRENT_BINARY_DIR}/..)

Some files were not shown because too many files have changed in this diff.