Imported Upstream version 6.10.0.49

Former-commit-id: 1d6753294b2993e1fbf92de9366bb9544db4189b
Xamarin Public Jenkins (auto-signing)
2020-01-16 16:38:04 +00:00
parent d94e79959b
commit 468663ddbb
48518 changed files with 2789335 additions and 61176 deletions

View File

@@ -0,0 +1,157 @@
//===----- ABIInfo.h - ABI information access & encapsulation ---*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_CLANG_LIB_CODEGEN_ABIINFO_H
#define LLVM_CLANG_LIB_CODEGEN_ABIINFO_H
#include "clang/AST/CharUnits.h"
#include "clang/AST/Type.h"
#include "llvm/IR/CallingConv.h"
#include "llvm/IR/Type.h"
namespace llvm {
class Value;
class LLVMContext;
class DataLayout;
class Type;
}
namespace clang {
class ASTContext;
class CodeGenOptions;
class TargetInfo;
namespace CodeGen {
class ABIArgInfo;
class Address;
class CGCXXABI;
class CGFunctionInfo;
class CodeGenFunction;
class CodeGenTypes;
class SwiftABIInfo;
namespace swiftcall {
class SwiftAggLowering;
}
// FIXME: All of this stuff should be part of the target interface
// somehow. It is currently here because it is not clear how to factor
// the targets to support this, since the Targets currently live in a
// layer below the type system.
/// ABIInfo - Target specific hooks for defining how a type should be
/// passed or returned from functions.
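///
/// For illustration only (a hypothetical target, not one in this tree),
/// a subclass would implement the pure virtual hooks along these lines:
/// \code
///   class MyTargetABIInfo : public ABIInfo {
///   public:
///     MyTargetABIInfo(CodeGen::CodeGenTypes &CGT) : ABIInfo(CGT) {}
///     void computeInfo(CodeGen::CGFunctionInfo &FI) const override;
///     CodeGen::Address EmitVAArg(CodeGen::CodeGenFunction &CGF,
///                                CodeGen::Address VAListAddr,
///                                QualType Ty) const override;
///   };
/// \endcode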
class ABIInfo {
public:
CodeGen::CodeGenTypes &CGT;
protected:
llvm::CallingConv::ID RuntimeCC;
llvm::CallingConv::ID BuiltinCC;
public:
ABIInfo(CodeGen::CodeGenTypes &cgt)
: CGT(cgt),
RuntimeCC(llvm::CallingConv::C),
BuiltinCC(llvm::CallingConv::C) {}
virtual ~ABIInfo();
virtual bool supportsSwift() const { return false; }
CodeGen::CGCXXABI &getCXXABI() const;
ASTContext &getContext() const;
llvm::LLVMContext &getVMContext() const;
const llvm::DataLayout &getDataLayout() const;
const TargetInfo &getTarget() const;
const CodeGenOptions &getCodeGenOpts() const;
/// Return the calling convention to use for system runtime
/// functions.
llvm::CallingConv::ID getRuntimeCC() const {
return RuntimeCC;
}
/// Return the calling convention to use for compiler builtins
llvm::CallingConv::ID getBuiltinCC() const {
return BuiltinCC;
}
virtual void computeInfo(CodeGen::CGFunctionInfo &FI) const = 0;
/// EmitVAArg - Emit the target dependent code to load a value of
/// \arg Ty from the va_list pointed to by \arg VAListAddr.
// FIXME: This is a gaping layering violation if we wanted to drop
// the ABI information any lower than CodeGen. Of course, for
// VAArg handling it has to be at this level; there is no way to
// abstract this out.
virtual CodeGen::Address EmitVAArg(CodeGen::CodeGenFunction &CGF,
CodeGen::Address VAListAddr,
QualType Ty) const = 0;
bool isAndroid() const;
/// Emit the target dependent code to load a value of
/// \arg Ty from the \c __builtin_ms_va_list pointed to by \arg VAListAddr.
virtual CodeGen::Address EmitMSVAArg(CodeGen::CodeGenFunction &CGF,
CodeGen::Address VAListAddr,
QualType Ty) const;
virtual bool isHomogeneousAggregateBaseType(QualType Ty) const;
virtual bool isHomogeneousAggregateSmallEnough(const Type *Base,
uint64_t Members) const;
virtual bool shouldSignExtUnsignedType(QualType Ty) const;
bool isHomogeneousAggregate(QualType Ty, const Type *&Base,
uint64_t &Members) const;
/// A convenience method to return an indirect ABIArgInfo with an
/// expected alignment equal to the ABI alignment of the given type.
CodeGen::ABIArgInfo
getNaturalAlignIndirect(QualType Ty, bool ByRef = true,
bool Realign = false,
llvm::Type *Padding = nullptr) const;
CodeGen::ABIArgInfo
getNaturalAlignIndirectInReg(QualType Ty, bool Realign = false) const;
};
/// A refining implementation of ABIInfo for targets that support swiftcall.
///
/// If we find ourselves wanting multiple such refinements, they'll probably
/// be independent refinements, and we should probably find another way
/// to do it than simple inheritance.
class SwiftABIInfo : public ABIInfo {
public:
SwiftABIInfo(CodeGen::CodeGenTypes &cgt) : ABIInfo(cgt) {}
bool supportsSwift() const final override { return true; }
virtual bool shouldPassIndirectlyForSwift(CharUnits totalSize,
ArrayRef<llvm::Type*> types,
bool asReturnValue) const = 0;
virtual bool isLegalVectorTypeForSwift(CharUnits totalSize,
llvm::Type *eltTy,
unsigned elts) const;
virtual bool isSwiftErrorInRegister() const = 0;
static bool classof(const ABIInfo *info) {
return info->supportsSwift();
}
};
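// The classof() above hooks SwiftABIInfo into LLVM-style casting, so code
// holding a generic ABIInfo can test for swiftcall support; an illustrative
// (hypothetical) use:
//   if (auto *SwiftInfo = llvm::dyn_cast<SwiftABIInfo>(&Info))
//     (void)SwiftInfo->isSwiftErrorInRegister();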
} // end namespace CodeGen
} // end namespace clang
#endif

View File

@@ -0,0 +1,118 @@
//===-- Address.h - An aligned address -------------------------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This class provides a simple wrapper for a pair of a pointer and an
// alignment.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_CLANG_LIB_CODEGEN_ADDRESS_H
#define LLVM_CLANG_LIB_CODEGEN_ADDRESS_H
#include "llvm/IR/Constants.h"
#include "clang/AST/CharUnits.h"
namespace clang {
namespace CodeGen {
/// An aligned address.
class Address {
llvm::Value *Pointer;
CharUnits Alignment;
public:
Address(llvm::Value *pointer, CharUnits alignment)
: Pointer(pointer), Alignment(alignment) {
assert((!alignment.isZero() || pointer == nullptr) &&
"creating valid address with invalid alignment");
}
static Address invalid() { return Address(nullptr, CharUnits()); }
bool isValid() const { return Pointer != nullptr; }
llvm::Value *getPointer() const {
assert(isValid());
return Pointer;
}
/// Return the type of the pointer value.
llvm::PointerType *getType() const {
return llvm::cast<llvm::PointerType>(getPointer()->getType());
}
/// Return the type of the values stored in this address.
///
/// When IR pointer types lose their element type, we should simply
/// store it in Address instead for the convenience of writing code.
llvm::Type *getElementType() const {
return getType()->getElementType();
}
/// Return the address space that this address resides in.
unsigned getAddressSpace() const {
return getType()->getAddressSpace();
}
/// Return the IR name of the pointer value.
llvm::StringRef getName() const {
return getPointer()->getName();
}
/// Return the alignment of this pointer.
CharUnits getAlignment() const {
assert(isValid());
return Alignment;
}
};
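// Illustrative usage sketch (hypothetical values): an Address pairs a
// pointer with the alignment known for it, and both are asserted on access.
//   Address Addr(SomePtr, CharUnits::fromQuantity(8)); // 8-byte aligned
//   if (Addr.isValid())
//     (void)Addr.getAlignment(); // CharUnits of 8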
/// A specialization of Address that requires the address to be an
/// LLVM Constant.
class ConstantAddress : public Address {
public:
ConstantAddress(llvm::Constant *pointer, CharUnits alignment)
: Address(pointer, alignment) {}
static ConstantAddress invalid() {
return ConstantAddress(nullptr, CharUnits());
}
llvm::Constant *getPointer() const {
return llvm::cast<llvm::Constant>(Address::getPointer());
}
ConstantAddress getBitCast(llvm::Type *ty) const {
return ConstantAddress(llvm::ConstantExpr::getBitCast(getPointer(), ty),
getAlignment());
}
ConstantAddress getElementBitCast(llvm::Type *ty) const {
return getBitCast(ty->getPointerTo(getAddressSpace()));
}
static bool isaImpl(Address addr) {
return llvm::isa<llvm::Constant>(addr.getPointer());
}
static ConstantAddress castImpl(Address addr) {
return ConstantAddress(llvm::cast<llvm::Constant>(addr.getPointer()),
addr.getAlignment());
}
};
}
// Present a minimal LLVM-like casting interface.
template <class U> inline U cast(CodeGen::Address addr) {
return U::castImpl(addr);
}
template <class U> inline bool isa(CodeGen::Address addr) {
return U::isaImpl(addr);
}
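// Illustrative use of the minimal casting interface above:
//   if (isa<CodeGen::ConstantAddress>(addr))
//     CodeGen::ConstantAddress CA = cast<CodeGen::ConstantAddress>(addr);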
}
#endif

File diff suppressed because it is too large

File diff suppressed because it is too large

File diff suppressed because it is too large

View File

@@ -0,0 +1,284 @@
//===-- CGBlocks.h - state for LLVM CodeGen for blocks ----------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This is the internal state used for llvm translation for block literals.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_CLANG_LIB_CODEGEN_CGBLOCKS_H
#define LLVM_CLANG_LIB_CODEGEN_CGBLOCKS_H
#include "CGBuilder.h"
#include "CGCall.h"
#include "CGValue.h"
#include "CodeGenFunction.h"
#include "CodeGenTypes.h"
#include "clang/AST/CharUnits.h"
#include "clang/AST/Expr.h"
#include "clang/AST/ExprCXX.h"
#include "clang/AST/ExprObjC.h"
#include "clang/AST/Type.h"
#include "clang/Basic/TargetInfo.h"
namespace llvm {
class Constant;
class Function;
class GlobalValue;
class DataLayout;
class FunctionType;
class PointerType;
class Value;
class LLVMContext;
}
namespace clang {
namespace CodeGen {
class CGBlockInfo;
// Flags stored in __block variables.
enum BlockByrefFlags {
BLOCK_BYREF_HAS_COPY_DISPOSE = (1 << 25), // compiler
BLOCK_BYREF_LAYOUT_MASK = (0xF << 28), // compiler
BLOCK_BYREF_LAYOUT_EXTENDED = (1 << 28),
BLOCK_BYREF_LAYOUT_NON_OBJECT = (2 << 28),
BLOCK_BYREF_LAYOUT_STRONG = (3 << 28),
BLOCK_BYREF_LAYOUT_WEAK = (4 << 28),
BLOCK_BYREF_LAYOUT_UNRETAINED = (5 << 28)
};
enum BlockLiteralFlags {
BLOCK_HAS_COPY_DISPOSE = (1 << 25),
BLOCK_HAS_CXX_OBJ = (1 << 26),
BLOCK_IS_GLOBAL = (1 << 28),
BLOCK_USE_STRET = (1 << 29),
BLOCK_HAS_SIGNATURE = (1 << 30),
BLOCK_HAS_EXTENDED_LAYOUT = (1 << 31)
};
class BlockFlags {
uint32_t flags;
public:
BlockFlags(uint32_t flags) : flags(flags) {}
BlockFlags() : flags(0) {}
BlockFlags(BlockLiteralFlags flag) : flags(flag) {}
BlockFlags(BlockByrefFlags flag) : flags(flag) {}
uint32_t getBitMask() const { return flags; }
bool empty() const { return flags == 0; }
friend BlockFlags operator|(BlockFlags l, BlockFlags r) {
return BlockFlags(l.flags | r.flags);
}
friend BlockFlags &operator|=(BlockFlags &l, BlockFlags r) {
l.flags |= r.flags;
return l;
}
friend bool operator&(BlockFlags l, BlockFlags r) {
return (l.flags & r.flags);
}
bool operator==(BlockFlags r) {
return (flags == r.flags);
}
};
inline BlockFlags operator|(BlockLiteralFlags l, BlockLiteralFlags r) {
return BlockFlags(l) | BlockFlags(r);
}
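// Flags combine with the operators above; e.g. (illustrative only) a global
// block literal carrying a signature would be encoded as:
//   BlockFlags F = BLOCK_IS_GLOBAL | BLOCK_HAS_SIGNATURE;
//   assert(F & BLOCK_HAS_SIGNATURE);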
enum BlockFieldFlag_t {
BLOCK_FIELD_IS_OBJECT = 0x03, /* id, NSObject, __attribute__((NSObject)),
block, ... */
BLOCK_FIELD_IS_BLOCK = 0x07, /* a block variable */
BLOCK_FIELD_IS_BYREF = 0x08, /* the on stack structure holding the __block
variable */
BLOCK_FIELD_IS_WEAK = 0x10, /* declared __weak, only used in byref copy
helpers */
BLOCK_FIELD_IS_ARC = 0x40, /* field has ARC-specific semantics */
BLOCK_BYREF_CALLER = 128, /* called from __block (byref) copy/dispose
support routines */
BLOCK_BYREF_CURRENT_MAX = 256
};
class BlockFieldFlags {
uint32_t flags;
BlockFieldFlags(uint32_t flags) : flags(flags) {}
public:
BlockFieldFlags() : flags(0) {}
BlockFieldFlags(BlockFieldFlag_t flag) : flags(flag) {}
uint32_t getBitMask() const { return flags; }
bool empty() const { return flags == 0; }
/// Answers whether the flags indicate that this field is an object
/// or block pointer that requires _Block_object_assign/dispose.
bool isSpecialPointer() const { return flags & BLOCK_FIELD_IS_OBJECT; }
friend BlockFieldFlags operator|(BlockFieldFlags l, BlockFieldFlags r) {
return BlockFieldFlags(l.flags | r.flags);
}
friend BlockFieldFlags &operator|=(BlockFieldFlags &l, BlockFieldFlags r) {
l.flags |= r.flags;
return l;
}
friend bool operator&(BlockFieldFlags l, BlockFieldFlags r) {
return (l.flags & r.flags);
}
};
inline BlockFieldFlags operator|(BlockFieldFlag_t l, BlockFieldFlag_t r) {
return BlockFieldFlags(l) | BlockFieldFlags(r);
}
/// Information about the layout of a __block variable.
class BlockByrefInfo {
public:
llvm::StructType *Type;
unsigned FieldIndex;
CharUnits ByrefAlignment;
CharUnits FieldOffset;
};
/// CGBlockInfo - Information to generate a block literal.
class CGBlockInfo {
public:
/// Name - The name of the block, kind of.
StringRef Name;
/// The field index of 'this' within the block, if there is one.
unsigned CXXThisIndex;
class Capture {
uintptr_t Data;
EHScopeStack::stable_iterator Cleanup;
CharUnits::QuantityType Offset;
/// Type of the capture field. Normally, this is identical to the type of
/// the capture's VarDecl, but can be different if there is an enclosing
/// lambda.
QualType FieldType;
public:
bool isIndex() const { return (Data & 1) != 0; }
bool isConstant() const { return !isIndex(); }
unsigned getIndex() const {
assert(isIndex());
return Data >> 1;
}
CharUnits getOffset() const {
assert(isIndex());
return CharUnits::fromQuantity(Offset);
}
EHScopeStack::stable_iterator getCleanup() const {
assert(isIndex());
return Cleanup;
}
void setCleanup(EHScopeStack::stable_iterator cleanup) {
assert(isIndex());
Cleanup = cleanup;
}
llvm::Value *getConstant() const {
assert(isConstant());
return reinterpret_cast<llvm::Value*>(Data);
}
QualType fieldType() const {
return FieldType;
}
static Capture makeIndex(unsigned index, CharUnits offset,
QualType FieldType) {
Capture v;
v.Data = (index << 1) | 1;
v.Offset = offset.getQuantity();
v.FieldType = FieldType;
return v;
}
static Capture makeConstant(llvm::Value *value) {
Capture v;
v.Data = reinterpret_cast<uintptr_t>(value);
return v;
}
};
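// Note: Capture is a tagged encoding. makeIndex() stores (index << 1) | 1
// in Data, so the low bit marks a field index; makeConstant() stores an
// llvm::Value* whose pointer alignment guarantees a clear low bit. For
// example, makeIndex(3, ...) leaves Data == 7, and getIndex() recovers 3
// as Data >> 1.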
/// CanBeGlobal - True if the block can be global, i.e. it has
/// no non-constant captures.
bool CanBeGlobal : 1;
/// True if the block needs a custom copy or dispose function.
bool NeedsCopyDispose : 1;
/// HasCXXObject - True if the block's custom copy/dispose functions
/// need to be run even in GC mode.
bool HasCXXObject : 1;
/// UsesStret : True if the block uses an stret return. Mutable
/// because it gets set later in the block-creation process.
mutable bool UsesStret : 1;
/// HasCapturedVariableLayout : True if block has captured variables
/// and their layout meta-data has been generated.
bool HasCapturedVariableLayout : 1;
/// The mapping of allocated indexes within the block.
llvm::DenseMap<const VarDecl*, Capture> Captures;
Address LocalAddress;
llvm::StructType *StructureType;
const BlockDecl *Block;
const BlockExpr *BlockExpression;
CharUnits BlockSize;
CharUnits BlockAlign;
CharUnits CXXThisOffset;
// Offset of the gap caused by block header having a smaller
// alignment than the alignment of the block descriptor. This
// is the gap offset before the first captured field.
CharUnits BlockHeaderForcedGapOffset;
// Gap size caused by aligning first field after block header.
// This could be zero if no forced alignment is required.
CharUnits BlockHeaderForcedGapSize;
/// An instruction which dominates the full-expression that the
/// block is inside.
llvm::Instruction *DominatingIP;
/// The next block in the block-info chain. Invalid if this block
/// info is not part of the CGF's block-info chain, which is true
/// if it corresponds to a global block or a block whose expression
/// has been encountered.
CGBlockInfo *NextBlockInfo;
const Capture &getCapture(const VarDecl *var) const {
return const_cast<CGBlockInfo*>(this)->getCapture(var);
}
Capture &getCapture(const VarDecl *var) {
llvm::DenseMap<const VarDecl*, Capture>::iterator
it = Captures.find(var);
assert(it != Captures.end() && "no entry for variable!");
return it->second;
}
const BlockDecl *getBlockDecl() const { return Block; }
const BlockExpr *getBlockExpr() const {
assert(BlockExpression);
assert(BlockExpression->getBlockDecl() == Block);
return BlockExpression;
}
CGBlockInfo(const BlockDecl *blockDecl, StringRef Name);
};
} // end namespace CodeGen
} // end namespace clang
#endif

View File

@@ -0,0 +1,291 @@
//===-- CGBuilder.h - Choose IRBuilder implementation ----------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_CLANG_LIB_CODEGEN_CGBUILDER_H
#define LLVM_CLANG_LIB_CODEGEN_CGBUILDER_H
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/IRBuilder.h"
#include "Address.h"
#include "CodeGenTypeCache.h"
namespace clang {
namespace CodeGen {
class CodeGenFunction;
/// \brief This is an IRBuilder insertion helper that forwards to
/// CodeGenFunction::InsertHelper, which adds necessary metadata to
/// instructions.
class CGBuilderInserter : protected llvm::IRBuilderDefaultInserter {
public:
CGBuilderInserter() = default;
explicit CGBuilderInserter(CodeGenFunction *CGF) : CGF(CGF) {}
protected:
/// \brief This forwards to CodeGenFunction::InsertHelper.
void InsertHelper(llvm::Instruction *I, const llvm::Twine &Name,
llvm::BasicBlock *BB,
llvm::BasicBlock::iterator InsertPt) const;
private:
CodeGenFunction *CGF = nullptr;
};
typedef CGBuilderInserter CGBuilderInserterTy;
typedef llvm::IRBuilder<llvm::ConstantFolder, CGBuilderInserterTy>
CGBuilderBaseTy;
class CGBuilderTy : public CGBuilderBaseTy {
/// Storing a reference to the type cache here makes it a lot easier
/// to build natural-feeling, target-specific IR.
const CodeGenTypeCache &TypeCache;
public:
CGBuilderTy(const CodeGenTypeCache &TypeCache, llvm::LLVMContext &C)
: CGBuilderBaseTy(C), TypeCache(TypeCache) {}
CGBuilderTy(const CodeGenTypeCache &TypeCache,
llvm::LLVMContext &C, const llvm::ConstantFolder &F,
const CGBuilderInserterTy &Inserter)
: CGBuilderBaseTy(C, F, Inserter), TypeCache(TypeCache) {}
CGBuilderTy(const CodeGenTypeCache &TypeCache, llvm::Instruction *I)
: CGBuilderBaseTy(I), TypeCache(TypeCache) {}
CGBuilderTy(const CodeGenTypeCache &TypeCache, llvm::BasicBlock *BB)
: CGBuilderBaseTy(BB), TypeCache(TypeCache) {}
llvm::ConstantInt *getSize(CharUnits N) {
return llvm::ConstantInt::get(TypeCache.SizeTy, N.getQuantity());
}
llvm::ConstantInt *getSize(uint64_t N) {
return llvm::ConstantInt::get(TypeCache.SizeTy, N);
}
// Note that we intentionally hide the CreateLoad APIs that don't
// take an alignment.
llvm::LoadInst *CreateLoad(Address Addr, const llvm::Twine &Name = "") {
return CreateAlignedLoad(Addr.getPointer(),
Addr.getAlignment().getQuantity(),
Name);
}
llvm::LoadInst *CreateLoad(Address Addr, const char *Name) {
// This overload is required to prevent string literals from
// ending up in the IsVolatile overload.
return CreateAlignedLoad(Addr.getPointer(),
Addr.getAlignment().getQuantity(),
Name);
}
llvm::LoadInst *CreateLoad(Address Addr, bool IsVolatile,
const llvm::Twine &Name = "") {
return CreateAlignedLoad(Addr.getPointer(),
Addr.getAlignment().getQuantity(),
IsVolatile,
Name);
}
using CGBuilderBaseTy::CreateAlignedLoad;
llvm::LoadInst *CreateAlignedLoad(llvm::Value *Addr, CharUnits Align,
const llvm::Twine &Name = "") {
return CreateAlignedLoad(Addr, Align.getQuantity(), Name);
}
llvm::LoadInst *CreateAlignedLoad(llvm::Value *Addr, CharUnits Align,
const char *Name) {
return CreateAlignedLoad(Addr, Align.getQuantity(), Name);
}
llvm::LoadInst *CreateAlignedLoad(llvm::Type *Ty, llvm::Value *Addr,
CharUnits Align,
const llvm::Twine &Name = "") {
assert(Addr->getType()->getPointerElementType() == Ty);
return CreateAlignedLoad(Addr, Align.getQuantity(), Name);
}
// Note that we intentionally hide the CreateStore APIs that don't
// take an alignment.
llvm::StoreInst *CreateStore(llvm::Value *Val, Address Addr,
bool IsVolatile = false) {
return CreateAlignedStore(Val, Addr.getPointer(),
Addr.getAlignment().getQuantity(), IsVolatile);
}
using CGBuilderBaseTy::CreateAlignedStore;
llvm::StoreInst *CreateAlignedStore(llvm::Value *Val, llvm::Value *Addr,
CharUnits Align, bool IsVolatile = false) {
return CreateAlignedStore(Val, Addr, Align.getQuantity(), IsVolatile);
}
// FIXME: these "default-aligned" APIs should be removed,
// but I don't feel like fixing all the builtin code right now.
llvm::StoreInst *CreateDefaultAlignedStore(llvm::Value *Val,
llvm::Value *Addr,
bool IsVolatile = false) {
return CGBuilderBaseTy::CreateStore(Val, Addr, IsVolatile);
}
/// Emit a load from an i1 flag variable.
llvm::LoadInst *CreateFlagLoad(llvm::Value *Addr,
const llvm::Twine &Name = "") {
assert(Addr->getType()->getPointerElementType() == getInt1Ty());
return CreateAlignedLoad(getInt1Ty(), Addr, CharUnits::One(), Name);
}
/// Emit a store to an i1 flag variable.
llvm::StoreInst *CreateFlagStore(bool Value, llvm::Value *Addr) {
assert(Addr->getType()->getPointerElementType() == getInt1Ty());
return CreateAlignedStore(getInt1(Value), Addr, CharUnits::One());
}
using CGBuilderBaseTy::CreateBitCast;
Address CreateBitCast(Address Addr, llvm::Type *Ty,
const llvm::Twine &Name = "") {
return Address(CreateBitCast(Addr.getPointer(), Ty, Name),
Addr.getAlignment());
}
using CGBuilderBaseTy::CreateAddrSpaceCast;
Address CreateAddrSpaceCast(Address Addr, llvm::Type *Ty,
const llvm::Twine &Name = "") {
return Address(CreateAddrSpaceCast(Addr.getPointer(), Ty, Name),
Addr.getAlignment());
}
/// Cast the element type of the given address to a different type,
/// preserving information like the alignment and address space.
Address CreateElementBitCast(Address Addr, llvm::Type *Ty,
const llvm::Twine &Name = "") {
auto PtrTy = Ty->getPointerTo(Addr.getAddressSpace());
return CreateBitCast(Addr, PtrTy, Name);
}
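// For example (illustrative only): if Addr has element type i8, alignment 4,
// and address space 0, then CreateElementBitCast(Addr, Int32Ty) produces an
// Address of element type i32 that keeps the same alignment and address
// space.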
using CGBuilderBaseTy::CreatePointerBitCastOrAddrSpaceCast;
Address CreatePointerBitCastOrAddrSpaceCast(Address Addr, llvm::Type *Ty,
const llvm::Twine &Name = "") {
llvm::Value *Ptr =
CreatePointerBitCastOrAddrSpaceCast(Addr.getPointer(), Ty, Name);
return Address(Ptr, Addr.getAlignment());
}
using CGBuilderBaseTy::CreateStructGEP;
Address CreateStructGEP(Address Addr, unsigned Index, CharUnits Offset,
const llvm::Twine &Name = "") {
return Address(CreateStructGEP(Addr.getElementType(),
Addr.getPointer(), Index, Name),
Addr.getAlignment().alignmentAtOffset(Offset));
}
Address CreateStructGEP(Address Addr, unsigned Index,
const llvm::StructLayout *Layout,
const llvm::Twine &Name = "") {
auto Offset = CharUnits::fromQuantity(Layout->getElementOffset(Index));
return CreateStructGEP(Addr, Index, Offset, Name);
}
/// Given
/// %addr = [n x T]* ...
/// produce
/// %name = getelementptr inbounds %addr, i64 0, i64 index
/// where i64 is actually the target word size.
///
/// This API assumes that drilling into an array like this is always
/// an inbounds operation.
///
/// \param EltSize - the size of the type T in bytes
Address CreateConstArrayGEP(Address Addr, uint64_t Index, CharUnits EltSize,
const llvm::Twine &Name = "") {
return Address(CreateInBoundsGEP(Addr.getPointer(),
{getSize(CharUnits::Zero()),
getSize(Index)},
Name),
Addr.getAlignment().alignmentAtOffset(Index * EltSize));
}
/// Given
/// %addr = T* ...
/// produce
/// %name = getelementptr inbounds %addr, i64 index
/// where i64 is actually the target word size.
///
/// \param EltSize - the size of the type T in bytes
Address CreateConstInBoundsGEP(Address Addr, uint64_t Index,
CharUnits EltSize,
const llvm::Twine &Name = "") {
return Address(CreateInBoundsGEP(Addr.getElementType(), Addr.getPointer(),
getSize(Index), Name),
Addr.getAlignment().alignmentAtOffset(Index * EltSize));
}
/// Given
/// %addr = T* ...
/// produce
/// %name = getelementptr inbounds %addr, i64 index
/// where i64 is actually the target word size.
///
/// \param EltSize - the size of the type T in bytes
Address CreateConstGEP(Address Addr, uint64_t Index, CharUnits EltSize,
const llvm::Twine &Name = "") {
return Address(CreateGEP(Addr.getElementType(), Addr.getPointer(),
getSize(Index), Name),
Addr.getAlignment().alignmentAtOffset(Index * EltSize));
}
/// Given a pointer to i8, adjust it by a given constant offset.
Address CreateConstInBoundsByteGEP(Address Addr, CharUnits Offset,
const llvm::Twine &Name = "") {
assert(Addr.getElementType() == TypeCache.Int8Ty);
return Address(CreateInBoundsGEP(Addr.getPointer(), getSize(Offset), Name),
Addr.getAlignment().alignmentAtOffset(Offset));
}
Address CreateConstByteGEP(Address Addr, CharUnits Offset,
const llvm::Twine &Name = "") {
assert(Addr.getElementType() == TypeCache.Int8Ty);
return Address(CreateGEP(Addr.getPointer(), getSize(Offset), Name),
Addr.getAlignment().alignmentAtOffset(Offset));
}
llvm::Value *CreateConstInBoundsByteGEP(llvm::Value *Ptr, CharUnits Offset,
const llvm::Twine &Name = "") {
assert(Ptr->getType()->getPointerElementType() == TypeCache.Int8Ty);
return CreateInBoundsGEP(Ptr, getSize(Offset), Name);
}
llvm::Value *CreateConstByteGEP(llvm::Value *Ptr, CharUnits Offset,
const llvm::Twine &Name = "") {
assert(Ptr->getType()->getPointerElementType() == TypeCache.Int8Ty);
return CreateGEP(Ptr, getSize(Offset), Name);
}
using CGBuilderBaseTy::CreateMemCpy;
llvm::CallInst *CreateMemCpy(Address Dest, Address Src, llvm::Value *Size,
bool IsVolatile = false) {
auto Align = std::min(Dest.getAlignment(), Src.getAlignment());
return CreateMemCpy(Dest.getPointer(), Src.getPointer(), Size,
Align.getQuantity(), IsVolatile);
}
llvm::CallInst *CreateMemCpy(Address Dest, Address Src, uint64_t Size,
bool IsVolatile = false) {
auto Align = std::min(Dest.getAlignment(), Src.getAlignment());
return CreateMemCpy(Dest.getPointer(), Src.getPointer(), Size,
Align.getQuantity(), IsVolatile);
}
using CGBuilderBaseTy::CreateMemMove;
llvm::CallInst *CreateMemMove(Address Dest, Address Src, llvm::Value *Size,
bool IsVolatile = false) {
auto Align = std::min(Dest.getAlignment(), Src.getAlignment());
return CreateMemMove(Dest.getPointer(), Src.getPointer(), Size,
Align.getQuantity(), IsVolatile);
}
using CGBuilderBaseTy::CreateMemSet;
llvm::CallInst *CreateMemSet(Address Dest, llvm::Value *Value,
llvm::Value *Size, bool IsVolatile = false) {
return CreateMemSet(Dest.getPointer(), Value, Size,
Dest.getAlignment().getQuantity(), IsVolatile);
}
};
} // end namespace CodeGen
} // end namespace clang
#endif

View File

@@ -0,0 +1 @@
35ae114c4f25f77271d36ef6f5ab555a25c0b624

View File

@ -0,0 +1,379 @@
//===----- CGCUDANV.cpp - Interface to NVIDIA CUDA Runtime ----------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This provides a class for CUDA code generation targeting the NVIDIA CUDA
// runtime library.
//
//===----------------------------------------------------------------------===//
#include "CGCUDARuntime.h"
#include "CodeGenFunction.h"
#include "CodeGenModule.h"
#include "clang/CodeGen/ConstantInitBuilder.h"
#include "clang/AST/Decl.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/CallSite.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DerivedTypes.h"
using namespace clang;
using namespace CodeGen;
namespace {
class CGNVCUDARuntime : public CGCUDARuntime {
private:
llvm::IntegerType *IntTy, *SizeTy;
llvm::Type *VoidTy;
llvm::PointerType *CharPtrTy, *VoidPtrTy, *VoidPtrPtrTy;
/// Convenience reference to LLVM Context
llvm::LLVMContext &Context;
/// Convenience reference to the current module
llvm::Module &TheModule;
/// Keeps track of kernel launch stubs emitted in this module
llvm::SmallVector<llvm::Function *, 16> EmittedKernels;
llvm::SmallVector<std::pair<llvm::GlobalVariable *, unsigned>, 16> DeviceVars;
/// Keeps track of variables containing handles of GPU binaries. Populated by
/// ModuleCtorFunction() and used to create corresponding cleanup calls in
/// ModuleDtorFunction()
llvm::SmallVector<llvm::GlobalVariable *, 16> GpuBinaryHandles;
llvm::Constant *getSetupArgumentFn() const;
llvm::Constant *getLaunchFn() const;
/// Creates a function to register all kernel stubs generated in this module.
llvm::Function *makeRegisterGlobalsFn();
/// Helper function that generates a constant string and returns a pointer to
/// the start of the string. The result of this function can be used anywhere
/// the C code specifies const char*.
llvm::Constant *makeConstantString(const std::string &Str,
const std::string &Name = "",
const std::string &SectionName = "",
unsigned Alignment = 0) {
llvm::Constant *Zeros[] = {llvm::ConstantInt::get(SizeTy, 0),
llvm::ConstantInt::get(SizeTy, 0)};
auto ConstStr = CGM.GetAddrOfConstantCString(Str, Name.c_str());
llvm::GlobalVariable *GV =
cast<llvm::GlobalVariable>(ConstStr.getPointer());
if (!SectionName.empty())
GV->setSection(SectionName);
if (Alignment)
GV->setAlignment(Alignment);
return llvm::ConstantExpr::getGetElementPtr(ConstStr.getElementType(),
ConstStr.getPointer(), Zeros);
}
void emitDeviceStubBody(CodeGenFunction &CGF, FunctionArgList &Args);
public:
CGNVCUDARuntime(CodeGenModule &CGM);
void emitDeviceStub(CodeGenFunction &CGF, FunctionArgList &Args) override;
void registerDeviceVar(llvm::GlobalVariable &Var, unsigned Flags) override {
DeviceVars.push_back(std::make_pair(&Var, Flags));
}
/// Creates module constructor function
llvm::Function *makeModuleCtorFunction() override;
/// Creates module destructor function
llvm::Function *makeModuleDtorFunction() override;
};
}
CGNVCUDARuntime::CGNVCUDARuntime(CodeGenModule &CGM)
: CGCUDARuntime(CGM), Context(CGM.getLLVMContext()),
TheModule(CGM.getModule()) {
CodeGen::CodeGenTypes &Types = CGM.getTypes();
ASTContext &Ctx = CGM.getContext();
IntTy = CGM.IntTy;
SizeTy = CGM.SizeTy;
VoidTy = CGM.VoidTy;
CharPtrTy = llvm::PointerType::getUnqual(Types.ConvertType(Ctx.CharTy));
VoidPtrTy = cast<llvm::PointerType>(Types.ConvertType(Ctx.VoidPtrTy));
VoidPtrPtrTy = VoidPtrTy->getPointerTo();
}
llvm::Constant *CGNVCUDARuntime::getSetupArgumentFn() const {
// cudaError_t cudaSetupArgument(void *, size_t, size_t)
llvm::Type *Params[] = {VoidPtrTy, SizeTy, SizeTy};
return CGM.CreateRuntimeFunction(llvm::FunctionType::get(IntTy,
Params, false),
"cudaSetupArgument");
}
llvm::Constant *CGNVCUDARuntime::getLaunchFn() const {
// cudaError_t cudaLaunch(char *)
return CGM.CreateRuntimeFunction(
llvm::FunctionType::get(IntTy, CharPtrTy, false), "cudaLaunch");
}
void CGNVCUDARuntime::emitDeviceStub(CodeGenFunction &CGF,
FunctionArgList &Args) {
EmittedKernels.push_back(CGF.CurFn);
emitDeviceStubBody(CGF, Args);
}
void CGNVCUDARuntime::emitDeviceStubBody(CodeGenFunction &CGF,
FunctionArgList &Args) {
// Emit a call to cudaSetupArgument for each arg in Args.
llvm::Constant *cudaSetupArgFn = getSetupArgumentFn();
llvm::BasicBlock *EndBlock = CGF.createBasicBlock("setup.end");
CharUnits Offset = CharUnits::Zero();
for (const VarDecl *A : Args) {
CharUnits TyWidth, TyAlign;
std::tie(TyWidth, TyAlign) =
CGM.getContext().getTypeInfoInChars(A->getType());
Offset = Offset.alignTo(TyAlign);
llvm::Value *Args[] = {
CGF.Builder.CreatePointerCast(CGF.GetAddrOfLocalVar(A).getPointer(),
VoidPtrTy),
llvm::ConstantInt::get(SizeTy, TyWidth.getQuantity()),
llvm::ConstantInt::get(SizeTy, Offset.getQuantity()),
};
llvm::CallSite CS = CGF.EmitRuntimeCallOrInvoke(cudaSetupArgFn, Args);
llvm::Constant *Zero = llvm::ConstantInt::get(IntTy, 0);
llvm::Value *CSZero = CGF.Builder.CreateICmpEQ(CS.getInstruction(), Zero);
llvm::BasicBlock *NextBlock = CGF.createBasicBlock("setup.next");
CGF.Builder.CreateCondBr(CSZero, NextBlock, EndBlock);
CGF.EmitBlock(NextBlock);
Offset += TyWidth;
}
// Emit the call to cudaLaunch
llvm::Constant *cudaLaunchFn = getLaunchFn();
llvm::Value *Arg = CGF.Builder.CreatePointerCast(CGF.CurFn, CharPtrTy);
CGF.EmitRuntimeCallOrInvoke(cudaLaunchFn, Arg);
CGF.EmitBranch(EndBlock);
CGF.EmitBlock(EndBlock);
}
/// Creates a function that sets up state on the host side for CUDA objects that
/// have a presence on both the host and device sides. Specifically, registers
/// the host side of kernel functions and device global variables with the CUDA
/// runtime.
/// \code
/// void __cuda_register_globals(void** GpuBinaryHandle) {
/// __cudaRegisterFunction(GpuBinaryHandle,Kernel0,...);
/// ...
/// __cudaRegisterFunction(GpuBinaryHandle,KernelM,...);
/// __cudaRegisterVar(GpuBinaryHandle, GlobalVar0, ...);
/// ...
/// __cudaRegisterVar(GpuBinaryHandle, GlobalVarN, ...);
/// }
/// \endcode
llvm::Function *CGNVCUDARuntime::makeRegisterGlobalsFn() {
// No need to register anything
if (EmittedKernels.empty() && DeviceVars.empty())
return nullptr;
llvm::Function *RegisterKernelsFunc = llvm::Function::Create(
llvm::FunctionType::get(VoidTy, VoidPtrPtrTy, false),
llvm::GlobalValue::InternalLinkage, "__cuda_register_globals", &TheModule);
llvm::BasicBlock *EntryBB =
llvm::BasicBlock::Create(Context, "entry", RegisterKernelsFunc);
CGBuilderTy Builder(CGM, Context);
Builder.SetInsertPoint(EntryBB);
// void __cudaRegisterFunction(void **, const char *, char *, const char *,
// int, uint3*, uint3*, dim3*, dim3*, int*)
llvm::Type *RegisterFuncParams[] = {
VoidPtrPtrTy, CharPtrTy, CharPtrTy, CharPtrTy, IntTy,
VoidPtrTy, VoidPtrTy, VoidPtrTy, VoidPtrTy, IntTy->getPointerTo()};
llvm::Constant *RegisterFunc = CGM.CreateRuntimeFunction(
llvm::FunctionType::get(IntTy, RegisterFuncParams, false),
"__cudaRegisterFunction");
// Extract the GpuBinaryHandle passed as the first argument to
// __cuda_register_globals() and generate a __cudaRegisterFunction() call
// for each emitted kernel.
llvm::Argument &GpuBinaryHandlePtr = *RegisterKernelsFunc->arg_begin();
for (llvm::Function *Kernel : EmittedKernels) {
llvm::Constant *KernelName = makeConstantString(Kernel->getName());
llvm::Constant *NullPtr = llvm::ConstantPointerNull::get(VoidPtrTy);
llvm::Value *Args[] = {
&GpuBinaryHandlePtr, Builder.CreateBitCast(Kernel, VoidPtrTy),
KernelName, KernelName, llvm::ConstantInt::get(IntTy, -1), NullPtr,
NullPtr, NullPtr, NullPtr,
llvm::ConstantPointerNull::get(IntTy->getPointerTo())};
Builder.CreateCall(RegisterFunc, Args);
}
// void __cudaRegisterVar(void **, char *, char *, const char *,
// int, int, int, int)
llvm::Type *RegisterVarParams[] = {VoidPtrPtrTy, CharPtrTy, CharPtrTy,
CharPtrTy, IntTy, IntTy,
IntTy, IntTy};
llvm::Constant *RegisterVar = CGM.CreateRuntimeFunction(
llvm::FunctionType::get(IntTy, RegisterVarParams, false),
"__cudaRegisterVar");
for (auto &Pair : DeviceVars) {
llvm::GlobalVariable *Var = Pair.first;
unsigned Flags = Pair.second;
llvm::Constant *VarName = makeConstantString(Var->getName());
uint64_t VarSize =
CGM.getDataLayout().getTypeAllocSize(Var->getValueType());
llvm::Value *Args[] = {
&GpuBinaryHandlePtr,
Builder.CreateBitCast(Var, VoidPtrTy),
VarName,
VarName,
llvm::ConstantInt::get(IntTy, (Flags & ExternDeviceVar) ? 1 : 0),
llvm::ConstantInt::get(IntTy, VarSize),
llvm::ConstantInt::get(IntTy, (Flags & ConstantDeviceVar) ? 1 : 0),
llvm::ConstantInt::get(IntTy, 0)};
Builder.CreateCall(RegisterVar, Args);
}
Builder.CreateRetVoid();
return RegisterKernelsFunc;
}
/// Creates a global constructor function for the module:
/// \code
/// void __cuda_module_ctor(void*) {
/// Handle0 = __cudaRegisterFatBinary(GpuBinaryBlob0);
/// __cuda_register_globals(Handle0);
/// ...
/// HandleN = __cudaRegisterFatBinary(GpuBinaryBlobN);
/// __cuda_register_globals(HandleN);
/// }
/// \endcode
llvm::Function *CGNVCUDARuntime::makeModuleCtorFunction() {
// No need to generate ctors/dtors if there are no GPU binaries.
if (CGM.getCodeGenOpts().CudaGpuBinaryFileNames.empty())
return nullptr;
// void __cuda_register_globals(void* handle);
llvm::Function *RegisterGlobalsFunc = makeRegisterGlobalsFn();
// void ** __cudaRegisterFatBinary(void *);
llvm::Constant *RegisterFatbinFunc = CGM.CreateRuntimeFunction(
llvm::FunctionType::get(VoidPtrPtrTy, VoidPtrTy, false),
"__cudaRegisterFatBinary");
// struct { int magic; int version; void *gpu_binary; void *dont_care; };
llvm::StructType *FatbinWrapperTy =
llvm::StructType::get(IntTy, IntTy, VoidPtrTy, VoidPtrTy);
llvm::Function *ModuleCtorFunc = llvm::Function::Create(
llvm::FunctionType::get(VoidTy, VoidPtrTy, false),
llvm::GlobalValue::InternalLinkage, "__cuda_module_ctor", &TheModule);
llvm::BasicBlock *CtorEntryBB =
llvm::BasicBlock::Create(Context, "entry", ModuleCtorFunc);
CGBuilderTy CtorBuilder(CGM, Context);
CtorBuilder.SetInsertPoint(CtorEntryBB);
// For each GPU binary, register it with the CUDA runtime, store the
// returned handle in a global variable, and save that handle in the
// GpuBinaryHandles vector so it can be unregistered in the destructor on
// exit. Then associate all known kernels with the GPU binary handle so the
// CUDA runtime can figure out what to call on the GPU side.
for (const std::string &GpuBinaryFileName :
CGM.getCodeGenOpts().CudaGpuBinaryFileNames) {
llvm::ErrorOr<std::unique_ptr<llvm::MemoryBuffer>> GpuBinaryOrErr =
llvm::MemoryBuffer::getFileOrSTDIN(GpuBinaryFileName);
if (std::error_code EC = GpuBinaryOrErr.getError()) {
CGM.getDiags().Report(diag::err_cannot_open_file) << GpuBinaryFileName
<< EC.message();
continue;
}
const char *FatbinConstantName =
CGM.getTriple().isMacOSX() ? "__NV_CUDA,__nv_fatbin" : ".nv_fatbin";
// NVIDIA's cuobjdump looks for fatbins in this section.
const char *FatbinSectionName =
CGM.getTriple().isMacOSX() ? "__NV_CUDA,__fatbin" : ".nvFatBinSegment";
// Create initialized wrapper structure that points to the loaded GPU binary
ConstantInitBuilder Builder(CGM);
auto Values = Builder.beginStruct(FatbinWrapperTy);
// Fatbin wrapper magic.
Values.addInt(IntTy, 0x466243b1);
// Fatbin version.
Values.addInt(IntTy, 1);
// Data.
Values.add(makeConstantString(GpuBinaryOrErr.get()->getBuffer(),
"", FatbinConstantName, 8));
// Unused in fatbin v1.
Values.add(llvm::ConstantPointerNull::get(VoidPtrTy));
llvm::GlobalVariable *FatbinWrapper =
Values.finishAndCreateGlobal("__cuda_fatbin_wrapper",
CGM.getPointerAlign(),
/*constant*/ true);
FatbinWrapper->setSection(FatbinSectionName);
// GpuBinaryHandle = __cudaRegisterFatBinary(&FatbinWrapper);
llvm::CallInst *RegisterFatbinCall = CtorBuilder.CreateCall(
RegisterFatbinFunc,
CtorBuilder.CreateBitCast(FatbinWrapper, VoidPtrTy));
llvm::GlobalVariable *GpuBinaryHandle = new llvm::GlobalVariable(
TheModule, VoidPtrPtrTy, false, llvm::GlobalValue::InternalLinkage,
llvm::ConstantPointerNull::get(VoidPtrPtrTy), "__cuda_gpubin_handle");
CtorBuilder.CreateAlignedStore(RegisterFatbinCall, GpuBinaryHandle,
CGM.getPointerAlign());
// Call __cuda_register_globals(GpuBinaryHandle);
if (RegisterGlobalsFunc)
CtorBuilder.CreateCall(RegisterGlobalsFunc, RegisterFatbinCall);
// Save GpuBinaryHandle so we can unregister it in destructor.
GpuBinaryHandles.push_back(GpuBinaryHandle);
}
CtorBuilder.CreateRetVoid();
return ModuleCtorFunc;
}
/// Creates a global destructor function that unregisters all GPU code blobs
/// registered by constructor.
/// \code
/// void __cuda_module_dtor(void*) {
/// __cudaUnregisterFatBinary(Handle0);
/// ...
/// __cudaUnregisterFatBinary(HandleN);
/// }
/// \endcode
llvm::Function *CGNVCUDARuntime::makeModuleDtorFunction() {
// No need for destructor if we don't have handles to unregister.
if (GpuBinaryHandles.empty())
return nullptr;
// void __cudaUnregisterFatBinary(void ** handle);
llvm::Constant *UnregisterFatbinFunc = CGM.CreateRuntimeFunction(
llvm::FunctionType::get(VoidTy, VoidPtrPtrTy, false),
"__cudaUnregisterFatBinary");
llvm::Function *ModuleDtorFunc = llvm::Function::Create(
llvm::FunctionType::get(VoidTy, VoidPtrTy, false),
llvm::GlobalValue::InternalLinkage, "__cuda_module_dtor", &TheModule);
llvm::BasicBlock *DtorEntryBB =
llvm::BasicBlock::Create(Context, "entry", ModuleDtorFunc);
CGBuilderTy DtorBuilder(CGM, Context);
DtorBuilder.SetInsertPoint(DtorEntryBB);
for (llvm::GlobalVariable *GpuBinaryHandle : GpuBinaryHandles) {
auto HandleValue =
DtorBuilder.CreateAlignedLoad(GpuBinaryHandle, CGM.getPointerAlign());
DtorBuilder.CreateCall(UnregisterFatbinFunc, HandleValue);
}
DtorBuilder.CreateRetVoid();
return ModuleDtorFunc;
}
CGCUDARuntime *CodeGen::CreateNVCUDARuntime(CodeGenModule &CGM) {
return new CGNVCUDARuntime(CGM);
}

View File

@@ -0,0 +1,46 @@
//===----- CGCUDARuntime.cpp - Interface to CUDA Runtimes -----------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This provides an abstract class for CUDA code generation. Concrete
// subclasses of this implement code generation for specific CUDA
// runtime libraries.
//
//===----------------------------------------------------------------------===//
#include "CGCUDARuntime.h"
#include "CGCall.h"
#include "CodeGenFunction.h"
#include "clang/AST/Decl.h"
#include "clang/AST/ExprCXX.h"
using namespace clang;
using namespace CodeGen;
CGCUDARuntime::~CGCUDARuntime() {}
RValue CGCUDARuntime::EmitCUDAKernelCallExpr(CodeGenFunction &CGF,
const CUDAKernelCallExpr *E,
ReturnValueSlot ReturnValue) {
llvm::BasicBlock *ConfigOKBlock = CGF.createBasicBlock("kcall.configok");
llvm::BasicBlock *ContBlock = CGF.createBasicBlock("kcall.end");
CodeGenFunction::ConditionalEvaluation eval(CGF);
CGF.EmitBranchOnBoolExpr(E->getConfig(), ContBlock, ConfigOKBlock,
/*TrueCount=*/0);
eval.begin(CGF);
CGF.EmitBlock(ConfigOKBlock);
CGF.EmitSimpleCallExpr(E, ReturnValue);
CGF.EmitBranch(ContBlock);
CGF.EmitBlock(ContBlock);
eval.end(CGF);
return RValue::get(nullptr);
}
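// The generated control flow is roughly (an illustrative sketch): the
// kernel stub body runs only when the configuration expression evaluates
// to false, i.e. when the configuration call reported success:
//
//   if (config-expr)  goto kcall.end;       // configuration failed
//   kcall.configok:   <emit E as a plain call>
//   kcall.end:        result is RValue::get(nullptr)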

View File

@@ -0,0 +1,73 @@
//===----- CGCUDARuntime.h - Interface to CUDA Runtimes ---------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This provides an abstract class for CUDA code generation. Concrete
// subclasses of this implement code generation for specific CUDA
// runtime libraries.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_CLANG_LIB_CODEGEN_CGCUDARUNTIME_H
#define LLVM_CLANG_LIB_CODEGEN_CGCUDARUNTIME_H
namespace llvm {
class Function;
class GlobalVariable;
}
namespace clang {
class CUDAKernelCallExpr;
namespace CodeGen {
class CodeGenFunction;
class CodeGenModule;
class FunctionArgList;
class ReturnValueSlot;
class RValue;
class CGCUDARuntime {
protected:
CodeGenModule &CGM;
public:
// Global variable properties that must be passed to CUDA runtime.
enum DeviceVarFlags {
ExternDeviceVar = 0x01, // extern
ConstantDeviceVar = 0x02, // __constant__
};
CGCUDARuntime(CodeGenModule &CGM) : CGM(CGM) {}
virtual ~CGCUDARuntime();
virtual RValue EmitCUDAKernelCallExpr(CodeGenFunction &CGF,
const CUDAKernelCallExpr *E,
ReturnValueSlot ReturnValue);
/// Emits a kernel launch stub.
virtual void emitDeviceStub(CodeGenFunction &CGF, FunctionArgList &Args) = 0;
virtual void registerDeviceVar(llvm::GlobalVariable &Var, unsigned Flags) = 0;
/// Constructs and returns a module initialization function or nullptr if it's
/// not needed. Must be called after all kernels have been emitted.
virtual llvm::Function *makeModuleCtorFunction() = 0;
/// Returns a module cleanup function or nullptr if it's not needed.
/// Must be called after ModuleCtorFunction
virtual llvm::Function *makeModuleDtorFunction() = 0;
};
/// Creates an instance of a CUDA runtime class.
CGCUDARuntime *CreateNVCUDARuntime(CodeGenModule &CGM);
}
}
#endif

View File

@@ -0,0 +1,321 @@
//===--- CGCXX.cpp - Emit LLVM Code for declarations ----------------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This contains code dealing with C++ code generation.
//
//===----------------------------------------------------------------------===//
// We might split this into multiple files if it gets too unwieldy.
#include "CodeGenModule.h"
#include "CGCXXABI.h"
#include "CodeGenFunction.h"
#include "clang/AST/ASTContext.h"
#include "clang/AST/Decl.h"
#include "clang/AST/DeclCXX.h"
#include "clang/AST/DeclObjC.h"
#include "clang/AST/Mangle.h"
#include "clang/AST/RecordLayout.h"
#include "clang/AST/StmtCXX.h"
#include "clang/Frontend/CodeGenOptions.h"
#include "llvm/ADT/StringExtras.h"
using namespace clang;
using namespace CodeGen;
/// Try to emit a base destructor as an alias to its primary
/// base-class destructor.
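/// For instance (an illustrative sketch, not from this file): given
/// \code
///   struct A { ~A(); };
///   struct B : A { int x; };
/// \endcode
/// B's base-object destructor does nothing beyond invoking ~A(), so with
/// -mconstructor-aliases enabled it can be emitted as an alias to A's
/// base-object destructor.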
bool CodeGenModule::TryEmitBaseDestructorAsAlias(const CXXDestructorDecl *D) {
if (!getCodeGenOpts().CXXCtorDtorAliases)
return true;
// Producing an alias to a base class ctor/dtor can degrade debug quality
// as the debugger cannot tell them apart.
if (getCodeGenOpts().OptimizationLevel == 0)
return true;
// If sanitizing memory to check for use-after-dtor, do not emit as
// an alias, unless this class owns no members.
if (getCodeGenOpts().SanitizeMemoryUseAfterDtor &&
!D->getParent()->field_empty())
return true;
// If the destructor doesn't have a trivial body, we have to emit it
// separately.
if (!D->hasTrivialBody())
return true;
const CXXRecordDecl *Class = D->getParent();
// We are going to instrument this destructor, so give up even if it is
// currently empty.
if (Class->mayInsertExtraPadding())
return true;
// If we need to manipulate a VTT parameter, give up.
if (Class->getNumVBases()) {
// Extra Credit: passing extra parameters is perfectly safe
// in many calling conventions, so only bail out if the ctor's
// calling convention is nonstandard.
return true;
}
// If any field has a non-trivial destructor, we have to emit the
// destructor separately.
for (const auto *I : Class->fields())
if (I->getType().isDestructedType())
return true;
// Try to find a unique base class with a non-trivial destructor.
const CXXRecordDecl *UniqueBase = nullptr;
for (const auto &I : Class->bases()) {
// We're in the base destructor, so skip virtual bases.
if (I.isVirtual()) continue;
// Skip base classes with trivial destructors.
const auto *Base =
cast<CXXRecordDecl>(I.getType()->getAs<RecordType>()->getDecl());
if (Base->hasTrivialDestructor()) continue;
// If we've already found a base class with a non-trivial
// destructor, give up.
if (UniqueBase) return true;
UniqueBase = Base;
}
// If we didn't find any bases with a non-trivial destructor, then
// the base destructor is actually effectively trivial, which can
// happen if it was needlessly user-defined or if there are virtual
// bases with non-trivial destructors.
if (!UniqueBase)
return true;
// If the base is at a non-zero offset, give up.
const ASTRecordLayout &ClassLayout = Context.getASTRecordLayout(Class);
if (!ClassLayout.getBaseClassOffset(UniqueBase).isZero())
return true;
// Give up if the calling conventions don't match. We could update the call,
// but it is probably not worth it.
const CXXDestructorDecl *BaseD = UniqueBase->getDestructor();
if (BaseD->getType()->getAs<FunctionType>()->getCallConv() !=
D->getType()->getAs<FunctionType>()->getCallConv())
return true;
return TryEmitDefinitionAsAlias(GlobalDecl(D, Dtor_Base),
GlobalDecl(BaseD, Dtor_Base));
}
/// Try to emit a definition as a global alias for another definition.
bool CodeGenModule::TryEmitDefinitionAsAlias(GlobalDecl AliasDecl,
GlobalDecl TargetDecl) {
if (!getCodeGenOpts().CXXCtorDtorAliases)
return true;
// The alias will use the linkage of the referent. If we can't
// support aliases with that linkage, fail.
llvm::GlobalValue::LinkageTypes Linkage = getFunctionLinkage(AliasDecl);
// We can't use an alias if the linkage is not valid for one.
if (!llvm::GlobalAlias::isValidLinkage(Linkage))
return true;
llvm::GlobalValue::LinkageTypes TargetLinkage =
getFunctionLinkage(TargetDecl);
// Check if we have it already.
StringRef MangledName = getMangledName(AliasDecl);
llvm::GlobalValue *Entry = GetGlobalValue(MangledName);
if (Entry && !Entry->isDeclaration())
return false;
if (Replacements.count(MangledName))
return false;
// Derive the type for the alias.
llvm::Type *AliasValueType = getTypes().GetFunctionType(AliasDecl);
llvm::PointerType *AliasType = AliasValueType->getPointerTo();
// Find the referent. Some aliases might require a bitcast, in
// which case the caller is responsible for ensuring the soundness
// of these semantics.
auto *Ref = cast<llvm::GlobalValue>(GetAddrOfGlobal(TargetDecl));
llvm::Constant *Aliasee = Ref;
if (Ref->getType() != AliasType)
Aliasee = llvm::ConstantExpr::getBitCast(Ref, AliasType);
// Instead of creating an alias to a linkonce_odr, replace all of the uses
// of the aliasee.
if (llvm::GlobalValue::isDiscardableIfUnused(Linkage) &&
!(TargetLinkage == llvm::GlobalValue::AvailableExternallyLinkage &&
TargetDecl.getDecl()->hasAttr<AlwaysInlineAttr>())) {
// FIXME: An extern template instantiation will create functions with
// linkage "AvailableExternally". In libc++, some classes also define
// members with attribute "AlwaysInline" and expect no reference to
// be generated. It is desirable to reenable this optimisation after
// corresponding LLVM changes.
addReplacement(MangledName, Aliasee);
return false;
}
// If we have a weak, non-discardable alias (weak, weak_odr), like an extern
// template instantiation or a dllexported class, avoid forming it on COFF.
// A COFF weak external alias cannot satisfy a normal undefined symbol
// reference from another TU. The other TU must also mark the referenced
// symbol as weak, which we cannot rely on.
if (llvm::GlobalValue::isWeakForLinker(Linkage) &&
getTriple().isOSBinFormatCOFF()) {
return true;
}
// If we don't have a definition for the destructor yet or the definition is
// available_externally, don't emit an alias. We can't emit aliases to
// declarations; that's just not how aliases work.
if (Ref->isDeclarationForLinker())
return true;
// Don't create an alias to a linker weak symbol. This avoids producing
// different COMDATs in different TUs. Another option would be to
// output the alias both for weak_odr and linkonce_odr, but that
// requires explicit comdat support in the IL.
if (llvm::GlobalValue::isWeakForLinker(TargetLinkage))
return true;
// Create the alias with no name.
auto *Alias = llvm::GlobalAlias::create(AliasValueType, 0, Linkage, "",
Aliasee, &getModule());
// Switch any previous uses to the alias.
if (Entry) {
assert(Entry->getType() == AliasType &&
"declaration exists with different type");
Alias->takeName(Entry);
Entry->replaceAllUsesWith(Alias);
Entry->eraseFromParent();
} else {
Alias->setName(MangledName);
}
// Finally, set up the alias with its proper name and attributes.
setAliasAttributes(cast<NamedDecl>(AliasDecl.getDecl()), Alias);
return false;
}
llvm::Function *CodeGenModule::codegenCXXStructor(const CXXMethodDecl *MD,
StructorType Type) {
const CGFunctionInfo &FnInfo =
getTypes().arrangeCXXStructorDeclaration(MD, Type);
auto *Fn = cast<llvm::Function>(
getAddrOfCXXStructor(MD, Type, &FnInfo, /*FnType=*/nullptr,
/*DontDefer=*/true, ForDefinition));
GlobalDecl GD;
if (const auto *DD = dyn_cast<CXXDestructorDecl>(MD)) {
GD = GlobalDecl(DD, toCXXDtorType(Type));
} else {
const auto *CD = cast<CXXConstructorDecl>(MD);
GD = GlobalDecl(CD, toCXXCtorType(Type));
}
setFunctionLinkage(GD, Fn);
setFunctionDLLStorageClass(GD, Fn);
CodeGenFunction(*this).GenerateCode(GD, Fn, FnInfo);
setFunctionDefinitionAttributes(MD, Fn);
SetLLVMFunctionAttributesForDefinition(MD, Fn);
return Fn;
}
llvm::Constant *CodeGenModule::getAddrOfCXXStructor(
const CXXMethodDecl *MD, StructorType Type, const CGFunctionInfo *FnInfo,
llvm::FunctionType *FnType, bool DontDefer,
ForDefinition_t IsForDefinition) {
GlobalDecl GD;
if (auto *CD = dyn_cast<CXXConstructorDecl>(MD)) {
GD = GlobalDecl(CD, toCXXCtorType(Type));
} else {
GD = GlobalDecl(cast<CXXDestructorDecl>(MD), toCXXDtorType(Type));
}
if (!FnType) {
if (!FnInfo)
FnInfo = &getTypes().arrangeCXXStructorDeclaration(MD, Type);
FnType = getTypes().GetFunctionType(*FnInfo);
}
return GetOrCreateLLVMFunction(
getMangledName(GD), FnType, GD, /*ForVTable=*/false, DontDefer,
/*isThunk=*/false, /*ExtraAttrs=*/llvm::AttributeList(), IsForDefinition);
}
static CGCallee BuildAppleKextVirtualCall(CodeGenFunction &CGF,
GlobalDecl GD,
llvm::Type *Ty,
const CXXRecordDecl *RD) {
assert(!CGF.CGM.getTarget().getCXXABI().isMicrosoft() &&
"No kext in Microsoft ABI");
GD = GD.getCanonicalDecl();
CodeGenModule &CGM = CGF.CGM;
llvm::Value *VTable = CGM.getCXXABI().getAddrOfVTable(RD, CharUnits());
Ty = Ty->getPointerTo()->getPointerTo();
VTable = CGF.Builder.CreateBitCast(VTable, Ty);
assert(VTable && "BuildVirtualCall = kext vtbl pointer is null");
uint64_t VTableIndex = CGM.getItaniumVTableContext().getMethodVTableIndex(GD);
const VTableLayout &VTLayout = CGM.getItaniumVTableContext().getVTableLayout(RD);
VTableLayout::AddressPointLocation AddressPoint =
VTLayout.getAddressPoint(BaseSubobject(RD, CharUnits::Zero()));
VTableIndex += VTLayout.getVTableOffset(AddressPoint.VTableIndex) +
AddressPoint.AddressPointIndex;
llvm::Value *VFuncPtr =
CGF.Builder.CreateConstInBoundsGEP1_64(VTable, VTableIndex, "vfnkxt");
llvm::Value *VFunc =
CGF.Builder.CreateAlignedLoad(VFuncPtr, CGF.PointerAlignInBytes);
CGCallee Callee(GD.getDecl(), VFunc);
return Callee;
}
/// BuildAppleKextVirtualCall - This routine supports gcc's kext ABI by
/// making an indirect call to virtual functions. It makes the call by
/// indexing into the vtable.
CGCallee
CodeGenFunction::BuildAppleKextVirtualCall(const CXXMethodDecl *MD,
NestedNameSpecifier *Qual,
llvm::Type *Ty) {
assert((Qual->getKind() == NestedNameSpecifier::TypeSpec) &&
"BuildAppleKextVirtualCall - bad Qual kind");
const Type *QTy = Qual->getAsType();
QualType T = QualType(QTy, 0);
const RecordType *RT = T->getAs<RecordType>();
assert(RT && "BuildAppleKextVirtualCall - Qual type must be record");
const auto *RD = cast<CXXRecordDecl>(RT->getDecl());
if (const auto *DD = dyn_cast<CXXDestructorDecl>(MD))
return BuildAppleKextVirtualDestructorCall(DD, Dtor_Complete, RD);
return ::BuildAppleKextVirtualCall(*this, MD, Ty, RD);
}
/// BuildAppleKextVirtualDestructorCall - This routine makes an indirect
/// vtable call for a virtual destructor.
CGCallee
CodeGenFunction::BuildAppleKextVirtualDestructorCall(
const CXXDestructorDecl *DD,
CXXDtorType Type,
const CXXRecordDecl *RD) {
assert(DD->isVirtual() && Type != Dtor_Base);
// Compute the function type we're calling.
const CGFunctionInfo &FInfo = CGM.getTypes().arrangeCXXStructorDeclaration(
DD, StructorType::Complete);
llvm::Type *Ty = CGM.getTypes().GetFunctionType(FInfo);
return ::BuildAppleKextVirtualCall(*this, GlobalDecl(DD, Type), Ty, RD);
}

View File

@@ -0,0 +1,307 @@
//===----- CGCXXABI.cpp - Interface to C++ ABIs ---------------------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This provides an abstract class for C++ code generation. Concrete subclasses
// of this implement code generation for specific C++ ABIs.
//
//===----------------------------------------------------------------------===//
#include "CGCXXABI.h"
#include "CGCleanup.h"
using namespace clang;
using namespace CodeGen;
CGCXXABI::~CGCXXABI() { }
void CGCXXABI::ErrorUnsupportedABI(CodeGenFunction &CGF, StringRef S) {
DiagnosticsEngine &Diags = CGF.CGM.getDiags();
unsigned DiagID = Diags.getCustomDiagID(DiagnosticsEngine::Error,
"cannot yet compile %0 in this ABI");
Diags.Report(CGF.getContext().getFullLoc(CGF.CurCodeDecl->getLocation()),
DiagID)
<< S;
}
bool CGCXXABI::canCopyArgument(const CXXRecordDecl *RD) const {
// We can only copy the argument if there exists at least one trivial,
// non-deleted copy or move constructor.
return RD->canPassInRegisters();
}
llvm::Constant *CGCXXABI::GetBogusMemberPointer(QualType T) {
return llvm::Constant::getNullValue(CGM.getTypes().ConvertType(T));
}
llvm::Type *
CGCXXABI::ConvertMemberPointerType(const MemberPointerType *MPT) {
return CGM.getTypes().ConvertType(CGM.getContext().getPointerDiffType());
}
CGCallee CGCXXABI::EmitLoadOfMemberFunctionPointer(
CodeGenFunction &CGF, const Expr *E, Address This,
llvm::Value *&ThisPtrForCall,
llvm::Value *MemPtr, const MemberPointerType *MPT) {
ErrorUnsupportedABI(CGF, "calls through member pointers");
ThisPtrForCall = This.getPointer();
const FunctionProtoType *FPT =
MPT->getPointeeType()->getAs<FunctionProtoType>();
const CXXRecordDecl *RD =
cast<CXXRecordDecl>(MPT->getClass()->getAs<RecordType>()->getDecl());
llvm::FunctionType *FTy = CGM.getTypes().GetFunctionType(
CGM.getTypes().arrangeCXXMethodType(RD, FPT, /*FD=*/nullptr));
llvm::Constant *FnPtr = llvm::Constant::getNullValue(FTy->getPointerTo());
return CGCallee::forDirect(FnPtr, FPT);
}
llvm::Value *
CGCXXABI::EmitMemberDataPointerAddress(CodeGenFunction &CGF, const Expr *E,
Address Base, llvm::Value *MemPtr,
const MemberPointerType *MPT) {
ErrorUnsupportedABI(CGF, "loads of member pointers");
llvm::Type *Ty = CGF.ConvertType(MPT->getPointeeType())
->getPointerTo(Base.getAddressSpace());
return llvm::Constant::getNullValue(Ty);
}
llvm::Value *CGCXXABI::EmitMemberPointerConversion(CodeGenFunction &CGF,
const CastExpr *E,
llvm::Value *Src) {
ErrorUnsupportedABI(CGF, "member function pointer conversions");
return GetBogusMemberPointer(E->getType());
}
llvm::Constant *CGCXXABI::EmitMemberPointerConversion(const CastExpr *E,
llvm::Constant *Src) {
return GetBogusMemberPointer(E->getType());
}
llvm::Value *
CGCXXABI::EmitMemberPointerComparison(CodeGenFunction &CGF,
llvm::Value *L,
llvm::Value *R,
const MemberPointerType *MPT,
bool Inequality) {
ErrorUnsupportedABI(CGF, "member function pointer comparison");
return CGF.Builder.getFalse();
}
llvm::Value *
CGCXXABI::EmitMemberPointerIsNotNull(CodeGenFunction &CGF,
llvm::Value *MemPtr,
const MemberPointerType *MPT) {
ErrorUnsupportedABI(CGF, "member function pointer null testing");
return CGF.Builder.getFalse();
}
llvm::Constant *
CGCXXABI::EmitNullMemberPointer(const MemberPointerType *MPT) {
return GetBogusMemberPointer(QualType(MPT, 0));
}
llvm::Constant *CGCXXABI::EmitMemberFunctionPointer(const CXXMethodDecl *MD) {
return GetBogusMemberPointer(CGM.getContext().getMemberPointerType(
MD->getType(), MD->getParent()->getTypeForDecl()));
}
llvm::Constant *CGCXXABI::EmitMemberDataPointer(const MemberPointerType *MPT,
CharUnits offset) {
return GetBogusMemberPointer(QualType(MPT, 0));
}
llvm::Constant *CGCXXABI::EmitMemberPointer(const APValue &MP, QualType MPT) {
return GetBogusMemberPointer(MPT);
}
bool CGCXXABI::isZeroInitializable(const MemberPointerType *MPT) {
// Fake answer.
return true;
}
void CGCXXABI::buildThisParam(CodeGenFunction &CGF, FunctionArgList &params) {
const CXXMethodDecl *MD = cast<CXXMethodDecl>(CGF.CurGD.getDecl());
// FIXME: I'm not entirely sure I like using a fake decl just for code
// generation. Maybe we can come up with a better way?
auto *ThisDecl = ImplicitParamDecl::Create(
CGM.getContext(), nullptr, MD->getLocation(),
&CGM.getContext().Idents.get("this"), MD->getThisType(CGM.getContext()),
ImplicitParamDecl::CXXThis);
params.push_back(ThisDecl);
CGF.CXXABIThisDecl = ThisDecl;
// Compute the presumed alignment of 'this', which comes down to
// whether we know it's a complete object or not.
auto &Layout = CGF.getContext().getASTRecordLayout(MD->getParent());
if (MD->getParent()->getNumVBases() == 0 || // avoid vcall in common case
MD->getParent()->hasAttr<FinalAttr>() ||
!isThisCompleteObject(CGF.CurGD)) {
CGF.CXXABIThisAlignment = Layout.getAlignment();
} else {
CGF.CXXABIThisAlignment = Layout.getNonVirtualAlignment();
}
}
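// Illustrative only (not part of the original file): why the non-virtual
// alignment is the conservative choice above. With an over-aligned virtual
// base, the complete-object alignment is not guaranteed when the method
// may run on a base subobject of some further-derived class:
struct alignas(16) AlignDemoVB { };
struct AlignDemoX : virtual AlignDemoVB {
// Inside m(), assuming 16-byte alignment of 'this' is only safe when the
// call is known to be on a complete AlignDemoX object.
void m();
};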
llvm::Value *CGCXXABI::loadIncomingCXXThis(CodeGenFunction &CGF) {
return CGF.Builder.CreateLoad(CGF.GetAddrOfLocalVar(getThisDecl(CGF)),
"this");
}
void CGCXXABI::setCXXABIThisValue(CodeGenFunction &CGF, llvm::Value *ThisPtr) {
// Initialize the 'this' slot.
assert(getThisDecl(CGF) && "no 'this' variable for function");
CGF.CXXABIThisValue = ThisPtr;
}
void CGCXXABI::EmitReturnFromThunk(CodeGenFunction &CGF,
RValue RV, QualType ResultType) {
CGF.EmitReturnOfRValue(RV, ResultType);
}
CharUnits CGCXXABI::GetArrayCookieSize(const CXXNewExpr *expr) {
if (!requiresArrayCookie(expr))
return CharUnits::Zero();
return getArrayCookieSizeImpl(expr->getAllocatedType());
}
CharUnits CGCXXABI::getArrayCookieSizeImpl(QualType elementType) {
// BOGUS: placeholder; ABIs that use array cookies override this.
return CharUnits::Zero();
}
Address CGCXXABI::InitializeArrayCookie(CodeGenFunction &CGF,
Address NewPtr,
llvm::Value *NumElements,
const CXXNewExpr *expr,
QualType ElementType) {
// Should never be called.
ErrorUnsupportedABI(CGF, "array cookie initialization");
return Address::invalid();
}
bool CGCXXABI::requiresArrayCookie(const CXXDeleteExpr *expr,
QualType elementType) {
// If the class's usual deallocation function takes two arguments,
// it needs a cookie.
if (expr->doesUsualArrayDeleteWantSize())
return true;
return elementType.isDestructedType();
}
bool CGCXXABI::requiresArrayCookie(const CXXNewExpr *expr) {
// If the class's usual deallocation function takes two arguments,
// it needs a cookie.
if (expr->doesUsualArrayDeleteWantSize())
return true;
return expr->getAllocatedType().isDestructedType();
}
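// Illustrative only (not part of the original file): a source-level case
// where both predicates above return true. A usual operator delete[] that
// takes the allocation size forces the ABI to store an element-count
// cookie so that 'delete[] p' can recompute that size:
//
//   struct Sized {
//     void operator delete[](void *Ptr, std::size_t Sz); // "wants size"
//   };
//   // 'new Sized[n]' stores n in a cookie before the array elements.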
void CGCXXABI::ReadArrayCookie(CodeGenFunction &CGF, Address ptr,
const CXXDeleteExpr *expr, QualType eltTy,
llvm::Value *&numElements,
llvm::Value *&allocPtr, CharUnits &cookieSize) {
// Derive a char* in the same address space as the pointer.
ptr = CGF.Builder.CreateElementBitCast(ptr, CGF.Int8Ty);
// If we don't need an array cookie, bail out early.
if (!requiresArrayCookie(expr, eltTy)) {
allocPtr = ptr.getPointer();
numElements = nullptr;
cookieSize = CharUnits::Zero();
return;
}
cookieSize = getArrayCookieSizeImpl(eltTy);
Address allocAddr =
CGF.Builder.CreateConstInBoundsByteGEP(ptr, -cookieSize);
allocPtr = allocAddr.getPointer();
numElements = readArrayCookieImpl(CGF, allocAddr, cookieSize);
}
llvm::Value *CGCXXABI::readArrayCookieImpl(CodeGenFunction &CGF,
Address ptr,
CharUnits cookieSize) {
ErrorUnsupportedABI(CGF, "reading a new[] cookie");
return llvm::ConstantInt::get(CGF.SizeTy, 0);
}
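// Illustrative only (not part of the original file): a minimal sketch of
// what a concrete override of readArrayCookieImpl might do, assuming the
// Itanium-style layout in which the element count occupies the last
// sizeof(size_t) bytes of the cookie. The function name is hypothetical.
static llvm::Value *readItaniumStyleArrayCookie(CodeGenFunction &CGF,
Address allocAddr,
CharUnits cookieSize) {
CharUnits SizeTSize =
CGF.getContext().getTypeSizeInChars(CGF.getContext().getSizeType());
// The count is right-justified within the cookie.
Address CountAddr =
CGF.Builder.CreateConstInBoundsByteGEP(allocAddr, cookieSize - SizeTSize);
CountAddr = CGF.Builder.CreateElementBitCast(CountAddr, CGF.SizeTy);
return CGF.Builder.CreateLoad(CountAddr, "element.count");
}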
/// Returns the adjustment, in bytes, required for the given
/// member-pointer operation. Returns null if no adjustment is
/// required.
llvm::Constant *CGCXXABI::getMemberPointerAdjustment(const CastExpr *E) {
assert(E->getCastKind() == CK_DerivedToBaseMemberPointer ||
E->getCastKind() == CK_BaseToDerivedMemberPointer);
QualType derivedType;
if (E->getCastKind() == CK_DerivedToBaseMemberPointer)
derivedType = E->getSubExpr()->getType();
else
derivedType = E->getType();
const CXXRecordDecl *derivedClass =
derivedType->castAs<MemberPointerType>()->getClass()->getAsCXXRecordDecl();
return CGM.GetNonVirtualBaseClassOffset(derivedClass,
E->path_begin(),
E->path_end());
}
CharUnits CGCXXABI::getMemberPointerPathAdjustment(const APValue &MP) {
// TODO: Store base specifiers in APValue member pointer paths so we can
// easily reuse CGM.GetNonVirtualBaseClassOffset().
const ValueDecl *MPD = MP.getMemberPointerDecl();
CharUnits ThisAdjustment = CharUnits::Zero();
ArrayRef<const CXXRecordDecl*> Path = MP.getMemberPointerPath();
bool DerivedMember = MP.isMemberPointerToDerivedMember();
const CXXRecordDecl *RD = cast<CXXRecordDecl>(MPD->getDeclContext());
for (unsigned I = 0, N = Path.size(); I != N; ++I) {
const CXXRecordDecl *Base = RD;
const CXXRecordDecl *Derived = Path[I];
if (DerivedMember)
std::swap(Base, Derived);
ThisAdjustment +=
getContext().getASTRecordLayout(Derived).getBaseClassOffset(Base);
RD = Path[I];
}
if (DerivedMember)
ThisAdjustment = -ThisAdjustment;
return ThisAdjustment;
}
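// Illustrative only (not part of the original file): a conversion whose
// adjustment the path walk above computes. Under the Itanium ABI a data
// member pointer is a byte offset, so converting across a base/derived
// step folds in the offset of the base subobject:
struct MemPtrDemoA { int a; };
struct MemPtrDemoB { int b; };
struct MemPtrDemoD : MemPtrDemoA, MemPtrDemoB { };
// The stored offset of DemoPtr is the offset of 'b' within MemPtrDemoB
// plus the offset of the MemPtrDemoB subobject within MemPtrDemoD.
static int MemPtrDemoD::*const DemoPtr = &MemPtrDemoB::b;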
llvm::BasicBlock *
CGCXXABI::EmitCtorCompleteObjectHandler(CodeGenFunction &CGF,
const CXXRecordDecl *RD) {
if (CGM.getTarget().getCXXABI().hasConstructorVariants())
llvm_unreachable("shouldn't be called in this ABI");
ErrorUnsupportedABI(CGF, "complete object detection in ctor");
return nullptr;
}
bool CGCXXABI::NeedsVTTParameter(GlobalDecl GD) {
return false;
}
llvm::CallInst *
CGCXXABI::emitTerminateForUnexpectedException(CodeGenFunction &CGF,
llvm::Value *Exn) {
// Just call std::terminate and ignore the violating exception.
return CGF.EmitNounwindRuntimeCall(CGF.CGM.getTerminateFn());
}
CatchTypeInfo CGCXXABI::getCatchAllTypeInfo() {
return CatchTypeInfo{nullptr, 0};
}
std::vector<CharUnits> CGCXXABI::getVBPtrOffsets(const CXXRecordDecl *RD) {
return std::vector<CharUnits>();
}

File diff suppressed because it is too large

View File

@ -0,0 +1 @@
38d7344572d3d32e1641f4dc42d8484baa2452c2

View File

@ -0,0 +1,308 @@
//===----- CGCall.h - Encapsulate calling convention details ----*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// These classes wrap the information about a call or function
// definition used to handle ABI compliance.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_CLANG_LIB_CODEGEN_CGCALL_H
#define LLVM_CLANG_LIB_CODEGEN_CGCALL_H
#include "CGValue.h"
#include "EHScopeStack.h"
#include "clang/AST/CanonicalType.h"
#include "clang/AST/Type.h"
#include "llvm/IR/Value.h"
// FIXME: Restructure so we don't have to expose so much stuff.
#include "ABIInfo.h"
namespace llvm {
class AttributeList;
class Function;
class Type;
class Value;
}
namespace clang {
class ASTContext;
class Decl;
class FunctionDecl;
class ObjCMethodDecl;
class VarDecl;
namespace CodeGen {
/// Abstract information about a function or function prototype.
class CGCalleeInfo {
/// \brief The function prototype of the callee.
const FunctionProtoType *CalleeProtoTy;
/// \brief The function declaration of the callee.
const Decl *CalleeDecl;
public:
explicit CGCalleeInfo() : CalleeProtoTy(nullptr), CalleeDecl(nullptr) {}
CGCalleeInfo(const FunctionProtoType *calleeProtoTy, const Decl *calleeDecl)
: CalleeProtoTy(calleeProtoTy), CalleeDecl(calleeDecl) {}
CGCalleeInfo(const FunctionProtoType *calleeProtoTy)
: CalleeProtoTy(calleeProtoTy), CalleeDecl(nullptr) {}
CGCalleeInfo(const Decl *calleeDecl)
: CalleeProtoTy(nullptr), CalleeDecl(calleeDecl) {}
const FunctionProtoType *getCalleeFunctionProtoType() const {
return CalleeProtoTy;
}
const Decl *getCalleeDecl() const { return CalleeDecl; }
};
/// All available information about a concrete callee.
class CGCallee {
enum class SpecialKind : uintptr_t {
Invalid,
Builtin,
PseudoDestructor,
Last = PseudoDestructor
};
struct BuiltinInfoStorage {
const FunctionDecl *Decl;
unsigned ID;
};
struct PseudoDestructorInfoStorage {
const CXXPseudoDestructorExpr *Expr;
};
SpecialKind KindOrFunctionPointer;
union {
CGCalleeInfo AbstractInfo;
BuiltinInfoStorage BuiltinInfo;
PseudoDestructorInfoStorage PseudoDestructorInfo;
};
explicit CGCallee(SpecialKind kind) : KindOrFunctionPointer(kind) {}
CGCallee(const FunctionDecl *builtinDecl, unsigned builtinID)
: KindOrFunctionPointer(SpecialKind::Builtin) {
BuiltinInfo.Decl = builtinDecl;
BuiltinInfo.ID = builtinID;
}
public:
CGCallee() : KindOrFunctionPointer(SpecialKind::Invalid) {}
/// Construct an ordinary callee from a function pointer. Call this
/// constructor directly only when the call is not a direct call;
/// direct calls should go through forDirect().
CGCallee(const CGCalleeInfo &abstractInfo, llvm::Value *functionPtr)
: KindOrFunctionPointer(SpecialKind(uintptr_t(functionPtr))) {
AbstractInfo = abstractInfo;
assert(functionPtr && "configuring callee without function pointer");
assert(functionPtr->getType()->isPointerTy());
assert(functionPtr->getType()->getPointerElementType()->isFunctionTy());
}
static CGCallee forBuiltin(unsigned builtinID,
const FunctionDecl *builtinDecl) {
CGCallee result(SpecialKind::Builtin);
result.BuiltinInfo.Decl = builtinDecl;
result.BuiltinInfo.ID = builtinID;
return result;
}
static CGCallee forPseudoDestructor(const CXXPseudoDestructorExpr *E) {
CGCallee result(SpecialKind::PseudoDestructor);
result.PseudoDestructorInfo.Expr = E;
return result;
}
static CGCallee forDirect(llvm::Constant *functionPtr,
const CGCalleeInfo &abstractInfo = CGCalleeInfo()) {
return CGCallee(abstractInfo, functionPtr);
}
bool isBuiltin() const {
return KindOrFunctionPointer == SpecialKind::Builtin;
}
const FunctionDecl *getBuiltinDecl() const {
assert(isBuiltin());
return BuiltinInfo.Decl;
}
unsigned getBuiltinID() const {
assert(isBuiltin());
return BuiltinInfo.ID;
}
bool isPseudoDestructor() const {
return KindOrFunctionPointer == SpecialKind::PseudoDestructor;
}
const CXXPseudoDestructorExpr *getPseudoDestructorExpr() const {
assert(isPseudoDestructor());
return PseudoDestructorInfo.Expr;
}
bool isOrdinary() const {
return uintptr_t(KindOrFunctionPointer) > uintptr_t(SpecialKind::Last);
}
const CGCalleeInfo &getAbstractInfo() const {
assert(isOrdinary());
return AbstractInfo;
}
llvm::Value *getFunctionPointer() const {
assert(isOrdinary());
return reinterpret_cast<llvm::Value*>(uintptr_t(KindOrFunctionPointer));
}
llvm::FunctionType *getFunctionType() const {
return cast<llvm::FunctionType>(
getFunctionPointer()->getType()->getPointerElementType());
}
void setFunctionPointer(llvm::Value *functionPtr) {
assert(isOrdinary());
KindOrFunctionPointer = SpecialKind(uintptr_t(functionPtr));
}
};
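// Illustrative only (not part of the original file): a standalone sketch of
// the tagging scheme CGCallee uses above. Small enumerators share storage
// with real pointers, and isOrdinary() relies on valid function pointers
// never aliasing the low tag values:
enum class DemoKind : uintptr_t { Invalid, Builtin, Last = Builtin };
struct DemoTaggedPtr {
uintptr_t Raw = uintptr_t(DemoKind::Invalid);
bool isOrdinary() const { return Raw > uintptr_t(DemoKind::Last); }
void *get() const {
assert(isOrdinary() && "not a pointer");
return reinterpret_cast<void *>(Raw);
}
};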
struct CallArg {
RValue RV;
QualType Ty;
bool NeedsCopy;
CallArg(RValue rv, QualType ty, bool needscopy)
: RV(rv), Ty(ty), NeedsCopy(needscopy)
{ }
};
/// CallArgList - Type for representing both the value and type of
/// arguments in a call.
class CallArgList :
public SmallVector<CallArg, 16> {
public:
CallArgList() : StackBase(nullptr) {}
struct Writeback {
/// The original argument. Note that the argument l-value
/// is potentially null.
LValue Source;
/// The temporary alloca.
Address Temporary;
/// A value to "use" after the writeback, or null.
llvm::Value *ToUse;
};
struct CallArgCleanup {
EHScopeStack::stable_iterator Cleanup;
/// The "is active" insertion point. This instruction is temporary and
/// will be removed after insertion.
llvm::Instruction *IsActiveIP;
};
void add(RValue rvalue, QualType type, bool needscopy = false) {
push_back(CallArg(rvalue, type, needscopy));
}
/// Add all the arguments from another CallArgList to this one. After doing
/// this, the old CallArgList retains its list of arguments, but must not
/// be used to emit a call.
void addFrom(const CallArgList &other) {
insert(end(), other.begin(), other.end());
Writebacks.insert(Writebacks.end(),
other.Writebacks.begin(), other.Writebacks.end());
CleanupsToDeactivate.insert(CleanupsToDeactivate.end(),
other.CleanupsToDeactivate.begin(),
other.CleanupsToDeactivate.end());
assert(!(StackBase && other.StackBase) && "can't merge stackbases");
if (!StackBase)
StackBase = other.StackBase;
}
void addWriteback(LValue srcLV, Address temporary,
llvm::Value *toUse) {
Writeback writeback = { srcLV, temporary, toUse };
Writebacks.push_back(writeback);
}
bool hasWritebacks() const { return !Writebacks.empty(); }
typedef llvm::iterator_range<SmallVectorImpl<Writeback>::const_iterator>
writeback_const_range;
writeback_const_range writebacks() const {
return writeback_const_range(Writebacks.begin(), Writebacks.end());
}
void addArgCleanupDeactivation(EHScopeStack::stable_iterator Cleanup,
llvm::Instruction *IsActiveIP) {
CallArgCleanup ArgCleanup;
ArgCleanup.Cleanup = Cleanup;
ArgCleanup.IsActiveIP = IsActiveIP;
CleanupsToDeactivate.push_back(ArgCleanup);
}
ArrayRef<CallArgCleanup> getCleanupsToDeactivate() const {
return CleanupsToDeactivate;
}
void allocateArgumentMemory(CodeGenFunction &CGF);
llvm::Instruction *getStackBase() const { return StackBase; }
void freeArgumentMemory(CodeGenFunction &CGF) const;
/// \brief Returns whether we're using an inalloca struct to pass arguments in
/// memory.
bool isUsingInAlloca() const { return StackBase; }
private:
SmallVector<Writeback, 1> Writebacks;
/// Deactivate these cleanups immediately before making the call. This
/// is used to cleanup objects that are owned by the callee once the call
/// occurs.
SmallVector<CallArgCleanup, 1> CleanupsToDeactivate;
/// The stacksave call. It dominates all of the argument evaluation.
llvm::CallInst *StackBase;
};
/// FunctionArgList - Type for representing both the decl and type
/// of parameters to a function. The decl must be either a
/// ParmVarDecl or ImplicitParamDecl.
class FunctionArgList : public SmallVector<const VarDecl*, 16> {
};
/// ReturnValueSlot - Contains the address where the return value of a
/// function can be stored, and whether the address is volatile or not.
class ReturnValueSlot {
llvm::PointerIntPair<llvm::Value *, 2, unsigned int> Value;
CharUnits Alignment;
// Return value slot flags
enum Flags {
IS_VOLATILE = 0x1,
IS_UNUSED = 0x2,
};
public:
ReturnValueSlot() {}
ReturnValueSlot(Address Addr, bool IsVolatile, bool IsUnused = false)
: Value(Addr.isValid() ? Addr.getPointer() : nullptr,
(IsVolatile ? IS_VOLATILE : 0) | (IsUnused ? IS_UNUSED : 0)),
Alignment(Addr.isValid() ? Addr.getAlignment() : CharUnits::Zero()) {}
bool isNull() const { return !getValue().isValid(); }
bool isVolatile() const { return Value.getInt() & IS_VOLATILE; }
Address getValue() const { return Address(Value.getPointer(), Alignment); }
bool isUnused() const { return Value.getInt() & IS_UNUSED; }
};
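// Illustrative only (not part of the original file): the flag packing above
// relies on llvm::PointerIntPair, which stores the integer in the low bits
// that the pointee's alignment guarantees to be zero, so the flags cost no
// extra storage:
//
//   llvm::PointerIntPair<int *, 2, unsigned> Packed(nullptr, /*Int=*/1);
//   // sizeof(Packed) == sizeof(int *); Packed.getInt() == 1.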
} // end namespace CodeGen
} // end namespace clang
#endif

View File

@ -0,0 +1 @@
a6915071ec1743c89fd4e660d831c33f78bfae18

File diff suppressed because it is too large

File diff suppressed because it is too large

File diff suppressed because it is too large

Some files were not shown because too many files have changed in this diff