Imported Upstream version 5.18.0.167

Former-commit-id: 289509151e0fee68a1b591a20c9f109c3c789d3a
Xamarin Public Jenkins (auto-signing)
2018-10-20 08:25:10 +00:00
parent e19d552987
commit b084638f15
28489 changed files with 184 additions and 3866856 deletions


@@ -1,476 +0,0 @@
//===-- BrainF.cpp - BrainF compiler example ------------------------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This class compiles the BrainF language into LLVM assembly.
//
// The BrainF language has 8 commands:
// Command Equivalent C Action
// ------- ------------ ------
// , *h=getchar(); Read a character from stdin, 255 on EOF
// . putchar(*h); Write a character to stdout
// - --*h; Decrement tape
// + ++*h; Increment tape
// < --h; Move head left
// > ++h; Move head right
// [ while(*h) { Start loop
// ] } End loop
//
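// For illustration (not part of the original header): the program  +++[-]
// increments the current cell three times and then loops it back down to
// zero, i.e. the equivalent C built from the table above is
//   ++*h; ++*h; ++*h; while(*h) { --*h; }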
//===----------------------------------------------------------------------===//
#include "BrainF.h"
#include "llvm/ADT/APInt.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/Constant.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GlobalValue.h"
#include "llvm/IR/GlobalVariable.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/Type.h"
#include "llvm/Support/Casting.h"
#include <cstdlib>
#include <iostream>
using namespace llvm;
//Set the constants for naming
const char *BrainF::tapereg = "tape";
const char *BrainF::headreg = "head";
const char *BrainF::label = "brainf";
const char *BrainF::testreg = "test";
Module *BrainF::parse(std::istream *in1, int mem, CompileFlags cf,
LLVMContext& Context) {
in = in1;
memtotal = mem;
comflag = cf;
header(Context);
readloop(nullptr, nullptr, nullptr, Context);
delete builder;
return module;
}
void BrainF::header(LLVMContext& C) {
module = new Module("BrainF", C);
//Function prototypes
//declare void @llvm.memset.p0i8.i32(i8 *, i8, i32, i32, i1)
Type *Tys[] = { Type::getInt8PtrTy(C), Type::getInt32Ty(C) };
Function *memset_func = Intrinsic::getDeclaration(module, Intrinsic::memset,
Tys);
//declare i32 @getchar()
getchar_func = cast<Function>(module->
getOrInsertFunction("getchar", IntegerType::getInt32Ty(C)));
//declare i32 @putchar(i32)
putchar_func = cast<Function>(module->
getOrInsertFunction("putchar", IntegerType::getInt32Ty(C),
IntegerType::getInt32Ty(C)));
//Function header
//define void @brainf()
brainf_func = cast<Function>(module->
getOrInsertFunction("brainf", Type::getVoidTy(C)));
builder = new IRBuilder<>(BasicBlock::Create(C, label, brainf_func));
//%arr = malloc i8, i32 %d
ConstantInt *val_mem = ConstantInt::get(C, APInt(32, memtotal));
BasicBlock* BB = builder->GetInsertBlock();
Type* IntPtrTy = IntegerType::getInt32Ty(C);
Type* Int8Ty = IntegerType::getInt8Ty(C);
Constant* allocsize = ConstantExpr::getSizeOf(Int8Ty);
allocsize = ConstantExpr::getTruncOrBitCast(allocsize, IntPtrTy);
ptr_arr = CallInst::CreateMalloc(BB, IntPtrTy, Int8Ty, allocsize, val_mem,
nullptr, "arr");
BB->getInstList().push_back(cast<Instruction>(ptr_arr));
//call void @llvm.memset.p0i8.i32(i8 *%arr, i8 0, i32 %d, i32 1, i1 0)
{
Value *memset_params[] = {
ptr_arr,
ConstantInt::get(C, APInt(8, 0)),
val_mem,
ConstantInt::get(C, APInt(32, 1)),
ConstantInt::get(C, APInt(1, 0))
};
CallInst *memset_call = builder->
CreateCall(memset_func, memset_params);
memset_call->setTailCall(false);
}
//%arrmax = getelementptr i8 *%arr, i32 %d
if (comflag & flag_arraybounds) {
ptr_arrmax = builder->
CreateGEP(ptr_arr, ConstantInt::get(C, APInt(32, memtotal)), "arrmax");
}
//%head.%d = getelementptr i8 *%arr, i32 %d
curhead = builder->CreateGEP(ptr_arr,
ConstantInt::get(C, APInt(32, memtotal/2)),
headreg);
//Function footer
//brainf.end:
endbb = BasicBlock::Create(C, label, brainf_func);
//call free(i8 *%arr)
endbb->getInstList().push_back(CallInst::CreateFree(ptr_arr, endbb));
//ret void
ReturnInst::Create(C, endbb);
//Error block for array out of bounds
if (comflag & flag_arraybounds)
{
//@aberrormsg = internal constant [%d x i8] c"\00"
Constant *msg_0 =
ConstantDataArray::getString(C, "Error: The head has left the tape.",
true);
GlobalVariable *aberrormsg = new GlobalVariable(
*module,
msg_0->getType(),
true,
GlobalValue::InternalLinkage,
msg_0,
"aberrormsg");
//declare i32 @puts(i8 *)
Function *puts_func = cast<Function>(module->
getOrInsertFunction("puts", IntegerType::getInt32Ty(C),
PointerType::getUnqual(IntegerType::getInt8Ty(C))));
//brainf.aberror:
aberrorbb = BasicBlock::Create(C, label, brainf_func);
//call i32 @puts(i8 *getelementptr([%d x i8] *@aberrormsg, i32 0, i32 0))
{
Constant *zero_32 = Constant::getNullValue(IntegerType::getInt32Ty(C));
Constant *gep_params[] = {
zero_32,
zero_32
};
Constant *msgptr = ConstantExpr::
getGetElementPtr(aberrormsg->getValueType(), aberrormsg, gep_params);
Value *puts_params[] = {
msgptr
};
CallInst *puts_call =
CallInst::Create(puts_func,
puts_params,
"", aberrorbb);
puts_call->setTailCall(false);
}
//br label %brainf.end
BranchInst::Create(endbb, aberrorbb);
}
}
void BrainF::readloop(PHINode *phi, BasicBlock *oldbb, BasicBlock *testbb,
LLVMContext &C) {
Symbol cursym = SYM_NONE;
int curvalue = 0;
Symbol nextsym = SYM_NONE;
int nextvalue = 0;
char c;
int loop;
int direction;
while(cursym != SYM_EOF && cursym != SYM_ENDLOOP) {
// Write out commands
switch(cursym) {
case SYM_NONE:
// Do nothing
break;
case SYM_READ:
{
//%tape.%d = call i32 @getchar()
CallInst *getchar_call =
builder->CreateCall(getchar_func, {}, tapereg);
getchar_call->setTailCall(false);
Value *tape_0 = getchar_call;
//%tape.%d = trunc i32 %tape.%d to i8
Value *tape_1 = builder->
CreateTrunc(tape_0, IntegerType::getInt8Ty(C), tapereg);
//store i8 %tape.%d, i8 *%head.%d
builder->CreateStore(tape_1, curhead);
}
break;
case SYM_WRITE:
{
//%tape.%d = load i8 *%head.%d
LoadInst *tape_0 = builder->CreateLoad(curhead, tapereg);
//%tape.%d = sext i8 %tape.%d to i32
Value *tape_1 = builder->
CreateSExt(tape_0, IntegerType::getInt32Ty(C), tapereg);
//call i32 @putchar(i32 %tape.%d)
Value *putchar_params[] = {
tape_1
};
CallInst *putchar_call = builder->
CreateCall(putchar_func,
putchar_params);
putchar_call->setTailCall(false);
}
break;
case SYM_MOVE:
{
//%head.%d = getelementptr i8 *%head.%d, i32 %d
curhead = builder->
CreateGEP(curhead, ConstantInt::get(C, APInt(32, curvalue)),
headreg);
//Error block for array out of bounds
if (comflag & flag_arraybounds)
{
//%test.%d = icmp uge i8 *%head.%d, %arrmax
Value *test_0 = builder->
CreateICmpUGE(curhead, ptr_arrmax, testreg);
//%test.%d = icmp ult i8 *%head.%d, %arr
Value *test_1 = builder->
CreateICmpULT(curhead, ptr_arr, testreg);
//%test.%d = or i1 %test.%d, %test.%d
Value *test_2 = builder->
CreateOr(test_0, test_1, testreg);
//br i1 %test.%d, label %main.%d, label %main.%d
BasicBlock *nextbb = BasicBlock::Create(C, label, brainf_func);
builder->CreateCondBr(test_2, aberrorbb, nextbb);
//main.%d:
builder->SetInsertPoint(nextbb);
}
}
break;
case SYM_CHANGE:
{
//%tape.%d = load i8 *%head.%d
LoadInst *tape_0 = builder->CreateLoad(curhead, tapereg);
//%tape.%d = add i8 %tape.%d, %d
Value *tape_1 = builder->
CreateAdd(tape_0, ConstantInt::get(C, APInt(8, curvalue)), tapereg);
//store i8 %tape.%d, i8 *%head.%d\n"
builder->CreateStore(tape_1, curhead);
}
break;
case SYM_LOOP:
{
//br label %main.%d
BasicBlock *testbb = BasicBlock::Create(C, label, brainf_func);
builder->CreateBr(testbb);
//main.%d:
BasicBlock *bb_0 = builder->GetInsertBlock();
BasicBlock *bb_1 = BasicBlock::Create(C, label, brainf_func);
builder->SetInsertPoint(bb_1);
// Make part of PHI instruction now, wait until end of loop to finish
PHINode *phi_0 =
PHINode::Create(PointerType::getUnqual(IntegerType::getInt8Ty(C)),
2, headreg, testbb);
phi_0->addIncoming(curhead, bb_0);
curhead = phi_0;
readloop(phi_0, bb_1, testbb, C);
}
break;
default:
std::cerr << "Error: Unknown symbol.\n";
abort();
break;
}
cursym = nextsym;
curvalue = nextvalue;
nextsym = SYM_NONE;
// Reading stdin loop
loop = (cursym == SYM_NONE)
|| (cursym == SYM_MOVE)
|| (cursym == SYM_CHANGE);
while(loop) {
*in>>c;
if (in->eof()) {
if (cursym == SYM_NONE) {
cursym = SYM_EOF;
} else {
nextsym = SYM_EOF;
}
loop = 0;
} else {
direction = 1;
switch(c) {
case '-':
direction = -1;
LLVM_FALLTHROUGH;
case '+':
if (cursym == SYM_CHANGE) {
curvalue += direction;
// loop = 1
} else {
if (cursym == SYM_NONE) {
cursym = SYM_CHANGE;
curvalue = direction;
// loop = 1
} else {
nextsym = SYM_CHANGE;
nextvalue = direction;
loop = 0;
}
}
break;
case '<':
direction = -1;
LLVM_FALLTHROUGH;
case '>':
if (cursym == SYM_MOVE) {
curvalue += direction;
// loop = 1
} else {
if (cursym == SYM_NONE) {
cursym = SYM_MOVE;
curvalue = direction;
// loop = 1
} else {
nextsym = SYM_MOVE;
nextvalue = direction;
loop = 0;
}
}
break;
case ',':
if (cursym == SYM_NONE) {
cursym = SYM_READ;
} else {
nextsym = SYM_READ;
}
loop = 0;
break;
case '.':
if (cursym == SYM_NONE) {
cursym = SYM_WRITE;
} else {
nextsym = SYM_WRITE;
}
loop = 0;
break;
case '[':
if (cursym == SYM_NONE) {
cursym = SYM_LOOP;
} else {
nextsym = SYM_LOOP;
}
loop = 0;
break;
case ']':
if (cursym == SYM_NONE) {
cursym = SYM_ENDLOOP;
} else {
nextsym = SYM_ENDLOOP;
}
loop = 0;
break;
// Ignore other characters
default:
break;
}
}
}
}
if (cursym == SYM_ENDLOOP) {
if (!phi) {
std::cerr << "Error: Extra ']'\n";
abort();
}
// Write loop test
{
//br label %main.%d
builder->CreateBr(testbb);
//main.%d:
//%head.%d = phi i8 *[%head.%d, %main.%d], [%head.%d, %main.%d]
//Finish phi made at beginning of loop
phi->addIncoming(curhead, builder->GetInsertBlock());
Value *head_0 = phi;
//%tape.%d = load i8 *%head.%d
LoadInst *tape_0 = new LoadInst(head_0, tapereg, testbb);
//%test.%d = icmp eq i8 %tape.%d, 0
ICmpInst *test_0 = new ICmpInst(*testbb, ICmpInst::ICMP_EQ, tape_0,
ConstantInt::get(C, APInt(8, 0)), testreg);
//br i1 %test.%d, label %main.%d, label %main.%d
BasicBlock *bb_0 = BasicBlock::Create(C, label, brainf_func);
BranchInst::Create(bb_0, oldbb, test_0, testbb);
//main.%d:
builder->SetInsertPoint(bb_0);
//%head.%d = phi i8 *[%head.%d, %main.%d]
PHINode *phi_1 = builder->
CreatePHI(PointerType::getUnqual(IntegerType::getInt8Ty(C)), 1,
headreg);
phi_1->addIncoming(head_0, testbb);
curhead = phi_1;
}
return;
}
//End of the program, so go to return block
builder->CreateBr(endbb);
if (phi) {
std::cerr << "Error: Missing ']'\n";
abort();
}
}


@@ -1,95 +0,0 @@
//===-- BrainF.h - BrainF compiler class ------------------------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This class stores the data for the BrainF compiler so it doesn't have
// to pass all of it around. The main method is parse.
//
//===----------------------------------------------------------------------===//
#ifndef BRAINF_H
#define BRAINF_H
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Module.h"
#include <istream>
using namespace llvm;
/// This class provides a parser for the BrainF language.
/// The class itself is made to store values during
/// parsing so they don't have to be passed around
/// as much.
class BrainF {
public:
/// Options for how BrainF should compile
enum CompileFlags {
flag_off = 0,
flag_arraybounds = 1
};
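// Illustrative only (not part of the original header): bounds checking is
// requested by OR-ing the flags together, as the driver does:
//   CompileFlags cf = CompileFlags(flag_off | flag_arraybounds);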
/// This is the main method. It parses BrainF from in1
/// and returns the module with a function
/// void brainf()
/// containing the resulting code.
/// On error, it calls abort.
/// The caller must delete the returned module.
Module *parse(std::istream *in1, int mem, CompileFlags cf,
LLVMContext& C);
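// A minimal calling sketch (illustrative; it mirrors what BrainFDriver.cpp
// does, with the 65536-byte tape size taken from that driver):
//   BrainF bf;
//   std::unique_ptr<Module> M(bf.parse(&std::cin, 65536, BrainF::flag_off, C));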
protected:
/// The different symbols in the BrainF language
enum Symbol {
SYM_NONE,
SYM_READ,
SYM_WRITE,
SYM_MOVE,
SYM_CHANGE,
SYM_LOOP,
SYM_ENDLOOP,
SYM_EOF
};
/// Names of the different parts of the language.
/// tapereg is used for reading and writing the tape.
/// headreg is used for the position of the head.
/// label is used for the labels for the BasicBlocks.
/// testreg is used for testing the loop exit condition.
static const char *tapereg;
static const char *headreg;
static const char *label;
static const char *testreg;
/// Emit the brainf function preamble and other fixed pieces of code.
void header(LLVMContext& C);
/// The main loop for parsing. It calls itself recursively
/// to handle the depth of nesting of "[]".
void readloop(PHINode *phi, BasicBlock *oldbb,
BasicBlock *testbb, LLVMContext &Context);
/// Constants during parsing
int memtotal;
CompileFlags comflag;
std::istream *in;
Module *module;
Function *brainf_func;
Function *getchar_func;
Function *putchar_func;
Value *ptr_arr;
Value *ptr_arrmax;
BasicBlock *endbb;
BasicBlock *aberrorbb;
/// Variables
IRBuilder<> *builder;
Value *curhead;
};
#endif // BRAINF_H


@@ -1,184 +0,0 @@
//===-- BrainFDriver.cpp - BrainF compiler driver -------------------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This program converts the BrainF language into LLVM assembly,
// which it can then run using the JIT or output as BitCode.
//
// This implementation has a tape of 65536 bytes,
// with the head starting in the middle.
// Range checking is off by default, so be careful.
// It can be enabled with -abc.
//
// Use:
// ./BrainF -jit prog.bf #Run program now
// ./BrainF -jit -abc prog.bf #Run program now safely
// ./BrainF prog.bf #Write as BitCode
//
// lli prog.bf.bc #Run generated BitCode
//
//===----------------------------------------------------------------------===//
#include "BrainF.h"
#include "llvm/ADT/APInt.h"
#include "llvm/Bitcode/BitcodeWriter.h"
#include "llvm/ExecutionEngine/ExecutionEngine.h"
#include "llvm/ExecutionEngine/GenericValue.h"
#include "llvm/ExecutionEngine/MCJIT.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/Value.h"
#include "llvm/IR/Verifier.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/FileSystem.h"
#include "llvm/Support/ManagedStatic.h"
#include "llvm/Support/TargetSelect.h"
#include "llvm/Support/raw_ostream.h"
#include <algorithm>
#include <cstdlib>
#include <fstream>
#include <iostream>
#include <memory>
#include <string>
#include <system_error>
#include <vector>
using namespace llvm;
//Command line options
static cl::opt<std::string>
InputFilename(cl::Positional, cl::desc("<input brainf>"));
static cl::opt<std::string>
OutputFilename("o", cl::desc("Output filename"), cl::value_desc("filename"));
static cl::opt<bool>
ArrayBoundsChecking("abc", cl::desc("Enable array bounds checking"));
static cl::opt<bool>
JIT("jit", cl::desc("Run program Just-In-Time"));
//Add a main function so the module can be fully compiled
void addMainFunction(Module *mod) {
//define i32 @main(i32 %argc, i8 **%argv)
Function *main_func = cast<Function>(mod->
getOrInsertFunction("main", IntegerType::getInt32Ty(mod->getContext()),
IntegerType::getInt32Ty(mod->getContext()),
PointerType::getUnqual(PointerType::getUnqual(
IntegerType::getInt8Ty(mod->getContext())))));
{
Function::arg_iterator args = main_func->arg_begin();
Value *arg_0 = &*args++;
arg_0->setName("argc");
Value *arg_1 = &*args++;
arg_1->setName("argv");
}
//main.0:
BasicBlock *bb = BasicBlock::Create(mod->getContext(), "main.0", main_func);
//call void @brainf()
{
CallInst *brainf_call = CallInst::Create(mod->getFunction("brainf"),
"", bb);
brainf_call->setTailCall(false);
}
//ret i32 0
ReturnInst::Create(mod->getContext(),
ConstantInt::get(mod->getContext(), APInt(32, 0)), bb);
}
int main(int argc, char **argv) {
cl::ParseCommandLineOptions(argc, argv, " BrainF compiler\n");
LLVMContext Context;
if (InputFilename == "") {
errs() << "Error: You must specify the filename of the program to "
"be compiled. Use --help to see the options.\n";
abort();
}
//Get the output stream
raw_ostream *out = &outs();
if (!JIT) {
if (OutputFilename == "") {
std::string base = InputFilename;
if (InputFilename == "-") { base = "a"; }
// Use default filename.
OutputFilename = base+".bc";
}
if (OutputFilename != "-") {
std::error_code EC;
out = new raw_fd_ostream(OutputFilename, EC, sys::fs::F_None);
}
}
//Get the input stream
std::istream *in = &std::cin;
if (InputFilename != "-")
in = new std::ifstream(InputFilename.c_str());
//Gather the compile flags
BrainF::CompileFlags cf = BrainF::flag_off;
if (ArrayBoundsChecking)
cf = BrainF::CompileFlags(cf | BrainF::flag_arraybounds);
//Read the BrainF program
BrainF bf;
std::unique_ptr<Module> Mod(bf.parse(in, 65536, cf, Context)); // 64 KiB
if (in != &std::cin)
delete in;
addMainFunction(Mod.get());
//Verify generated code
if (verifyModule(*Mod)) {
errs() << "Error: module failed verification. This shouldn't happen.\n";
abort();
}
//Write it out
if (JIT) {
InitializeNativeTarget();
InitializeNativeTargetAsmPrinter();
outs() << "------- Running JIT -------\n";
Module &M = *Mod;
ExecutionEngine *ee = EngineBuilder(std::move(Mod)).create();
if (!ee) {
errs() << "Error: execution engine creation failed.\n";
abort();
}
std::vector<GenericValue> args;
Function *brainf_func = M.getFunction("brainf");
GenericValue gv = ee->runFunction(brainf_func, args);
// Generated code calls putchar, and output is not guaranteed without fflush.
// A better place for the fflush(stdout) call would be the generated code, but
// that is unmanageable because the linkage name of stdout depends on the
// stdlib implementation.
fflush(stdout);
} else {
WriteBitcodeToFile(Mod.get(), *out);
}
//Clean up
if (out != &outs())
delete out;
llvm_shutdown();
return 0;
}


@@ -1,14 +0,0 @@
set(LLVM_LINK_COMPONENTS
BitWriter
Core
ExecutionEngine
MC
MCJIT
Support
nativecodegen
)
add_llvm_example(BrainF
BrainF.cpp
BrainFDriver.cpp
)


@@ -1,13 +0,0 @@
add_subdirectory(BrainF)
add_subdirectory(Fibonacci)
add_subdirectory(HowToUseJIT)
add_subdirectory(Kaleidoscope)
add_subdirectory(ModuleMaker)
if(LLVM_ENABLE_EH AND (NOT WIN32) AND (NOT "${LLVM_NATIVE_ARCH}" STREQUAL "ARM"))
add_subdirectory(ExceptionDemo)
endif()
if( HAVE_PTHREAD_H )
add_subdirectory(ParallelJIT)
endif( HAVE_PTHREAD_H )


@@ -1,21 +0,0 @@
set(LLVM_LINK_COMPONENTS
Core
ExecutionEngine
MC
MCJIT
RuntimeDyld
Support
Target
nativecodegen
)
# Enable EH and RTTI for this demo
if(NOT LLVM_ENABLE_EH)
message(FATAL_ERROR "ExceptionDemo requires EH (LLVM_ENABLE_EH=ON).")
endif()
add_llvm_example(ExceptionDemo
ExceptionDemo.cpp
)
export_executable_symbols(ExceptionDemo)

File diff suppressed because it is too large.


@@ -1,13 +0,0 @@
set(LLVM_LINK_COMPONENTS
Core
ExecutionEngine
Interpreter
MC
MCJIT
Support
nativecodegen
)
add_llvm_example(Fibonacci
fibonacci.cpp
)


@@ -1,148 +0,0 @@
//===--- examples/Fibonacci/fibonacci.cpp - An example use of the JIT -----===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This small program provides an example of how to quickly build a small
// module containing a Fibonacci function and execute it with the JIT.
//
// The goal of this snippet is to create in memory an LLVM module
// consisting of one function as follows:
//
// int fib(int x) {
// if(x<=2) return 1;
// return fib(x-1)+fib(x-2);
// }
//
// Once we have this, we compile the module via the JIT, then execute the
// `fib' function and return the result to a driver, i.e. to a "host program".
//
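// As a worked check (added for illustration, not in the original): with the
// default argument n = 24 used by main() below, fib(24) evaluates to 46368.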
//===----------------------------------------------------------------------===//
#include "llvm/ADT/APInt.h"
#include "llvm/IR/Verifier.h"
#include "llvm/ExecutionEngine/ExecutionEngine.h"
#include "llvm/ExecutionEngine/GenericValue.h"
#include "llvm/ExecutionEngine/MCJIT.h"
#include "llvm/IR/Argument.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/Type.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/TargetSelect.h"
#include "llvm/Support/raw_ostream.h"
#include <algorithm>
#include <cstdlib>
#include <memory>
#include <string>
#include <vector>
using namespace llvm;
static Function *CreateFibFunction(Module *M, LLVMContext &Context) {
// Create the fib function and insert it into module M. This function is said
// to return an int and take an int parameter.
Function *FibF =
cast<Function>(M->getOrInsertFunction("fib", Type::getInt32Ty(Context),
Type::getInt32Ty(Context)));
// Add a basic block to the function.
BasicBlock *BB = BasicBlock::Create(Context, "EntryBlock", FibF);
// Get pointers to the constants.
Value *One = ConstantInt::get(Type::getInt32Ty(Context), 1);
Value *Two = ConstantInt::get(Type::getInt32Ty(Context), 2);
// Get a pointer to the integer argument of the fib function...
Argument *ArgX = &*FibF->arg_begin(); // Get the arg.
ArgX->setName("AnArg"); // Give it a nice symbolic name for fun.
// Create the block that returns the base-case value 1.
BasicBlock *RetBB = BasicBlock::Create(Context, "return", FibF);
// Create the block that handles the recursive case.
BasicBlock* RecurseBB = BasicBlock::Create(Context, "recurse", FibF);
// Create the "if (arg <= 2) branch to the return block" test.
Value *CondInst = new ICmpInst(*BB, ICmpInst::ICMP_SLE, ArgX, Two, "cond");
BranchInst::Create(RetBB, RecurseBB, CondInst, BB);
// Create: ret int 1
ReturnInst::Create(Context, One, RetBB);
// create fib(x-1)
Value *Sub = BinaryOperator::CreateSub(ArgX, One, "arg", RecurseBB);
CallInst *CallFibX1 = CallInst::Create(FibF, Sub, "fibx1", RecurseBB);
CallFibX1->setTailCall();
// create fib(x-2)
Sub = BinaryOperator::CreateSub(ArgX, Two, "arg", RecurseBB);
CallInst *CallFibX2 = CallInst::Create(FibF, Sub, "fibx2", RecurseBB);
CallFibX2->setTailCall();
// fib(x-1)+fib(x-2)
Value *Sum = BinaryOperator::CreateAdd(CallFibX1, CallFibX2,
"addresult", RecurseBB);
// Create the return instruction and add it to the basic block
ReturnInst::Create(Context, Sum, RecurseBB);
return FibF;
}
int main(int argc, char **argv) {
int n = argc > 1 ? atol(argv[1]) : 24;
InitializeNativeTarget();
InitializeNativeTargetAsmPrinter();
LLVMContext Context;
// Create a module to put our function into.
std::unique_ptr<Module> Owner(new Module("test", Context));
Module *M = Owner.get();
// We are about to create the "fib" function:
Function *FibF = CreateFibFunction(M, Context);
// Now we are going to create the JIT.
std::string errStr;
ExecutionEngine *EE =
EngineBuilder(std::move(Owner))
.setErrorStr(&errStr)
.create();
if (!EE) {
errs() << argv[0] << ": Failed to construct ExecutionEngine: " << errStr
<< "\n";
return 1;
}
errs() << "verifying... ";
if (verifyModule(*M)) {
errs() << argv[0] << ": Error constructing function!\n";
return 1;
}
errs() << "OK\n";
errs() << "We just constructed this LLVM module:\n\n---------\n" << *M;
errs() << "---------\nstarting fibonacci(" << n << ") with JIT...\n";
// Call the Fibonacci function with argument n:
std::vector<GenericValue> Args(1);
Args[0].IntVal = APInt(32, n);
GenericValue GV = EE->runFunction(FibF, Args);
// Import the result of execution.
outs() << "Result: " << GV.IntVal << "\n";
return 0;
}


@@ -1,11 +0,0 @@
set(LLVM_LINK_COMPONENTS
Core
ExecutionEngine
Interpreter
Support
nativecodegen
)
add_llvm_example(HowToUseJIT
HowToUseJIT.cpp
)


@@ -1,137 +0,0 @@
//===-- examples/HowToUseJIT/HowToUseJIT.cpp - An example use of the JIT --===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This small program provides an example of how to quickly build a small
// module with two functions and execute it with the JIT.
//
// Goal:
// The goal of this snippet is to create in memory
// an LLVM module consisting of two functions as follows:
//
// int add1(int x) {
// return x+1;
// }
//
// int foo() {
// return add1(10);
// }
//
// then compile the module via the JIT, then execute the `foo'
// function and return the result to a driver, i.e. to a "host program".
//
// Some remarks and questions:
//
// - could we invoke some code using anonymous functions too?
// e.g. evaluate "foo()+foo()" without fear that a temporary
// function name conflicts with some real existing function name?
//
//===----------------------------------------------------------------------===//
#include "llvm/ADT/STLExtras.h"
#include "llvm/ExecutionEngine/ExecutionEngine.h"
#include "llvm/ExecutionEngine/GenericValue.h"
#include "llvm/IR/Argument.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/Type.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/ManagedStatic.h"
#include "llvm/Support/TargetSelect.h"
#include "llvm/Support/raw_ostream.h"
#include <algorithm>
#include <cassert>
#include <memory>
#include <vector>
using namespace llvm;
int main() {
InitializeNativeTarget();
LLVMContext Context;
// Create a module to put our function into.
std::unique_ptr<Module> Owner = make_unique<Module>("test", Context);
Module *M = Owner.get();
// Create the add1 function entry and insert this entry into module M. The
// function will have a return type of "int" and take an argument of "int".
Function *Add1F =
cast<Function>(M->getOrInsertFunction("add1", Type::getInt32Ty(Context),
Type::getInt32Ty(Context)));
// Add a basic block to the function. As before, it automatically inserts
// because of the last argument.
BasicBlock *BB = BasicBlock::Create(Context, "EntryBlock", Add1F);
// Create a basic block builder with default parameters. The builder will
// automatically append instructions to the basic block `BB'.
IRBuilder<> builder(BB);
// Get a pointer to the constant `1'.
Value *One = builder.getInt32(1);
// Get a pointer to the integer argument of the add1 function...
assert(Add1F->arg_begin() != Add1F->arg_end()); // Make sure there's an arg
Argument *ArgX = &*Add1F->arg_begin(); // Get the arg
ArgX->setName("AnArg"); // Give it a nice symbolic name for fun.
// Create the add instruction, inserting it into the end of BB.
Value *Add = builder.CreateAdd(One, ArgX);
// Create the return instruction and add it to the basic block
builder.CreateRet(Add);
// Now, function add1 is ready.
// Now we're going to create function `foo', which returns an int and takes no
// arguments.
Function *FooF =
cast<Function>(M->getOrInsertFunction("foo", Type::getInt32Ty(Context)));
// Add a basic block to the FooF function.
BB = BasicBlock::Create(Context, "EntryBlock", FooF);
// Tell the basic block builder to attach itself to the new basic block
builder.SetInsertPoint(BB);
// Get pointer to the constant `10'.
Value *Ten = builder.getInt32(10);
// Pass Ten to the call to Add1F
CallInst *Add1CallRes = builder.CreateCall(Add1F, Ten);
Add1CallRes->setTailCall(true);
// Create the return instruction and add it to the basic block.
builder.CreateRet(Add1CallRes);
// Now we create the JIT.
ExecutionEngine* EE = EngineBuilder(std::move(Owner)).create();
outs() << "We just constructed this LLVM module:\n\n" << *M;
outs() << "\n\nRunning foo: ";
outs().flush();
// Call the `foo' function with no arguments:
std::vector<GenericValue> noargs;
GenericValue gv = EE->runFunction(FooF, noargs);
// Import result of execution:
outs() << "Result: " << gv.IntVal << "\n";
delete EE;
llvm_shutdown();
return 0;
}


@@ -1,8 +0,0 @@
add_subdirectory(Chapter1)
add_subdirectory(Chapter2)
add_subdirectory(Chapter3)
add_subdirectory(Chapter4)
if (NOT WIN32)
add_subdirectory(Chapter5)
endif()


@@ -1,17 +0,0 @@
set(LLVM_LINK_COMPONENTS
Analysis
Core
ExecutionEngine
InstCombine
Object
RuntimeDyld
ScalarOpts
Support
native
)
add_kaleidoscope_chapter(BuildingAJIT-Ch1
toy.cpp
)
export_executable_symbols(BuildingAJIT-Ch1)


@@ -1,101 +0,0 @@
//===- KaleidoscopeJIT.h - A simple JIT for Kaleidoscope --------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// Contains a simple JIT definition for use in the kaleidoscope tutorials.
//
//===----------------------------------------------------------------------===//
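// A minimal usage sketch (illustrative only; the real client is the
// chapter's toy.cpp, and the module M below is assumed to be a
// std::unique_ptr<Module> prepared elsewhere):
//
//   KaleidoscopeJIT TheJIT;
//   auto H = TheJIT.addModule(std::move(M));
//   if (auto Sym = TheJIT.findSymbol("foo"))
//     JITTargetAddress Addr = cantFail(Sym.getAddress());
//   TheJIT.removeModule(H);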
#ifndef LLVM_EXECUTIONENGINE_ORC_KALEIDOSCOPEJIT_H
#define LLVM_EXECUTIONENGINE_ORC_KALEIDOSCOPEJIT_H
#include "llvm/ADT/STLExtras.h"
#include "llvm/ExecutionEngine/ExecutionEngine.h"
#include "llvm/ExecutionEngine/JITSymbol.h"
#include "llvm/ExecutionEngine/RTDyldMemoryManager.h"
#include "llvm/ExecutionEngine/SectionMemoryManager.h"
#include "llvm/ExecutionEngine/Orc/CompileUtils.h"
#include "llvm/ExecutionEngine/Orc/IRCompileLayer.h"
#include "llvm/ExecutionEngine/Orc/LambdaResolver.h"
#include "llvm/ExecutionEngine/Orc/RTDyldObjectLinkingLayer.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Mangler.h"
#include "llvm/Support/DynamicLibrary.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetMachine.h"
#include <algorithm>
#include <memory>
#include <string>
#include <vector>
namespace llvm {
namespace orc {
class KaleidoscopeJIT {
private:
std::unique_ptr<TargetMachine> TM;
const DataLayout DL;
RTDyldObjectLinkingLayer ObjectLayer;
IRCompileLayer<decltype(ObjectLayer), SimpleCompiler> CompileLayer;
public:
using ModuleHandle = decltype(CompileLayer)::ModuleHandleT;
KaleidoscopeJIT()
: TM(EngineBuilder().selectTarget()), DL(TM->createDataLayout()),
ObjectLayer([]() { return std::make_shared<SectionMemoryManager>(); }),
CompileLayer(ObjectLayer, SimpleCompiler(*TM)) {
llvm::sys::DynamicLibrary::LoadLibraryPermanently(nullptr);
}
TargetMachine &getTargetMachine() { return *TM; }
ModuleHandle addModule(std::unique_ptr<Module> M) {
// Build our symbol resolver:
// Lambda 1: Look back into the JIT itself to find symbols that are part of
// the same "logical dylib".
// Lambda 2: Search for external symbols in the host process.
auto Resolver = createLambdaResolver(
[&](const std::string &Name) {
if (auto Sym = CompileLayer.findSymbol(Name, false))
return Sym;
return JITSymbol(nullptr);
},
[](const std::string &Name) {
if (auto SymAddr =
RTDyldMemoryManager::getSymbolAddressInProcess(Name))
return JITSymbol(SymAddr, JITSymbolFlags::Exported);
return JITSymbol(nullptr);
});
// Add the module to the JIT with the resolver we created above and a newly
// created SectionMemoryManager.
return cantFail(CompileLayer.addModule(std::move(M),
std::move(Resolver)));
}
JITSymbol findSymbol(const std::string Name) {
std::string MangledName;
raw_string_ostream MangledNameStream(MangledName);
Mangler::getNameWithPrefix(MangledNameStream, Name, DL);
return CompileLayer.findSymbol(MangledNameStream.str(), true);
}
JITTargetAddress getSymbolAddress(const std::string Name) {
return cantFail(findSymbol(Name).getAddress());
}
void removeModule(ModuleHandle H) {
cantFail(CompileLayer.removeModule(H));
}
};
} // end namespace orc
} // end namespace llvm
#endif // LLVM_EXECUTIONENGINE_ORC_KALEIDOSCOPEJIT_H

File diff suppressed because it is too large.


@@ -1,17 +0,0 @@
set(LLVM_LINK_COMPONENTS
Analysis
Core
ExecutionEngine
InstCombine
Object
RuntimeDyld
ScalarOpts
Support
native
)
add_kaleidoscope_chapter(BuildingAJIT-Ch2
toy.cpp
)
export_executable_symbols(BuildingAJIT-Ch2)


@@ -1,130 +0,0 @@
//===- KaleidoscopeJIT.h - A simple JIT for Kaleidoscope --------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// Contains a simple JIT definition for use in the kaleidoscope tutorials.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_EXECUTIONENGINE_ORC_KALEIDOSCOPEJIT_H
#define LLVM_EXECUTIONENGINE_ORC_KALEIDOSCOPEJIT_H
#include "llvm/ADT/STLExtras.h"
#include "llvm/ExecutionEngine/ExecutionEngine.h"
#include "llvm/ExecutionEngine/JITSymbol.h"
#include "llvm/ExecutionEngine/RTDyldMemoryManager.h"
#include "llvm/ExecutionEngine/SectionMemoryManager.h"
#include "llvm/ExecutionEngine/Orc/CompileUtils.h"
#include "llvm/ExecutionEngine/Orc/IRCompileLayer.h"
#include "llvm/ExecutionEngine/Orc/IRTransformLayer.h"
#include "llvm/ExecutionEngine/Orc/LambdaResolver.h"
#include "llvm/ExecutionEngine/Orc/RTDyldObjectLinkingLayer.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/LegacyPassManager.h"
#include "llvm/IR/Mangler.h"
#include "llvm/Support/DynamicLibrary.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Transforms/Scalar.h"
#include "llvm/Transforms/Scalar/GVN.h"
#include <algorithm>
#include <memory>
#include <string>
#include <vector>
namespace llvm {
namespace orc {
class KaleidoscopeJIT {
private:
std::unique_ptr<TargetMachine> TM;
const DataLayout DL;
RTDyldObjectLinkingLayer ObjectLayer;
IRCompileLayer<decltype(ObjectLayer), SimpleCompiler> CompileLayer;
using OptimizeFunction =
std::function<std::shared_ptr<Module>(std::shared_ptr<Module>)>;
IRTransformLayer<decltype(CompileLayer), OptimizeFunction> OptimizeLayer;
public:
using ModuleHandle = decltype(OptimizeLayer)::ModuleHandleT;
KaleidoscopeJIT()
: TM(EngineBuilder().selectTarget()), DL(TM->createDataLayout()),
ObjectLayer([]() { return std::make_shared<SectionMemoryManager>(); }),
CompileLayer(ObjectLayer, SimpleCompiler(*TM)),
OptimizeLayer(CompileLayer,
[this](std::shared_ptr<Module> M) {
return optimizeModule(std::move(M));
}) {
llvm::sys::DynamicLibrary::LoadLibraryPermanently(nullptr);
}
TargetMachine &getTargetMachine() { return *TM; }
ModuleHandle addModule(std::unique_ptr<Module> M) {
// Build our symbol resolver:
// Lambda 1: Look back into the JIT itself to find symbols that are part of
// the same "logical dylib".
// Lambda 2: Search for external symbols in the host process.
auto Resolver = createLambdaResolver(
[&](const std::string &Name) {
if (auto Sym = OptimizeLayer.findSymbol(Name, false))
return Sym;
return JITSymbol(nullptr);
},
[](const std::string &Name) {
if (auto SymAddr =
RTDyldMemoryManager::getSymbolAddressInProcess(Name))
return JITSymbol(SymAddr, JITSymbolFlags::Exported);
return JITSymbol(nullptr);
});
// Add the module to the JIT with the resolver we created above and a newly
// created SectionMemoryManager.
return cantFail(OptimizeLayer.addModule(std::move(M),
std::move(Resolver)));
}
JITSymbol findSymbol(const std::string Name) {
std::string MangledName;
raw_string_ostream MangledNameStream(MangledName);
Mangler::getNameWithPrefix(MangledNameStream, Name, DL);
return OptimizeLayer.findSymbol(MangledNameStream.str(), true);
}
void removeModule(ModuleHandle H) {
cantFail(OptimizeLayer.removeModule(H));
}
private:
std::shared_ptr<Module> optimizeModule(std::shared_ptr<Module> M) {
// Create a function pass manager.
auto FPM = llvm::make_unique<legacy::FunctionPassManager>(M.get());
// Add some optimizations.
FPM->add(createInstructionCombiningPass());
FPM->add(createReassociatePass());
FPM->add(createGVNPass());
FPM->add(createCFGSimplificationPass());
FPM->doInitialization();
// Run the optimizations over all functions in the module being added to
// the JIT.
for (auto &F : *M)
FPM->run(F);
return M;
}
};
} // end namespace orc
} // end namespace llvm
#endif // LLVM_EXECUTIONENGINE_ORC_KALEIDOSCOPEJIT_H

File diff suppressed because it is too large.


@@ -1,19 +0,0 @@
set(LLVM_LINK_COMPONENTS
Analysis
Core
ExecutionEngine
InstCombine
Object
OrcJIT
RuntimeDyld
ScalarOpts
Support
TransformUtils
native
)
add_kaleidoscope_chapter(BuildingAJIT-Ch3
toy.cpp
)
export_executable_symbols(BuildingAJIT-Ch3)


@@ -1,142 +0,0 @@
//===- KaleidoscopeJIT.h - A simple JIT for Kaleidoscope --------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// Contains a simple JIT definition for use in the kaleidoscope tutorials.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_EXECUTIONENGINE_ORC_KALEIDOSCOPEJIT_H
#define LLVM_EXECUTIONENGINE_ORC_KALEIDOSCOPEJIT_H
#include "llvm/ADT/STLExtras.h"
#include "llvm/ExecutionEngine/ExecutionEngine.h"
#include "llvm/ExecutionEngine/JITSymbol.h"
#include "llvm/ExecutionEngine/RTDyldMemoryManager.h"
#include "llvm/ExecutionEngine/RuntimeDyld.h"
#include "llvm/ExecutionEngine/SectionMemoryManager.h"
#include "llvm/ExecutionEngine/Orc/CompileOnDemandLayer.h"
#include "llvm/ExecutionEngine/Orc/CompileUtils.h"
#include "llvm/ExecutionEngine/Orc/IRCompileLayer.h"
#include "llvm/ExecutionEngine/Orc/IRTransformLayer.h"
#include "llvm/ExecutionEngine/Orc/LambdaResolver.h"
#include "llvm/ExecutionEngine/Orc/RTDyldObjectLinkingLayer.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/LegacyPassManager.h"
#include "llvm/IR/Mangler.h"
#include "llvm/Support/DynamicLibrary.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Transforms/Scalar.h"
#include "llvm/Transforms/Scalar/GVN.h"
#include <algorithm>
#include <memory>
#include <set>
#include <string>
#include <vector>
namespace llvm {
namespace orc {
class KaleidoscopeJIT {
private:
std::unique_ptr<TargetMachine> TM;
const DataLayout DL;
RTDyldObjectLinkingLayer ObjectLayer;
IRCompileLayer<decltype(ObjectLayer), SimpleCompiler> CompileLayer;
using OptimizeFunction =
std::function<std::shared_ptr<Module>(std::shared_ptr<Module>)>;
IRTransformLayer<decltype(CompileLayer), OptimizeFunction> OptimizeLayer;
std::unique_ptr<JITCompileCallbackManager> CompileCallbackManager;
CompileOnDemandLayer<decltype(OptimizeLayer)> CODLayer;
public:
using ModuleHandle = decltype(CODLayer)::ModuleHandleT;
KaleidoscopeJIT()
: TM(EngineBuilder().selectTarget()), DL(TM->createDataLayout()),
ObjectLayer([]() { return std::make_shared<SectionMemoryManager>(); }),
CompileLayer(ObjectLayer, SimpleCompiler(*TM)),
OptimizeLayer(CompileLayer,
[this](std::shared_ptr<Module> M) {
return optimizeModule(std::move(M));
}),
CompileCallbackManager(
orc::createLocalCompileCallbackManager(TM->getTargetTriple(), 0)),
CODLayer(OptimizeLayer,
[](Function &F) { return std::set<Function*>({&F}); },
*CompileCallbackManager,
orc::createLocalIndirectStubsManagerBuilder(
TM->getTargetTriple())) {
llvm::sys::DynamicLibrary::LoadLibraryPermanently(nullptr);
}
TargetMachine &getTargetMachine() { return *TM; }
ModuleHandle addModule(std::unique_ptr<Module> M) {
// Build our symbol resolver:
// Lambda 1: Look back into the JIT itself to find symbols that are part of
// the same "logical dylib".
// Lambda 2: Search for external symbols in the host process.
auto Resolver = createLambdaResolver(
[&](const std::string &Name) {
if (auto Sym = CODLayer.findSymbol(Name, false))
return Sym;
return JITSymbol(nullptr);
},
[](const std::string &Name) {
if (auto SymAddr =
RTDyldMemoryManager::getSymbolAddressInProcess(Name))
return JITSymbol(SymAddr, JITSymbolFlags::Exported);
return JITSymbol(nullptr);
});
// Add the module to the JIT with the resolver we created above and a newly
// created SectionMemoryManager.
return cantFail(CODLayer.addModule(std::move(M), std::move(Resolver)));
}
JITSymbol findSymbol(const std::string Name) {
std::string MangledName;
raw_string_ostream MangledNameStream(MangledName);
Mangler::getNameWithPrefix(MangledNameStream, Name, DL);
return CODLayer.findSymbol(MangledNameStream.str(), true);
}
void removeModule(ModuleHandle H) {
cantFail(CODLayer.removeModule(H));
}
private:
std::shared_ptr<Module> optimizeModule(std::shared_ptr<Module> M) {
// Create a function pass manager.
auto FPM = llvm::make_unique<legacy::FunctionPassManager>(M.get());
// Add some optimizations.
FPM->add(createInstructionCombiningPass());
FPM->add(createReassociatePass());
FPM->add(createGVNPass());
FPM->add(createCFGSimplificationPass());
FPM->doInitialization();
// Run the optimizations over all functions in the module being added to
// the JIT.
for (auto &F : *M)
FPM->run(F);
return M;
}
};
} // end namespace orc
} // end namespace llvm
#endif // LLVM_EXECUTIONENGINE_ORC_KALEIDOSCOPEJIT_H

Some files were not shown because too many files have changed in this diff.