Imported Upstream version 5.18.0.167

Former-commit-id: 289509151e0fee68a1b591a20c9f109c3c789d3a
Xamarin Public Jenkins (auto-signing)
2018-10-20 08:25:10 +00:00
parent e19d552987
commit b084638f15
28489 changed files with 184 additions and 3866856 deletions


@@ -1,24 +0,0 @@
set(LLVM_LINK_COMPONENTS
AsmPrinter
CodeGen
Core
MC
SelectionDAG
Support
Target
)
set(CodeGenSources
DIEHashTest.cpp
LowLevelTypeTest.cpp
MachineInstrBundleIteratorTest.cpp
MachineInstrTest.cpp
MachineOperandTest.cpp
ScalableVectorMVTsTest.cpp
)
add_llvm_unittest(CodeGenTests
${CodeGenSources}
)
add_subdirectory(GlobalISel)

File diff suppressed because it is too large


@@ -1,8 +0,0 @@
set(LLVM_LINK_COMPONENTS
GlobalISel
CodeGen
)
add_llvm_unittest(GlobalISelTests
LegalizerInfoTest.cpp
)


@@ -1,198 +0,0 @@
//===- llvm/unittest/CodeGen/GlobalISel/LegalizerInfoTest.cpp -------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
#include "llvm/CodeGen/GlobalISel/LegalizerInfo.h"
#include "llvm/CodeGen/TargetOpcodes.h"
#include "gtest/gtest.h"
using namespace llvm;
// Define a couple of pretty printers to help debugging when things go wrong.
namespace llvm {
std::ostream &
operator<<(std::ostream &OS, const llvm::LegalizerInfo::LegalizeAction Act) {
switch (Act) {
case LegalizerInfo::Lower: OS << "Lower"; break;
case LegalizerInfo::Legal: OS << "Legal"; break;
case LegalizerInfo::NarrowScalar: OS << "NarrowScalar"; break;
case LegalizerInfo::WidenScalar: OS << "WidenScalar"; break;
case LegalizerInfo::FewerElements: OS << "FewerElements"; break;
case LegalizerInfo::MoreElements: OS << "MoreElements"; break;
case LegalizerInfo::Libcall: OS << "Libcall"; break;
case LegalizerInfo::Custom: OS << "Custom"; break;
case LegalizerInfo::Unsupported: OS << "Unsupported"; break;
case LegalizerInfo::NotFound: OS << "NotFound";
}
return OS;
}
std::ostream &
operator<<(std::ostream &OS, const llvm::LLT Ty) {
std::string Repr;
raw_string_ostream SS{Repr};
Ty.print(SS);
OS << SS.str();
return OS;
}
}
namespace {
TEST(LegalizerInfoTest, ScalarRISC) {
using namespace TargetOpcode;
LegalizerInfo L;
// Typical RISCy set of operations based on AArch64.
for (unsigned Op : {G_ADD, G_SUB}) {
for (unsigned Size : {32, 64})
L.setAction({Op, 0, LLT::scalar(Size)}, LegalizerInfo::Legal);
L.setLegalizeScalarToDifferentSizeStrategy(
Op, 0, LegalizerInfo::widenToLargerTypesAndNarrowToLargest);
}
L.computeTables();
for (unsigned opcode : {G_ADD, G_SUB}) {
// Check we infer the correct types and actually do what we're told.
ASSERT_EQ(L.getAction({opcode, LLT::scalar(8)}),
std::make_pair(LegalizerInfo::WidenScalar, LLT::scalar(32)));
ASSERT_EQ(L.getAction({opcode, LLT::scalar(16)}),
std::make_pair(LegalizerInfo::WidenScalar, LLT::scalar(32)));
ASSERT_EQ(L.getAction({opcode, LLT::scalar(32)}),
std::make_pair(LegalizerInfo::Legal, LLT::scalar(32)));
ASSERT_EQ(L.getAction({opcode, LLT::scalar(64)}),
std::make_pair(LegalizerInfo::Legal, LLT::scalar(64)));
// Make sure the default for over-sized types applies.
ASSERT_EQ(L.getAction({opcode, LLT::scalar(128)}),
std::make_pair(LegalizerInfo::NarrowScalar, LLT::scalar(64)));
// Make sure we also handle unusual sizes
ASSERT_EQ(L.getAction({opcode, LLT::scalar(1)}),
std::make_pair(LegalizerInfo::WidenScalar, LLT::scalar(32)));
ASSERT_EQ(L.getAction({opcode, LLT::scalar(31)}),
std::make_pair(LegalizerInfo::WidenScalar, LLT::scalar(32)));
ASSERT_EQ(L.getAction({opcode, LLT::scalar(33)}),
std::make_pair(LegalizerInfo::WidenScalar, LLT::scalar(64)));
ASSERT_EQ(L.getAction({opcode, LLT::scalar(63)}),
std::make_pair(LegalizerInfo::WidenScalar, LLT::scalar(64)));
ASSERT_EQ(L.getAction({opcode, LLT::scalar(65)}),
std::make_pair(LegalizerInfo::NarrowScalar, LLT::scalar(64)));
}
}
TEST(LegalizerInfoTest, VectorRISC) {
using namespace TargetOpcode;
LegalizerInfo L;
// Typical RISCy set of operations based on ARM.
L.setAction({G_ADD, LLT::vector(8, 8)}, LegalizerInfo::Legal);
L.setAction({G_ADD, LLT::vector(16, 8)}, LegalizerInfo::Legal);
L.setAction({G_ADD, LLT::vector(4, 16)}, LegalizerInfo::Legal);
L.setAction({G_ADD, LLT::vector(8, 16)}, LegalizerInfo::Legal);
L.setAction({G_ADD, LLT::vector(2, 32)}, LegalizerInfo::Legal);
L.setAction({G_ADD, LLT::vector(4, 32)}, LegalizerInfo::Legal);
L.setLegalizeVectorElementToDifferentSizeStrategy(
G_ADD, 0, LegalizerInfo::widenToLargerTypesUnsupportedOtherwise);
L.setAction({G_ADD, 0, LLT::scalar(32)}, LegalizerInfo::Legal);
L.computeTables();
// Check we infer the correct types and actually do what we're told for some
// simple cases.
ASSERT_EQ(L.getAction({G_ADD, LLT::vector(8, 8)}),
std::make_pair(LegalizerInfo::Legal, LLT::vector(8, 8)));
ASSERT_EQ(L.getAction({G_ADD, LLT::vector(8, 7)}),
std::make_pair(LegalizerInfo::WidenScalar, LLT::vector(8, 8)));
ASSERT_EQ(L.getAction({G_ADD, LLT::vector(2, 8)}),
std::make_pair(LegalizerInfo::MoreElements, LLT::vector(8, 8)));
ASSERT_EQ(L.getAction({G_ADD, LLT::vector(8, 32)}),
std::make_pair(LegalizerInfo::FewerElements, LLT::vector(4, 32)));
// Check a few non-power-of-2 sizes:
ASSERT_EQ(L.getAction({G_ADD, LLT::vector(3, 3)}),
std::make_pair(LegalizerInfo::WidenScalar, LLT::vector(3, 8)));
ASSERT_EQ(L.getAction({G_ADD, LLT::vector(3, 8)}),
std::make_pair(LegalizerInfo::MoreElements, LLT::vector(8, 8)));
}
TEST(LegalizerInfoTest, MultipleTypes) {
using namespace TargetOpcode;
LegalizerInfo L;
LLT p0 = LLT::pointer(0, 64);
LLT s64 = LLT::scalar(64);
// Typical RISCy set of operations based on AArch64.
L.setAction({G_PTRTOINT, 0, s64}, LegalizerInfo::Legal);
L.setAction({G_PTRTOINT, 1, p0}, LegalizerInfo::Legal);
L.setLegalizeScalarToDifferentSizeStrategy(
G_PTRTOINT, 0, LegalizerInfo::widenToLargerTypesAndNarrowToLargest);
L.computeTables();
// Check we infer the correct types and actually do what we're told.
ASSERT_EQ(L.getAction({G_PTRTOINT, 0, s64}),
std::make_pair(LegalizerInfo::Legal, s64));
ASSERT_EQ(L.getAction({G_PTRTOINT, 1, p0}),
std::make_pair(LegalizerInfo::Legal, p0));
// Make sure we also handle unusual sizes
ASSERT_EQ(L.getAction({G_PTRTOINT, 0, LLT::scalar(65)}),
std::make_pair(LegalizerInfo::NarrowScalar, s64));
ASSERT_EQ(L.getAction({G_PTRTOINT, 1, LLT::pointer(0, 32)}),
std::make_pair(LegalizerInfo::Unsupported, LLT::pointer(0, 32)));
}
TEST(LegalizerInfoTest, MultipleSteps) {
using namespace TargetOpcode;
LegalizerInfo L;
LLT s32 = LLT::scalar(32);
LLT s64 = LLT::scalar(64);
L.setLegalizeScalarToDifferentSizeStrategy(
G_UREM, 0, LegalizerInfo::widenToLargerTypesUnsupportedOtherwise);
L.setAction({G_UREM, 0, s32}, LegalizerInfo::Lower);
L.setAction({G_UREM, 0, s64}, LegalizerInfo::Lower);
L.computeTables();
ASSERT_EQ(L.getAction({G_UREM, LLT::scalar(16)}),
std::make_pair(LegalizerInfo::WidenScalar, LLT::scalar(32)));
ASSERT_EQ(L.getAction({G_UREM, LLT::scalar(32)}),
std::make_pair(LegalizerInfo::Lower, LLT::scalar(32)));
}
TEST(LegalizerInfoTest, SizeChangeStrategy) {
using namespace TargetOpcode;
LegalizerInfo L;
for (unsigned Size : {1, 8, 16, 32})
L.setAction({G_UREM, 0, LLT::scalar(Size)}, LegalizerInfo::Legal);
L.setLegalizeScalarToDifferentSizeStrategy(
G_UREM, 0, LegalizerInfo::widenToLargerTypesUnsupportedOtherwise);
L.computeTables();
// Check we infer the correct types and actually do what we're told.
for (unsigned Size : {1, 8, 16, 32}) {
ASSERT_EQ(L.getAction({G_UREM, LLT::scalar(Size)}),
std::make_pair(LegalizerInfo::Legal, LLT::scalar(Size)));
}
ASSERT_EQ(L.getAction({G_UREM, LLT::scalar(2)}),
std::make_pair(LegalizerInfo::WidenScalar, LLT::scalar(8)));
ASSERT_EQ(L.getAction({G_UREM, LLT::scalar(7)}),
std::make_pair(LegalizerInfo::WidenScalar, LLT::scalar(8)));
ASSERT_EQ(L.getAction({G_UREM, LLT::scalar(9)}),
std::make_pair(LegalizerInfo::WidenScalar, LLT::scalar(16)));
ASSERT_EQ(L.getAction({G_UREM, LLT::scalar(17)}),
std::make_pair(LegalizerInfo::WidenScalar, LLT::scalar(32)));
ASSERT_EQ(L.getAction({G_UREM, LLT::scalar(31)}),
std::make_pair(LegalizerInfo::WidenScalar, LLT::scalar(32)));
ASSERT_EQ(L.getAction({G_UREM, LLT::scalar(33)}),
std::make_pair(LegalizerInfo::Unsupported, LLT::scalar(33)));
}
}


@@ -1,154 +0,0 @@
//===- llvm/unittest/CodeGen/GlobalISel/LowLevelTypeTest.cpp --------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
#include "llvm/CodeGen/LowLevelType.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Type.h"
#include "gtest/gtest.h"
using namespace llvm;
// Define a pretty printer to help debugging when things go wrong.
namespace llvm {
std::ostream &
operator<<(std::ostream &OS, const llvm::LLT Ty) {
std::string Repr;
raw_string_ostream SS{Repr};
Ty.print(SS);
OS << SS.str();
return OS;
}
}
namespace {
TEST(LowLevelTypeTest, Scalar) {
LLVMContext C;
DataLayout DL("");
for (unsigned S : {1U, 17U, 32U, 64U, 0xfffffU}) {
const LLT Ty = LLT::scalar(S);
// Test kind.
ASSERT_TRUE(Ty.isValid());
ASSERT_TRUE(Ty.isScalar());
ASSERT_FALSE(Ty.isPointer());
ASSERT_FALSE(Ty.isVector());
// Test sizes.
EXPECT_EQ(S, Ty.getSizeInBits());
EXPECT_EQ(S, Ty.getScalarSizeInBits());
// Test equality operators.
EXPECT_TRUE(Ty == Ty);
EXPECT_FALSE(Ty != Ty);
// Test Type->LLT conversion.
Type *IRTy = IntegerType::get(C, S);
EXPECT_EQ(Ty, getLLTForType(*IRTy, DL));
}
}
TEST(LowLevelTypeTest, Vector) {
LLVMContext C;
DataLayout DL("");
for (unsigned S : {1U, 17U, 32U, 64U, 0xfffU}) {
for (uint16_t Elts : {2U, 3U, 4U, 32U, 0xffU}) {
const LLT STy = LLT::scalar(S);
const LLT VTy = LLT::vector(Elts, S);
// Test the alternative vector().
{
const LLT VSTy = LLT::vector(Elts, STy);
EXPECT_EQ(VTy, VSTy);
}
// Test getElementType().
EXPECT_EQ(STy, VTy.getElementType());
// Test kind.
ASSERT_TRUE(VTy.isValid());
ASSERT_TRUE(VTy.isVector());
ASSERT_FALSE(VTy.isScalar());
ASSERT_FALSE(VTy.isPointer());
// Test sizes.
EXPECT_EQ(S * Elts, VTy.getSizeInBits());
EXPECT_EQ(S, VTy.getScalarSizeInBits());
EXPECT_EQ(Elts, VTy.getNumElements());
// Test equality operators.
EXPECT_TRUE(VTy == VTy);
EXPECT_FALSE(VTy != VTy);
// Test inequality operators on a different kind of type.
EXPECT_NE(VTy, STy);
// Test Type->LLT conversion.
Type *IRSTy = IntegerType::get(C, S);
Type *IRTy = VectorType::get(IRSTy, Elts);
EXPECT_EQ(VTy, getLLTForType(*IRTy, DL));
}
}
}
TEST(LowLevelTypeTest, Pointer) {
LLVMContext C;
DataLayout DL("");
for (unsigned AS : {0U, 1U, 127U, 0xffffU}) {
const LLT Ty = LLT::pointer(AS, DL.getPointerSizeInBits(AS));
const LLT VTy = LLT::vector(4, Ty);
// Test kind.
ASSERT_TRUE(Ty.isValid());
ASSERT_TRUE(Ty.isPointer());
ASSERT_FALSE(Ty.isScalar());
ASSERT_FALSE(Ty.isVector());
ASSERT_TRUE(VTy.isValid());
ASSERT_TRUE(VTy.isVector());
ASSERT_TRUE(VTy.getElementType().isPointer());
// Test addressspace.
EXPECT_EQ(AS, Ty.getAddressSpace());
EXPECT_EQ(AS, VTy.getElementType().getAddressSpace());
// Test equality operators.
EXPECT_TRUE(Ty == Ty);
EXPECT_FALSE(Ty != Ty);
EXPECT_TRUE(VTy == VTy);
EXPECT_FALSE(VTy != VTy);
// Test Type->LLT conversion.
Type *IRTy = PointerType::get(IntegerType::get(C, 8), AS);
EXPECT_EQ(Ty, getLLTForType(*IRTy, DL));
Type *IRVTy =
VectorType::get(PointerType::get(IntegerType::get(C, 8), AS), 4);
EXPECT_EQ(VTy, getLLTForType(*IRVTy, DL));
}
}
TEST(LowLevelTypeTest, Invalid) {
const LLT Ty;
ASSERT_FALSE(Ty.isValid());
ASSERT_FALSE(Ty.isScalar());
ASSERT_FALSE(Ty.isPointer());
ASSERT_FALSE(Ty.isVector());
}
}


@@ -1,197 +0,0 @@
//===- MachineInstrBundleIteratorTest.cpp ---------------------------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
#include "llvm/CodeGen/MachineInstrBundleIterator.h"
#include "llvm/ADT/ilist_node.h"
#include "gtest/gtest.h"
using namespace llvm;
namespace {
struct MyBundledInstr
: public ilist_node<MyBundledInstr, ilist_sentinel_tracking<true>> {
bool isBundledWithPred() const { return true; }
bool isBundledWithSucc() const { return true; }
};
typedef MachineInstrBundleIterator<MyBundledInstr> bundled_iterator;
typedef MachineInstrBundleIterator<const MyBundledInstr> const_bundled_iterator;
typedef MachineInstrBundleIterator<MyBundledInstr, true>
reverse_bundled_iterator;
typedef MachineInstrBundleIterator<const MyBundledInstr, true>
const_reverse_bundled_iterator;
#ifdef GTEST_HAS_DEATH_TEST
#ifndef NDEBUG
TEST(MachineInstrBundleIteratorTest, CheckForBundles) {
MyBundledInstr MBI;
auto I = MBI.getIterator();
auto RI = I.getReverse();
// Confirm that MBI is always considered bundled.
EXPECT_TRUE(MBI.isBundledWithPred());
EXPECT_TRUE(MBI.isBundledWithSucc());
// Confirm that iterators check in their constructor for bundled iterators.
EXPECT_DEATH((void)static_cast<bundled_iterator>(I),
"not legal to initialize");
EXPECT_DEATH((void)static_cast<bundled_iterator>(MBI),
"not legal to initialize");
EXPECT_DEATH((void)static_cast<bundled_iterator>(&MBI),
"not legal to initialize");
EXPECT_DEATH((void)static_cast<const_bundled_iterator>(I),
"not legal to initialize");
EXPECT_DEATH((void)static_cast<const_bundled_iterator>(MBI),
"not legal to initialize");
EXPECT_DEATH((void)static_cast<const_bundled_iterator>(&MBI),
"not legal to initialize");
EXPECT_DEATH((void)static_cast<reverse_bundled_iterator>(RI),
"not legal to initialize");
EXPECT_DEATH((void)static_cast<reverse_bundled_iterator>(MBI),
"not legal to initialize");
EXPECT_DEATH((void)static_cast<reverse_bundled_iterator>(&MBI),
"not legal to initialize");
EXPECT_DEATH((void)static_cast<const_reverse_bundled_iterator>(RI),
"not legal to initialize");
EXPECT_DEATH((void)static_cast<const_reverse_bundled_iterator>(MBI),
"not legal to initialize");
EXPECT_DEATH((void)static_cast<const_reverse_bundled_iterator>(&MBI),
"not legal to initialize");
}
#endif
#endif
TEST(MachineInstrBundleIteratorTest, CompareToBundledMI) {
MyBundledInstr MBI;
const MyBundledInstr &CMBI = MBI;
bundled_iterator I;
const_bundled_iterator CI;
// Confirm that MBI is always considered bundled.
EXPECT_TRUE(MBI.isBundledWithPred());
EXPECT_TRUE(MBI.isBundledWithSucc());
// These invocations will crash when !NDEBUG if a conversion is taking place.
// These checks confirm that comparison operators don't use any conversion
// operators.
ASSERT_FALSE(MBI == I);
ASSERT_FALSE(&MBI == I);
ASSERT_FALSE(CMBI == I);
ASSERT_FALSE(&CMBI == I);
ASSERT_FALSE(I == MBI);
ASSERT_FALSE(I == &MBI);
ASSERT_FALSE(I == CMBI);
ASSERT_FALSE(I == &CMBI);
ASSERT_FALSE(MBI == CI);
ASSERT_FALSE(&MBI == CI);
ASSERT_FALSE(CMBI == CI);
ASSERT_FALSE(&CMBI == CI);
ASSERT_FALSE(CI == MBI);
ASSERT_FALSE(CI == &MBI);
ASSERT_FALSE(CI == CMBI);
ASSERT_FALSE(CI == &CMBI);
ASSERT_FALSE(MBI.getIterator() == I);
ASSERT_FALSE(CMBI.getIterator() == I);
ASSERT_FALSE(I == MBI.getIterator());
ASSERT_FALSE(I == CMBI.getIterator());
ASSERT_FALSE(MBI.getIterator() == CI);
ASSERT_FALSE(CMBI.getIterator() == CI);
ASSERT_FALSE(CI == MBI.getIterator());
ASSERT_FALSE(CI == CMBI.getIterator());
ASSERT_TRUE(MBI != I);
ASSERT_TRUE(&MBI != I);
ASSERT_TRUE(CMBI != I);
ASSERT_TRUE(&CMBI != I);
ASSERT_TRUE(I != MBI);
ASSERT_TRUE(I != &MBI);
ASSERT_TRUE(I != CMBI);
ASSERT_TRUE(I != &CMBI);
ASSERT_TRUE(MBI != CI);
ASSERT_TRUE(&MBI != CI);
ASSERT_TRUE(CMBI != CI);
ASSERT_TRUE(&CMBI != CI);
ASSERT_TRUE(CI != MBI);
ASSERT_TRUE(CI != &MBI);
ASSERT_TRUE(CI != CMBI);
ASSERT_TRUE(CI != &CMBI);
ASSERT_TRUE(MBI.getIterator() != I);
ASSERT_TRUE(CMBI.getIterator() != I);
ASSERT_TRUE(I != MBI.getIterator());
ASSERT_TRUE(I != CMBI.getIterator());
ASSERT_TRUE(MBI.getIterator() != CI);
ASSERT_TRUE(CMBI.getIterator() != CI);
ASSERT_TRUE(CI != MBI.getIterator());
ASSERT_TRUE(CI != CMBI.getIterator());
}
struct MyUnbundledInstr
: ilist_node<MyUnbundledInstr, ilist_sentinel_tracking<true>> {
bool isBundledWithPred() const { return false; }
bool isBundledWithSucc() const { return false; }
};
typedef MachineInstrBundleIterator<MyUnbundledInstr> unbundled_iterator;
typedef MachineInstrBundleIterator<const MyUnbundledInstr>
const_unbundled_iterator;
typedef MachineInstrBundleIterator<MyUnbundledInstr, true>
reverse_unbundled_iterator;
typedef MachineInstrBundleIterator<const MyUnbundledInstr, true>
const_reverse_unbundled_iterator;
TEST(MachineInstrBundleIteratorTest, ReverseConstructor) {
simple_ilist<MyUnbundledInstr, ilist_sentinel_tracking<true>> L;
const auto &CL = L;
MyUnbundledInstr A, B;
L.insert(L.end(), A);
L.insert(L.end(), B);
// Save typing.
typedef MachineInstrBundleIterator<MyUnbundledInstr> iterator;
typedef MachineInstrBundleIterator<MyUnbundledInstr, true> reverse_iterator;
typedef MachineInstrBundleIterator<const MyUnbundledInstr> const_iterator;
typedef MachineInstrBundleIterator<const MyUnbundledInstr, true>
const_reverse_iterator;
// Convert to bundle iterators.
auto begin = [&]() -> iterator { return L.begin(); };
auto end = [&]() -> iterator { return L.end(); };
auto rbegin = [&]() -> reverse_iterator { return L.rbegin(); };
auto rend = [&]() -> reverse_iterator { return L.rend(); };
auto cbegin = [&]() -> const_iterator { return CL.begin(); };
auto cend = [&]() -> const_iterator { return CL.end(); };
auto crbegin = [&]() -> const_reverse_iterator { return CL.rbegin(); };
auto crend = [&]() -> const_reverse_iterator { return CL.rend(); };
// Check conversion values.
EXPECT_EQ(begin(), iterator(rend()));
EXPECT_EQ(++begin(), iterator(++rbegin()));
EXPECT_EQ(end(), iterator(rbegin()));
EXPECT_EQ(rbegin(), reverse_iterator(end()));
EXPECT_EQ(++rbegin(), reverse_iterator(++begin()));
EXPECT_EQ(rend(), reverse_iterator(begin()));
// Check const iterator constructors.
EXPECT_EQ(cbegin(), const_iterator(rend()));
EXPECT_EQ(cbegin(), const_iterator(crend()));
EXPECT_EQ(crbegin(), const_reverse_iterator(end()));
EXPECT_EQ(crbegin(), const_reverse_iterator(cend()));
// Confirm lack of implicit conversions.
static_assert(!std::is_convertible<iterator, reverse_iterator>::value,
"unexpected implicit conversion");
static_assert(!std::is_convertible<reverse_iterator, iterator>::value,
"unexpected implicit conversion");
static_assert(
!std::is_convertible<const_iterator, const_reverse_iterator>::value,
"unexpected implicit conversion");
static_assert(
!std::is_convertible<const_reverse_iterator, const_iterator>::value,
"unexpected implicit conversion");
}
} // end namespace


@@ -1,247 +0,0 @@
//===- MachineInstrTest.cpp -----------------------------------------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineModuleInfo.h"
#include "llvm/CodeGen/TargetFrameLowering.h"
#include "llvm/CodeGen/TargetInstrInfo.h"
#include "llvm/CodeGen/TargetLowering.h"
#include "llvm/CodeGen/TargetSubtargetInfo.h"
#include "llvm/Support/TargetRegistry.h"
#include "llvm/Support/TargetSelect.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetOptions.h"
#include "gtest/gtest.h"
using namespace llvm;
namespace {
// Add a few Bogus backend classes so we can create MachineInstrs without
// depending on a real target.
class BogusTargetLowering : public TargetLowering {
public:
BogusTargetLowering(TargetMachine &TM) : TargetLowering(TM) {}
};
class BogusFrameLowering : public TargetFrameLowering {
public:
BogusFrameLowering()
: TargetFrameLowering(TargetFrameLowering::StackGrowsDown, 4, 4) {}
void emitPrologue(MachineFunction &MF,
MachineBasicBlock &MBB) const override {}
void emitEpilogue(MachineFunction &MF,
MachineBasicBlock &MBB) const override {}
bool hasFP(const MachineFunction &MF) const override { return false; }
};
class BogusSubtarget : public TargetSubtargetInfo {
public:
BogusSubtarget(TargetMachine &TM)
: TargetSubtargetInfo(Triple(""), "", "", {}, {}, nullptr, nullptr,
nullptr, nullptr, nullptr, nullptr, nullptr),
FL(), TL(TM) {}
~BogusSubtarget() override {}
const TargetFrameLowering *getFrameLowering() const override { return &FL; }
const TargetLowering *getTargetLowering() const override { return &TL; }
const TargetInstrInfo *getInstrInfo() const override { return &TII; }
private:
BogusFrameLowering FL;
BogusTargetLowering TL;
TargetInstrInfo TII;
};
class BogusTargetMachine : public LLVMTargetMachine {
public:
BogusTargetMachine()
: LLVMTargetMachine(Target(), "", Triple(""), "", "", TargetOptions(),
Reloc::Static, CodeModel::Small, CodeGenOpt::Default),
ST(*this) {}
~BogusTargetMachine() override {}
const TargetSubtargetInfo *getSubtargetImpl(const Function &) const override {
return &ST;
}
private:
BogusSubtarget ST;
};
std::unique_ptr<BogusTargetMachine> createTargetMachine() {
return llvm::make_unique<BogusTargetMachine>();
}
std::unique_ptr<MachineFunction> createMachineFunction() {
LLVMContext Ctx;
Module M("Module", Ctx);
auto Type = FunctionType::get(Type::getVoidTy(Ctx), false);
auto F = Function::Create(Type, GlobalValue::ExternalLinkage, "Test", &M);
auto TM = createTargetMachine();
unsigned FunctionNum = 42;
MachineModuleInfo MMI(TM.get());
const TargetSubtargetInfo &STI = *TM->getSubtargetImpl(*F);
return llvm::make_unique<MachineFunction>(*F, *TM, STI, FunctionNum, MMI);
}
// This test makes sure that MachineInstr::isIdenticalTo handles Defs correctly
// for various combinations of IgnoreDefs, and also that it is symmetrical.
TEST(IsIdenticalToTest, DifferentDefs) {
auto MF = createMachineFunction();
unsigned short NumOps = 2;
unsigned char NumDefs = 1;
MCOperandInfo OpInfo[] = {
{0, 0, MCOI::OPERAND_REGISTER, 0},
{0, 1 << MCOI::OptionalDef, MCOI::OPERAND_REGISTER, 0}};
MCInstrDesc MCID = {
0, NumOps, NumDefs, 0, 0, 1ULL << MCID::HasOptionalDef,
0, nullptr, nullptr, OpInfo, 0, nullptr};
// Create two MIs with different virtual reg defs and the same uses.
unsigned VirtualDef1 = -42; // The value doesn't matter, but the sign does.
unsigned VirtualDef2 = -43;
unsigned VirtualUse = -44;
auto MI1 = MF->CreateMachineInstr(MCID, DebugLoc());
MI1->addOperand(*MF, MachineOperand::CreateReg(VirtualDef1, /*isDef*/ true));
MI1->addOperand(*MF, MachineOperand::CreateReg(VirtualUse, /*isDef*/ false));
auto MI2 = MF->CreateMachineInstr(MCID, DebugLoc());
MI2->addOperand(*MF, MachineOperand::CreateReg(VirtualDef2, /*isDef*/ true));
MI2->addOperand(*MF, MachineOperand::CreateReg(VirtualUse, /*isDef*/ false));
// Check that they are identical when we ignore virtual register defs, but not
// when we check defs.
ASSERT_FALSE(MI1->isIdenticalTo(*MI2, MachineInstr::CheckDefs));
ASSERT_FALSE(MI2->isIdenticalTo(*MI1, MachineInstr::CheckDefs));
ASSERT_TRUE(MI1->isIdenticalTo(*MI2, MachineInstr::IgnoreVRegDefs));
ASSERT_TRUE(MI2->isIdenticalTo(*MI1, MachineInstr::IgnoreVRegDefs));
// Create two MIs with different virtual reg defs, and a def or use of a
// sentinel register.
unsigned SentinelReg = 0;
auto MI3 = MF->CreateMachineInstr(MCID, DebugLoc());
MI3->addOperand(*MF, MachineOperand::CreateReg(VirtualDef1, /*isDef*/ true));
MI3->addOperand(*MF, MachineOperand::CreateReg(SentinelReg, /*isDef*/ true));
auto MI4 = MF->CreateMachineInstr(MCID, DebugLoc());
MI4->addOperand(*MF, MachineOperand::CreateReg(VirtualDef2, /*isDef*/ true));
MI4->addOperand(*MF, MachineOperand::CreateReg(SentinelReg, /*isDef*/ false));
// Check that they are never identical.
ASSERT_FALSE(MI3->isIdenticalTo(*MI4, MachineInstr::CheckDefs));
ASSERT_FALSE(MI4->isIdenticalTo(*MI3, MachineInstr::CheckDefs));
ASSERT_FALSE(MI3->isIdenticalTo(*MI4, MachineInstr::IgnoreVRegDefs));
ASSERT_FALSE(MI4->isIdenticalTo(*MI3, MachineInstr::IgnoreVRegDefs));
}
// Check that MachineInstrExpressionTrait::isEqual is symmetric and in sync with
// MachineInstrExpressionTrait::getHashValue
void checkHashAndIsEqualMatch(MachineInstr *MI1, MachineInstr *MI2) {
bool IsEqual1 = MachineInstrExpressionTrait::isEqual(MI1, MI2);
bool IsEqual2 = MachineInstrExpressionTrait::isEqual(MI2, MI1);
ASSERT_EQ(IsEqual1, IsEqual2);
auto Hash1 = MachineInstrExpressionTrait::getHashValue(MI1);
auto Hash2 = MachineInstrExpressionTrait::getHashValue(MI2);
ASSERT_EQ(IsEqual1, Hash1 == Hash2);
}
// This test makes sure that MachineInstrExpressionTrait::isEqual is in sync
// with MachineInstrExpressionTrait::getHashValue.
TEST(MachineInstrExpressionTraitTest, IsEqualAgreesWithGetHashValue) {
auto MF = createMachineFunction();
unsigned short NumOps = 2;
unsigned char NumDefs = 1;
MCOperandInfo OpInfo[] = {
{0, 0, MCOI::OPERAND_REGISTER, 0},
{0, 1 << MCOI::OptionalDef, MCOI::OPERAND_REGISTER, 0}};
MCInstrDesc MCID = {
0, NumOps, NumDefs, 0, 0, 1ULL << MCID::HasOptionalDef,
0, nullptr, nullptr, OpInfo, 0, nullptr};
// Define a series of instructions with different kinds of operands and make
// sure that the hash function is consistent with isEqual for various
// combinations of them.
unsigned VirtualDef1 = -42;
unsigned VirtualDef2 = -43;
unsigned VirtualReg = -44;
unsigned SentinelReg = 0;
unsigned PhysicalReg = 45;
auto VD1VU = MF->CreateMachineInstr(MCID, DebugLoc());
VD1VU->addOperand(*MF,
MachineOperand::CreateReg(VirtualDef1, /*isDef*/ true));
VD1VU->addOperand(*MF,
MachineOperand::CreateReg(VirtualReg, /*isDef*/ false));
auto VD2VU = MF->CreateMachineInstr(MCID, DebugLoc());
VD2VU->addOperand(*MF,
MachineOperand::CreateReg(VirtualDef2, /*isDef*/ true));
VD2VU->addOperand(*MF,
MachineOperand::CreateReg(VirtualReg, /*isDef*/ false));
auto VD1SU = MF->CreateMachineInstr(MCID, DebugLoc());
VD1SU->addOperand(*MF,
MachineOperand::CreateReg(VirtualDef1, /*isDef*/ true));
VD1SU->addOperand(*MF,
MachineOperand::CreateReg(SentinelReg, /*isDef*/ false));
auto VD1SD = MF->CreateMachineInstr(MCID, DebugLoc());
VD1SD->addOperand(*MF,
MachineOperand::CreateReg(VirtualDef1, /*isDef*/ true));
VD1SD->addOperand(*MF,
MachineOperand::CreateReg(SentinelReg, /*isDef*/ true));
auto VD2PU = MF->CreateMachineInstr(MCID, DebugLoc());
VD2PU->addOperand(*MF,
MachineOperand::CreateReg(VirtualDef2, /*isDef*/ true));
VD2PU->addOperand(*MF,
MachineOperand::CreateReg(PhysicalReg, /*isDef*/ false));
auto VD2PD = MF->CreateMachineInstr(MCID, DebugLoc());
VD2PD->addOperand(*MF,
MachineOperand::CreateReg(VirtualDef2, /*isDef*/ true));
VD2PD->addOperand(*MF,
MachineOperand::CreateReg(PhysicalReg, /*isDef*/ true));
checkHashAndIsEqualMatch(VD1VU, VD2VU);
checkHashAndIsEqualMatch(VD1VU, VD1SU);
checkHashAndIsEqualMatch(VD1VU, VD1SD);
checkHashAndIsEqualMatch(VD1VU, VD2PU);
checkHashAndIsEqualMatch(VD1VU, VD2PD);
checkHashAndIsEqualMatch(VD2VU, VD1SU);
checkHashAndIsEqualMatch(VD2VU, VD1SD);
checkHashAndIsEqualMatch(VD2VU, VD2PU);
checkHashAndIsEqualMatch(VD2VU, VD2PD);
checkHashAndIsEqualMatch(VD1SU, VD1SD);
checkHashAndIsEqualMatch(VD1SU, VD2PU);
checkHashAndIsEqualMatch(VD1SU, VD2PD);
checkHashAndIsEqualMatch(VD1SD, VD2PU);
checkHashAndIsEqualMatch(VD1SD, VD2PD);
checkHashAndIsEqualMatch(VD2PU, VD2PD);
}
} // end namespace


@@ -1,402 +0,0 @@
//===- MachineOperandTest.cpp ---------------------------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
#include "llvm/CodeGen/MachineOperand.h"
#include "llvm/ADT/ilist_node.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/ModuleSlotTracker.h"
#include "llvm/MC/MCContext.h"
#include "llvm/MC/MCAsmInfo.h"
#include "llvm/Support/raw_ostream.h"
#include "gtest/gtest.h"
using namespace llvm;
namespace {
TEST(MachineOperandTest, ChangeToTargetIndexTest) {
// Create a MachineOperand that will be changed to a TargetIndex.
MachineOperand MO = MachineOperand::CreateImm(50);
// Checking some preconditions on the newly created
// MachineOperand.
ASSERT_TRUE(MO.isImm());
ASSERT_TRUE(MO.getImm() == 50);
ASSERT_FALSE(MO.isTargetIndex());
// Changing to TargetIndex with some arbitrary values
// for index, offset and flags.
MO.ChangeToTargetIndex(74, 57, 12);
// Checking that the mutation to TargetIndex happened
// correctly.
ASSERT_TRUE(MO.isTargetIndex());
ASSERT_TRUE(MO.getIndex() == 74);
ASSERT_TRUE(MO.getOffset() == 57);
ASSERT_TRUE(MO.getTargetFlags() == 12);
}
TEST(MachineOperandTest, PrintRegisterMask) {
uint32_t Dummy;
MachineOperand MO = MachineOperand::CreateRegMask(&Dummy);
// Checking some preconditions on the newly created
// MachineOperand.
ASSERT_TRUE(MO.isRegMask());
ASSERT_TRUE(MO.getRegMask() == &Dummy);
// Print a MachineOperand containing a RegMask. Here we check that without a
// TRI and IntrinsicInfo we still print a less detailed regmask.
std::string str;
raw_string_ostream OS(str);
MO.print(OS, /*TRI=*/nullptr, /*IntrinsicInfo=*/nullptr);
ASSERT_TRUE(OS.str() == "<regmask ...>");
}
TEST(MachineOperandTest, PrintSubReg) {
// Create a MachineOperand with RegNum=1 and SubReg=5.
MachineOperand MO = MachineOperand::CreateReg(
/*Reg=*/1, /*isDef=*/false, /*isImp=*/false, /*isKill=*/false,
/*isDead=*/false, /*isUndef=*/false, /*isEarlyClobber=*/false,
/*SubReg=*/5, /*isDebug=*/false, /*isInternalRead=*/false);
// Checking some preconditions on the newly created
// MachineOperand.
ASSERT_TRUE(MO.isReg());
ASSERT_TRUE(MO.getReg() == 1);
ASSERT_TRUE(MO.getSubReg() == 5);
// Print a MachineOperand containing a SubReg. Here we check that without a
// TRI and IntrinsicInfo we can still print the subreg index.
std::string str;
raw_string_ostream OS(str);
MO.print(OS, /*TRI=*/nullptr, /*IntrinsicInfo=*/nullptr);
ASSERT_TRUE(OS.str() == "%physreg1.subreg5");
}
TEST(MachineOperandTest, PrintCImm) {
LLVMContext Context;
APInt Int(128, UINT64_MAX);
++Int;
ConstantInt *CImm = ConstantInt::get(Context, Int);
// Create a MachineOperand with an Imm=(UINT64_MAX + 1)
MachineOperand MO = MachineOperand::CreateCImm(CImm);
// Checking some preconditions on the newly created
// MachineOperand.
ASSERT_TRUE(MO.isCImm());
ASSERT_TRUE(MO.getCImm() == CImm);
ASSERT_TRUE(MO.getCImm()->getValue() == Int);
// Print a MachineOperand containing a CImm. Here we check that without a
// TRI and IntrinsicInfo we can still print the wide integer constant.
std::string str;
raw_string_ostream OS(str);
MO.print(OS, /*TRI=*/nullptr, /*IntrinsicInfo=*/nullptr);
ASSERT_TRUE(OS.str() == "i128 18446744073709551616");
}
TEST(MachineOperandTest, PrintSubRegIndex) {
// Create a MachineOperand with an immediate and print it as a subreg index.
MachineOperand MO = MachineOperand::CreateImm(3);
// Checking some preconditions on the newly created
// MachineOperand.
ASSERT_TRUE(MO.isImm());
ASSERT_TRUE(MO.getImm() == 3);
// Print a MachineOperand containing a SubRegIdx. Here we check that without a
// TRI and IntrinsicInfo we can print the operand as a subreg index.
std::string str;
raw_string_ostream OS(str);
ModuleSlotTracker DummyMST(nullptr);
MachineOperand::printSubregIdx(OS, MO.getImm(), nullptr);
ASSERT_TRUE(OS.str() == "%subreg.3");
}
TEST(MachineOperandTest, PrintCPI) {
// Create a MachineOperand with a constant pool index and print it.
MachineOperand MO = MachineOperand::CreateCPI(0, 8);
// Checking some preconditions on the newly created
// MachineOperand.
ASSERT_TRUE(MO.isCPI());
ASSERT_TRUE(MO.getIndex() == 0);
ASSERT_TRUE(MO.getOffset() == 8);
// Print a MachineOperand containing a constant pool index and a positive
// offset.
std::string str;
{
raw_string_ostream OS(str);
MO.print(OS, /*TRI=*/nullptr, /*IntrinsicInfo=*/nullptr);
ASSERT_TRUE(OS.str() == "%const.0 + 8");
}
str.clear();
MO.setOffset(-12);
// Print a MachineOperand containing a constant pool index and a negative
// offset.
{
raw_string_ostream OS(str);
MO.print(OS, /*TRI=*/nullptr, /*IntrinsicInfo=*/nullptr);
ASSERT_TRUE(OS.str() == "%const.0 - 12");
}
}
TEST(MachineOperandTest, PrintTargetIndexName) {
// Create a MachineOperand with a target index and print it.
MachineOperand MO = MachineOperand::CreateTargetIndex(0, 8);
// Checking some preconditions on the newly created
// MachineOperand.
ASSERT_TRUE(MO.isTargetIndex());
ASSERT_TRUE(MO.getIndex() == 0);
ASSERT_TRUE(MO.getOffset() == 8);
// Print a MachineOperand containing a target index and a positive offset.
std::string str;
{
raw_string_ostream OS(str);
MO.print(OS, /*TRI=*/nullptr, /*IntrinsicInfo=*/nullptr);
ASSERT_TRUE(OS.str() == "target-index(<unknown>) + 8");
}
str.clear();
MO.setOffset(-12);
// Print a MachineOperand containing a target index and a negative offset.
{
raw_string_ostream OS(str);
MO.print(OS, /*TRI=*/nullptr, /*IntrinsicInfo=*/nullptr);
ASSERT_TRUE(OS.str() == "target-index(<unknown>) - 12");
}
}
TEST(MachineOperandTest, PrintJumpTableIndex) {
// Create a MachineOperand with a jump-table index and print it.
MachineOperand MO = MachineOperand::CreateJTI(3);
// Checking some preconditions on the newly created
// MachineOperand.
ASSERT_TRUE(MO.isJTI());
ASSERT_TRUE(MO.getIndex() == 3);
// Print a MachineOperand containing a jump-table index.
std::string str;
raw_string_ostream OS(str);
MO.print(OS, /*TRI=*/nullptr, /*IntrinsicInfo=*/nullptr);
ASSERT_TRUE(OS.str() == "%jump-table.3");
}
TEST(MachineOperandTest, PrintExternalSymbol) {
// Create a MachineOperand with an external symbol and print it.
MachineOperand MO = MachineOperand::CreateES("foo");
// Checking some preconditions on the newly created
// MachineOperand.
ASSERT_TRUE(MO.isSymbol());
ASSERT_TRUE(MO.getSymbolName() == StringRef("foo"));
// Print a MachineOperand containing an external symbol and no offset.
std::string str;
{
raw_string_ostream OS(str);
MO.print(OS, /*TRI=*/nullptr, /*IntrinsicInfo=*/nullptr);
ASSERT_TRUE(OS.str() == "$foo");
}
str.clear();
MO.setOffset(12);
// Print a MachineOperand containing an external symbol and a positive offset.
{
raw_string_ostream OS(str);
MO.print(OS, /*TRI=*/nullptr, /*IntrinsicInfo=*/nullptr);
ASSERT_TRUE(OS.str() == "$foo + 12");
}
str.clear();
MO.setOffset(-12);
// Print a MachineOperand containing an external symbol and a negative offset.
{
raw_string_ostream OS(str);
MO.print(OS, /*TRI=*/nullptr, /*IntrinsicInfo=*/nullptr);
ASSERT_TRUE(OS.str() == "$foo - 12");
}
}
TEST(MachineOperandTest, PrintGlobalAddress) {
LLVMContext Ctx;
Module M("MachineOperandGVTest", Ctx);
M.getOrInsertGlobal("foo", Type::getInt32Ty(Ctx));
GlobalValue *GV = M.getNamedValue("foo");
// Create a MachineOperand with a global address and a positive offset and
// print it.
MachineOperand MO = MachineOperand::CreateGA(GV, 12);
// Checking some preconditions on the newly created
// MachineOperand.
ASSERT_TRUE(MO.isGlobal());
ASSERT_TRUE(MO.getGlobal() == GV);
ASSERT_TRUE(MO.getOffset() == 12);
std::string str;
// Print a MachineOperand containing a global address and a positive offset.
{
raw_string_ostream OS(str);
MO.print(OS, /*TRI=*/nullptr, /*IntrinsicInfo=*/nullptr);
ASSERT_TRUE(OS.str() == "@foo + 12");
}
str.clear();
MO.setOffset(-12);
// Print a MachineOperand containing a global address and a negative offset.
{
raw_string_ostream OS(str);
MO.print(OS, /*TRI=*/nullptr, /*IntrinsicInfo=*/nullptr);
ASSERT_TRUE(OS.str() == "@foo - 12");
}
}
TEST(MachineOperandTest, PrintRegisterLiveOut) {
// Create a MachineOperand with a register live out list and print it.
uint32_t Mask = 0;
MachineOperand MO = MachineOperand::CreateRegLiveOut(&Mask);
// Checking some preconditions on the newly created
// MachineOperand.
ASSERT_TRUE(MO.isRegLiveOut());
ASSERT_TRUE(MO.getRegLiveOut() == &Mask);
std::string str;
// Print a MachineOperand containing a register live out list without a TRI.
raw_string_ostream OS(str);
MO.print(OS, /*TRI=*/nullptr, /*IntrinsicInfo=*/nullptr);
ASSERT_TRUE(OS.str() == "liveout(<unknown>)");
}
TEST(MachineOperandTest, PrintMetadata) {
LLVMContext Ctx;
Module M("MachineOperandMDNodeTest", Ctx);
NamedMDNode *MD = M.getOrInsertNamedMetadata("namedmd");
ModuleSlotTracker DummyMST(&M);
Metadata *MDS = MDString::get(Ctx, "foo");
MDNode *Node = MDNode::get(Ctx, MDS);
MD->addOperand(Node);
// Create a MachineOperand with a metadata node and print it.
MachineOperand MO = MachineOperand::CreateMetadata(Node);
// Checking some preconditions on the newly created
// MachineOperand.
ASSERT_TRUE(MO.isMetadata());
ASSERT_TRUE(MO.getMetadata() == Node);
std::string str;
// Print a MachineOperand containing a metadata node.
raw_string_ostream OS(str);
MO.print(OS, DummyMST, LLT{}, false, false, 0, /*TRI=*/nullptr,
/*IntrinsicInfo=*/nullptr);
ASSERT_TRUE(OS.str() == "!0");
}
TEST(MachineOperandTest, PrintMCSymbol) {
MCAsmInfo MAI;
MCContext Ctx(&MAI, /*MRI=*/nullptr, /*MOFI=*/nullptr);
MCSymbol *Sym = Ctx.getOrCreateSymbol("foo");
// Create a MachineOperand with an MCSymbol and print it.
MachineOperand MO = MachineOperand::CreateMCSymbol(Sym);
// Checking some preconditions on the newly created
// MachineOperand.
ASSERT_TRUE(MO.isMCSymbol());
ASSERT_TRUE(MO.getMCSymbol() == Sym);
std::string str;
// Print a MachineOperand containing an MCSymbol.
raw_string_ostream OS(str);
MO.print(OS, /*TRI=*/nullptr, /*IntrinsicInfo=*/nullptr);
ASSERT_TRUE(OS.str() == "<mcsymbol foo>");
}
TEST(MachineOperandTest, PrintCFI) {
// Create a MachineOperand with a CFI index but no function and print it.
MachineOperand MO = MachineOperand::CreateCFIIndex(8);
// Checking some preconditions on the newly created
// MachineOperand.
ASSERT_TRUE(MO.isCFIIndex());
ASSERT_TRUE(MO.getCFIIndex() == 8);
std::string str;
// Print a MachineOperand containing a CFI Index node but no machine function
// attached to it.
raw_string_ostream OS(str);
MO.print(OS, /*TRI=*/nullptr, /*IntrinsicInfo=*/nullptr);
ASSERT_TRUE(OS.str() == "<cfi directive>");
}
TEST(MachineOperandTest, PrintIntrinsicID) {
// Create a MachineOperand with a generic intrinsic ID.
MachineOperand MO = MachineOperand::CreateIntrinsicID(Intrinsic::bswap);
// Checking some preconditions on the newly created
// MachineOperand.
ASSERT_TRUE(MO.isIntrinsicID());
ASSERT_TRUE(MO.getIntrinsicID() == Intrinsic::bswap);
std::string str;
{
// Print a MachineOperand containing a generic intrinsic ID.
raw_string_ostream OS(str);
MO.print(OS, /*TRI=*/nullptr, /*IntrinsicInfo=*/nullptr);
ASSERT_TRUE(OS.str() == "intrinsic(@llvm.bswap)");
}
str.clear();
// Set a target-specific intrinsic.
MO = MachineOperand::CreateIntrinsicID((Intrinsic::ID)-1);
{
// Print a MachineOperand containing a target-specific intrinsic ID but not
// IntrinsicInfo.
raw_string_ostream OS(str);
MO.print(OS, /*TRI=*/nullptr, /*IntrinsicInfo=*/nullptr);
ASSERT_TRUE(OS.str() == "intrinsic(4294967295)");
}
}
TEST(MachineOperandTest, PrintPredicate) {
// Create a MachineOperand with an integer comparison predicate (ICMP_EQ).
MachineOperand MO = MachineOperand::CreatePredicate(CmpInst::ICMP_EQ);
// Checking some preconditions on the newly created
// MachineOperand.
ASSERT_TRUE(MO.isPredicate());
ASSERT_TRUE(MO.getPredicate() == CmpInst::ICMP_EQ);
std::string str;
// Print a MachineOperand containing an integer predicate (ICMP_EQ).
raw_string_ostream OS(str);
MO.print(OS, /*TRI=*/nullptr, /*IntrinsicInfo=*/nullptr);
ASSERT_TRUE(OS.str() == "intpred(eq)");
}
} // end namespace


@@ -1,88 +0,0 @@
//===-------- llvm/unittest/CodeGen/ScalableVectorMVTsTest.cpp ------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
#include "llvm/CodeGen/MachineValueType.h"
#include "llvm/CodeGen/ValueTypes.h"
#include "llvm/IR/LLVMContext.h"
#include "gtest/gtest.h"
using namespace llvm;
namespace {
TEST(ScalableVectorMVTsTest, IntegerMVTs) {
for (auto VecTy : MVT::integer_scalable_vector_valuetypes()) {
ASSERT_TRUE(VecTy.isValid());
ASSERT_TRUE(VecTy.isInteger());
ASSERT_TRUE(VecTy.isVector());
ASSERT_TRUE(VecTy.isScalableVector());
ASSERT_TRUE(VecTy.getScalarType().isValid());
ASSERT_FALSE(VecTy.isFloatingPoint());
}
}
TEST(ScalableVectorMVTsTest, FloatMVTs) {
for (auto VecTy : MVT::fp_scalable_vector_valuetypes()) {
ASSERT_TRUE(VecTy.isValid());
ASSERT_TRUE(VecTy.isFloatingPoint());
ASSERT_TRUE(VecTy.isVector());
ASSERT_TRUE(VecTy.isScalableVector());
ASSERT_TRUE(VecTy.getScalarType().isValid());
ASSERT_FALSE(VecTy.isInteger());
}
}
TEST(ScalableVectorMVTsTest, HelperFuncs) {
LLVMContext Ctx;
// Create with scalable flag
EVT Vnx4i32 = EVT::getVectorVT(Ctx, MVT::i32, 4, /*Scalable=*/true);
ASSERT_TRUE(Vnx4i32.isScalableVector());
// Create with separate MVT::ElementCount
auto EltCnt = MVT::ElementCount(2, true);
EVT Vnx2i32 = EVT::getVectorVT(Ctx, MVT::i32, EltCnt);
ASSERT_TRUE(Vnx2i32.isScalableVector());
// Create with inline MVT::ElementCount
EVT Vnx2i64 = EVT::getVectorVT(Ctx, MVT::i64, {2, true});
ASSERT_TRUE(Vnx2i64.isScalableVector());
// Check that changing scalar types/element count works
EXPECT_EQ(Vnx2i32.widenIntegerVectorElementType(Ctx), Vnx2i64);
EXPECT_EQ(Vnx4i32.getHalfNumVectorElementsVT(Ctx), Vnx2i32);
// Check that overloaded '*' and '/' operators work
EXPECT_EQ(EVT::getVectorVT(Ctx, MVT::i64, EltCnt * 2), MVT::nxv4i64);
EXPECT_EQ(EVT::getVectorVT(Ctx, MVT::i64, EltCnt / 2), MVT::nxv1i64);
// Check that float->int conversion works
EVT Vnx2f64 = EVT::getVectorVT(Ctx, MVT::f64, {2, true});
EXPECT_EQ(Vnx2f64.changeTypeToInteger(), Vnx2i64);
// Check fields inside MVT::ElementCount
EltCnt = Vnx4i32.getVectorElementCount();
EXPECT_EQ(EltCnt.Min, 4U);
ASSERT_TRUE(EltCnt.Scalable);
// Check that fixed-length vector types aren't scalable.
EVT V8i32 = EVT::getVectorVT(Ctx, MVT::i32, 8);
ASSERT_FALSE(V8i32.isScalableVector());
EVT V4f64 = EVT::getVectorVT(Ctx, MVT::f64, {4, false});
ASSERT_FALSE(V4f64.isScalableVector());
// Check that MVT::ElementCount works for fixed-length types.
EltCnt = V8i32.getVectorElementCount();
EXPECT_EQ(EltCnt.Min, 8U);
ASSERT_FALSE(EltCnt.Scalable);
}
}