Imported Upstream version 5.18.0.167

Former-commit-id: 289509151e0fee68a1b591a20c9f109c3c789d3a
Xamarin Public Jenkins (auto-signing)
2018-10-20 08:25:10 +00:00
parent e19d552987
commit b084638f15
28489 changed files with 184 additions and 3866856 deletions


@@ -1,440 +0,0 @@
; RUN: opt -S -o - -mtriple=armv7-apple-ios7.0 -atomic-expand -codegen-opt-level=1 %s | FileCheck %s
define i8 @test_atomic_xchg_i8(i8* %ptr, i8 %xchgend) {
; CHECK-LABEL: @test_atomic_xchg_i8
; CHECK-NOT: dmb
; CHECK: br label %[[LOOP:.*]]
; CHECK: [[LOOP]]:
; CHECK: [[OLDVAL32:%.*]] = call i32 @llvm.arm.ldrex.p0i8(i8* %ptr)
; CHECK: [[OLDVAL:%.*]] = trunc i32 [[OLDVAL32]] to i8
; CHECK: [[NEWVAL32:%.*]] = zext i8 %xchgend to i32
; CHECK: [[TRYAGAIN:%.*]] = call i32 @llvm.arm.strex.p0i8(i32 [[NEWVAL32]], i8* %ptr)
; CHECK: [[TST:%.*]] = icmp ne i32 [[TRYAGAIN]], 0
; CHECK: br i1 [[TST]], label %[[LOOP]], label %[[END:.*]]
; CHECK: [[END]]:
; CHECK-NOT: dmb
; CHECK: ret i8 [[OLDVAL]]
%res = atomicrmw xchg i8* %ptr, i8 %xchgend monotonic
ret i8 %res
}
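; A hand-written sketch (illustrative only, not pass output; the function and
; value names below are invented) of the fully expanded form the CHECK lines
; above describe: a bare ldrex/strex retry loop with no barriers, since the
; requested ordering is monotonic.
define i8 @sketch_atomic_xchg_i8_expanded(i8* %ptr, i8 %xchgend) {
entry:
  br label %loop
loop:
  %oldval32 = call i32 @llvm.arm.ldrex.p0i8(i8* %ptr)
  %oldval = trunc i32 %oldval32 to i8
  %newval32 = zext i8 %xchgend to i32
  %failed = call i32 @llvm.arm.strex.p0i8(i32 %newval32, i8* %ptr)
  %tst = icmp ne i32 %failed, 0
  br i1 %tst, label %loop, label %end
end:
  ret i8 %oldval
}
declare i32 @llvm.arm.ldrex.p0i8(i8*)
declare i32 @llvm.arm.strex.p0i8(i32, i8*)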
define i16 @test_atomic_add_i16(i16* %ptr, i16 %addend) {
; CHECK-LABEL: @test_atomic_add_i16
; CHECK: call void @llvm.arm.dmb(i32 11)
; CHECK: br label %[[LOOP:.*]]
; CHECK: [[LOOP]]:
; CHECK: [[OLDVAL32:%.*]] = call i32 @llvm.arm.ldrex.p0i16(i16* %ptr)
; CHECK: [[OLDVAL:%.*]] = trunc i32 [[OLDVAL32]] to i16
; CHECK: [[NEWVAL:%.*]] = add i16 [[OLDVAL]], %addend
; CHECK: [[NEWVAL32:%.*]] = zext i16 [[NEWVAL]] to i32
; CHECK: [[TRYAGAIN:%.*]] = call i32 @llvm.arm.strex.p0i16(i32 [[NEWVAL32]], i16* %ptr)
; CHECK: [[TST:%.*]] = icmp ne i32 [[TRYAGAIN]], 0
; CHECK: br i1 [[TST]], label %[[LOOP]], label %[[END:.*]]
; CHECK: [[END]]:
; CHECK: call void @llvm.arm.dmb(i32 11)
; CHECK: ret i16 [[OLDVAL]]
%res = atomicrmw add i16* %ptr, i16 %addend seq_cst
ret i16 %res
}
define i32 @test_atomic_sub_i32(i32* %ptr, i32 %subend) {
; CHECK-LABEL: @test_atomic_sub_i32
; CHECK-NOT: dmb
; CHECK: br label %[[LOOP:.*]]
; CHECK: [[LOOP]]:
; CHECK: [[OLDVAL:%.*]] = call i32 @llvm.arm.ldrex.p0i32(i32* %ptr)
; CHECK: [[NEWVAL:%.*]] = sub i32 [[OLDVAL]], %subend
; CHECK: [[TRYAGAIN:%.*]] = call i32 @llvm.arm.strex.p0i32(i32 [[NEWVAL]], i32* %ptr)
; CHECK: [[TST:%.*]] = icmp ne i32 [[TRYAGAIN]], 0
; CHECK: br i1 [[TST]], label %[[LOOP]], label %[[END:.*]]
; CHECK: [[END]]:
; CHECK: call void @llvm.arm.dmb(i32 11)
; CHECK: ret i32 [[OLDVAL]]
%res = atomicrmw sub i32* %ptr, i32 %subend acquire
ret i32 %res
}
define i8 @test_atomic_and_i8(i8* %ptr, i8 %andend) {
; CHECK-LABEL: @test_atomic_and_i8
; CHECK: call void @llvm.arm.dmb(i32 11)
; CHECK: br label %[[LOOP:.*]]
; CHECK: [[LOOP]]:
; CHECK: [[OLDVAL32:%.*]] = call i32 @llvm.arm.ldrex.p0i8(i8* %ptr)
; CHECK: [[OLDVAL:%.*]] = trunc i32 [[OLDVAL32]] to i8
; CHECK: [[NEWVAL:%.*]] = and i8 [[OLDVAL]], %andend
; CHECK: [[NEWVAL32:%.*]] = zext i8 [[NEWVAL]] to i32
; CHECK: [[TRYAGAIN:%.*]] = call i32 @llvm.arm.strex.p0i8(i32 [[NEWVAL32]], i8* %ptr)
; CHECK: [[TST:%.*]] = icmp ne i32 [[TRYAGAIN]], 0
; CHECK: br i1 [[TST]], label %[[LOOP]], label %[[END:.*]]
; CHECK: [[END]]:
; CHECK-NOT: dmb
; CHECK: ret i8 [[OLDVAL]]
%res = atomicrmw and i8* %ptr, i8 %andend release
ret i8 %res
}
define i16 @test_atomic_nand_i16(i16* %ptr, i16 %nandend) {
; CHECK-LABEL: @test_atomic_nand_i16
; CHECK: call void @llvm.arm.dmb(i32 11)
; CHECK: br label %[[LOOP:.*]]
; CHECK: [[LOOP]]:
; CHECK: [[OLDVAL32:%.*]] = call i32 @llvm.arm.ldrex.p0i16(i16* %ptr)
; CHECK: [[OLDVAL:%.*]] = trunc i32 [[OLDVAL32]] to i16
; CHECK: [[NEWVAL_TMP:%.*]] = and i16 [[OLDVAL]], %nandend
; CHECK: [[NEWVAL:%.*]] = xor i16 [[NEWVAL_TMP]], -1
; CHECK: [[NEWVAL32:%.*]] = zext i16 [[NEWVAL]] to i32
; CHECK: [[TRYAGAIN:%.*]] = call i32 @llvm.arm.strex.p0i16(i32 [[NEWVAL32]], i16* %ptr)
; CHECK: [[TST:%.*]] = icmp ne i32 [[TRYAGAIN]], 0
; CHECK: br i1 [[TST]], label %[[LOOP]], label %[[END:.*]]
; CHECK: [[END]]:
; CHECK: call void @llvm.arm.dmb(i32 11)
; CHECK: ret i16 [[OLDVAL]]
%res = atomicrmw nand i16* %ptr, i16 %nandend seq_cst
ret i16 %res
}
define i64 @test_atomic_or_i64(i64* %ptr, i64 %orend) {
; CHECK-LABEL: @test_atomic_or_i64
; CHECK: call void @llvm.arm.dmb(i32 11)
; CHECK: br label %[[LOOP:.*]]
; CHECK: [[LOOP]]:
; CHECK: [[PTR8:%.*]] = bitcast i64* %ptr to i8*
; CHECK: [[LOHI:%.*]] = call { i32, i32 } @llvm.arm.ldrexd(i8* [[PTR8]])
; CHECK: [[LO:%.*]] = extractvalue { i32, i32 } [[LOHI]], 0
; CHECK: [[HI:%.*]] = extractvalue { i32, i32 } [[LOHI]], 1
; CHECK: [[LO64:%.*]] = zext i32 [[LO]] to i64
; CHECK: [[HI64_TMP:%.*]] = zext i32 [[HI]] to i64
; CHECK: [[HI64:%.*]] = shl i64 [[HI64_TMP]], 32
; CHECK: [[OLDVAL:%.*]] = or i64 [[LO64]], [[HI64]]
; CHECK: [[NEWVAL:%.*]] = or i64 [[OLDVAL]], %orend
; CHECK: [[NEWLO:%.*]] = trunc i64 [[NEWVAL]] to i32
; CHECK: [[NEWHI_TMP:%.*]] = lshr i64 [[NEWVAL]], 32
; CHECK: [[NEWHI:%.*]] = trunc i64 [[NEWHI_TMP]] to i32
; CHECK: [[PTR8:%.*]] = bitcast i64* %ptr to i8*
; CHECK: [[TRYAGAIN:%.*]] = call i32 @llvm.arm.strexd(i32 [[NEWLO]], i32 [[NEWHI]], i8* [[PTR8]])
; CHECK: [[TST:%.*]] = icmp ne i32 [[TRYAGAIN]], 0
; CHECK: br i1 [[TST]], label %[[LOOP]], label %[[END:.*]]
; CHECK: [[END]]:
; CHECK: call void @llvm.arm.dmb(i32 11)
; CHECK: ret i64 [[OLDVAL]]
%res = atomicrmw or i64* %ptr, i64 %orend seq_cst
ret i64 %res
}
define i8 @test_atomic_xor_i8(i8* %ptr, i8 %xorend) {
; CHECK-LABEL: @test_atomic_xor_i8
; CHECK: call void @llvm.arm.dmb(i32 11)
; CHECK: br label %[[LOOP:.*]]
; CHECK: [[LOOP]]:
; CHECK: [[OLDVAL32:%.*]] = call i32 @llvm.arm.ldrex.p0i8(i8* %ptr)
; CHECK: [[OLDVAL:%.*]] = trunc i32 [[OLDVAL32]] to i8
; CHECK: [[NEWVAL:%.*]] = xor i8 [[OLDVAL]], %xorend
; CHECK: [[NEWVAL32:%.*]] = zext i8 [[NEWVAL]] to i32
; CHECK: [[TRYAGAIN:%.*]] = call i32 @llvm.arm.strex.p0i8(i32 [[NEWVAL32]], i8* %ptr)
; CHECK: [[TST:%.*]] = icmp ne i32 [[TRYAGAIN]], 0
; CHECK: br i1 [[TST]], label %[[LOOP]], label %[[END:.*]]
; CHECK: [[END]]:
; CHECK: call void @llvm.arm.dmb(i32 11)
; CHECK: ret i8 [[OLDVAL]]
%res = atomicrmw xor i8* %ptr, i8 %xorend seq_cst
ret i8 %res
}
define i8 @test_atomic_max_i8(i8* %ptr, i8 %maxend) {
; CHECK-LABEL: @test_atomic_max_i8
; CHECK: call void @llvm.arm.dmb(i32 11)
; CHECK: br label %[[LOOP:.*]]
; CHECK: [[LOOP]]:
; CHECK: [[OLDVAL32:%.*]] = call i32 @llvm.arm.ldrex.p0i8(i8* %ptr)
; CHECK: [[OLDVAL:%.*]] = trunc i32 [[OLDVAL32]] to i8
; CHECK: [[WANT_OLD:%.*]] = icmp sgt i8 [[OLDVAL]], %maxend
; CHECK: [[NEWVAL:%.*]] = select i1 [[WANT_OLD]], i8 [[OLDVAL]], i8 %maxend
; CHECK: [[NEWVAL32:%.*]] = zext i8 [[NEWVAL]] to i32
; CHECK: [[TRYAGAIN:%.*]] = call i32 @llvm.arm.strex.p0i8(i32 [[NEWVAL32]], i8* %ptr)
; CHECK: [[TST:%.*]] = icmp ne i32 [[TRYAGAIN]], 0
; CHECK: br i1 [[TST]], label %[[LOOP]], label %[[END:.*]]
; CHECK: [[END]]:
; CHECK: call void @llvm.arm.dmb(i32 11)
; CHECK: ret i8 [[OLDVAL]]
%res = atomicrmw max i8* %ptr, i8 %maxend seq_cst
ret i8 %res
}
define i8 @test_atomic_min_i8(i8* %ptr, i8 %minend) {
; CHECK-LABEL: @test_atomic_min_i8
; CHECK: call void @llvm.arm.dmb(i32 11)
; CHECK: br label %[[LOOP:.*]]
; CHECK: [[LOOP]]:
; CHECK: [[OLDVAL32:%.*]] = call i32 @llvm.arm.ldrex.p0i8(i8* %ptr)
; CHECK: [[OLDVAL:%.*]] = trunc i32 [[OLDVAL32]] to i8
; CHECK: [[WANT_OLD:%.*]] = icmp sle i8 [[OLDVAL]], %minend
; CHECK: [[NEWVAL:%.*]] = select i1 [[WANT_OLD]], i8 [[OLDVAL]], i8 %minend
; CHECK: [[NEWVAL32:%.*]] = zext i8 [[NEWVAL]] to i32
; CHECK: [[TRYAGAIN:%.*]] = call i32 @llvm.arm.strex.p0i8(i32 [[NEWVAL32]], i8* %ptr)
; CHECK: [[TST:%.*]] = icmp ne i32 [[TRYAGAIN]], 0
; CHECK: br i1 [[TST]], label %[[LOOP]], label %[[END:.*]]
; CHECK: [[END]]:
; CHECK: call void @llvm.arm.dmb(i32 11)
; CHECK: ret i8 [[OLDVAL]]
%res = atomicrmw min i8* %ptr, i8 %minend seq_cst
ret i8 %res
}
define i8 @test_atomic_umax_i8(i8* %ptr, i8 %umaxend) {
; CHECK-LABEL: @test_atomic_umax_i8
; CHECK: call void @llvm.arm.dmb(i32 11)
; CHECK: br label %[[LOOP:.*]]
; CHECK: [[LOOP]]:
; CHECK: [[OLDVAL32:%.*]] = call i32 @llvm.arm.ldrex.p0i8(i8* %ptr)
; CHECK: [[OLDVAL:%.*]] = trunc i32 [[OLDVAL32]] to i8
; CHECK: [[WANT_OLD:%.*]] = icmp ugt i8 [[OLDVAL]], %umaxend
; CHECK: [[NEWVAL:%.*]] = select i1 [[WANT_OLD]], i8 [[OLDVAL]], i8 %umaxend
; CHECK: [[NEWVAL32:%.*]] = zext i8 [[NEWVAL]] to i32
; CHECK: [[TRYAGAIN:%.*]] = call i32 @llvm.arm.strex.p0i8(i32 [[NEWVAL32]], i8* %ptr)
; CHECK: [[TST:%.*]] = icmp ne i32 [[TRYAGAIN]], 0
; CHECK: br i1 [[TST]], label %[[LOOP]], label %[[END:.*]]
; CHECK: [[END]]:
; CHECK: call void @llvm.arm.dmb(i32 11)
; CHECK: ret i8 [[OLDVAL]]
%res = atomicrmw umax i8* %ptr, i8 %umaxend seq_cst
ret i8 %res
}
define i8 @test_atomic_umin_i8(i8* %ptr, i8 %uminend) {
; CHECK-LABEL: @test_atomic_umin_i8
; CHECK: call void @llvm.arm.dmb(i32 11)
; CHECK: br label %[[LOOP:.*]]
; CHECK: [[LOOP]]:
; CHECK: [[OLDVAL32:%.*]] = call i32 @llvm.arm.ldrex.p0i8(i8* %ptr)
; CHECK: [[OLDVAL:%.*]] = trunc i32 [[OLDVAL32]] to i8
; CHECK: [[WANT_OLD:%.*]] = icmp ule i8 [[OLDVAL]], %uminend
; CHECK: [[NEWVAL:%.*]] = select i1 [[WANT_OLD]], i8 [[OLDVAL]], i8 %uminend
; CHECK: [[NEWVAL32:%.*]] = zext i8 [[NEWVAL]] to i32
; CHECK: [[TRYAGAIN:%.*]] = call i32 @llvm.arm.strex.p0i8(i32 [[NEWVAL32]], i8* %ptr)
; CHECK: [[TST:%.*]] = icmp ne i32 [[TRYAGAIN]], 0
; CHECK: br i1 [[TST]], label %[[LOOP]], label %[[END:.*]]
; CHECK: [[END]]:
; CHECK: call void @llvm.arm.dmb(i32 11)
; CHECK: ret i8 [[OLDVAL]]
%res = atomicrmw umin i8* %ptr, i8 %uminend seq_cst
ret i8 %res
}
define i8 @test_cmpxchg_i8_seqcst_seqcst(i8* %ptr, i8 %desired, i8 %newval) {
; CHECK-LABEL: @test_cmpxchg_i8_seqcst_seqcst
; CHECK: br label %[[START:.*]]
; CHECK: [[START]]:
; CHECK: [[OLDVAL32:%.*]] = call i32 @llvm.arm.ldrex.p0i8(i8* %ptr)
; CHECK: [[OLDVAL:%.*]] = trunc i32 [[OLDVAL32]] to i8
; CHECK: [[SHOULD_STORE:%.*]] = icmp eq i8 [[OLDVAL]], %desired
; CHECK: br i1 [[SHOULD_STORE]], label %[[FENCED_STORE:.*]], label %[[NO_STORE_BB:.*]]
; CHECK: [[FENCED_STORE]]:
; CHECK: call void @llvm.arm.dmb(i32 11)
; CHECK: br label %[[LOOP:.*]]
; CHECK: [[LOOP]]:
; CHECK: [[LOADED_LOOP:%.*]] = phi i8 [ [[OLDVAL]], %[[FENCED_STORE]] ], [ [[OLDVAL_LOOP:%.*]], %[[RELEASED_LOAD:.*]] ]
; CHECK: [[NEWVAL32:%.*]] = zext i8 %newval to i32
; CHECK: [[TRYAGAIN:%.*]] = call i32 @llvm.arm.strex.p0i8(i32 [[NEWVAL32]], i8* %ptr)
; CHECK: [[TST:%.*]] = icmp eq i32 [[TRYAGAIN]], 0
; CHECK: br i1 [[TST]], label %[[SUCCESS_BB:.*]], label %[[RELEASED_LOAD]]
; CHECK: [[RELEASED_LOAD]]:
; CHECK: [[OLDVAL32_LOOP:%.*]] = call i32 @llvm.arm.ldrex.p0i8(i8* %ptr)
; CHECK: [[OLDVAL_LOOP]] = trunc i32 [[OLDVAL32_LOOP]] to i8
; CHECK: [[SHOULD_STORE_LOOP:%.*]] = icmp eq i8 [[OLDVAL_LOOP]], %desired
; CHECK: br i1 [[SHOULD_STORE_LOOP]], label %[[LOOP]], label %[[NO_STORE_BB]]
; CHECK: [[SUCCESS_BB]]:
; CHECK: call void @llvm.arm.dmb(i32 11)
; CHECK: br label %[[DONE:.*]]
; CHECK: [[NO_STORE_BB]]:
; CHECK-NEXT: [[LOADED_NO_STORE:%.*]] = phi i8 [ [[OLDVAL]], %[[START]] ], [ [[OLDVAL_LOOP]], %[[RELEASED_LOAD]] ]
; CHECK-NEXT: call void @llvm.arm.clrex()
; CHECK-NEXT: br label %[[FAILURE_BB:.*]]
; CHECK: [[FAILURE_BB]]:
; CHECK: call void @llvm.arm.dmb(i32 11)
; CHECK: br label %[[DONE]]
; CHECK: [[DONE]]:
; CHECK: [[SUCCESS:%.*]] = phi i1 [ true, %[[SUCCESS_BB]] ], [ false, %[[FAILURE_BB]] ]
; CHECK: [[LOADED:%.*]] = phi i8 [ [[LOADED_LOOP]], %[[SUCCESS_BB]] ], [ [[LOADED_NO_STORE]], %[[FAILURE_BB]] ]
; CHECK: ret i8 [[LOADED]]
%pairold = cmpxchg i8* %ptr, i8 %desired, i8 %newval seq_cst seq_cst
%old = extractvalue { i8, i1 } %pairold, 0
ret i8 %old
}
define i16 @test_cmpxchg_i16_seqcst_monotonic(i16* %ptr, i16 %desired, i16 %newval) {
; CHECK-LABEL: @test_cmpxchg_i16_seqcst_monotonic
; CHECK: br label %[[START:.*]]
; CHECK: [[START]]:
; CHECK: [[OLDVAL32:%.*]] = call i32 @llvm.arm.ldrex.p0i16(i16* %ptr)
; CHECK: [[OLDVAL:%.*]] = trunc i32 [[OLDVAL32]] to i16
; CHECK: [[SHOULD_STORE:%.*]] = icmp eq i16 [[OLDVAL]], %desired
; CHECK: br i1 [[SHOULD_STORE]], label %[[FENCED_STORE:.*]], label %[[NO_STORE_BB:.*]]
; CHECK: [[FENCED_STORE]]:
; CHECK: call void @llvm.arm.dmb(i32 11)
; CHECK: br label %[[LOOP:.*]]
; CHECK: [[LOOP]]:
; CHECK: [[LOADED_LOOP:%.*]] = phi i16 [ [[OLDVAL]], %[[FENCED_STORE]] ], [ [[OLDVAL_LOOP:%.*]], %[[RELEASED_LOAD:.*]] ]
; CHECK: [[NEWVAL32:%.*]] = zext i16 %newval to i32
; CHECK: [[TRYAGAIN:%.*]] = call i32 @llvm.arm.strex.p0i16(i32 [[NEWVAL32]], i16* %ptr)
; CHECK: [[TST:%.*]] = icmp eq i32 [[TRYAGAIN]], 0
; CHECK: br i1 [[TST]], label %[[SUCCESS_BB:.*]], label %[[RELEASED_LOAD]]
; CHECK: [[RELEASED_LOAD]]:
; CHECK: [[OLDVAL32_LOOP:%.*]] = call i32 @llvm.arm.ldrex.p0i16(i16* %ptr)
; CHECK: [[OLDVAL_LOOP]] = trunc i32 [[OLDVAL32_LOOP]] to i16
; CHECK: [[SHOULD_STORE_LOOP:%.*]] = icmp eq i16 [[OLDVAL_LOOP]], %desired
; CHECK: br i1 [[SHOULD_STORE_LOOP]], label %[[LOOP]], label %[[NO_STORE_BB]]
; CHECK: [[SUCCESS_BB]]:
; CHECK: call void @llvm.arm.dmb(i32 11)
; CHECK: br label %[[DONE:.*]]
; CHECK: [[NO_STORE_BB]]:
; CHECK-NEXT: [[LOADED_NO_STORE:%.*]] = phi i16 [ [[OLDVAL]], %[[START]] ], [ [[OLDVAL_LOOP]], %[[RELEASED_LOAD]] ]
; CHECK-NEXT: call void @llvm.arm.clrex()
; CHECK-NEXT: br label %[[FAILURE_BB:.*]]
; CHECK: [[FAILURE_BB]]:
; CHECK-NOT: dmb
; CHECK: br label %[[DONE]]
; CHECK: [[DONE]]:
; CHECK: [[SUCCESS:%.*]] = phi i1 [ true, %[[SUCCESS_BB]] ], [ false, %[[FAILURE_BB]] ]
; CHECK: [[LOADED:%.*]] = phi i16 [ [[LOADED_LOOP]], %[[SUCCESS_BB]] ], [ [[LOADED_NO_STORE]], %[[FAILURE_BB]] ]
; CHECK: ret i16 [[LOADED]]
%pairold = cmpxchg i16* %ptr, i16 %desired, i16 %newval seq_cst monotonic
%old = extractvalue { i16, i1 } %pairold, 0
ret i16 %old
}
define i32 @test_cmpxchg_i32_acquire_acquire(i32* %ptr, i32 %desired, i32 %newval) {
; CHECK-LABEL: @test_cmpxchg_i32_acquire_acquire
; CHECK-NOT: dmb
; CHECK: br label %[[LOOP:.*]]
; CHECK: [[LOOP]]:
; CHECK: [[OLDVAL:%.*]] = call i32 @llvm.arm.ldrex.p0i32(i32* %ptr)
; CHECK: [[SHOULD_STORE:%.*]] = icmp eq i32 [[OLDVAL]], %desired
; CHECK: br i1 [[SHOULD_STORE]], label %[[TRY_STORE:.*]], label %[[NO_STORE_BB:.*]]
; CHECK: [[TRY_STORE]]:
; CHECK: [[TRYAGAIN:%.*]] = call i32 @llvm.arm.strex.p0i32(i32 %newval, i32* %ptr)
; CHECK: [[TST:%.*]] = icmp eq i32 [[TRYAGAIN]], 0
; CHECK: br i1 [[TST]], label %[[SUCCESS_BB:.*]], label %[[LOOP]]
; CHECK: [[SUCCESS_BB]]:
; CHECK: call void @llvm.arm.dmb(i32 11)
; CHECK: br label %[[DONE:.*]]
; CHECK: [[NO_STORE_BB]]:
; CHECK-NEXT: call void @llvm.arm.clrex()
; CHECK-NEXT: br label %[[FAILURE_BB:.*]]
; CHECK: [[FAILURE_BB]]:
; CHECK: call void @llvm.arm.dmb(i32 11)
; CHECK: br label %[[DONE]]
; CHECK: [[DONE]]:
; CHECK: [[SUCCESS:%.*]] = phi i1 [ true, %[[SUCCESS_BB]] ], [ false, %[[FAILURE_BB]] ]
; CHECK: ret i32 [[OLDVAL]]
%pairold = cmpxchg i32* %ptr, i32 %desired, i32 %newval acquire acquire
%old = extractvalue { i32, i1 } %pairold, 0
ret i32 %old
}
define i64 @test_cmpxchg_i64_monotonic_monotonic(i64* %ptr, i64 %desired, i64 %newval) {
; CHECK-LABEL: @test_cmpxchg_i64_monotonic_monotonic
; CHECK-NOT: dmb
; CHECK: br label %[[LOOP:.*]]
; CHECK: [[LOOP]]:
; CHECK: [[PTR8:%.*]] = bitcast i64* %ptr to i8*
; CHECK: [[LOHI:%.*]] = call { i32, i32 } @llvm.arm.ldrexd(i8* [[PTR8]])
; CHECK: [[LO:%.*]] = extractvalue { i32, i32 } [[LOHI]], 0
; CHECK: [[HI:%.*]] = extractvalue { i32, i32 } [[LOHI]], 1
; CHECK: [[LO64:%.*]] = zext i32 [[LO]] to i64
; CHECK: [[HI64_TMP:%.*]] = zext i32 [[HI]] to i64
; CHECK: [[HI64:%.*]] = shl i64 [[HI64_TMP]], 32
; CHECK: [[OLDVAL:%.*]] = or i64 [[LO64]], [[HI64]]
; CHECK: [[SHOULD_STORE:%.*]] = icmp eq i64 [[OLDVAL]], %desired
; CHECK: br i1 [[SHOULD_STORE]], label %[[TRY_STORE:.*]], label %[[NO_STORE_BB:.*]]
; CHECK: [[TRY_STORE]]:
; CHECK: [[NEWLO:%.*]] = trunc i64 %newval to i32
; CHECK: [[NEWHI_TMP:%.*]] = lshr i64 %newval, 32
; CHECK: [[NEWHI:%.*]] = trunc i64 [[NEWHI_TMP]] to i32
; CHECK: [[PTR8:%.*]] = bitcast i64* %ptr to i8*
; CHECK: [[TRYAGAIN:%.*]] = call i32 @llvm.arm.strexd(i32 [[NEWLO]], i32 [[NEWHI]], i8* [[PTR8]])
; CHECK: [[TST:%.*]] = icmp eq i32 [[TRYAGAIN]], 0
; CHECK: br i1 [[TST]], label %[[SUCCESS_BB:.*]], label %[[LOOP]]
; CHECK: [[SUCCESS_BB]]:
; CHECK-NOT: dmb
; CHECK: br label %[[DONE:.*]]
; CHECK: [[NO_STORE_BB]]:
; CHECK-NEXT: call void @llvm.arm.clrex()
; CHECK-NEXT: br label %[[FAILURE_BB:.*]]
; CHECK: [[FAILURE_BB]]:
; CHECK-NOT: dmb
; CHECK: br label %[[DONE]]
; CHECK: [[DONE]]:
; CHECK: [[SUCCESS:%.*]] = phi i1 [ true, %[[SUCCESS_BB]] ], [ false, %[[FAILURE_BB]] ]
; CHECK: ret i64 [[OLDVAL]]
%pairold = cmpxchg i64* %ptr, i64 %desired, i64 %newval monotonic monotonic
%old = extractvalue { i64, i1 } %pairold, 0
ret i64 %old
}
define i32 @test_cmpxchg_minsize(i32* %addr, i32 %desired, i32 %new) minsize {
; CHECK-LABEL: @test_cmpxchg_minsize
; CHECK: call void @llvm.arm.dmb(i32 11)
; CHECK: br label %[[START:.*]]
; CHECK: [[START]]:
; CHECK: [[LOADED:%.*]] = call i32 @llvm.arm.ldrex.p0i32(i32* %addr)
; CHECK: [[SHOULD_STORE:%.*]] = icmp eq i32 [[LOADED]], %desired
; CHECK: br i1 [[SHOULD_STORE]], label %[[TRY_STORE:.*]], label %[[NO_STORE_BB:.*]]
; CHECK: [[TRY_STORE]]:
; CHECK: [[STREX:%.*]] = call i32 @llvm.arm.strex.p0i32(i32 %new, i32* %addr)
; CHECK: [[SUCCESS:%.*]] = icmp eq i32 [[STREX]], 0
; CHECK: br i1 [[SUCCESS]], label %[[SUCCESS_BB:.*]], label %[[START]]
; CHECK: [[SUCCESS_BB]]:
; CHECK: call void @llvm.arm.dmb(i32 11)
; CHECK: br label %[[END:.*]]
; CHECK: [[NO_STORE_BB]]:
; CHECK: call void @llvm.arm.clrex()
; CHECK: br label %[[FAILURE_BB:.*]]
; CHECK: [[FAILURE_BB]]:
; CHECK: call void @llvm.arm.dmb(i32 11)
; CHECK: br label %[[END]]
; CHECK: [[END]]:
; CHECK: [[SUCCESS:%.*]] = phi i1 [ true, %[[SUCCESS_BB]] ], [ false, %[[FAILURE_BB]] ]
; CHECK: ret i32 [[LOADED]]
%pair = cmpxchg i32* %addr, i32 %desired, i32 %new seq_cst seq_cst
%oldval = extractvalue { i32, i1 } %pair, 0
ret i32 %oldval
}


@@ -1,242 +0,0 @@
; RUN: opt -S -o - -mtriple=armv8-linux-gnueabihf -atomic-expand %s -codegen-opt-level=1 | FileCheck %s
define i8 @test_atomic_xchg_i8(i8* %ptr, i8 %xchgend) {
; CHECK-LABEL: @test_atomic_xchg_i8
; CHECK-NOT: fence
; CHECK: br label %[[LOOP:.*]]
; CHECK: [[LOOP]]:
; CHECK: [[OLDVAL32:%.*]] = call i32 @llvm.arm.ldrex.p0i8(i8* %ptr)
; CHECK: [[OLDVAL:%.*]] = trunc i32 [[OLDVAL32]] to i8
; CHECK: [[NEWVAL32:%.*]] = zext i8 %xchgend to i32
; CHECK: [[TRYAGAIN:%.*]] = call i32 @llvm.arm.strex.p0i8(i32 [[NEWVAL32]], i8* %ptr)
; CHECK: [[TST:%.*]] = icmp ne i32 [[TRYAGAIN]], 0
; CHECK: br i1 [[TST]], label %[[LOOP]], label %[[END:.*]]
; CHECK: [[END]]:
; CHECK-NOT: fence
; CHECK: ret i8 [[OLDVAL]]
%res = atomicrmw xchg i8* %ptr, i8 %xchgend monotonic
ret i8 %res
}
define i16 @test_atomic_add_i16(i16* %ptr, i16 %addend) {
; CHECK-LABEL: @test_atomic_add_i16
; CHECK-NOT: fence
; CHECK: br label %[[LOOP:.*]]
; CHECK: [[LOOP]]:
; CHECK: [[OLDVAL32:%.*]] = call i32 @llvm.arm.ldaex.p0i16(i16* %ptr)
; CHECK: [[OLDVAL:%.*]] = trunc i32 [[OLDVAL32]] to i16
; CHECK: [[NEWVAL:%.*]] = add i16 [[OLDVAL]], %addend
; CHECK: [[NEWVAL32:%.*]] = zext i16 [[NEWVAL]] to i32
; CHECK: [[TRYAGAIN:%.*]] = call i32 @llvm.arm.stlex.p0i16(i32 [[NEWVAL32]], i16* %ptr)
; CHECK: [[TST:%.*]] = icmp ne i32 [[TRYAGAIN]], 0
; CHECK: br i1 [[TST]], label %[[LOOP]], label %[[END:.*]]
; CHECK: [[END]]:
; CHECK-NOT: fence
; CHECK: ret i16 [[OLDVAL]]
%res = atomicrmw add i16* %ptr, i16 %addend seq_cst
ret i16 %res
}
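; A hand-written sketch (illustrative only; names invented here) of the
; expansion the CHECK lines above describe: on ARMv8 the acquire/release
; semantics come from the ldaex/stlex pair itself, so no separate fences are
; emitted even for a seq_cst operation.
define i16 @sketch_atomic_add_i16_expanded(i16* %ptr, i16 %addend) {
entry:
  br label %loop
loop:
  %oldval32 = call i32 @llvm.arm.ldaex.p0i16(i16* %ptr)
  %oldval = trunc i32 %oldval32 to i16
  %newval = add i16 %oldval, %addend
  %newval32 = zext i16 %newval to i32
  %failed = call i32 @llvm.arm.stlex.p0i16(i32 %newval32, i16* %ptr)
  %tst = icmp ne i32 %failed, 0
  br i1 %tst, label %loop, label %end
end:
  ret i16 %oldval
}
declare i32 @llvm.arm.ldaex.p0i16(i16*)
declare i32 @llvm.arm.stlex.p0i16(i32, i16*)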
define i32 @test_atomic_sub_i32(i32* %ptr, i32 %subend) {
; CHECK-LABEL: @test_atomic_sub_i32
; CHECK-NOT: fence
; CHECK: br label %[[LOOP:.*]]
; CHECK: [[LOOP]]:
; CHECK: [[OLDVAL:%.*]] = call i32 @llvm.arm.ldaex.p0i32(i32* %ptr)
; CHECK: [[NEWVAL:%.*]] = sub i32 [[OLDVAL]], %subend
; CHECK: [[TRYAGAIN:%.*]] = call i32 @llvm.arm.strex.p0i32(i32 [[NEWVAL]], i32* %ptr)
; CHECK: [[TST:%.*]] = icmp ne i32 [[TRYAGAIN]], 0
; CHECK: br i1 [[TST]], label %[[LOOP]], label %[[END:.*]]
; CHECK: [[END]]:
; CHECK-NOT: fence
; CHECK: ret i32 [[OLDVAL]]
%res = atomicrmw sub i32* %ptr, i32 %subend acquire
ret i32 %res
}
define i64 @test_atomic_or_i64(i64* %ptr, i64 %orend) {
; CHECK-LABEL: @test_atomic_or_i64
; CHECK-NOT: fence
; CHECK: br label %[[LOOP:.*]]
; CHECK: [[LOOP]]:
; CHECK: [[PTR8:%.*]] = bitcast i64* %ptr to i8*
; CHECK: [[LOHI:%.*]] = call { i32, i32 } @llvm.arm.ldaexd(i8* [[PTR8]])
; CHECK: [[LO:%.*]] = extractvalue { i32, i32 } [[LOHI]], 0
; CHECK: [[HI:%.*]] = extractvalue { i32, i32 } [[LOHI]], 1
; CHECK: [[LO64:%.*]] = zext i32 [[LO]] to i64
; CHECK: [[HI64_TMP:%.*]] = zext i32 [[HI]] to i64
; CHECK: [[HI64:%.*]] = shl i64 [[HI64_TMP]], 32
; CHECK: [[OLDVAL:%.*]] = or i64 [[LO64]], [[HI64]]
; CHECK: [[NEWVAL:%.*]] = or i64 [[OLDVAL]], %orend
; CHECK: [[NEWLO:%.*]] = trunc i64 [[NEWVAL]] to i32
; CHECK: [[NEWHI_TMP:%.*]] = lshr i64 [[NEWVAL]], 32
; CHECK: [[NEWHI:%.*]] = trunc i64 [[NEWHI_TMP]] to i32
; CHECK: [[PTR8:%.*]] = bitcast i64* %ptr to i8*
; CHECK: [[TRYAGAIN:%.*]] = call i32 @llvm.arm.stlexd(i32 [[NEWLO]], i32 [[NEWHI]], i8* [[PTR8]])
; CHECK: [[TST:%.*]] = icmp ne i32 [[TRYAGAIN]], 0
; CHECK: br i1 [[TST]], label %[[LOOP]], label %[[END:.*]]
; CHECK: [[END]]:
; CHECK-NOT: fence
; CHECK: ret i64 [[OLDVAL]]
%res = atomicrmw or i64* %ptr, i64 %orend seq_cst
ret i64 %res
}
define i8 @test_cmpxchg_i8_seqcst_seqcst(i8* %ptr, i8 %desired, i8 %newval) {
; CHECK-LABEL: @test_cmpxchg_i8_seqcst_seqcst
; CHECK-NOT: fence
; CHECK: br label %[[LOOP:.*]]
; CHECK: [[LOOP]]:
; CHECK: [[OLDVAL32:%.*]] = call i32 @llvm.arm.ldaex.p0i8(i8* %ptr)
; CHECK: [[OLDVAL:%.*]] = trunc i32 [[OLDVAL32]] to i8
; CHECK: [[SHOULD_STORE:%.*]] = icmp eq i8 [[OLDVAL]], %desired
; CHECK: br i1 [[SHOULD_STORE]], label %[[TRY_STORE:.*]], label %[[NO_STORE_BB:.*]]
; CHECK: [[TRY_STORE]]:
; CHECK: [[NEWVAL32:%.*]] = zext i8 %newval to i32
; CHECK: [[TRYAGAIN:%.*]] = call i32 @llvm.arm.stlex.p0i8(i32 [[NEWVAL32]], i8* %ptr)
; CHECK: [[TST:%.*]] = icmp eq i32 [[TRYAGAIN]], 0
; CHECK: br i1 [[TST]], label %[[SUCCESS_BB:.*]], label %[[LOOP]]
; CHECK: [[SUCCESS_BB]]:
; CHECK-NOT: fence_cst
; CHECK: br label %[[DONE:.*]]
; CHECK: [[NO_STORE_BB]]:
; CHECK-NEXT: call void @llvm.arm.clrex()
; CHECK-NEXT: br label %[[FAILURE_BB:.*]]
; CHECK: [[FAILURE_BB]]:
; CHECK-NOT: fence_cst
; CHECK: br label %[[DONE]]
; CHECK: [[DONE]]:
; CHECK: [[SUCCESS:%.*]] = phi i1 [ true, %[[SUCCESS_BB]] ], [ false, %[[FAILURE_BB]] ]
; CHECK: ret i8 [[OLDVAL]]
%pairold = cmpxchg i8* %ptr, i8 %desired, i8 %newval seq_cst seq_cst
%old = extractvalue { i8, i1 } %pairold, 0
ret i8 %old
}
define i16 @test_cmpxchg_i16_seqcst_monotonic(i16* %ptr, i16 %desired, i16 %newval) {
; CHECK-LABEL: @test_cmpxchg_i16_seqcst_monotonic
; CHECK-NOT: fence
; CHECK: br label %[[LOOP:.*]]
; CHECK: [[LOOP]]:
; CHECK: [[OLDVAL32:%.*]] = call i32 @llvm.arm.ldaex.p0i16(i16* %ptr)
; CHECK: [[OLDVAL:%.*]] = trunc i32 [[OLDVAL32]] to i16
; CHECK: [[SHOULD_STORE:%.*]] = icmp eq i16 [[OLDVAL]], %desired
; CHECK: br i1 [[SHOULD_STORE]], label %[[TRY_STORE:.*]], label %[[NO_STORE_BB:.*]]
; CHECK: [[TRY_STORE]]:
; CHECK: [[NEWVAL32:%.*]] = zext i16 %newval to i32
; CHECK: [[TRYAGAIN:%.*]] = call i32 @llvm.arm.stlex.p0i16(i32 [[NEWVAL32]], i16* %ptr)
; CHECK: [[TST:%.*]] = icmp eq i32 [[TRYAGAIN]], 0
; CHECK: br i1 [[TST]], label %[[SUCCESS_BB:.*]], label %[[LOOP]]
; CHECK: [[SUCCESS_BB]]:
; CHECK-NOT: fence
; CHECK: br label %[[DONE:.*]]
; CHECK: [[NO_STORE_BB]]:
; CHECK-NEXT: call void @llvm.arm.clrex()
; CHECK-NEXT: br label %[[FAILURE_BB:.*]]
; CHECK: [[FAILURE_BB]]:
; CHECK-NOT: fence
; CHECK: br label %[[DONE]]
; CHECK: [[DONE]]:
; CHECK: [[SUCCESS:%.*]] = phi i1 [ true, %[[SUCCESS_BB]] ], [ false, %[[FAILURE_BB]] ]
; CHECK: ret i16 [[OLDVAL]]
%pairold = cmpxchg i16* %ptr, i16 %desired, i16 %newval seq_cst monotonic
%old = extractvalue { i16, i1 } %pairold, 0
ret i16 %old
}
define i32 @test_cmpxchg_i32_acquire_acquire(i32* %ptr, i32 %desired, i32 %newval) {
; CHECK-LABEL: @test_cmpxchg_i32_acquire_acquire
; CHECK-NOT: fence
; CHECK: br label %[[LOOP:.*]]
; CHECK: [[LOOP]]:
; CHECK: [[OLDVAL:%.*]] = call i32 @llvm.arm.ldaex.p0i32(i32* %ptr)
; CHECK: [[SHOULD_STORE:%.*]] = icmp eq i32 [[OLDVAL]], %desired
; CHECK: br i1 [[SHOULD_STORE]], label %[[TRY_STORE:.*]], label %[[NO_STORE_BB:.*]]
; CHECK: [[TRY_STORE]]:
; CHECK: [[TRYAGAIN:%.*]] = call i32 @llvm.arm.strex.p0i32(i32 %newval, i32* %ptr)
; CHECK: [[TST:%.*]] = icmp eq i32 [[TRYAGAIN]], 0
; CHECK: br i1 [[TST]], label %[[SUCCESS_BB:.*]], label %[[LOOP]]
; CHECK: [[SUCCESS_BB]]:
; CHECK-NOT: fence_cst
; CHECK: br label %[[DONE:.*]]
; CHECK: [[NO_STORE_BB]]:
; CHECK-NEXT: call void @llvm.arm.clrex()
; CHECK-NEXT: br label %[[FAILURE_BB:.*]]
; CHECK: [[FAILURE_BB]]:
; CHECK-NOT: fence_cst
; CHECK: br label %[[DONE]]
; CHECK: [[DONE]]:
; CHECK: [[SUCCESS:%.*]] = phi i1 [ true, %[[SUCCESS_BB]] ], [ false, %[[FAILURE_BB]] ]
; CHECK: ret i32 [[OLDVAL]]
%pairold = cmpxchg i32* %ptr, i32 %desired, i32 %newval acquire acquire
%old = extractvalue { i32, i1 } %pairold, 0
ret i32 %old
}
define i64 @test_cmpxchg_i64_monotonic_monotonic(i64* %ptr, i64 %desired, i64 %newval) {
; CHECK-LABEL: @test_cmpxchg_i64_monotonic_monotonic
; CHECK-NOT: fence
; CHECK: br label %[[LOOP:.*]]
; CHECK: [[LOOP]]:
; CHECK: [[PTR8:%.*]] = bitcast i64* %ptr to i8*
; CHECK: [[LOHI:%.*]] = call { i32, i32 } @llvm.arm.ldrexd(i8* [[PTR8]])
; CHECK: [[LO:%.*]] = extractvalue { i32, i32 } [[LOHI]], 0
; CHECK: [[HI:%.*]] = extractvalue { i32, i32 } [[LOHI]], 1
; CHECK: [[LO64:%.*]] = zext i32 [[LO]] to i64
; CHECK: [[HI64_TMP:%.*]] = zext i32 [[HI]] to i64
; CHECK: [[HI64:%.*]] = shl i64 [[HI64_TMP]], 32
; CHECK: [[OLDVAL:%.*]] = or i64 [[LO64]], [[HI64]]
; CHECK: [[SHOULD_STORE:%.*]] = icmp eq i64 [[OLDVAL]], %desired
; CHECK: br i1 [[SHOULD_STORE]], label %[[TRY_STORE:.*]], label %[[NO_STORE_BB:.*]]
; CHECK: [[TRY_STORE]]:
; CHECK: [[NEWLO:%.*]] = trunc i64 %newval to i32
; CHECK: [[NEWHI_TMP:%.*]] = lshr i64 %newval, 32
; CHECK: [[NEWHI:%.*]] = trunc i64 [[NEWHI_TMP]] to i32
; CHECK: [[PTR8:%.*]] = bitcast i64* %ptr to i8*
; CHECK: [[TRYAGAIN:%.*]] = call i32 @llvm.arm.strexd(i32 [[NEWLO]], i32 [[NEWHI]], i8* [[PTR8]])
; CHECK: [[TST:%.*]] = icmp eq i32 [[TRYAGAIN]], 0
; CHECK: br i1 [[TST]], label %[[SUCCESS_BB:.*]], label %[[LOOP]]
; CHECK: [[SUCCESS_BB]]:
; CHECK-NOT: fence_cst
; CHECK: br label %[[DONE:.*]]
; CHECK: [[NO_STORE_BB]]:
; CHECK-NEXT: call void @llvm.arm.clrex()
; CHECK-NEXT: br label %[[FAILURE_BB:.*]]
; CHECK: [[FAILURE_BB]]:
; CHECK-NOT: fence_cst
; CHECK: br label %[[DONE]]
; CHECK: [[DONE]]:
; CHECK: [[SUCCESS:%.*]] = phi i1 [ true, %[[SUCCESS_BB]] ], [ false, %[[FAILURE_BB]] ]
; CHECK: ret i64 [[OLDVAL]]
%pairold = cmpxchg i64* %ptr, i64 %desired, i64 %newval monotonic monotonic
%old = extractvalue { i64, i1 } %pairold, 0
ret i64 %old
}


@@ -1,155 +0,0 @@
; RUN: opt -atomic-expand -codegen-opt-level=1 -S -mtriple=thumbv7s-apple-ios7.0 %s | FileCheck %s
define i32 @test_cmpxchg_seq_cst(i32* %addr, i32 %desired, i32 %new) {
; CHECK-LABEL: @test_cmpxchg_seq_cst
; The intrinsic for "dmb ishst" (@llvm.arm.dmb(i32 10)) is expected before the store is attempted.
; CHECK: br label %[[START:.*]]
; CHECK: [[START]]:
; CHECK: [[LOADED:%.*]] = call i32 @llvm.arm.ldrex.p0i32(i32* %addr)
; CHECK: [[SHOULD_STORE:%.*]] = icmp eq i32 [[LOADED]], %desired
; CHECK: br i1 [[SHOULD_STORE]], label %[[FENCED_STORE:.*]], label %[[NO_STORE_BB:.*]]
; CHECK: [[FENCED_STORE]]:
; CHECK: call void @llvm.arm.dmb(i32 10)
; CHECK: br label %[[TRY_STORE:.*]]
; CHECK: [[TRY_STORE]]:
; CHECK: [[STREX:%.*]] = call i32 @llvm.arm.strex.p0i32(i32 %new, i32* %addr)
; CHECK: [[SUCCESS:%.*]] = icmp eq i32 [[STREX]], 0
; CHECK: br i1 [[SUCCESS]], label %[[SUCCESS_BB:.*]], label %[[FAILURE_BB:.*]]
; CHECK: [[SUCCESS_BB]]:
; CHECK: call void @llvm.arm.dmb(i32 11)
; CHECK: br label %[[END:.*]]
; CHECK: [[NO_STORE_BB]]:
; CHECK: call void @llvm.arm.clrex()
; CHECK: br label %[[FAILURE_BB]]
; CHECK: [[FAILURE_BB]]:
; CHECK: call void @llvm.arm.dmb(i32 11)
; CHECK: br label %[[END]]
; CHECK: [[END]]:
; CHECK: [[SUCCESS:%.*]] = phi i1 [ true, %[[SUCCESS_BB]] ], [ false, %[[FAILURE_BB]] ]
; CHECK: ret i32 [[LOADED]]
%pair = cmpxchg weak i32* %addr, i32 %desired, i32 %new seq_cst seq_cst
%oldval = extractvalue { i32, i1 } %pair, 0
ret i32 %oldval
}
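; A hand-written sketch (illustrative only; names invented here) of the control
; flow the CHECK lines above describe for a weak seq_cst cmpxchg: "dmb ishst"
; (argument 10) is emitted only on the path that actually attempts the store,
; while the full "dmb ish" barrier (argument 11) follows on both exits.
define i32 @sketch_cmpxchg_weak_seq_cst_expanded(i32* %addr, i32 %desired, i32 %new) {
entry:
  br label %start
start:
  %loaded = call i32 @llvm.arm.ldrex.p0i32(i32* %addr)
  %should_store = icmp eq i32 %loaded, %desired
  br i1 %should_store, label %fenced_store, label %no_store
fenced_store:
  call void @llvm.arm.dmb(i32 10)    ; dmb ishst
  br label %try_store
try_store:
  %strex = call i32 @llvm.arm.strex.p0i32(i32 %new, i32* %addr)
  %stored = icmp eq i32 %strex, 0
  br i1 %stored, label %success, label %failure
no_store:
  call void @llvm.arm.clrex()
  br label %failure
success:
  call void @llvm.arm.dmb(i32 11)    ; dmb ish
  br label %done
failure:
  call void @llvm.arm.dmb(i32 11)
  br label %done
done:
  %ok = phi i1 [ true, %success ], [ false, %failure ]
  ret i32 %loaded
}
declare i32 @llvm.arm.ldrex.p0i32(i32*)
declare i32 @llvm.arm.strex.p0i32(i32, i32*)
declare void @llvm.arm.dmb(i32)
declare void @llvm.arm.clrex()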
define i1 @test_cmpxchg_weak_fail(i32* %addr, i32 %desired, i32 %new) {
; CHECK-LABEL: @test_cmpxchg_weak_fail
; CHECK: br label %[[START:.*]]
; CHECK: [[START]]:
; CHECK: [[LOADED:%.*]] = call i32 @llvm.arm.ldrex.p0i32(i32* %addr)
; CHECK: [[SHOULD_STORE:%.*]] = icmp eq i32 [[LOADED]], %desired
; CHECK: br i1 [[SHOULD_STORE]], label %[[FENCED_STORE:.*]], label %[[NO_STORE_BB:.*]]
; CHECK: [[FENCED_STORE]]:
; CHECK: call void @llvm.arm.dmb(i32 10)
; CHECK: br label %[[TRY_STORE:.*]]
; CHECK: [[TRY_STORE]]:
; CHECK: [[STREX:%.*]] = call i32 @llvm.arm.strex.p0i32(i32 %new, i32* %addr)
; CHECK: [[SUCCESS:%.*]] = icmp eq i32 [[STREX]], 0
; CHECK: br i1 [[SUCCESS]], label %[[SUCCESS_BB:.*]], label %[[FAILURE_BB:.*]]
; CHECK: [[SUCCESS_BB]]:
; CHECK: call void @llvm.arm.dmb(i32 11)
; CHECK: br label %[[END:.*]]
; CHECK: [[NO_STORE_BB]]:
; CHECK: call void @llvm.arm.clrex()
; CHECK: br label %[[FAILURE_BB]]
; CHECK: [[FAILURE_BB]]:
; CHECK-NOT: dmb
; CHECK: br label %[[END]]
; CHECK: [[END]]:
; CHECK: [[SUCCESS:%.*]] = phi i1 [ true, %[[SUCCESS_BB]] ], [ false, %[[FAILURE_BB]] ]
; CHECK: ret i1 [[SUCCESS]]
%pair = cmpxchg weak i32* %addr, i32 %desired, i32 %new seq_cst monotonic
%oldval = extractvalue { i32, i1 } %pair, 1
ret i1 %oldval
}
define i32 @test_cmpxchg_monotonic(i32* %addr, i32 %desired, i32 %new) {
; CHECK-LABEL: @test_cmpxchg_monotonic
; CHECK-NOT: dmb
; CHECK: br label %[[START:.*]]
; CHECK: [[START]]:
; CHECK: [[LOADED:%.*]] = call i32 @llvm.arm.ldrex.p0i32(i32* %addr)
; CHECK: [[SHOULD_STORE:%.*]] = icmp eq i32 [[LOADED]], %desired
; CHECK: br i1 [[SHOULD_STORE]], label %[[TRY_STORE:.*]], label %[[NO_STORE_BB:.*]]
; CHECK: [[TRY_STORE]]:
; CHECK: [[STREX:%.*]] = call i32 @llvm.arm.strex.p0i32(i32 %new, i32* %addr)
; CHECK: [[SUCCESS:%.*]] = icmp eq i32 [[STREX]], 0
; CHECK: br i1 [[SUCCESS]], label %[[SUCCESS_BB:.*]], label %[[FAILURE_BB:.*]]
; CHECK: [[SUCCESS_BB]]:
; CHECK-NOT: dmb
; CHECK: br label %[[END:.*]]
; CHECK: [[NO_STORE_BB]]:
; CHECK: call void @llvm.arm.clrex()
; CHECK: br label %[[FAILURE_BB]]
; CHECK: [[FAILURE_BB]]:
; CHECK-NOT: dmb
; CHECK: br label %[[END]]
; CHECK: [[END]]:
; CHECK: [[SUCCESS:%.*]] = phi i1 [ true, %[[SUCCESS_BB]] ], [ false, %[[FAILURE_BB]] ]
; CHECK: ret i32 [[LOADED]]
%pair = cmpxchg weak i32* %addr, i32 %desired, i32 %new monotonic monotonic
%oldval = extractvalue { i32, i1 } %pair, 0
ret i32 %oldval
}
define i32 @test_cmpxchg_seq_cst_minsize(i32* %addr, i32 %desired, i32 %new) minsize {
; CHECK-LABEL: @test_cmpxchg_seq_cst_minsize
; CHECK: br label %[[START:.*]]
; CHECK: [[START]]:
; CHECK: [[LOADED:%.*]] = call i32 @llvm.arm.ldrex.p0i32(i32* %addr)
; CHECK: [[SHOULD_STORE:%.*]] = icmp eq i32 [[LOADED]], %desired
; CHECK: br i1 [[SHOULD_STORE]], label %[[FENCED_STORE:.*]], label %[[NO_STORE_BB:.*]]
; CHECK: [[FENCED_STORE]]:
; CHECK: call void @llvm.arm.dmb(i32 10)
; CHECK: br label %[[TRY_STORE:.*]]
; CHECK: [[TRY_STORE]]:
; CHECK: [[STREX:%.*]] = call i32 @llvm.arm.strex.p0i32(i32 %new, i32* %addr)
; CHECK: [[SUCCESS:%.*]] = icmp eq i32 [[STREX]], 0
; CHECK: br i1 [[SUCCESS]], label %[[SUCCESS_BB:.*]], label %[[FAILURE_BB:.*]]
; CHECK: [[SUCCESS_BB]]:
; CHECK: call void @llvm.arm.dmb(i32 11)
; CHECK: br label %[[END:.*]]
; CHECK: [[NO_STORE_BB]]:
; CHECK: call void @llvm.arm.clrex()
; CHECK: br label %[[FAILURE_BB]]
; CHECK: [[FAILURE_BB]]:
; CHECK: call void @llvm.arm.dmb(i32 11)
; CHECK: br label %[[END]]
; CHECK: [[END]]:
; CHECK: [[SUCCESS:%.*]] = phi i1 [ true, %[[SUCCESS_BB]] ], [ false, %[[FAILURE_BB]] ]
; CHECK: ret i32 [[LOADED]]
%pair = cmpxchg weak i32* %addr, i32 %desired, i32 %new seq_cst seq_cst
%oldval = extractvalue { i32, i1 } %pair, 0
ret i32 %oldval
}


@@ -1,3 +0,0 @@
if 'ARM' not in config.root.targets:
config.unsupported = True


@@ -1,257 +0,0 @@
; RUN: opt -S %s -atomic-expand | FileCheck %s
;;; NOTE: this test is actually target-independent -- any target which
;;; doesn't support inline atomics can be used. (E.g. X86 i386 would
;;; work, if LLVM is properly taught about what it's missing vs i586.)
;target datalayout = "e-m:e-p:32:32-f64:32:64-f80:32-n8:16:32-S128"
;target triple = "i386-unknown-unknown"
target datalayout = "e-m:e-p:32:32-i64:64-f128:64-n32-S64"
target triple = "sparc-unknown-unknown"
;; First, check the sized calls. Except for cmpxchg, these are fairly
;; straightforward.
; CHECK-LABEL: @test_load_i16(
; CHECK: %1 = bitcast i16* %arg to i8*
; CHECK: %2 = call i16 @__atomic_load_2(i8* %1, i32 5)
; CHECK: ret i16 %2
define i16 @test_load_i16(i16* %arg) {
%ret = load atomic i16, i16* %arg seq_cst, align 4
ret i16 %ret
}
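;; A hand-written sketch (illustrative only; names invented here) of the expanded
;; form the CHECK lines above describe: the seq_cst i16 load becomes a call to the
;; size-specialized libcall __atomic_load_2, where the trailing argument 5 is the
;; C ABI encoding of memory_order_seq_cst.
define i16 @sketch_load_i16_expanded(i16* %arg) {
  %addr = bitcast i16* %arg to i8*
  %val = call i16 @__atomic_load_2(i8* %addr, i32 5)
  ret i16 %val
}
declare i16 @__atomic_load_2(i8*, i32)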
; CHECK-LABEL: @test_store_i16(
; CHECK: %1 = bitcast i16* %arg to i8*
; CHECK: call void @__atomic_store_2(i8* %1, i16 %val, i32 5)
; CHECK: ret void
define void @test_store_i16(i16* %arg, i16 %val) {
store atomic i16 %val, i16* %arg seq_cst, align 4
ret void
}
; CHECK-LABEL: @test_exchange_i16(
; CHECK: %1 = bitcast i16* %arg to i8*
; CHECK: %2 = call i16 @__atomic_exchange_2(i8* %1, i16 %val, i32 5)
; CHECK: ret i16 %2
define i16 @test_exchange_i16(i16* %arg, i16 %val) {
%ret = atomicrmw xchg i16* %arg, i16 %val seq_cst
ret i16 %ret
}
; CHECK-LABEL: @test_cmpxchg_i16(
; CHECK: %1 = bitcast i16* %arg to i8*
; CHECK: %2 = alloca i16, align 2
; CHECK: %3 = bitcast i16* %2 to i8*
; CHECK: call void @llvm.lifetime.start.p0i8(i64 2, i8* %3)
; CHECK: store i16 %old, i16* %2, align 2
; CHECK: %4 = call zeroext i1 @__atomic_compare_exchange_2(i8* %1, i8* %3, i16 %new, i32 5, i32 0)
; CHECK: %5 = load i16, i16* %2, align 2
; CHECK: call void @llvm.lifetime.end.p0i8(i64 2, i8* %3)
; CHECK: %6 = insertvalue { i16, i1 } undef, i16 %5, 0
; CHECK: %7 = insertvalue { i16, i1 } %6, i1 %4, 1
; CHECK: %ret = extractvalue { i16, i1 } %7, 0
; CHECK: ret i16 %ret
define i16 @test_cmpxchg_i16(i16* %arg, i16 %old, i16 %new) {
%ret_succ = cmpxchg i16* %arg, i16 %old, i16 %new seq_cst monotonic
%ret = extractvalue { i16, i1 } %ret_succ, 0
ret i16 %ret
}
; CHECK-LABEL: @test_add_i16(
; CHECK: %1 = bitcast i16* %arg to i8*
; CHECK: %2 = call i16 @__atomic_fetch_add_2(i8* %1, i16 %val, i32 5)
; CHECK: ret i16 %2
define i16 @test_add_i16(i16* %arg, i16 %val) {
%ret = atomicrmw add i16* %arg, i16 %val seq_cst
ret i16 %ret
}
;; Now, check the output for the unsized libcalls. i128 is used for
;; these tests because the "16" suffixed functions aren't available on
;; 32-bit i386.
; CHECK-LABEL: @test_load_i128(
; CHECK: %1 = bitcast i128* %arg to i8*
; CHECK: %2 = alloca i128, align 8
; CHECK: %3 = bitcast i128* %2 to i8*
; CHECK: call void @llvm.lifetime.start.p0i8(i64 16, i8* %3)
; CHECK: call void @__atomic_load(i32 16, i8* %1, i8* %3, i32 5)
; CHECK: %4 = load i128, i128* %2, align 8
; CHECK: call void @llvm.lifetime.end.p0i8(i64 16, i8* %3)
; CHECK: ret i128 %4
define i128 @test_load_i128(i128* %arg) {
%ret = load atomic i128, i128* %arg seq_cst, align 16
ret i128 %ret
}
; CHECK-LABEL: @test_store_i128(
; CHECK: %1 = bitcast i128* %arg to i8*
; CHECK: %2 = alloca i128, align 8
; CHECK: %3 = bitcast i128* %2 to i8*
; CHECK: call void @llvm.lifetime.start.p0i8(i64 16, i8* %3)
; CHECK: store i128 %val, i128* %2, align 8
; CHECK: call void @__atomic_store(i32 16, i8* %1, i8* %3, i32 5)
; CHECK: call void @llvm.lifetime.end.p0i8(i64 16, i8* %3)
; CHECK: ret void
define void @test_store_i128(i128* %arg, i128 %val) {
store atomic i128 %val, i128* %arg seq_cst, align 16
ret void
}
; CHECK-LABEL: @test_exchange_i128(
; CHECK: %1 = bitcast i128* %arg to i8*
; CHECK: %2 = alloca i128, align 8
; CHECK: %3 = bitcast i128* %2 to i8*
; CHECK: call void @llvm.lifetime.start.p0i8(i64 16, i8* %3)
; CHECK: store i128 %val, i128* %2, align 8
; CHECK: %4 = alloca i128, align 8
; CHECK: %5 = bitcast i128* %4 to i8*
; CHECK: call void @llvm.lifetime.start.p0i8(i64 16, i8* %5)
; CHECK: call void @__atomic_exchange(i32 16, i8* %1, i8* %3, i8* %5, i32 5)
; CHECK: call void @llvm.lifetime.end.p0i8(i64 16, i8* %3)
; CHECK: %6 = load i128, i128* %4, align 8
; CHECK: call void @llvm.lifetime.end.p0i8(i64 16, i8* %5)
; CHECK: ret i128 %6
define i128 @test_exchange_i128(i128* %arg, i128 %val) {
%ret = atomicrmw xchg i128* %arg, i128 %val seq_cst
ret i128 %ret
}
; CHECK-LABEL: @test_cmpxchg_i128(
; CHECK: %1 = bitcast i128* %arg to i8*
; CHECK: %2 = alloca i128, align 8
; CHECK: %3 = bitcast i128* %2 to i8*
; CHECK: call void @llvm.lifetime.start.p0i8(i64 16, i8* %3)
; CHECK: store i128 %old, i128* %2, align 8
; CHECK: %4 = alloca i128, align 8
; CHECK: %5 = bitcast i128* %4 to i8*
; CHECK: call void @llvm.lifetime.start.p0i8(i64 16, i8* %5)
; CHECK: store i128 %new, i128* %4, align 8
; CHECK: %6 = call zeroext i1 @__atomic_compare_exchange(i32 16, i8* %1, i8* %3, i8* %5, i32 5, i32 0)
; CHECK: call void @llvm.lifetime.end.p0i8(i64 16, i8* %5)
; CHECK: %7 = load i128, i128* %2, align 8
; CHECK: call void @llvm.lifetime.end.p0i8(i64 16, i8* %3)
; CHECK: %8 = insertvalue { i128, i1 } undef, i128 %7, 0
; CHECK: %9 = insertvalue { i128, i1 } %8, i1 %6, 1
; CHECK: %ret = extractvalue { i128, i1 } %9, 0
; CHECK: ret i128 %ret
define i128 @test_cmpxchg_i128(i128* %arg, i128 %old, i128 %new) {
%ret_succ = cmpxchg i128* %arg, i128 %old, i128 %new seq_cst monotonic
%ret = extractvalue { i128, i1 } %ret_succ, 0
ret i128 %ret
}
; This one is a verbose expansion, as there is no generic
; __atomic_fetch_add function, so it needs to expand to a cmpxchg
; loop, which then itself expands into a libcall.
; CHECK-LABEL: @test_add_i128(
; CHECK: %1 = alloca i128, align 8
; CHECK: %2 = alloca i128, align 8
; CHECK: %3 = load i128, i128* %arg, align 16
; CHECK: br label %atomicrmw.start
; CHECK:atomicrmw.start:
; CHECK: %loaded = phi i128 [ %3, %0 ], [ %newloaded, %atomicrmw.start ]
; CHECK: %new = add i128 %loaded, %val
; CHECK: %4 = bitcast i128* %arg to i8*
; CHECK: %5 = bitcast i128* %1 to i8*
; CHECK: call void @llvm.lifetime.start.p0i8(i64 16, i8* %5)
; CHECK: store i128 %loaded, i128* %1, align 8
; CHECK: %6 = bitcast i128* %2 to i8*
; CHECK: call void @llvm.lifetime.start.p0i8(i64 16, i8* %6)
; CHECK: store i128 %new, i128* %2, align 8
; CHECK: %7 = call zeroext i1 @__atomic_compare_exchange(i32 16, i8* %4, i8* %5, i8* %6, i32 5, i32 5)
; CHECK: call void @llvm.lifetime.end.p0i8(i64 16, i8* %6)
; CHECK: %8 = load i128, i128* %1, align 8
; CHECK: call void @llvm.lifetime.end.p0i8(i64 16, i8* %5)
; CHECK: %9 = insertvalue { i128, i1 } undef, i128 %8, 0
; CHECK: %10 = insertvalue { i128, i1 } %9, i1 %7, 1
; CHECK: %success = extractvalue { i128, i1 } %10, 1
; CHECK: %newloaded = extractvalue { i128, i1 } %10, 0
; CHECK: br i1 %success, label %atomicrmw.end, label %atomicrmw.start
; CHECK:atomicrmw.end:
; CHECK: ret i128 %newloaded
define i128 @test_add_i128(i128* %arg, i128 %val) {
%ret = atomicrmw add i128* %arg, i128 %val seq_cst
ret i128 %ret
}
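; A hand-written sketch (illustrative only; names invented, and the { i128, i1 }
; insertvalue/extractvalue plumbing from the CHECK lines is elided) of the shape
; described above: the add first becomes a compare-exchange loop, and each
; cmpxchg in turn becomes a call to the generic __atomic_compare_exchange libcall.
define i128 @sketch_add_i128_expanded(i128* %arg, i128 %val) {
entry:
  %expected.buf = alloca i128, align 8
  %desired.buf = alloca i128, align 8
  %init = load i128, i128* %arg, align 16
  br label %atomicrmw.start
atomicrmw.start:
  %loaded = phi i128 [ %init, %entry ], [ %newloaded, %atomicrmw.start ]
  %new = add i128 %loaded, %val
  %arg.i8 = bitcast i128* %arg to i8*
  %expected.i8 = bitcast i128* %expected.buf to i8*
  call void @llvm.lifetime.start.p0i8(i64 16, i8* %expected.i8)
  store i128 %loaded, i128* %expected.buf, align 8
  %desired.i8 = bitcast i128* %desired.buf to i8*
  call void @llvm.lifetime.start.p0i8(i64 16, i8* %desired.i8)
  store i128 %new, i128* %desired.buf, align 8
  %ok = call zeroext i1 @__atomic_compare_exchange(i32 16, i8* %arg.i8, i8* %expected.i8, i8* %desired.i8, i32 5, i32 5)
  call void @llvm.lifetime.end.p0i8(i64 16, i8* %desired.i8)
  %newloaded = load i128, i128* %expected.buf, align 8
  call void @llvm.lifetime.end.p0i8(i64 16, i8* %expected.i8)
  br i1 %ok, label %atomicrmw.end, label %atomicrmw.start
atomicrmw.end:
  ret i128 %newloaded
}
declare zeroext i1 @__atomic_compare_exchange(i32, i8*, i8*, i8*, i32, i32)
declare void @llvm.lifetime.start.p0i8(i64, i8*)
declare void @llvm.lifetime.end.p0i8(i64, i8*)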
;; Ensure that non-integer types get bitcast correctly on the way in and out of a libcall:
; CHECK-LABEL: @test_load_double(
; CHECK: %1 = bitcast double* %arg to i8*
; CHECK: %2 = call i64 @__atomic_load_8(i8* %1, i32 5)
; CHECK: %3 = bitcast i64 %2 to double
; CHECK: ret double %3
define double @test_load_double(double* %arg, double %val) {
%1 = load atomic double, double* %arg seq_cst, align 16
ret double %1
}
; CHECK-LABEL: @test_store_double(
; CHECK: %1 = bitcast double* %arg to i8*
; CHECK: %2 = bitcast double %val to i64
; CHECK: call void @__atomic_store_8(i8* %1, i64 %2, i32 5)
; CHECK: ret void
define void @test_store_double(double* %arg, double %val) {
store atomic double %val, double* %arg seq_cst, align 16
ret void
}
; CHECK-LABEL: @test_cmpxchg_ptr(
; CHECK: %1 = bitcast i16** %arg to i8*
; CHECK: %2 = alloca i16*, align 4
; CHECK: %3 = bitcast i16** %2 to i8*
; CHECK: call void @llvm.lifetime.start.p0i8(i64 4, i8* %3)
; CHECK: store i16* %old, i16** %2, align 4
; CHECK: %4 = ptrtoint i16* %new to i32
; CHECK: %5 = call zeroext i1 @__atomic_compare_exchange_4(i8* %1, i8* %3, i32 %4, i32 5, i32 2)
; CHECK: %6 = load i16*, i16** %2, align 4
; CHECK: call void @llvm.lifetime.end.p0i8(i64 4, i8* %3)
; CHECK: %7 = insertvalue { i16*, i1 } undef, i16* %6, 0
; CHECK: %8 = insertvalue { i16*, i1 } %7, i1 %5, 1
; CHECK: %ret = extractvalue { i16*, i1 } %8, 0
; CHECK: ret i16* %ret
; CHECK: }
define i16* @test_cmpxchg_ptr(i16** %arg, i16* %old, i16* %new) {
%ret_succ = cmpxchg i16** %arg, i16* %old, i16* %new seq_cst acquire
%ret = extractvalue { i16*, i1 } %ret_succ, 0
ret i16* %ret
}
;; ...and for a non-integer type of large size too.
; CHECK-LABEL: @test_store_fp128
; CHECK: %1 = bitcast fp128* %arg to i8*
; CHECK: %2 = alloca fp128, align 8
; CHECK: %3 = bitcast fp128* %2 to i8*
; CHECK: call void @llvm.lifetime.start.p0i8(i64 16, i8* %3)
; CHECK: store fp128 %val, fp128* %2, align 8
; CHECK: call void @__atomic_store(i32 16, i8* %1, i8* %3, i32 5)
; CHECK: call void @llvm.lifetime.end.p0i8(i64 16, i8* %3)
; CHECK: ret void
define void @test_store_fp128(fp128* %arg, fp128 %val) {
store atomic fp128 %val, fp128* %arg seq_cst, align 16
ret void
}
;; Unaligned loads and stores should be expanded to the generic
;; libcall, just like large loads/stores, and not a specialized one.
;; NOTE: atomicrmw and cmpxchg don't yet support an align attribute;
;; when such support is added, they should also be tested here.
; CHECK-LABEL: @test_unaligned_load_i16(
; CHECK: __atomic_load(
define i16 @test_unaligned_load_i16(i16* %arg) {
%ret = load atomic i16, i16* %arg seq_cst, align 1
ret i16 %ret
}
; CHECK-LABEL: @test_unaligned_store_i16(
; CHECK: __atomic_store(
define void @test_unaligned_store_i16(i16* %arg, i16 %val) {
store atomic i16 %val, i16* %arg seq_cst, align 1
ret void
}


@@ -1,2 +0,0 @@
if 'Sparc' not in config.root.targets:
config.unsupported = True


@@ -1,166 +0,0 @@
; RUN: opt -S %s -atomic-expand | FileCheck %s
;; Verify the cmpxchg and atomicrmw expansions where sub-word-size
;; instructions are not available.
;;; NOTE: this test is mostly target-independent -- any target which
;;; doesn't support cmpxchg of sub-word sizes would do.
target datalayout = "E-m:e-i64:64-n32:64-S128"
target triple = "sparcv9-unknown-unknown"
; CHECK-LABEL: @test_cmpxchg_i8(
; CHECK: fence seq_cst
; CHECK: %0 = ptrtoint i8* %arg to i64
; CHECK: %1 = and i64 %0, -4
; CHECK: %AlignedAddr = inttoptr i64 %1 to i32*
; CHECK: %PtrLSB = and i64 %0, 3
; CHECK: %2 = xor i64 %PtrLSB, 3
; CHECK: %3 = shl i64 %2, 3
; CHECK: %ShiftAmt = trunc i64 %3 to i32
; CHECK: %Mask = shl i32 255, %ShiftAmt
; CHECK: %Inv_Mask = xor i32 %Mask, -1
; CHECK: %4 = zext i8 %new to i32
; CHECK: %5 = shl i32 %4, %ShiftAmt
; CHECK: %6 = zext i8 %old to i32
; CHECK: %7 = shl i32 %6, %ShiftAmt
; CHECK: %8 = load i32, i32* %AlignedAddr
; CHECK: %9 = and i32 %8, %Inv_Mask
; CHECK: br label %partword.cmpxchg.loop
; CHECK:partword.cmpxchg.loop:
; CHECK: %10 = phi i32 [ %9, %entry ], [ %16, %partword.cmpxchg.failure ]
; CHECK: %11 = or i32 %10, %5
; CHECK: %12 = or i32 %10, %7
; CHECK: %13 = cmpxchg i32* %AlignedAddr, i32 %12, i32 %11 monotonic monotonic
; CHECK: %14 = extractvalue { i32, i1 } %13, 0
; CHECK: %15 = extractvalue { i32, i1 } %13, 1
; CHECK: br i1 %15, label %partword.cmpxchg.end, label %partword.cmpxchg.failure
; CHECK:partword.cmpxchg.failure:
; CHECK: %16 = and i32 %14, %Inv_Mask
; CHECK: %17 = icmp ne i32 %10, %16
; CHECK: br i1 %17, label %partword.cmpxchg.loop, label %partword.cmpxchg.end
; CHECK:partword.cmpxchg.end:
; CHECK: %18 = lshr i32 %14, %ShiftAmt
; CHECK: %19 = trunc i32 %18 to i8
; CHECK: %20 = insertvalue { i8, i1 } undef, i8 %19, 0
; CHECK: %21 = insertvalue { i8, i1 } %20, i1 %15, 1
; CHECK: fence seq_cst
; CHECK: %ret = extractvalue { i8, i1 } %21, 0
; CHECK: ret i8 %ret
define i8 @test_cmpxchg_i8(i8* %arg, i8 %old, i8 %new) {
entry:
%ret_succ = cmpxchg i8* %arg, i8 %old, i8 %new seq_cst monotonic
%ret = extractvalue { i8, i1 } %ret_succ, 0
ret i8 %ret
}
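; A hand-written sketch (illustrative only; names invented) of the expansion the
; CHECK lines above describe: the i8 cmpxchg is widened to the containing aligned
; i32 word, the byte is isolated by a shift and mask (on this big-endian target
; byte 0 is the most significant, hence the xor with 3), and the word-sized
; cmpxchg is retried as long as only the untouched bytes of the word change.
define i8 @sketch_cmpxchg_i8_expanded(i8* %arg, i8 %old, i8 %new) {
entry:
  fence seq_cst
  %addr = ptrtoint i8* %arg to i64
  %aligned = and i64 %addr, -4
  %AlignedAddr = inttoptr i64 %aligned to i32*
  %PtrLSB = and i64 %addr, 3
  %byteoff = xor i64 %PtrLSB, 3
  %bitoff = shl i64 %byteoff, 3
  %ShiftAmt = trunc i64 %bitoff to i32
  %Mask = shl i32 255, %ShiftAmt
  %Inv_Mask = xor i32 %Mask, -1
  %new.ext = zext i8 %new to i32
  %new.shifted = shl i32 %new.ext, %ShiftAmt
  %old.ext = zext i8 %old to i32
  %old.shifted = shl i32 %old.ext, %ShiftAmt
  %word = load i32, i32* %AlignedAddr
  %rest.init = and i32 %word, %Inv_Mask
  br label %partword.cmpxchg.loop
partword.cmpxchg.loop:
  %rest = phi i32 [ %rest.init, %entry ], [ %rest.next, %partword.cmpxchg.failure ]
  %full.new = or i32 %rest, %new.shifted
  %full.old = or i32 %rest, %old.shifted
  %pair = cmpxchg i32* %AlignedAddr, i32 %full.old, i32 %full.new monotonic monotonic
  %loaded = extractvalue { i32, i1 } %pair, 0
  %success = extractvalue { i32, i1 } %pair, 1
  br i1 %success, label %partword.cmpxchg.end, label %partword.cmpxchg.failure
partword.cmpxchg.failure:
  %rest.next = and i32 %loaded, %Inv_Mask
  %changed = icmp ne i32 %rest, %rest.next
  br i1 %changed, label %partword.cmpxchg.loop, label %partword.cmpxchg.end
partword.cmpxchg.end:
  %loaded.shifted = lshr i32 %loaded, %ShiftAmt
  %res = trunc i32 %loaded.shifted to i8
  fence seq_cst
  ret i8 %res
}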
; CHECK-LABEL: @test_cmpxchg_i16(
; CHECK: fence seq_cst
; CHECK: %0 = ptrtoint i16* %arg to i64
; CHECK: %1 = and i64 %0, -4
; CHECK: %AlignedAddr = inttoptr i64 %1 to i32*
; CHECK: %PtrLSB = and i64 %0, 3
; CHECK: %2 = xor i64 %PtrLSB, 2
; CHECK: %3 = shl i64 %2, 3
; CHECK: %ShiftAmt = trunc i64 %3 to i32
; CHECK: %Mask = shl i32 65535, %ShiftAmt
; CHECK: %Inv_Mask = xor i32 %Mask, -1
; CHECK: %4 = zext i16 %new to i32
; CHECK: %5 = shl i32 %4, %ShiftAmt
; CHECK: %6 = zext i16 %old to i32
; CHECK: %7 = shl i32 %6, %ShiftAmt
; CHECK: %8 = load i32, i32* %AlignedAddr
; CHECK: %9 = and i32 %8, %Inv_Mask
; CHECK: br label %partword.cmpxchg.loop
; CHECK:partword.cmpxchg.loop:
; CHECK: %10 = phi i32 [ %9, %entry ], [ %16, %partword.cmpxchg.failure ]
; CHECK: %11 = or i32 %10, %5
; CHECK: %12 = or i32 %10, %7
; CHECK: %13 = cmpxchg i32* %AlignedAddr, i32 %12, i32 %11 monotonic monotonic
; CHECK: %14 = extractvalue { i32, i1 } %13, 0
; CHECK: %15 = extractvalue { i32, i1 } %13, 1
; CHECK: br i1 %15, label %partword.cmpxchg.end, label %partword.cmpxchg.failure
; CHECK:partword.cmpxchg.failure:
; CHECK: %16 = and i32 %14, %Inv_Mask
; CHECK: %17 = icmp ne i32 %10, %16
; CHECK: br i1 %17, label %partword.cmpxchg.loop, label %partword.cmpxchg.end
; CHECK:partword.cmpxchg.end:
; CHECK: %18 = lshr i32 %14, %ShiftAmt
; CHECK: %19 = trunc i32 %18 to i16
; CHECK: %20 = insertvalue { i16, i1 } undef, i16 %19, 0
; CHECK: %21 = insertvalue { i16, i1 } %20, i1 %15, 1
; CHECK: fence seq_cst
; CHECK: %ret = extractvalue { i16, i1 } %21, 0
; CHECK: ret i16 %ret
define i16 @test_cmpxchg_i16(i16* %arg, i16 %old, i16 %new) {
entry:
%ret_succ = cmpxchg i16* %arg, i16 %old, i16 %new seq_cst monotonic
%ret = extractvalue { i16, i1 } %ret_succ, 0
ret i16 %ret
}
; CHECK-LABEL: @test_add_i16(
; CHECK: fence seq_cst
; CHECK: %0 = ptrtoint i16* %arg to i64
; CHECK: %1 = and i64 %0, -4
; CHECK: %AlignedAddr = inttoptr i64 %1 to i32*
; CHECK: %PtrLSB = and i64 %0, 3
; CHECK: %2 = xor i64 %PtrLSB, 2
; CHECK: %3 = shl i64 %2, 3
; CHECK: %ShiftAmt = trunc i64 %3 to i32
; CHECK: %Mask = shl i32 65535, %ShiftAmt
; CHECK: %Inv_Mask = xor i32 %Mask, -1
; CHECK: %4 = zext i16 %val to i32
; CHECK: %ValOperand_Shifted = shl i32 %4, %ShiftAmt
; CHECK: %5 = load i32, i32* %AlignedAddr, align 4
; CHECK: br label %atomicrmw.start
; CHECK:atomicrmw.start:
; CHECK: %loaded = phi i32 [ %5, %entry ], [ %newloaded, %atomicrmw.start ]
; CHECK: %new = add i32 %loaded, %ValOperand_Shifted
; CHECK: %6 = and i32 %new, %Mask
; CHECK: %7 = and i32 %loaded, %Inv_Mask
; CHECK: %8 = or i32 %7, %6
; CHECK: %9 = cmpxchg i32* %AlignedAddr, i32 %loaded, i32 %8 monotonic monotonic
; CHECK: %success = extractvalue { i32, i1 } %9, 1
; CHECK: %newloaded = extractvalue { i32, i1 } %9, 0
; CHECK: br i1 %success, label %atomicrmw.end, label %atomicrmw.start
; CHECK:atomicrmw.end:
; CHECK: %10 = lshr i32 %newloaded, %ShiftAmt
; CHECK: %11 = trunc i32 %10 to i16
; CHECK: fence seq_cst
; CHECK: ret i16 %11
define i16 @test_add_i16(i16* %arg, i16 %val) {
entry:
%ret = atomicrmw add i16* %arg, i16 %val seq_cst
ret i16 %ret
}
; CHECK-LABEL: @test_xor_i16(
; (I'm going to just assert on the bits that differ from add, above.)
; CHECK:atomicrmw.start:
; CHECK: %new = xor i32 %loaded, %ValOperand_Shifted
; CHECK: %6 = cmpxchg i32* %AlignedAddr, i32 %loaded, i32 %new monotonic monotonic
; CHECK:atomicrmw.end:
define i16 @test_xor_i16(i16* %arg, i16 %val) {
entry:
%ret = atomicrmw xor i16* %arg, i16 %val seq_cst
ret i16 %ret
}
; CHECK-LABEL: @test_min_i16(
; CHECK:atomicrmw.start:
; CHECK: %6 = lshr i32 %loaded, %ShiftAmt
; CHECK: %7 = trunc i32 %6 to i16
; CHECK: %8 = icmp sle i16 %7, %val
; CHECK: %new = select i1 %8, i16 %7, i16 %val
; CHECK: %9 = zext i16 %new to i32
; CHECK: %10 = shl i32 %9, %ShiftAmt
; CHECK: %11 = and i32 %loaded, %Inv_Mask
; CHECK: %12 = or i32 %11, %10
; CHECK: %13 = cmpxchg i32* %AlignedAddr, i32 %loaded, i32 %12 monotonic monotonic
; CHECK:atomicrmw.end:
define i16 @test_min_i16(i16* %arg, i16 %val) {
entry:
%ret = atomicrmw min i16* %arg, i16 %val seq_cst
ret i16 %ret
}


@@ -1,167 +0,0 @@
; RUN: opt -S %s -atomic-expand -mtriple=x86_64-linux-gnu | FileCheck %s
; This file tests the functions `llvm::convertAtomicLoadToIntegerType` and
; `llvm::convertAtomicStoreToIntegerType`. If X86 stops using this
; functionality, please move this test to a target which still does.
define float @float_load_expand(float* %ptr) {
; CHECK-LABEL: @float_load_expand
; CHECK: %1 = bitcast float* %ptr to i32*
; CHECK: %2 = load atomic i32, i32* %1 unordered, align 4
; CHECK: %3 = bitcast i32 %2 to float
; CHECK: ret float %3
%res = load atomic float, float* %ptr unordered, align 4
ret float %res
}
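; A hand-written sketch (illustrative only; names invented) of the expansion the
; CHECK lines above describe: the atomic float load is performed as an atomic i32
; load of the same bits and the result is bitcast back to float.
define float @sketch_float_load_expanded(float* %ptr) {
  %int.ptr = bitcast float* %ptr to i32*
  %int.val = load atomic i32, i32* %int.ptr unordered, align 4
  %fp.val = bitcast i32 %int.val to float
  ret float %fp.val
}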
define float @float_load_expand_seq_cst(float* %ptr) {
; CHECK-LABEL: @float_load_expand_seq_cst
; CHECK: %1 = bitcast float* %ptr to i32*
; CHECK: %2 = load atomic i32, i32* %1 seq_cst, align 4
; CHECK: %3 = bitcast i32 %2 to float
; CHECK: ret float %3
%res = load atomic float, float* %ptr seq_cst, align 4
ret float %res
}
define float @float_load_expand_vol(float* %ptr) {
; CHECK-LABEL: @float_load_expand_vol
; CHECK: %1 = bitcast float* %ptr to i32*
; CHECK: %2 = load atomic volatile i32, i32* %1 unordered, align 4
; CHECK: %3 = bitcast i32 %2 to float
; CHECK: ret float %3
%res = load atomic volatile float, float* %ptr unordered, align 4
ret float %res
}
define float @float_load_expand_addr1(float addrspace(1)* %ptr) {
; CHECK-LABEL: @float_load_expand_addr1
; CHECK: %1 = bitcast float addrspace(1)* %ptr to i32 addrspace(1)*
; CHECK: %2 = load atomic i32, i32 addrspace(1)* %1 unordered, align 4
; CHECK: %3 = bitcast i32 %2 to float
; CHECK: ret float %3
%res = load atomic float, float addrspace(1)* %ptr unordered, align 4
ret float %res
}
define void @float_store_expand(float* %ptr, float %v) {
; CHECK-LABEL: @float_store_expand
; CHECK: %1 = bitcast float %v to i32
; CHECK: %2 = bitcast float* %ptr to i32*
; CHECK: store atomic i32 %1, i32* %2 unordered, align 4
store atomic float %v, float* %ptr unordered, align 4
ret void
}
define void @float_store_expand_seq_cst(float* %ptr, float %v) {
; CHECK-LABEL: @float_store_expand_seq_cst
; CHECK: %1 = bitcast float %v to i32
; CHECK: %2 = bitcast float* %ptr to i32*
; CHECK: store atomic i32 %1, i32* %2 seq_cst, align 4
store atomic float %v, float* %ptr seq_cst, align 4
ret void
}
define void @float_store_expand_vol(float* %ptr, float %v) {
; CHECK-LABEL: @float_store_expand_vol
; CHECK: %1 = bitcast float %v to i32
; CHECK: %2 = bitcast float* %ptr to i32*
; CHECK: store atomic volatile i32 %1, i32* %2 unordered, align 4
store atomic volatile float %v, float* %ptr unordered, align 4
ret void
}
define void @float_store_expand_addr1(float addrspace(1)* %ptr, float %v) {
; CHECK-LABEL: @float_store_expand_addr1
; CHECK: %1 = bitcast float %v to i32
; CHECK: %2 = bitcast float addrspace(1)* %ptr to i32 addrspace(1)*
; CHECK: store atomic i32 %1, i32 addrspace(1)* %2 unordered, align 4
store atomic float %v, float addrspace(1)* %ptr unordered, align 4
ret void
}
define void @pointer_cmpxchg_expand(i8** %ptr, i8* %v) {
; CHECK-LABEL: @pointer_cmpxchg_expand
; CHECK: %1 = bitcast i8** %ptr to i64*
; CHECK: %2 = ptrtoint i8* %v to i64
; CHECK: %3 = cmpxchg i64* %1, i64 0, i64 %2 seq_cst monotonic
; CHECK: %4 = extractvalue { i64, i1 } %3, 0
; CHECK: %5 = extractvalue { i64, i1 } %3, 1
; CHECK: %6 = inttoptr i64 %4 to i8*
; CHECK: %7 = insertvalue { i8*, i1 } undef, i8* %6, 0
; CHECK: %8 = insertvalue { i8*, i1 } %7, i1 %5, 1
cmpxchg i8** %ptr, i8* null, i8* %v seq_cst monotonic
ret void
}
define void @pointer_cmpxchg_expand2(i8** %ptr, i8* %v) {
; CHECK-LABEL: @pointer_cmpxchg_expand2
; CHECK: %1 = bitcast i8** %ptr to i64*
; CHECK: %2 = ptrtoint i8* %v to i64
; CHECK: %3 = cmpxchg i64* %1, i64 0, i64 %2 release monotonic
; CHECK: %4 = extractvalue { i64, i1 } %3, 0
; CHECK: %5 = extractvalue { i64, i1 } %3, 1
; CHECK: %6 = inttoptr i64 %4 to i8*
; CHECK: %7 = insertvalue { i8*, i1 } undef, i8* %6, 0
; CHECK: %8 = insertvalue { i8*, i1 } %7, i1 %5, 1
cmpxchg i8** %ptr, i8* null, i8* %v release monotonic
ret void
}
define void @pointer_cmpxchg_expand3(i8** %ptr, i8* %v) {
; CHECK-LABEL: @pointer_cmpxchg_expand3
; CHECK: %1 = bitcast i8** %ptr to i64*
; CHECK: %2 = ptrtoint i8* %v to i64
; CHECK: %3 = cmpxchg i64* %1, i64 0, i64 %2 seq_cst seq_cst
; CHECK: %4 = extractvalue { i64, i1 } %3, 0
; CHECK: %5 = extractvalue { i64, i1 } %3, 1
; CHECK: %6 = inttoptr i64 %4 to i8*
; CHECK: %7 = insertvalue { i8*, i1 } undef, i8* %6, 0
; CHECK: %8 = insertvalue { i8*, i1 } %7, i1 %5, 1
cmpxchg i8** %ptr, i8* null, i8* %v seq_cst seq_cst
ret void
}
define void @pointer_cmpxchg_expand4(i8** %ptr, i8* %v) {
; CHECK-LABEL: @pointer_cmpxchg_expand4
; CHECK: %1 = bitcast i8** %ptr to i64*
; CHECK: %2 = ptrtoint i8* %v to i64
; CHECK: %3 = cmpxchg weak i64* %1, i64 0, i64 %2 seq_cst seq_cst
; CHECK: %4 = extractvalue { i64, i1 } %3, 0
; CHECK: %5 = extractvalue { i64, i1 } %3, 1
; CHECK: %6 = inttoptr i64 %4 to i8*
; CHECK: %7 = insertvalue { i8*, i1 } undef, i8* %6, 0
; CHECK: %8 = insertvalue { i8*, i1 } %7, i1 %5, 1
cmpxchg weak i8** %ptr, i8* null, i8* %v seq_cst seq_cst
ret void
}
define void @pointer_cmpxchg_expand5(i8** %ptr, i8* %v) {
; CHECK-LABEL: @pointer_cmpxchg_expand5
; CHECK: %1 = bitcast i8** %ptr to i64*
; CHECK: %2 = ptrtoint i8* %v to i64
; CHECK: %3 = cmpxchg volatile i64* %1, i64 0, i64 %2 seq_cst seq_cst
; CHECK: %4 = extractvalue { i64, i1 } %3, 0
; CHECK: %5 = extractvalue { i64, i1 } %3, 1
; CHECK: %6 = inttoptr i64 %4 to i8*
; CHECK: %7 = insertvalue { i8*, i1 } undef, i8* %6, 0
; CHECK: %8 = insertvalue { i8*, i1 } %7, i1 %5, 1
cmpxchg volatile i8** %ptr, i8* null, i8* %v seq_cst seq_cst
ret void
}
define void @pointer_cmpxchg_expand6(i8 addrspace(2)* addrspace(1)* %ptr,
i8 addrspace(2)* %v) {
; CHECK-LABEL: @pointer_cmpxchg_expand6
; CHECK: %1 = bitcast i8 addrspace(2)* addrspace(1)* %ptr to i64 addrspace(1)*
; CHECK: %2 = ptrtoint i8 addrspace(2)* %v to i64
; CHECK: %3 = cmpxchg i64 addrspace(1)* %1, i64 0, i64 %2 seq_cst seq_cst
; CHECK: %4 = extractvalue { i64, i1 } %3, 0
; CHECK: %5 = extractvalue { i64, i1 } %3, 1
; CHECK: %6 = inttoptr i64 %4 to i8 addrspace(2)*
; CHECK: %7 = insertvalue { i8 addrspace(2)*, i1 } undef, i8 addrspace(2)* %6, 0
; CHECK: %8 = insertvalue { i8 addrspace(2)*, i1 } %7, i1 %5, 1
cmpxchg i8 addrspace(2)* addrspace(1)* %ptr, i8 addrspace(2)* null, i8 addrspace(2)* %v seq_cst seq_cst
ret void
}


@@ -1,11 +0,0 @@
; RUN: opt -S %s -atomic-expand -mtriple=i686-linux-gnu | FileCheck %s
; This file tests the function `llvm::expandAtomicRMWToCmpXchg`.
; It isn't technically target specific, but is exposed through a pass that is.
define i8 @test_initial_load(i8* %ptr, i8 %value) {
%res = atomicrmw nand i8* %ptr, i8 %value seq_cst
ret i8 %res
}
; CHECK-LABEL: @test_initial_load
; CHECK-NEXT: %1 = load i8, i8* %ptr, align 1
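; For reference, a hand-written sketch of the general shape such an expansion
; takes (the CHECK lines above only pin down the initial plain load; the loop
; below, including its orderings, is illustrative rather than checked output):
; the nand is recomputed and retried with a cmpxchg until the exchange succeeds.
define i8 @sketch_nand_via_cmpxchg(i8* %ptr, i8 %value) {
entry:
  %init = load i8, i8* %ptr, align 1
  br label %atomicrmw.start
atomicrmw.start:
  %loaded = phi i8 [ %init, %entry ], [ %newloaded, %atomicrmw.start ]
  %tmp = and i8 %loaded, %value
  %new = xor i8 %tmp, -1
  %pair = cmpxchg i8* %ptr, i8 %loaded, i8 %new seq_cst seq_cst
  %success = extractvalue { i8, i1 } %pair, 1
  %newloaded = extractvalue { i8, i1 } %pair, 0
  br i1 %success, label %atomicrmw.end, label %atomicrmw.start
atomicrmw.end:
  ret i8 %newloaded
}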


@@ -1,2 +0,0 @@
if 'X86' not in config.root.targets:
config.unsupported = True