Imported Upstream version 6.10.0.49

Former-commit-id: 1d6753294b2993e1fbf92de9366bb9544db4189b
Author: Xamarin Public Jenkins (auto-signing)
Date: 2020-01-16 16:38:04 +00:00
Parent: d94e79959b
Commit: 468663ddbb
48518 changed files with 2789335 additions and 61176 deletions


@@ -0,0 +1,33 @@
//===-- adddf3vfp.S - Implement adddf3vfp ---------------------------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is dual licensed under the MIT and the University of Illinois Open
// Source Licenses. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
#include "../assembly.h"
//
// double __adddf3vfp(double a, double b) { return a + b; }
//
// Adds two double precision floating point numbers using the Darwin
// calling convention where double arguments are passed in GPR pairs
//
.syntax unified
.p2align 2
DEFINE_COMPILERRT_FUNCTION(__adddf3vfp)
#if defined(COMPILER_RT_ARMHF_TARGET)
vadd.f64 d0, d0, d1
#else
vmov d6, r0, r1 // move first param from r0/r1 pair into d6
vmov d7, r2, r3 // move second param from r2/r3 pair into d7
vadd.f64 d6, d6, d7
vmov r0, r1, d6 // move result back to r0/r1 pair
#endif
bx lr
END_COMPILERRT_FUNCTION(__adddf3vfp)
NO_EXEC_STACK_DIRECTIVE
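
The comment above notes that, under the soft-float convention, each double argument arrives split across a GPR pair before vmov d6, r0, r1 reassembles it. A minimal C sketch of that packing, assuming a little-endian target (the helper name is invented for illustration and is not part of compiler-rt):

#include <stdint.h>
#include <string.h>

// Hypothetical illustration: split a double into the low/high words that
// occupy r0/r1 under the soft-float convention described above.
static void double_to_gpr_pair(double d, uint32_t *lo, uint32_t *hi) {
    uint64_t bits;
    memcpy(&bits, &d, sizeof bits); // reinterpret the bits, no conversion
    *lo = (uint32_t)bits;           // low word  -> r0 (little-endian)
    *hi = (uint32_t)(bits >> 32);   // high word -> r1
}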


@@ -0,0 +1,277 @@
/*===-- addsf3.S - Adds two single precision floating point numbers ------===//
*
* The LLVM Compiler Infrastructure
*
* This file is dual licensed under the MIT and the University of Illinois Open
* Source Licenses. See LICENSE.TXT for details.
*
*===----------------------------------------------------------------------===//
*
* This file implements the __addsf3 (single precision floating point number
* addition with the IEEE-754 default rounding (to nearest, ties to even))
* function for the ARM Thumb1 ISA.
*
*===----------------------------------------------------------------------===*/
#include "../assembly.h"
#define significandBits 23
#define typeWidth 32
.syntax unified
.text
.thumb
.p2align 2
DEFINE_AEABI_FUNCTION_ALIAS(__aeabi_fadd, __addsf3)
DEFINE_COMPILERRT_THUMB_FUNCTION(__addsf3)
push {r4, r5, r6, r7, lr}
// Get the absolute value of a and b.
lsls r2, r0, #1
lsls r3, r1, #1
lsrs r2, r2, #1 /* aAbs */
beq LOCAL_LABEL(a_zero_nan_inf)
lsrs r3, r3, #1 /* bAbs */
beq LOCAL_LABEL(zero_nan_inf)
// Detect if a or b is infinity or NaN.
lsrs r6, r2, #(significandBits)
lsrs r7, r3, #(significandBits)
cmp r6, #0xFF
beq LOCAL_LABEL(zero_nan_inf)
cmp r7, #0xFF
beq LOCAL_LABEL(zero_nan_inf)
// Swap Rep and Abs so that a and aAbs have the larger absolute value.
cmp r2, r3
bhs LOCAL_LABEL(no_swap)
movs r4, r0
movs r5, r2
movs r0, r1
movs r2, r3
movs r1, r4
movs r3, r5
LOCAL_LABEL(no_swap):
// Get the significands and shift them to give us round, guard and sticky.
lsls r4, r0, #(typeWidth - significandBits)
lsrs r4, r4, #(typeWidth - significandBits - 3) /* aSignificand << 3 */
lsls r5, r1, #(typeWidth - significandBits)
lsrs r5, r5, #(typeWidth - significandBits - 3) /* bSignificand << 3 */
// Get the implicitBit.
movs r6, #1
lsls r6, r6, #(significandBits + 3)
// Get aExponent and set implicit bit if necessary.
lsrs r2, r2, #(significandBits)
beq LOCAL_LABEL(a_done_implicit_bit)
orrs r4, r6
LOCAL_LABEL(a_done_implicit_bit):
// Get bExponent and set implicit bit if necessary.
lsrs r3, r3, #(significandBits)
beq LOCAL_LABEL(b_done_implicit_bit)
orrs r5, r6
LOCAL_LABEL(b_done_implicit_bit):
// Get the difference in exponents.
subs r6, r2, r3
beq LOCAL_LABEL(done_align)
// If b is denormal, then a must be normal as align > 0, and we only need to
// right shift bSignificand by (align - 1) bits.
cmp r3, #0
bne 1f
subs r6, r6, #1
1:
// bExponent is no longer needed; r3 is dead here.
// Set sticky bits of b: sticky = bSignificand << (typeWidth - align).
movs r3, #(typeWidth)
subs r3, r3, r6
movs r7, r5
lsls r7, r3
beq 1f
movs r7, #1
1:
// bSignificand = bSignificand >> align | sticky;
lsrs r5, r6
orrs r5, r7
bne LOCAL_LABEL(done_align)
movs r5, #1 // sticky; b is known to be non-zero.
LOCAL_LABEL(done_align):
// isSubtraction = (aRep ^ bRep) >> 31;
movs r7, r0
eors r7, r1
lsrs r7, #31
bne LOCAL_LABEL(do_subtraction)
// Same sign, do Addition.
// aSignificand += bSignificand;
adds r4, r4, r5
// Check carry bit.
movs r6, #1
lsls r6, r6, #(significandBits + 3 + 1)
movs r7, r4
ands r7, r6
beq LOCAL_LABEL(form_result)
// If the addition carried up, we need to right-shift the result and
// adjust the exponent.
movs r7, r4
movs r6, #1
ands r7, r6 // sticky = aSignificand & 1;
lsrs r4, #1
orrs r4, r7 // result Significand
adds r2, #1 // result Exponent
// If we have overflowed the type, return +/- infinity.
cmp r2, #0xFF
beq LOCAL_LABEL(ret_inf)
LOCAL_LABEL(form_result):
// Shift the sign, exponent and significand into place.
lsrs r0, #(typeWidth - 1)
lsls r0, #(typeWidth - 1) // Get Sign.
lsls r2, #(significandBits)
orrs r0, r2
movs r1, r4
lsls r4, #(typeWidth - significandBits - 3)
lsrs r4, #(typeWidth - significandBits)
orrs r0, r4
// Final rounding. The result may overflow to infinity, but that is the
// correct result in that case.
// roundGuardSticky = aSignificand & 0x7;
movs r2, #0x7
ands r1, r2
// if (roundGuardSticky > 0x4) result++;
cmp r1, #0x4
blt LOCAL_LABEL(done_round)
beq 1f
adds r0, #1
pop {r4, r5, r6, r7, pc}
1:
// if (roundGuardSticky == 0x4) result += result & 1;
movs r1, r0
lsrs r1, #1
bcc LOCAL_LABEL(done_round)
adds r0, r0, #1
LOCAL_LABEL(done_round):
pop {r4, r5, r6, r7, pc}
LOCAL_LABEL(do_subtraction):
subs r4, r4, r5 // aSignificand -= bSignificand;
beq LOCAL_LABEL(ret_zero)
movs r6, r4
cmp r2, #0
beq LOCAL_LABEL(form_result) // if a's exp is 0, no need to normalize.
// If partial cancellation occurred, we need to left-shift the result
// and adjust the exponent:
lsrs r6, r6, #(significandBits + 3)
bne LOCAL_LABEL(form_result)
push {r0, r1, r2, r3}
movs r0, r4
bl __clzsi2
movs r5, r0
pop {r0, r1, r2, r3}
// shift = rep_clz(aSignificand) - rep_clz(implicitBit << 3);
subs r5, r5, #(typeWidth - significandBits - 3 - 1)
// aSignificand <<= shift; aExponent -= shift;
lsls r4, r5
subs r2, r2, r5
bgt LOCAL_LABEL(form_result)
// Do normalization if aExponent <= 0.
movs r6, #1
subs r6, r6, r2 // 1 - aExponent;
movs r2, #0 // aExponent = 0;
movs r3, #(typeWidth) // bExponent is dead.
subs r3, r3, r6
movs r7, r4
lsls r7, r3 // stickyBit = (bool)(aSignificand << (typeWidth - align))
beq 1f
movs r7, #1
1:
lsrs r4, r6 /* aSignificand >> shift */
orrs r4, r7
b LOCAL_LABEL(form_result)
LOCAL_LABEL(ret_zero):
movs r0, #0
pop {r4, r5, r6, r7, pc}
LOCAL_LABEL(a_zero_nan_inf):
lsrs r3, r3, #1
LOCAL_LABEL(zero_nan_inf):
// Here r2 has aAbs, r3 has bAbs
movs r4, #0xFF
lsls r4, r4, #(significandBits) // Make +inf.
cmp r2, r4
bhi LOCAL_LABEL(a_is_nan)
cmp r3, r4
bhi LOCAL_LABEL(b_is_nan)
cmp r2, r4
bne LOCAL_LABEL(a_is_rational)
// aAbs is INF.
eors r1, r0 // aRep ^ bRep.
movs r6, #1
lsls r6, r6, #(typeWidth - 1) // get sign mask.
cmp r1, r6 // if they only differ on sign bit, it's -INF + INF
beq LOCAL_LABEL(a_is_nan)
pop {r4, r5, r6, r7, pc}
LOCAL_LABEL(a_is_rational):
cmp r3, r4
bne LOCAL_LABEL(b_is_rational)
movs r0, r1
pop {r4, r5, r6, r7, pc}
LOCAL_LABEL(b_is_rational):
// either a or b or both are zero.
adds r4, r2, r3
beq LOCAL_LABEL(both_zero)
cmp r2, #0 // is aAbs 0?
beq LOCAL_LABEL(ret_b)
pop {r4, r5, r6, r7, pc}
LOCAL_LABEL(both_zero):
ands r0, r1 // +0 + -0 = +0
pop {r4, r5, r6, r7, pc}
LOCAL_LABEL(ret_b):
movs r0, r1
LOCAL_LABEL(ret):
pop {r4, r5, r6, r7, pc}
LOCAL_LABEL(b_is_nan):
movs r0, r1
LOCAL_LABEL(a_is_nan):
movs r1, #1
lsls r1, r1, #(significandBits - 1) // r1 is the quiet bit.
orrs r0, r1
pop {r4, r5, r6, r7, pc}
LOCAL_LABEL(ret_inf):
movs r4, #0xFF
lsls r4, r4, #(significandBits)
orrs r0, r4
lsrs r0, r0, #(significandBits)
lsls r0, r0, #(significandBits)
pop {r4, r5, r6, r7, pc}
END_COMPILERRT_FUNCTION(__addsf3)
NO_EXEC_STACK_DIRECTIVE
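
The block following LOCAL_LABEL(form_result) implements the round-to-nearest, ties-to-even step named in the header, using the three low bits of the extended significand as round, guard and sticky. A minimal C sketch mirroring that pseudocode (the function name is invented for illustration):

#include <stdint.h>

// Sketch of the final rounding performed above.
static uint32_t round_nearest_even(uint32_t result, uint32_t roundGuardSticky) {
    if (roundGuardSticky > 0x4)
        result++;              // more than halfway: round up
    else if (roundGuardSticky == 0x4)
        result += result & 1;  // exactly halfway: round to even
    return result;             // may overflow to infinity, which is correct
}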


@@ -0,0 +1,33 @@
//===-- addsf3vfp.S - Implement addsf3vfp ---------------------------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is dual licensed under the MIT and the University of Illinois Open
// Source Licenses. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
#include "../assembly.h"
//
// extern float __addsf3vfp(float a, float b);
//
// Adds two single precision floating point numbers using the Darwin
// calling convention where single arguments are passed in GPRs
//
.syntax unified
.p2align 2
DEFINE_COMPILERRT_FUNCTION(__addsf3vfp)
#if defined(COMPILER_RT_ARMHF_TARGET)
vadd.f32 s0, s0, s1
#else
vmov s14, r0 // move first param from r0 into float register
vmov s15, r1 // move second param from r1 into float register
vadd.f32 s14, s14, s15
vmov r0, s14 // move result back to r0
#endif
bx lr
END_COMPILERRT_FUNCTION(__addsf3vfp)
NO_EXEC_STACK_DIRECTIVE


@@ -0,0 +1,149 @@
//===-- aeabi_cdcmp.S - EABI cdcmp* implementation ------------------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is dual licensed under the MIT and the University of Illinois Open
// Source Licenses. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
#include "../assembly.h"
#if __BYTE_ORDER__ != __ORDER_LITTLE_ENDIAN__
#error big endian support not implemented
#endif
#define APSR_Z (1 << 30)
#define APSR_C (1 << 29)
// void __aeabi_cdcmpeq(double a, double b) {
// if (isnan(a) || isnan(b)) {
// Z = 0; C = 1;
// } else {
// __aeabi_cdcmple(a, b);
// }
// }
.syntax unified
.p2align 2
DEFINE_COMPILERRT_FUNCTION(__aeabi_cdcmpeq)
push {r0-r3, lr}
bl __aeabi_cdcmpeq_check_nan
cmp r0, #1
#if defined(USE_THUMB_1)
beq 1f
// NaN has been ruled out, so __aeabi_cdcmple can't trap
mov r0, sp
ldm r0, {r0-r3}
bl __aeabi_cdcmple
pop {r0-r3, pc}
1:
// Z = 0, C = 1
movs r0, #0xF
lsls r0, r0, #31
pop {r0-r3, pc}
#else
pop {r0-r3, lr}
// NaN has been ruled out, so __aeabi_cdcmple can't trap
// Use "it ne" + unconditional branch to guarantee a supported relocation if
// __aeabi_cdcmple is in a different section for some builds.
IT(ne)
bne __aeabi_cdcmple
#if defined(USE_THUMB_2)
mov ip, #APSR_C
msr APSR_nzcvq, ip
#else
msr CPSR_f, #APSR_C
#endif
JMP(lr)
#endif
END_COMPILERRT_FUNCTION(__aeabi_cdcmpeq)
// void __aeabi_cdcmple(double a, double b) {
// if (__aeabi_dcmplt(a, b)) {
// Z = 0; C = 0;
// } else if (__aeabi_dcmpeq(a, b)) {
// Z = 1; C = 1;
// } else {
// Z = 0; C = 1;
// }
// }
.syntax unified
.p2align 2
DEFINE_COMPILERRT_FUNCTION(__aeabi_cdcmple)
// Per the RTABI, this function must preserve r0-r11.
// Save lr in the same instruction for compactness
push {r0-r3, lr}
bl __aeabi_dcmplt
cmp r0, #1
#if defined(USE_THUMB_1)
bne 1f
// Z = 0, C = 0
movs r0, #1
lsls r0, r0, #1
pop {r0-r3, pc}
1:
mov r0, sp
ldm r0, {r0-r3}
bl __aeabi_dcmpeq
cmp r0, #1
bne 2f
// Z = 1, C = 1
movs r0, #2
lsls r0, r0, #31
pop {r0-r3, pc}
2:
// Z = 0, C = 1
movs r0, #0xF
lsls r0, r0, #31
pop {r0-r3, pc}
#else
ITT(eq)
moveq ip, #0
beq 1f
ldm sp, {r0-r3}
bl __aeabi_dcmpeq
cmp r0, #1
ITE(eq)
moveq ip, #(APSR_C | APSR_Z)
movne ip, #(APSR_C)
1:
#if defined(__ARM_ARCH_7M__) || defined(__ARM_ARCH_7EM__)
msr APSR_nzcvq, ip
#else
msr CPSR_f, ip
#endif
pop {r0-r3}
POP_PC()
#endif
END_COMPILERRT_FUNCTION(__aeabi_cdcmple)
// void __aeabi_cdrcmple(double a, double b) {
// __aeabi_cdcmple(b, a);
// }
.syntax unified
.p2align 2
DEFINE_COMPILERRT_FUNCTION(__aeabi_cdrcmple)
// Swap r0 and r2
mov ip, r0
mov r0, r2
mov r2, ip
// Swap r1 and r3
mov ip, r1
mov r1, r3
mov r3, ip
b __aeabi_cdcmple
END_COMPILERRT_FUNCTION(__aeabi_cdrcmple)
NO_EXEC_STACK_DIRECTIVE
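
These routines report their result in the APSR Z and C flags rather than in r0. A small C sketch of the encoding __aeabi_cdcmple establishes, taken from the pseudocode above (illustrative only; it ignores the NaN handling and register-preservation details):

#define APSR_Z (1 << 30)
#define APSR_C (1 << 29)

// Illustrative only: the flag word the routine leaves behind.
static unsigned cdcmple_flags(double a, double b) {
    if (a < b)  return 0;                // Z = 0, C = 0
    if (a == b) return APSR_C | APSR_Z;  // Z = 1, C = 1
    return APSR_C;                       // Z = 0, C = 1 (greater or unordered)
}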


@@ -0,0 +1,16 @@
//===-- lib/arm/aeabi_cdcmpeq_helper.c - Helper for cdcmpeq ---------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is dual licensed under the MIT and the University of Illinois Open
// Source Licenses. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
#include <stdint.h>
#include "../int_lib.h"
AEABI_RTABI __attribute__((visibility("hidden")))
int __aeabi_cdcmpeq_check_nan(double a, double b) {
return __builtin_isnan(a) || __builtin_isnan(b);
}


@@ -0,0 +1,144 @@
//===-- aeabi_cfcmp.S - EABI cfcmp* implementation ------------------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is dual licensed under the MIT and the University of Illinois Open
// Source Licenses. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
#include "../assembly.h"
#if __BYTE_ORDER__ != __ORDER_LITTLE_ENDIAN__
#error big endian support not implemented
#endif
#define APSR_Z (1 << 30)
#define APSR_C (1 << 29)
// void __aeabi_cfcmpeq(float a, float b) {
// if (isnan(a) || isnan(b)) {
// Z = 0; C = 1;
// } else {
// __aeabi_cfcmple(a, b);
// }
// }
.syntax unified
.p2align 2
DEFINE_COMPILERRT_FUNCTION(__aeabi_cfcmpeq)
push {r0-r3, lr}
bl __aeabi_cfcmpeq_check_nan
cmp r0, #1
#if defined(USE_THUMB_1)
beq 1f
// NaN has been ruled out, so __aeabi_cfcmple can't trap
mov r0, sp
ldm r0, {r0-r3}
bl __aeabi_cfcmple
pop {r0-r3, pc}
1:
// Z = 0, C = 1
movs r0, #0xF
lsls r0, r0, #31
pop {r0-r3, pc}
#else
pop {r0-r3, lr}
// NaN has been ruled out, so __aeabi_cfcmple can't trap
// Use "it ne" + unconditional branch to guarantee a supported relocation if
// __aeabi_cfcmple is in a different section for some builds.
IT(ne)
bne __aeabi_cfcmple
#if defined(USE_THUMB_2)
mov ip, #APSR_C
msr APSR_nzcvq, ip
#else
msr CPSR_f, #APSR_C
#endif
JMP(lr)
#endif
END_COMPILERRT_FUNCTION(__aeabi_cfcmpeq)
// void __aeabi_cfcmple(float a, float b) {
// if (__aeabi_fcmplt(a, b)) {
// Z = 0; C = 0;
// } else if (__aeabi_fcmpeq(a, b)) {
// Z = 1; C = 1;
// } else {
// Z = 0; C = 1;
// }
// }
.syntax unified
.p2align 2
DEFINE_COMPILERRT_FUNCTION(__aeabi_cfcmple)
// Per the RTABI, this function must preserve r0-r11.
// Save lr in the same instruction for compactness
push {r0-r3, lr}
bl __aeabi_fcmplt
cmp r0, #1
#if defined(USE_THUMB_1)
bne 1f
// Z = 0, C = 0
movs r0, #1
lsls r0, r0, #1
pop {r0-r3, pc}
1:
mov r0, sp
ldm r0, {r0-r3}
bl __aeabi_fcmpeq
cmp r0, #1
bne 2f
// Z = 1, C = 1
movs r0, #2
lsls r0, r0, #31
pop {r0-r3, pc}
2:
// Z = 0, C = 1
movs r0, #0xF
lsls r0, r0, #31
pop {r0-r3, pc}
#else
ITT(eq)
moveq ip, #0
beq 1f
ldm sp, {r0-r3}
bl __aeabi_fcmpeq
cmp r0, #1
ITE(eq)
moveq ip, #(APSR_C | APSR_Z)
movne ip, #(APSR_C)
1:
#if defined(__ARM_ARCH_7M__) || defined(__ARM_ARCH_7EM__)
msr APSR_nzcvq, ip
#else
msr CPSR_f, ip
#endif
pop {r0-r3}
POP_PC()
#endif
END_COMPILERRT_FUNCTION(__aeabi_cfcmple)
// void __aeabi_cfrcmple(float a, float b) {
// __aeabi_cfcmple(b, a);
// }
.syntax unified
.p2align 2
DEFINE_COMPILERRT_FUNCTION(__aeabi_cfrcmple)
// Swap r0 and r1
mov ip, r0
mov r0, r1
mov r1, ip
b __aeabi_cfcmple
END_COMPILERRT_FUNCTION(__aeabi_cfrcmple)
NO_EXEC_STACK_DIRECTIVE


@@ -0,0 +1,16 @@
//===-- lib/arm/aeabi_cfcmpeq_helper.c - Helper for cfcmpeq ---------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is dual licensed under the MIT and the University of Illinois Open
// Source Licenses. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
#include <stdint.h>
#include "../int_lib.h"
AEABI_RTABI __attribute__((visibility("hidden")))
int __aeabi_cfcmpeq_check_nan(float a, float b) {
return __builtin_isnan(a) || __builtin_isnan(b);
}


@@ -0,0 +1,52 @@
//===-- aeabi_dcmp.S - EABI dcmp* implementation ---------------------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is dual licensed under the MIT and the University of Illinois Open
// Source Licenses. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
#include "../assembly.h"
// int __aeabi_dcmp{eq,lt,le,ge,gt}(double a, double b) {
// int result = __{eq,lt,le,ge,gt}df2(a, b);
// if (result {==,<,<=,>=,>} 0) {
// return 1;
// } else {
// return 0;
// }
// }
#if defined(COMPILER_RT_ARMHF_TARGET)
# define CONVERT_DCMP_ARGS_TO_DF2_ARGS \
vmov d0, r0, r1 SEPARATOR \
vmov d1, r2, r3
#else
# define CONVERT_DCMP_ARGS_TO_DF2_ARGS
#endif
#define DEFINE_AEABI_DCMP(cond) \
.syntax unified SEPARATOR \
.p2align 2 SEPARATOR \
DEFINE_COMPILERRT_FUNCTION(__aeabi_dcmp ## cond) \
push { r4, lr } SEPARATOR \
CONVERT_DCMP_ARGS_TO_DF2_ARGS SEPARATOR \
bl SYMBOL_NAME(__ ## cond ## df2) SEPARATOR \
cmp r0, #0 SEPARATOR \
b ## cond 1f SEPARATOR \
movs r0, #0 SEPARATOR \
pop { r4, pc } SEPARATOR \
1: SEPARATOR \
movs r0, #1 SEPARATOR \
pop { r4, pc } SEPARATOR \
END_COMPILERRT_FUNCTION(__aeabi_dcmp ## cond)
DEFINE_AEABI_DCMP(eq)
DEFINE_AEABI_DCMP(lt)
DEFINE_AEABI_DCMP(le)
DEFINE_AEABI_DCMP(ge)
DEFINE_AEABI_DCMP(gt)
NO_EXEC_STACK_DIRECTIVE
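
Each DEFINE_AEABI_DCMP instantiation is a thin Boolean wrapper around the corresponding __*df2 comparison, exactly as the pseudocode comment describes. For the eq case, a C sketch (not the build source; the function name is invented):

int __eqdf2(double a, double b); // compiler-rt comparison wrapped above

static int aeabi_dcmpeq_sketch(double a, double b) {
    return __eqdf2(a, b) == 0 ? 1 : 0; // 1 when equal, 0 otherwise
}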


@@ -0,0 +1,45 @@
/* ===-- aeabi_div0.c - ARM Runtime ABI support routines for compiler-rt ---===
*
* The LLVM Compiler Infrastructure
*
* This file is dual licensed under the MIT and the University of Illinois Open
* Source Licenses. See LICENSE.TXT for details.
*
* ===----------------------------------------------------------------------===
*
* This file implements the division by zero helper routines as specified by the
* Run-time ABI for the ARM Architecture.
*
* ===----------------------------------------------------------------------===
*/
/*
* RTABI 4.3.2 - Division by zero
*
* The *div0 functions:
* - Return the value passed to them as a parameter
* - Or, return a fixed value defined by the execution environment (such as 0)
* - Or, raise a signal (often SIGFPE) or throw an exception, and do not return
*
* An application may provide its own implementations of the *div0 functions to
* obtain a particular behaviour from the *div and *divmod functions called out
* line.
*/
#include "../int_lib.h"
/* provide an unused declaration to pacify pedantic compilation */
extern unsigned char declaration;
#if defined(__ARM_EABI__)
AEABI_RTABI int __attribute__((weak)) __attribute__((visibility("hidden")))
__aeabi_idiv0(int return_value) {
return return_value;
}
AEABI_RTABI long long __attribute__((weak)) __attribute__((visibility("hidden")))
__aeabi_ldiv0(long long return_value) {
return return_value;
}
#endif
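
The RTABI text above allows an application to replace these weak defaults, for example to raise a signal instead of returning quietly. A hedged sketch of such an application-supplied override (not part of this file):

#include <signal.h>

// Possible application override: report integer division by zero via SIGFPE,
// one of the behaviours the RTABI permits.
int __aeabi_idiv0(int return_value) {
    raise(SIGFPE);
    return return_value; // reached only if the signal is caught and ignored
}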


@@ -0,0 +1,19 @@
//===-- lib/arm/aeabi_drsub.c - Double-precision subtraction --------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is dual licensed under the MIT and the University of Illinois Open
// Source Licenses. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
#define DOUBLE_PRECISION
#include "../fp_lib.h"
AEABI_RTABI fp_t
__aeabi_dsub(fp_t, fp_t);
AEABI_RTABI fp_t
__aeabi_drsub(fp_t a, fp_t b) {
return __aeabi_dsub(b, a);
}


@@ -0,0 +1,52 @@
//===-- aeabi_fcmp.S - EABI fcmp* implementation ---------------------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is dual licensed under the MIT and the University of Illinois Open
// Source Licenses. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
#include "../assembly.h"
// int __aeabi_fcmp{eq,lt,le,ge,gt}(float a, float b) {
// int result = __{eq,lt,le,ge,gt}sf2(a, b);
// if (result {==,<,<=,>=,>} 0) {
// return 1;
// } else {
// return 0;
// }
// }
#if defined(COMPILER_RT_ARMHF_TARGET)
# define CONVERT_FCMP_ARGS_TO_SF2_ARGS \
vmov s0, r0 SEPARATOR \
vmov s1, r1
#else
# define CONVERT_FCMP_ARGS_TO_SF2_ARGS
#endif
#define DEFINE_AEABI_FCMP(cond) \
.syntax unified SEPARATOR \
.p2align 2 SEPARATOR \
DEFINE_COMPILERRT_FUNCTION(__aeabi_fcmp ## cond) \
push { r4, lr } SEPARATOR \
CONVERT_FCMP_ARGS_TO_SF2_ARGS SEPARATOR \
bl SYMBOL_NAME(__ ## cond ## sf2) SEPARATOR \
cmp r0, #0 SEPARATOR \
b ## cond 1f SEPARATOR \
movs r0, #0 SEPARATOR \
pop { r4, pc } SEPARATOR \
1: SEPARATOR \
movs r0, #1 SEPARATOR \
pop { r4, pc } SEPARATOR \
END_COMPILERRT_FUNCTION(__aeabi_fcmp ## cond)
DEFINE_AEABI_FCMP(eq)
DEFINE_AEABI_FCMP(lt)
DEFINE_AEABI_FCMP(le)
DEFINE_AEABI_FCMP(ge)
DEFINE_AEABI_FCMP(gt)
NO_EXEC_STACK_DIRECTIVE


@@ -0,0 +1,19 @@
//===-- lib/arm/aeabi_frsub.c - Single-precision subtraction --------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is dual licensed under the MIT and the University of Illinois Open
// Source Licenses. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
#define SINGLE_PRECISION
#include "../fp_lib.h"
AEABI_RTABI fp_t
__aeabi_fsub(fp_t, fp_t);
AEABI_RTABI fp_t
__aeabi_frsub(fp_t a, fp_t b) {
return __aeabi_fsub(b, a);
}


@@ -0,0 +1,51 @@
//===-- aeabi_idivmod.S - EABI idivmod implementation ---------------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is dual licensed under the MIT and the University of Illinois Open
// Source Licenses. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
#include "../assembly.h"
// struct { int quot; int rem; } __aeabi_idivmod(int numerator, int denominator) {
// int rem, quot;
// quot = __divmodsi4(numerator, denominator, &rem);
// return {quot, rem};
// }
#if defined(__MINGW32__)
#define __aeabi_idivmod __rt_sdiv
#endif
.syntax unified
.text
DEFINE_CODE_STATE
.p2align 2
DEFINE_COMPILERRT_FUNCTION(__aeabi_idivmod)
#if defined(USE_THUMB_1)
push {r0, r1, lr}
bl SYMBOL_NAME(__divsi3)
pop {r1, r2, r3} // now r0 = quot, r1 = num, r2 = denom
muls r2, r0, r2 // r2 = quot * denom
subs r1, r1, r2
JMP (r3)
#else // defined(USE_THUMB_1)
push { lr }
sub sp, sp, #4
mov r2, sp
#if defined(__MINGW32__)
mov r3, r0
mov r0, r1
mov r1, r3
#endif
bl SYMBOL_NAME(__divmodsi4)
ldr r1, [sp]
add sp, sp, #4
pop { pc }
#endif // defined(USE_THUMB_1)
END_COMPILERRT_FUNCTION(__aeabi_idivmod)
NO_EXEC_STACK_DIRECTIVE
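
The Thumb-1 path above derives the remainder from the quotient instead of calling a combined helper: quot = __divsi3(num, den), then rem = num - quot * den (the muls/subs pair). In C (a sketch; the real routine returns the pair in r0/r1 rather than as an aggregate, and the struct name is invented):

int __divsi3(int a, int b); // compiler-rt signed division helper used above

typedef struct { int quot; int rem; } idivmod_t;

static idivmod_t aeabi_idivmod_sketch(int numerator, int denominator) {
    idivmod_t r;
    r.quot = __divsi3(numerator, denominator);
    r.rem  = numerator - r.quot * denominator;
    return r;
}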


@@ -0,0 +1,46 @@
//===-- aeabi_ldivmod.S - EABI ldivmod implementation ---------------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is dual licensed under the MIT and the University of Illinois Open
// Source Licenses. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
#include "../assembly.h"
// struct { int64_t quot; int64_t rem; }
// __aeabi_ldivmod(int64_t numerator, int64_t denominator) {
// int64_t rem, quot;
// quot = __divmoddi4(numerator, denominator, &rem);
// return {quot, rem};
// }
#if defined(__MINGW32__)
#define __aeabi_ldivmod __rt_sdiv64
#endif
.syntax unified
.p2align 2
DEFINE_COMPILERRT_FUNCTION(__aeabi_ldivmod)
push {r6, lr}
sub sp, sp, #16
add r6, sp, #8
str r6, [sp]
#if defined(__MINGW32__)
movs r6, r0
movs r0, r2
movs r2, r6
movs r6, r1
movs r1, r3
movs r3, r6
#endif
bl SYMBOL_NAME(__divmoddi4)
ldr r2, [sp, #8]
ldr r3, [sp, #12]
add sp, sp, #16
pop {r6, pc}
END_COMPILERRT_FUNCTION(__aeabi_ldivmod)
NO_EXEC_STACK_DIRECTIVE


@@ -0,0 +1,30 @@
//===-- aeabi_memcmp.S - EABI memcmp implementation -----------------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is dual licensed under the MIT and the University of Illinois Open
// Source Licenses. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
#include "../assembly.h"
// void __aeabi_memcmp(void *dest, void *src, size_t n) { memcmp(dest, src, n); }
.syntax unified
.p2align 2
DEFINE_COMPILERRT_FUNCTION(__aeabi_memcmp)
#ifdef USE_THUMB_1
push {r7, lr}
bl memcmp
pop {r7, pc}
#else
b memcmp
#endif
END_COMPILERRT_FUNCTION(__aeabi_memcmp)
DEFINE_AEABI_FUNCTION_ALIAS(__aeabi_memcmp4, __aeabi_memcmp)
DEFINE_AEABI_FUNCTION_ALIAS(__aeabi_memcmp8, __aeabi_memcmp)
NO_EXEC_STACK_DIRECTIVE


@@ -0,0 +1,30 @@
//===-- aeabi_memcpy.S - EABI memcpy implementation -----------------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is dual licensed under the MIT and the University of Illinois Open
// Source Licenses. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
#include "../assembly.h"
// void __aeabi_memcpy(void *dest, void *src, size_t n) { memcpy(dest, src, n); }
.syntax unified
.p2align 2
DEFINE_COMPILERRT_FUNCTION(__aeabi_memcpy)
#ifdef USE_THUMB_1
push {r7, lr}
bl memcpy
pop {r7, pc}
#else
b memcpy
#endif
END_COMPILERRT_FUNCTION(__aeabi_memcpy)
DEFINE_AEABI_FUNCTION_ALIAS(__aeabi_memcpy4, __aeabi_memcpy)
DEFINE_AEABI_FUNCTION_ALIAS(__aeabi_memcpy8, __aeabi_memcpy)
NO_EXEC_STACK_DIRECTIVE


@@ -0,0 +1,29 @@
//===-- aeabi_memmove.S - EABI memmove implementation --------------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is dual licensed under the MIT and the University of Illinois Open
// Source Licenses. See LICENSE.TXT for details.
//
//===---------------------------------------------------------------------===//
#include "../assembly.h"
// void __aeabi_memmove(void *dest, void *src, size_t n) { memmove(dest, src, n); }
.p2align 2
DEFINE_COMPILERRT_FUNCTION(__aeabi_memmove)
#ifdef USE_THUMB_1
push {r7, lr}
bl memmove
pop {r7, pc}
#else
b memmove
#endif
END_COMPILERRT_FUNCTION(__aeabi_memmove)
DEFINE_AEABI_FUNCTION_ALIAS(__aeabi_memmove4, __aeabi_memmove)
DEFINE_AEABI_FUNCTION_ALIAS(__aeabi_memmove8, __aeabi_memmove)
NO_EXEC_STACK_DIRECTIVE


@@ -0,0 +1,50 @@
//===-- aeabi_memset.S - EABI memset implementation -----------------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is dual licensed under the MIT and the University of Illinois Open
// Source Licenses. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
#include "../assembly.h"
// void __aeabi_memset(void *dest, size_t n, int c) { memset(dest, c, n); }
// void __aeabi_memclr(void *dest, size_t n) { __aeabi_memset(dest, n, 0); }
.syntax unified
.p2align 2
DEFINE_COMPILERRT_FUNCTION(__aeabi_memset)
mov r3, r1
mov r1, r2
mov r2, r3
#ifdef USE_THUMB_1
push {r7, lr}
bl memset
pop {r7, pc}
#else
b memset
#endif
END_COMPILERRT_FUNCTION(__aeabi_memset)
DEFINE_AEABI_FUNCTION_ALIAS(__aeabi_memset4, __aeabi_memset)
DEFINE_AEABI_FUNCTION_ALIAS(__aeabi_memset8, __aeabi_memset)
.p2align 2
DEFINE_COMPILERRT_FUNCTION(__aeabi_memclr)
mov r2, r1
movs r1, #0
#ifdef USE_THUMB_1
push {r7, lr}
bl memset
pop {r7, pc}
#else
b memset
#endif
END_COMPILERRT_FUNCTION(__aeabi_memclr)
DEFINE_AEABI_FUNCTION_ALIAS(__aeabi_memclr4, __aeabi_memclr)
DEFINE_AEABI_FUNCTION_ALIAS(__aeabi_memclr8, __aeabi_memclr)
NO_EXEC_STACK_DIRECTIVE


@@ -0,0 +1,58 @@
//===-- aeabi_uidivmod.S - EABI uidivmod implementation -------------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is dual licensed under the MIT and the University of Illinois Open
// Source Licenses. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
#include "../assembly.h"
// struct { unsigned quot; unsigned rem; }
// __aeabi_uidivmod(unsigned numerator, unsigned denominator) {
// unsigned rem, quot;
// quot = __udivmodsi4(numerator, denominator, &rem);
// return {quot, rem};
// }
#if defined(__MINGW32__)
#define __aeabi_uidivmod __rt_udiv
#endif
.syntax unified
.text
DEFINE_CODE_STATE
.p2align 2
DEFINE_COMPILERRT_FUNCTION(__aeabi_uidivmod)
#if defined(USE_THUMB_1)
cmp r0, r1
bcc LOCAL_LABEL(case_denom_larger)
push {r0, r1, lr}
bl SYMBOL_NAME(__aeabi_uidiv)
pop {r1, r2, r3}
muls r2, r0, r2 // r2 = quot * denom
subs r1, r1, r2
JMP (r3)
LOCAL_LABEL(case_denom_larger):
movs r1, r0
movs r0, #0
JMP (lr)
#else // defined(USE_THUMB_1)
push { lr }
sub sp, sp, #4
mov r2, sp
#if defined(__MINGW32__)
mov r3, r0
mov r0, r1
mov r1, r3
#endif
bl SYMBOL_NAME(__udivmodsi4)
ldr r1, [sp]
add sp, sp, #4
pop { pc }
#endif
END_COMPILERRT_FUNCTION(__aeabi_uidivmod)
NO_EXEC_STACK_DIRECTIVE
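
The Thumb-1 path here adds a shortcut the signed version lacks: when the numerator is smaller than the denominator, the quotient is zero and the remainder is the numerator, so the division call is skipped. A C sketch of that logic (again, the real routine returns the pair in r0/r1 and the names are invented):

unsigned __aeabi_uidiv(unsigned a, unsigned b); // helper called above

typedef struct { unsigned quot; unsigned rem; } uidivmod_t;

static uidivmod_t aeabi_uidivmod_sketch(unsigned num, unsigned den) {
    uidivmod_t r;
    if (num < den) {  // denominator larger: quotient 0, remainder numerator
        r.quot = 0;
        r.rem  = num;
    } else {
        r.quot = __aeabi_uidiv(num, den);
        r.rem  = num - r.quot * den;
    }
    return r;
}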


@@ -0,0 +1,46 @@
//===-- aeabi_uldivmod.S - EABI uldivmod implementation -------------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is dual licensed under the MIT and the University of Illinois Open
// Source Licenses. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
#include "../assembly.h"
// struct { uint64_t quot; uint64_t rem; }
// __aeabi_uldivmod(uint64_t numerator, uint64_t denominator) {
// uint64_t rem, quot;
// quot = __udivmoddi4(numerator, denominator, &rem);
// return {quot, rem};
// }
#if defined(__MINGW32__)
#define __aeabi_uldivmod __rt_udiv64
#endif
.syntax unified
.p2align 2
DEFINE_COMPILERRT_FUNCTION(__aeabi_uldivmod)
push {r6, lr}
sub sp, sp, #16
add r6, sp, #8
str r6, [sp]
#if defined(__MINGW32__)
movs r6, r0
movs r0, r2
movs r2, r6
movs r6, r1
movs r1, r3
movs r3, r6
#endif
bl SYMBOL_NAME(__udivmoddi4)
ldr r2, [sp, #8]
ldr r3, [sp, #12]
add sp, sp, #16
pop {r6, pc}
END_COMPILERRT_FUNCTION(__aeabi_uldivmod)
NO_EXEC_STACK_DIRECTIVE

Some files were not shown because too many files have changed in this diff.