Imported Upstream version 6.10.0.49

Former-commit-id: 1d6753294b2993e1fbf92de9366bb9544db4189b
Xamarin Public Jenkins (auto-signing)
2020-01-16 16:38:04 +00:00
parent d94e79959b
commit 468663ddbb
48518 changed files with 2789335 additions and 61176 deletions


@@ -0,0 +1,45 @@
#ifndef COMPILERRT_DD_HEADER
#define COMPILERRT_DD_HEADER
#include "../int_lib.h"
typedef union {
long double ld;
struct {
double hi;
double lo;
} s;
} DD;
typedef union {
double d;
uint64_t x;
} doublebits;
#define LOWORDER(xy,xHi,xLo,yHi,yLo) \
(((((xHi)*(yHi) - (xy)) + (xHi)*(yLo)) + (xLo)*(yHi)) + (xLo)*(yLo))
static __inline ALWAYS_INLINE double local_fabs(double x) {
doublebits result = {.d = x};
result.x &= UINT64_C(0x7fffffffffffffff);
return result.d;
}
static __inline ALWAYS_INLINE double high26bits(double x) {
doublebits result = {.d = x};
result.x &= UINT64_C(0xfffffffff8000000);
return result.d;
}
static __inline ALWAYS_INLINE int different_sign(double x, double y) {
doublebits xsignbit = {.d = x}, ysignbit = {.d = y};
int result = (int)(xsignbit.x >> 63) ^ (int)(ysignbit.x >> 63);
return result;
}
long double __gcc_qadd(long double, long double);
long double __gcc_qsub(long double, long double);
long double __gcc_qmul(long double, long double);
long double __gcc_qdiv(long double, long double);
#endif /* COMPILERRT_DD_HEADER */
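
A minimal sketch of what the DD view above exposes, assuming long double is the 128-bit IBM double-double type this directory targets and that the file is compiled next to DD.h (on other targets the union members will not line up like this): a value such as 2^60 + 1 does not fit in a single double, so it is carried as an unevaluated sum of a head and a tail.

#include <assert.h>
#include "DD.h"

int main(void) {
    DD v = { .ld = 0x1.0p60L + 1.0L };  /* 2^60 + 1, exact as a double-double */
    assert(v.s.hi == 0x1.0p60);         /* head: the nearest double           */
    assert(v.s.lo == 1.0);              /* tail: what the head cannot hold    */
    return 0;
}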


@@ -0,0 +1,91 @@
/* This file is distributed under the University of Illinois Open Source
* License. See LICENSE.TXT for details.
*/
#include "DD.h"
#include "../int_math.h"
#if !defined(CRT_INFINITY) && defined(HUGE_VAL)
#define CRT_INFINITY HUGE_VAL
#endif /* CRT_INFINITY */
#define makeFinite(x) { \
(x).s.hi = crt_copysign(crt_isinf((x).s.hi) ? 1.0 : 0.0, (x).s.hi); \
(x).s.lo = 0.0; \
}
long double _Complex
__divtc3(long double a, long double b, long double c, long double d)
{
DD cDD = { .ld = c };
DD dDD = { .ld = d };
int ilogbw = 0;
const double logbw = crt_logb(crt_fmax(crt_fabs(cDD.s.hi), crt_fabs(dDD.s.hi)));
if (crt_isfinite(logbw))
{
ilogbw = (int)logbw;
cDD.s.hi = crt_scalbn(cDD.s.hi, -ilogbw);
cDD.s.lo = crt_scalbn(cDD.s.lo, -ilogbw);
dDD.s.hi = crt_scalbn(dDD.s.hi, -ilogbw);
dDD.s.lo = crt_scalbn(dDD.s.lo, -ilogbw);
}
const long double denom = __gcc_qadd(__gcc_qmul(cDD.ld, cDD.ld), __gcc_qmul(dDD.ld, dDD.ld));
const long double realNumerator = __gcc_qadd(__gcc_qmul(a,cDD.ld), __gcc_qmul(b,dDD.ld));
const long double imagNumerator = __gcc_qsub(__gcc_qmul(b,cDD.ld), __gcc_qmul(a,dDD.ld));
DD real = { .ld = __gcc_qdiv(realNumerator, denom) };
DD imag = { .ld = __gcc_qdiv(imagNumerator, denom) };
real.s.hi = crt_scalbn(real.s.hi, -ilogbw);
real.s.lo = crt_scalbn(real.s.lo, -ilogbw);
imag.s.hi = crt_scalbn(imag.s.hi, -ilogbw);
imag.s.lo = crt_scalbn(imag.s.lo, -ilogbw);
if (crt_isnan(real.s.hi) && crt_isnan(imag.s.hi))
{
DD aDD = { .ld = a };
DD bDD = { .ld = b };
DD rDD = { .ld = denom };
if ((rDD.s.hi == 0.0) && (!crt_isnan(aDD.s.hi) ||
!crt_isnan(bDD.s.hi)))
{
real.s.hi = crt_copysign(CRT_INFINITY,cDD.s.hi) * aDD.s.hi;
real.s.lo = 0.0;
imag.s.hi = crt_copysign(CRT_INFINITY,cDD.s.hi) * bDD.s.hi;
imag.s.lo = 0.0;
}
else if ((crt_isinf(aDD.s.hi) || crt_isinf(bDD.s.hi)) &&
crt_isfinite(cDD.s.hi) && crt_isfinite(dDD.s.hi))
{
makeFinite(aDD);
makeFinite(bDD);
real.s.hi = CRT_INFINITY * (aDD.s.hi*cDD.s.hi + bDD.s.hi*dDD.s.hi);
real.s.lo = 0.0;
imag.s.hi = CRT_INFINITY * (bDD.s.hi*cDD.s.hi - aDD.s.hi*dDD.s.hi);
imag.s.lo = 0.0;
}
else if ((crt_isinf(cDD.s.hi) || crt_isinf(dDD.s.hi)) &&
crt_isfinite(aDD.s.hi) && crt_isfinite(bDD.s.hi))
{
makeFinite(cDD);
makeFinite(dDD);
real.s.hi = crt_copysign(0.0,(aDD.s.hi*cDD.s.hi + bDD.s.hi*dDD.s.hi));
real.s.lo = 0.0;
imag.s.hi = crt_copysign(0.0,(bDD.s.hi*cDD.s.hi - aDD.s.hi*dDD.s.hi));
imag.s.lo = 0.0;
}
}
long double _Complex z;
__real__ z = real.ld;
__imag__ z = imag.ld;
return z;
}
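
The crt_logb/crt_scalbn scaling above exists to keep c*c + d*d from overflowing or underflowing. A plain-double sketch of the same idea (scaled_div is a hypothetical helper, not part of this commit), contrasted with the naive formula:

#include <assert.h>
#include <math.h>

static void scaled_div(double a, double b, double c, double d,
                       double *re, double *im) {
    int ilogbw = 0;
    const double logbw = logb(fmax(fabs(c), fabs(d)));
    if (isfinite(logbw)) {
        ilogbw = (int)logbw;
        c = scalbn(c, -ilogbw);
        d = scalbn(d, -ilogbw);
    }
    const double denom = c*c + d*d;
    *re = scalbn((a*c + b*d) / denom, -ilogbw);
    *im = scalbn((b*c - a*d) / denom, -ilogbw);
}

int main(void) {
    /* 1 / (2^1000 + 2^1000 i) = 2^-1001 - 2^-1001 i, well inside double range */
    double re, im;
    scaled_div(1.0, 0.0, 0x1.0p1000, 0x1.0p1000, &re, &im);
    assert(re == 0x1.0p-1001 && im == -0x1.0p-1001);
    /* without the scaling the denominator overflows and the quotient collapses to 0 */
    const double c = 0x1.0p1000, d = 0x1.0p1000;
    assert((1.0*c + 0.0*d) / (c*c + d*d) == 0.0);
    return 0;
}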


@@ -0,0 +1,104 @@
/* This file is distributed under the University of Illinois Open Source
* License. See LICENSE.TXT for details.
*/
/* int64_t __fixtfdi(long double x);
* This file implements the PowerPC 128-bit double-double -> int64_t conversion
*/
#include "DD.h"
#include "../int_math.h"
uint64_t __fixtfdi(long double input)
{
const DD x = { .ld = input };
const doublebits hibits = { .d = x.s.hi };
const uint32_t absHighWord = (uint32_t)(hibits.x >> 32) & UINT32_C(0x7fffffff);
const uint32_t absHighWordMinusOne = absHighWord - UINT32_C(0x3ff00000);
/* If (1.0 - tiny) <= |input| < 0x1.0p63: */
if (UINT32_C(0x03f00000) > absHighWordMinusOne)
{
/* Do an unsigned conversion of the absolute value, then restore the sign. */
const int unbiasedHeadExponent = absHighWordMinusOne >> 20;
int64_t result = hibits.x & INT64_C(0x000fffffffffffff); /* mantissa(hi) */
result |= INT64_C(0x0010000000000000); /* mantissa(hi) with implicit bit */
result <<= 10; /* mantissa(hi) with one zero preceding bit. */
const int64_t hiNegationMask = ((int64_t)(hibits.x)) >> 63;
/* If the tail is non-zero, we need to patch in the tail bits. */
if (0.0 != x.s.lo)
{
const doublebits lobits = { .d = x.s.lo };
int64_t tailMantissa = lobits.x & INT64_C(0x000fffffffffffff);
tailMantissa |= INT64_C(0x0010000000000000);
/* At this point we have the mantissa of |tail| */
/* We need to negate it if head and tail have different signs. */
const int64_t loNegationMask = ((int64_t)(lobits.x)) >> 63;
const int64_t negationMask = loNegationMask ^ hiNegationMask;
tailMantissa = (tailMantissa ^ negationMask) - negationMask;
/* Now we have the mantissa of tail as a signed 2s-complement integer */
const int biasedTailExponent = (int)(lobits.x >> 52) & 0x7ff;
/* Shift the tail mantissa into the right position, accounting for the
* bias of 10 that we shifted the head mantissa by.
*/
tailMantissa >>= (unbiasedHeadExponent - (biasedTailExponent - (1023 - 10)));
result += tailMantissa;
}
result >>= (62 - unbiasedHeadExponent);
/* Restore the sign of the result and return */
result = (result ^ hiNegationMask) - hiNegationMask;
return result;
}
/* Edge cases handled here: */
/* |x| < 1, result is zero. */
if (1.0 > crt_fabs(x.s.hi))
return INT64_C(0);
/* x is very close to INT64_MIN; care must be taken to see which side of it we are on. */
if (x.s.hi == -0x1.0p63) {
int64_t result = INT64_MIN;
if (0.0 < x.s.lo)
{
/* If the tail is positive, the correct result is something other than
 * INT64_MIN; we need to figure out what it is.
 */
const doublebits lobits = { .d = x.s.lo };
int64_t tailMantissa = lobits.x & INT64_C(0x000fffffffffffff);
tailMantissa |= INT64_C(0x0010000000000000);
/* Now we negate the tailMantissa */
tailMantissa = (tailMantissa ^ INT64_C(-1)) + INT64_C(1);
/* And shift it by the appropriate amount */
const int biasedTailExponent = (int)(lobits.x >> 52) & 0x7ff;
tailMantissa >>= 1075 - biasedTailExponent;
result -= tailMantissa;
}
return result;
}
/* Signed overflows, infinities, and NaNs */
if (x.s.hi > 0.0)
return INT64_MAX;
else
return INT64_MIN;
}
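
A worked example of why the tail must be patched in: the double-double value 2^60 - 1 is stored as hi = 0x1.0p60 with lo = -1.0, so truncating the head alone is off by one. The sketch below (plain C, any target) shows the easy integral-tail case; the shifting above does the same job while also handling fractional tails and values near INT64_MIN.

#include <assert.h>
#include <stdint.h>

int main(void) {
    /* hi/lo pair of the double-double value 2^60 - 1 */
    double hi = 0x1.0p60;
    double lo = -1.0;
    /* the head alone loses the tail's contribution ... */
    assert((int64_t)hi == INT64_C(1) << 60);
    /* ... while folding in the (here integral) tail gives the exact value;
     * the integer shifts above compute the same thing bit by bit. */
    assert((int64_t)hi + (int64_t)lo == (INT64_C(1) << 60) - 1);
    return 0;
}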


@@ -0,0 +1,59 @@
/* This file is distributed under the University of Illinois Open Source
* License. See LICENSE.TXT for details.
*/
/* uint64_t __fixunstfdi(long double x); */
/* This file implements the PowerPC 128-bit double-double -> uint64_t conversion */
#include "DD.h"
uint64_t __fixunstfdi(long double input)
{
const DD x = { .ld = input };
const doublebits hibits = { .d = x.s.hi };
const uint32_t highWordMinusOne = (uint32_t)(hibits.x >> 32) - UINT32_C(0x3ff00000);
/* If (1.0 - tiny) <= input < 0x1.0p64: */
if (UINT32_C(0x04000000) > highWordMinusOne)
{
const int unbiasedHeadExponent = highWordMinusOne >> 20;
uint64_t result = hibits.x & UINT64_C(0x000fffffffffffff); /* mantissa(hi) */
result |= UINT64_C(0x0010000000000000); /* mantissa(hi) with implicit bit */
result <<= 11; /* mantissa(hi) left aligned in the int64 field. */
/* If the tail is non-zero, we need to patch in the tail bits. */
if (0.0 != x.s.lo)
{
const doublebits lobits = { .d = x.s.lo };
int64_t tailMantissa = lobits.x & INT64_C(0x000fffffffffffff);
tailMantissa |= INT64_C(0x0010000000000000);
/* At this point we have the mantissa of |tail| */
const int64_t negationMask = ((int64_t)(lobits.x)) >> 63;
tailMantissa = (tailMantissa ^ negationMask) - negationMask;
/* Now we have the mantissa of tail as a signed 2s-complement integer */
const int biasedTailExponent = (int)(lobits.x >> 52) & 0x7ff;
/* Shift the tail mantissa into the right position, accounting for the
* bias of 11 that we shifted the head mantissa by.
*/
tailMantissa >>= (unbiasedHeadExponent - (biasedTailExponent - (1023 - 11)));
result += tailMantissa;
}
result >>= (63 - unbiasedHeadExponent);
return result;
}
/* Edge cases are handled here, with saturation. */
if (1.0 > x.s.hi)
return UINT64_C(0);
else
return UINT64_MAX;
}
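
The single unsigned comparison above folds the whole range check into one subtraction: for 1.0 <= x < 0x1.0p64 the biased exponent field of the head lies in [0x3ff, 0x43f), while anything smaller, including every negative head, wraps around to a huge unsigned value and fails the test. A small self-check of that trick in plain double (highword is a hypothetical helper):

#include <assert.h>
#include <stdint.h>
#include <string.h>

static uint32_t highword(double d) {
    uint64_t bits;
    memcpy(&bits, &d, sizeof bits);
    return (uint32_t)(bits >> 32);
}

int main(void) {
    assert(highword(1.0)      - UINT32_C(0x3ff00000) <  UINT32_C(0x04000000));
    assert(highword(0x1.fp63) - UINT32_C(0x3ff00000) <  UINT32_C(0x04000000));
    assert(highword(0x1.0p64) - UINT32_C(0x3ff00000) >= UINT32_C(0x04000000));
    assert(highword(0.5)      - UINT32_C(0x3ff00000) >= UINT32_C(0x04000000)); /* wraps */
    return 0;
}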


@@ -0,0 +1,36 @@
/* This file is distributed under the University of Illinois Open Source
* License. See LICENSE.TXT for details.
*/
/* long double __floatditf(long long x); */
/* This file implements the PowerPC long long -> long double conversion */
#include "DD.h"
long double __floatditf(int64_t a) {
static const double twop32 = 0x1.0p32;
static const double twop52 = 0x1.0p52;
doublebits low = { .d = twop52 };
low.x |= a & UINT64_C(0x00000000ffffffff); /* 0x1.0p52 + low 32 bits of a. */
const double high_addend = (double)((int32_t)(a >> 32))*twop32 - twop52;
/* At this point, we have two double precision numbers
* high_addend and low.d, and we wish to return their sum
* as a canonicalized long double:
*/
/* This implementation sets the inexact flag spuriously.
* This could be avoided, but at some substantial cost.
*/
DD result;
result.s.hi = high_addend + low.d;
result.s.lo = (high_addend - result.s.hi) + low.d;
return result.ld;
}
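
The low word uses the classic 2^52 trick: OR-ing a 32-bit integer into the mantissa of 0x1.0p52 yields 2^52 + n exactly, because the ulp of 2^52 is 1, and subtracting 2^52 back out recovers n as a double without any int-to-float instruction. A sketch of the trick in isolation (assumes it is compiled next to DD.h so the doublebits union is available):

#include <assert.h>
#include <stdint.h>
#include "DD.h"

int main(void) {
    uint32_t n = UINT32_C(0xdeadbeef);
    doublebits t = { .d = 0x1.0p52 };
    t.x |= n;                            /* t.d is now exactly 2^52 + n */
    assert(t.d - 0x1.0p52 == (double)n);
    return 0;
}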


@@ -0,0 +1,41 @@
/* This file is distributed under the University of Illinois Open Source
* License. See LICENSE.TXT for details.
*/
/* long double __floatunditf(unsigned long long x); */
/* This file implements the PowerPC unsigned long long -> long double conversion */
#include "DD.h"
long double __floatunditf(uint64_t a) {
/* Begins with an exact copy of the code from __floatundidf */
static const double twop52 = 0x1.0p52;
static const double twop84 = 0x1.0p84;
static const double twop84_plus_twop52 = 0x1.00000001p84;
doublebits high = { .d = twop84 };
doublebits low = { .d = twop52 };
high.x |= a >> 32; /* 0x1.0p84 + high 32 bits of a */
low.x |= a & UINT64_C(0x00000000ffffffff); /* 0x1.0p52 + low 32 bits of a */
const double high_addend = high.d - twop84_plus_twop52;
/* At this point, we have two double precision numbers
* high_addend and low.d, and we wish to return their sum
* as a canonicalized long double:
*/
/* This implementation sets the inexact flag spuriously. */
/* This could be avoided, but at some substantial cost. */
DD result;
result.s.hi = high_addend + low.d;
result.s.lo = (high_addend - result.s.hi) + low.d;
return result.ld;
}
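
Both halves of the decomposition above are exact: the ulp of 0x1.0p84 is 2^32, so OR-ing the high 32 bits of a into its mantissa gives 2^84 + hi*2^32 with no rounding, just as 0x1.0p52 absorbs the low 32 bits, and subtracting twop84_plus_twop52 leaves high_addend + low.d == a before the final two-sum. A quick check of that claim (again assuming DD.h is on the include path):

#include <assert.h>
#include <stdint.h>
#include "DD.h"

int main(void) {
    const uint64_t a = UINT64_C(0xDEADBEEFCAFEF00D);
    doublebits high = { .d = 0x1.0p84 };
    doublebits low  = { .d = 0x1.0p52 };
    high.x |= a >> 32;
    low.x  |= a & UINT64_C(0x00000000ffffffff);
    assert(high.d == 0x1.0p84 + (double)(a >> 32) * 0x1.0p32);
    assert(low.d  == 0x1.0p52 + (double)(a & UINT64_C(0x00000000ffffffff)));
    return 0;
}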


@@ -0,0 +1,76 @@
/* This file is distributed under the University of Illinois Open Source
* License. See LICENSE.TXT for details.
*/
/* long double __gcc_qadd(long double x, long double y);
* This file implements the PowerPC 128-bit double-double add operation.
* This implementation is shamelessly cribbed from Apple's DDRT, circa 1993(!)
*/
#include "DD.h"
long double __gcc_qadd(long double x, long double y)
{
static const uint32_t infinityHi = UINT32_C(0x7ff00000);
DD dst = { .ld = x }, src = { .ld = y };
register double A = dst.s.hi, a = dst.s.lo,
B = src.s.hi, b = src.s.lo;
/* If both operands are zero: */
if ((A == 0.0) && (B == 0.0)) {
dst.s.hi = A + B;
dst.s.lo = 0.0;
return dst.ld;
}
/* If either operand is NaN or infinity: */
const doublebits abits = { .d = A };
const doublebits bbits = { .d = B };
if ((((uint32_t)(abits.x >> 32) & infinityHi) == infinityHi) ||
(((uint32_t)(bbits.x >> 32) & infinityHi) == infinityHi)) {
dst.s.hi = A + B;
dst.s.lo = 0.0;
return dst.ld;
}
/* If the computation overflows: */
/* This may be playing things a little bit fast and loose, but it will do for a start. */
const double testForOverflow = A + (B + (a + b));
const doublebits testbits = { .d = testForOverflow };
if (((uint32_t)(testbits.x >> 32) & infinityHi) == infinityHi) {
dst.s.hi = testForOverflow;
dst.s.lo = 0.0;
return dst.ld;
}
double H, h;
double T, t;
double W, w;
double Y;
H = B + (A - (A + B));
T = b + (a - (a + b));
h = A + (B - (A + B));
t = a + (b - (a + b));
if (local_fabs(A) <= local_fabs(B))
w = (a + b) + h;
else
w = (a + b) + H;
W = (A + B) + w;
Y = (A + B) - W;
Y += w;
if (local_fabs(a) <= local_fabs(b))
w = t + Y;
else
w = T + Y;
dst.s.hi = Y = W + w;
dst.s.lo = (W - Y) + w;
return dst.ld;
}
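
The H/h and T/t terms above are the two possible rounding errors of A + B and a + b; which one is exact depends on which operand is larger, hence the magnitude tests. The branch-free building block behind this is Knuth's two-sum, sketched here standalone (two_sum is a hypothetical helper name, not part of this commit):

#include <assert.h>

static void two_sum(double a, double b, double *s, double *e) {
    *s = a + b;                       /* rounded sum                        */
    double bb = *s - a;               /* the part of b that made it into *s */
    *e = (a - (*s - bb)) + (b - bb);  /* what rounding threw away           */
}

int main(void) {
    double s, e;
    two_sum(0x1.0p60, 1.0, &s, &e);
    assert(s == 0x1.0p60 && e == 1.0);  /* 2^60 + 1 held exactly as a pair */
    return 0;
}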


@@ -0,0 +1,55 @@
/* This file is distributed under the University of Illinois Open Source
* License. See LICENSE.TXT for details.
*/
/* long double __gcc_qdiv(long double x, long double y);
* This file implements the PowerPC 128-bit double-double division operation.
* This implementation is shamelessly cribbed from Apple's DDRT, circa 1993(!)
*/
#include "DD.h"
long double __gcc_qdiv(long double a, long double b)
{
static const uint32_t infinityHi = UINT32_C(0x7ff00000);
DD dst = { .ld = a }, src = { .ld = b };
register double x = dst.s.hi, x1 = dst.s.lo,
y = src.s.hi, y1 = src.s.lo;
double yHi, yLo, qHi, qLo;
double yq, tmp, q;
q = x / y;
/* Detect special cases */
if (q == 0.0) {
dst.s.hi = q;
dst.s.lo = 0.0;
return dst.ld;
}
const doublebits qBits = { .d = q };
if (((uint32_t)(qBits.x >> 32) & infinityHi) == infinityHi) {
dst.s.hi = q;
dst.s.lo = 0.0;
return dst.ld;
}
yHi = high26bits(y);
qHi = high26bits(q);
yq = y * q;
yLo = y - yHi;
qLo = q - qHi;
tmp = LOWORDER(yq, yHi, yLo, qHi, qLo);
tmp = (x - yq) - tmp;
tmp = ((tmp + x1) - y1 * q) / y;
x = q + tmp;
dst.s.lo = (q - x) + tmp;
dst.s.hi = x;
return dst.ld;
}
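
What the high26bits/LOWORDER dance reconstructs is the exact rounding error of the product y*q, so (x - yq) - tmp is the true residual x - y*q, which is then folded into the quotient's tail. On a target with a hardware fused multiply-add the same residual is simply fma(-y, q, x); the sketch below illustrates the correction that way (an alternative illustration, not what this pre-fma PowerPC code does):

#include <assert.h>
#include <math.h>

int main(void) {
    double x = 1.0, y = 3.0;
    double q = x / y;              /* head quotient, correct to 53 bits     */
    double r = fma(-y, q, x);      /* exact residual x - y*q                */
    double tail = r / y;           /* tail of the double-double quotient    */
    assert(r == 0x1.0p-54);        /* for 1/3 the residual is exactly 2^-54 */
    assert(tail != 0.0 && q + tail == q);  /* far below the head's ulp, so it
                                            * can only live in the lo word  */
    return 0;
}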


@@ -0,0 +1,53 @@
/* This file is distributed under the University of Illinois Open Source
* License. See LICENSE.TXT for details.
*/
/* long double __gcc_qmul(long double x, long double y);
* This file implements the PowerPC 128-bit double-double multiply operation.
* This implementation is shamelessly cribbed from Apple's DDRT, circa 1993(!)
*/
#include "DD.h"
long double __gcc_qmul(long double x, long double y)
{
static const uint32_t infinityHi = UINT32_C(0x7ff00000);
DD dst = { .ld = x }, src = { .ld = y };
register double A = dst.s.hi, a = dst.s.lo,
B = src.s.hi, b = src.s.lo;
double aHi, aLo, bHi, bLo;
double ab, tmp, tau;
ab = A * B;
/* Detect special cases */
if (ab == 0.0) {
dst.s.hi = ab;
dst.s.lo = 0.0;
return dst.ld;
}
const doublebits abBits = { .d = ab };
if (((uint32_t)(abBits.x >> 32) & infinityHi) == infinityHi) {
dst.s.hi = ab;
dst.s.lo = 0.0;
return dst.ld;
}
/* Generic cases handled here. */
aHi = high26bits(A);
bHi = high26bits(B);
aLo = A - aHi;
bLo = B - bHi;
tmp = LOWORDER(ab, aHi, aLo, bHi, bLo);
tmp += (A * b + a * B);
tau = ab + tmp;
dst.s.lo = (ab - tau) + tmp;
dst.s.hi = tau;
return dst.ld;
}
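
A worked check of the splitting above (assumes DD.h is on the include path): with A = B = 1 + 2^-30 the rounded product drops a 2^-60 term, and LOWORDER applied to the 26-bit halves recovers exactly that term, so the (hi, lo) pair produced here represents (1 + 2^-30)^2 with no error at all.

#include <assert.h>
#include "DD.h"

int main(void) {
    const double A = 0x1.00000004p0;            /* 1 + 2^-30           */
    const double B = 0x1.00000004p0;
    const double ab = A * B;                    /* rounds to 1 + 2^-29 */
    const double aHi = high26bits(A), aLo = A - aHi;
    const double bHi = high26bits(B), bLo = B - bHi;
    const double tmp = LOWORDER(ab, aHi, aLo, bHi, bLo);
    assert(ab == 0x1.00000008p0);               /* head: 1 + 2^-29            */
    assert(tmp == 0x1.0p-60);                   /* tail: the dropped 2^-60    */
    return 0;
}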


@@ -0,0 +1,76 @@
/* This file is distributed under the University of Illinois Open Source
* License. See LICENSE.TXT for details.
*/
/* long double __gcc_qsub(long double x, long double y);
* This file implements the PowerPC 128-bit double-double subtract operation.
* This implementation is shamelessly cribbed from Apple's DDRT, circa 1993(!)
*/
#include "DD.h"
long double __gcc_qsub(long double x, long double y)
{
static const uint32_t infinityHi = UINT32_C(0x7ff00000);
DD dst = { .ld = x }, src = { .ld = y };
register double A = dst.s.hi, a = dst.s.lo,
B = -src.s.hi, b = -src.s.lo;
/* If both operands are zero: */
if ((A == 0.0) && (B == 0.0)) {
dst.s.hi = A + B;
dst.s.lo = 0.0;
return dst.ld;
}
/* If either operand is NaN or infinity: */
const doublebits abits = { .d = A };
const doublebits bbits = { .d = B };
if ((((uint32_t)(abits.x >> 32) & infinityHi) == infinityHi) ||
(((uint32_t)(bbits.x >> 32) & infinityHi) == infinityHi)) {
dst.s.hi = A + B;
dst.s.lo = 0.0;
return dst.ld;
}
/* If the computation overflows: */
/* This may be playing things a little bit fast and loose, but it will do for a start. */
const double testForOverflow = A + (B + (a + b));
const doublebits testbits = { .d = testForOverflow };
if (((uint32_t)(testbits.x >> 32) & infinityHi) == infinityHi) {
dst.s.hi = testForOverflow;
dst.s.lo = 0.0;
return dst.ld;
}
double H, h;
double T, t;
double W, w;
double Y;
H = B + (A - (A + B));
T = b + (a - (a + b));
h = A + (B - (A + B));
t = a + (b - (a + b));
if (local_fabs(A) <= local_fabs(B))
w = (a + b) + h;
else
w = (a + b) + H;
W = (A + B) + w;
Y = (A + B) - W;
Y += w;
if (local_fabs(a) <= local_fabs(b))
w = t + Y;
else
w = T + Y;
dst.s.hi = Y = W + w;
dst.s.lo = (W - Y) + w;
return dst.ld;
}


@@ -0,0 +1,90 @@
/* This file is distributed under the University of Illinois Open Source
* License. See LICENSE.TXT for details.
*/
#include "DD.h"
#include "../int_math.h"
#define makeFinite(x) { \
(x).s.hi = crt_copysign(crt_isinf((x).s.hi) ? 1.0 : 0.0, (x).s.hi); \
(x).s.lo = 0.0; \
}
#define zeroNaN(x) { \
if (crt_isnan((x).s.hi)) { \
(x).s.hi = crt_copysign(0.0, (x).s.hi); \
(x).s.lo = 0.0; \
} \
}
long double _Complex
__multc3(long double a, long double b, long double c, long double d)
{
long double ac = __gcc_qmul(a,c);
long double bd = __gcc_qmul(b,d);
long double ad = __gcc_qmul(a,d);
long double bc = __gcc_qmul(b,c);
DD real = { .ld = __gcc_qsub(ac,bd) };
DD imag = { .ld = __gcc_qadd(ad,bc) };
if (crt_isnan(real.s.hi) && crt_isnan(imag.s.hi))
{
int recalc = 0;
DD aDD = { .ld = a };
DD bDD = { .ld = b };
DD cDD = { .ld = c };
DD dDD = { .ld = d };
if (crt_isinf(aDD.s.hi) || crt_isinf(bDD.s.hi))
{
makeFinite(aDD);
makeFinite(bDD);
zeroNaN(cDD);
zeroNaN(dDD);
recalc = 1;
}
if (crt_isinf(cDD.s.hi) || crt_isinf(dDD.s.hi))
{
makeFinite(cDD);
makeFinite(dDD);
zeroNaN(aDD);
zeroNaN(bDD);
recalc = 1;
}
if (!recalc)
{
DD acDD = { .ld = ac };
DD bdDD = { .ld = bd };
DD adDD = { .ld = ad };
DD bcDD = { .ld = bc };
if (crt_isinf(acDD.s.hi) || crt_isinf(bdDD.s.hi) ||
crt_isinf(adDD.s.hi) || crt_isinf(bcDD.s.hi))
{
zeroNaN(aDD);
zeroNaN(bDD);
zeroNaN(cDD);
zeroNaN(dDD);
recalc = 1;
}
}
if (recalc)
{
real.s.hi = CRT_INFINITY * (aDD.s.hi*cDD.s.hi - bDD.s.hi*dDD.s.hi);
real.s.lo = 0.0;
imag.s.hi = CRT_INFINITY * (aDD.s.hi*dDD.s.hi + bDD.s.hi*cDD.s.hi);
imag.s.lo = 0.0;
}
}
long double _Complex z;
__real__ z = real.ld;
__imag__ z = imag.ld;
return z;
}
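
An illustration of the case the recovery block above handles, in plain double (the long double code follows the same C99 Annex G rule): an infinite operand times a finite nonzero one must yield an infinity, but the textbook formula produces NaN in both parts, which the makeFinite/zeroNaN recomputation then repairs.

#include <assert.h>
#include <math.h>

int main(void) {
    double a = INFINITY, b = NAN;   /* z = inf + NaN*i, an infinite complex value */
    double c = 2.0,      d = 0.0;   /* w = 2 + 0i, finite and nonzero             */
    double re = a*c - b*d;          /* inf - NaN  -> NaN                          */
    double im = a*d + b*c;          /* NaN + NaN  -> NaN                          */
    assert(isnan(re) && isnan(im)); /* the naive product is all NaN ...           */
    /* ... whereas the recalc branch above treats inf as copysign(1, inf) and NaN
     * as copysign(0, NaN), then rescales by CRT_INFINITY, restoring an infinite
     * real part here. */
    return 0;
}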


@@ -0,0 +1,46 @@
//===-- restFP.S - Implement restFP ---------------------------------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is dual licensed under the MIT and the University of Illinois Open
// Source Licenses. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
#include "../assembly.h"
//
// Helper function used by compiler to restore ppc floating point registers at
// the end of the function epilog. This function returns to the address
// in the LR slot. So a function epilog must branch (b), not branch and link
// (bl), to this function.
// If the compiler wants to restore f27..f31, it does a "b restFP+52"
//
// This function should never be exported by a shared library. Each linkage
// unit carries its own copy of this function.
//
DEFINE_COMPILERRT_PRIVATE_FUNCTION_UNMANGLED(restFP)
lfd f14,-144(r1)
lfd f15,-136(r1)
lfd f16,-128(r1)
lfd f17,-120(r1)
lfd f18,-112(r1)
lfd f19,-104(r1)
lfd f20,-96(r1)
lfd f21,-88(r1)
lfd f22,-80(r1)
lfd f23,-72(r1)
lfd f24,-64(r1)
lfd f25,-56(r1)
lfd f26,-48(r1)
lfd f27,-40(r1)
lfd f28,-32(r1)
lfd f29,-24(r1)
lfd f30,-16(r1)
lfd f31,-8(r1)
lwz r0,8(r1)
mtlr r0
blr
NO_EXEC_STACK_DIRECTIVE


@@ -0,0 +1,43 @@
//===-- saveFP.S - Implement saveFP ---------------------------------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is dual licensed under the MIT and the University of Illinois Open
// Source Licenses. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
#include "../assembly.h"
//
// Helper function used by compiler to save ppc floating point registers in
// function prologs. This routine also saves r0 in the LR slot.
// If the compiler wants to save f27..f31, it does a "bl saveFP+52"
//
// This function should never be exported by a shared library. Each linkage
// unit carries its own copy of this function.
//
DEFINE_COMPILERRT_PRIVATE_FUNCTION_UNMANGLED(saveFP)
stfd f14,-144(r1)
stfd f15,-136(r1)
stfd f16,-128(r1)
stfd f17,-120(r1)
stfd f18,-112(r1)
stfd f19,-104(r1)
stfd f20,-96(r1)
stfd f21,-88(r1)
stfd f22,-80(r1)
stfd f23,-72(r1)
stfd f24,-64(r1)
stfd f25,-56(r1)
stfd f26,-48(r1)
stfd f27,-40(r1)
stfd f28,-32(r1)
stfd f29,-24(r1)
stfd f30,-16(r1)
stfd f31,-8(r1)
stw r0,8(r1)
blr
NO_EXEC_STACK_DIRECTIVE