2 /*============================================================================
4 This C source file is an extension to the SoftFloat IEC/IEEE Floating-point
5 Arithmetic Package, Release 2a.
7 Written by Andreas Grabher for Previous, NeXT Computer Emulator.
9 =============================================================================*/
14 #include "softfloat.h"
15 #include "softfloat-specialize.h"
16 #include "softfloat_fpsp_tables.h"
19 /*----------------------------------------------------------------------------
20 | Algorithms for transcendental functions supported by MC68881 and MC68882
21 | mathematical coprocessors. The functions are derived from FPSP library.
22 *----------------------------------------------------------------------------*/
/* 64-bit significand of pi; the same bit pattern serves pi, pi/2 and pi/4
   (only the exponent differs).  pi_sig0/pi_sig1 appear to be a two-part
   head/tail extra-precision split of pi -- TODO confirm against the FPSP
   sources (their uses are not visible in this excerpt). */
#define pi_sig LIT64(0xc90fdaa22168c235)
#define pi_sig0 LIT64(0xc90fdaa22168c234)
#define pi_sig1 LIT64(0xc4c6628b80dc1cd1)

/* Biased extended-precision exponents: pi/2 lies in [1,2) -> 0x3FFF,
   pi/4 lies in [0.5,1) -> 0x3FFE. */
#define piby2_exp 0x3FFF
#define piby4_exp 0x3FFE

/* 1.0 as explicit biased exponent / significand. */
#define one_exp 0x3FFF
#define one_sig LIT64(0x8000000000000000)
36 int8_t user_rnd_mode, user_rnd_prec; \
37 user_rnd_mode = status->float_rounding_mode; \
38 user_rnd_prec = status->floatx80_rounding_precision; \
39 status->float_rounding_mode = float_round_nearest_even; \
40 status->floatx80_rounding_precision = 80
43 status->float_rounding_mode = user_rnd_mode; \
44 status->floatx80_rounding_precision = user_rnd_prec
46 /*----------------------------------------------------------------------------
47 | Function for compactifying extended double-precision floating point values.
48 *----------------------------------------------------------------------------*/
/*
 * Pack the biased exponent and the top 16 significand bits of an
 * extended-precision value into a single int32, so that magnitude range
 * checks can be done with one integer comparison.
 */
static int32_t floatx80_make_compact(int32_t aExp, uint64_t aSig)
{
    int32_t topSig = (int32_t)(aSig >> 48);

    return (aExp << 16) | topSig;
}
/*----------------------------------------------------------------------------
| Arc cosine
*----------------------------------------------------------------------------*/
/*
 * Arc cosine, computed via the identity acos(X) = 2*atan(sqrt((1-X)/(1+X))).
 * Special cases handled here: NaN propagates; acos(+/-0) = pi/2 (inexact);
 * acos(+1) = +0; acos(-1) = pi (inexact); |X| > 1 raises invalid and
 * returns the default NaN.  Intermediate work is done in fp0/fp1.
 */
floatx80 floatx80_acos(floatx80 a, float_status *status)
    floatx80 fp0, fp1, one;

    aSig = extractFloatx80Frac(a);
    aExp = extractFloatx80Exp(a);
    aSign = extractFloatx80Sign(a);

    /* Signalling or quiet NaN operand. */
    if (aExp == 0x7FFF && (uint64_t) (aSig<<1)) {
        return propagateFloatx80NaNOneArg(a, status);

    /* acos(0) = pi/2, rounded in the caller-requested precision. */
    if (aExp == 0 && aSig == 0) {
        float_raise(float_flag_inexact, status);
        return roundAndPackFloatx80(status->floatx80_rounding_precision, 0, piby2_exp, pi_sig, 0, status);

    compact = floatx80_make_compact(aExp, aSig);

    if (compact >= 0x3FFF8000) { // |X| >= 1
        if (aExp == one_exp && aSig == one_sig) { // |X| == 1
            if (aSign) { // X == -1
                a = packFloatx80(0, pi_exp, pi_sig);
                float_raise(float_flag_inexact, status);
                return floatx80_move(a, status);
            return packFloatx80(0, 0, 0);
        /* |X| > 1 is outside the domain of acos. */
        float_raise(float_flag_invalid, status);
        return floatx80_default_nan(status);

    /* Main path: acos(X) = 2*atan(sqrt((1-X)/(1+X))). */
    one = packFloatx80(0, one_exp, one_sig);

    fp1 = floatx80_add(one, fp0, status); // 1 + X
    fp0 = floatx80_sub(one, fp0, status); // 1 - X
    fp0 = floatx80_div(fp0, fp1, status); // (1-X)/(1+X)
    fp0 = floatx80_sqrt(fp0, status); // SQRT((1-X)/(1+X))
    fp0 = floatx80_atan(fp0, status); // ATAN(SQRT((1-X)/(1+X)))

    a = floatx80_add(fp0, fp0, status); // 2 * ATAN(SQRT((1-X)/(1+X)))

    float_raise(float_flag_inexact, status);
/*----------------------------------------------------------------------------
| Arc sine
*----------------------------------------------------------------------------*/
/*
 * Arc sine, computed via asin(X) = atan(X / sqrt((1+X)*(1-X))).
 * Special cases handled here: NaN propagates; asin(+/-0) = +/-0;
 * asin(+/-1) = +/-pi/2 (inexact); |X| > 1 raises invalid and returns
 * the default NaN.
 */
floatx80 floatx80_asin(floatx80 a, float_status *status)
    floatx80 fp0, fp1, fp2, one;

    aSig = extractFloatx80Frac(a);
    aExp = extractFloatx80Exp(a);
    aSign = extractFloatx80Sign(a);

    if (aExp == 0x7FFF && (uint64_t) (aSig<<1)) {
        return propagateFloatx80NaNOneArg(a, status);

    /* asin(+/-0) is exact. */
    if (aExp == 0 && aSig == 0) {
        return packFloatx80(aSign, 0, 0);

    compact = floatx80_make_compact(aExp, aSig);

    if (compact >= 0x3FFF8000) { // |X| >= 1
        if (aExp == one_exp && aSig == one_sig) { // |X| == 1
            /* asin(+/-1) = +/-pi/2 (pi_sig is also the significand of pi/2). */
            float_raise(float_flag_inexact, status);
            a = packFloatx80(aSign, piby2_exp, pi_sig);
            return floatx80_move(a, status);
        /* |X| > 1 is outside the domain of asin. */
        float_raise(float_flag_invalid, status);
        return floatx80_default_nan(status);

    /* Main path: asin(X) = atan(X/sqrt((1+X)(1-X))). */
    one = packFloatx80(0, one_exp, one_sig);

    fp1 = floatx80_sub(one, fp0, status); // 1 - X
    fp2 = floatx80_add(one, fp0, status); // 1 + X
    fp1 = floatx80_mul(fp2, fp1, status); // (1+X)*(1-X)
    fp1 = floatx80_sqrt(fp1, status); // SQRT((1+X)*(1-X))
    fp0 = floatx80_div(fp0, fp1, status); // X/SQRT((1+X)*(1-X))

    a = floatx80_atan(fp0, status); // ATAN(X/SQRT((1+X)*(1-X)))

    float_raise(float_flag_inexact, status);
/*----------------------------------------------------------------------------
| Arc tangent
*----------------------------------------------------------------------------*/
/*
 * Arc tangent, FPSP style, in three regimes selected by the compact form
 * of |X|:
 *   - 1/16 <= |X| < 16: table lookup of ATAN(F) for a truncated F close
 *     to X, plus a short polynomial for ATAN(U), U = (X-F)/(1+X*F).
 *   - |X| >= 16: atan(X) = sign(X)*pi/2 - atan(1/X), computed through
 *     X' = -1/X and an odd polynomial in X' (C1..C5 coefficients);
 *     for |X| > 2^100 the atan(1/X) term collapses to a tiny correction.
 *   - |X| < 1/16: odd polynomial in X directly (B1..B6 coefficients);
 *     for |X| < 2^-40 the result is just X (inexact).
 * atan(+/-inf) = +/-pi/2 (inexact); NaN propagates; atan(+/-0) = +/-0.
 */
floatx80 floatx80_atan(floatx80 a, float_status *status)
    int32_t compact, tbl_index;
    floatx80 fp0, fp1, fp2, fp3, xsave;

    aSig = extractFloatx80Frac(a);
    aExp = extractFloatx80Exp(a);
    aSign = extractFloatx80Sign(a);

    if (aExp == 0x7FFF) {
        if ((uint64_t) (aSig<<1)) return propagateFloatx80NaNOneArg(a, status);
        /* atan(+/-inf) = +/-pi/2. */
        a = packFloatx80(aSign, piby2_exp, pi_sig);
        float_raise(float_flag_inexact, status);
        return floatx80_move(a, status);

    if (aExp == 0 && aSig == 0) {
        return packFloatx80(aSign, 0, 0);

    compact = floatx80_make_compact(aExp, aSig);

    if (compact < 0x3FFB8000 || compact > 0x4002FFFF) { // |X| >= 16 or |X| < 1/16
        if (compact > 0x3FFF8000) { // |X| >= 16
            if (compact > 0x40638000) { // |X| > 2^(100)
                /* atan(1/X) is below one ulp of pi/2: pi/2 minus a 2^-16382
                   correction keeps the result inexact and correctly rounded. */
                fp0 = packFloatx80(aSign, piby2_exp, pi_sig);
                fp1 = packFloatx80(aSign, 0x0001, one_sig);

                a = floatx80_sub(fp0, fp1, status);

                float_raise(float_flag_inexact, status);

            /* 16 <= |X| <= 2^100: atan(X) = sign*pi/2 + atan(X'), X' = -1/X. */
            fp1 = packFloatx80(1, one_exp, one_sig); // -1
            fp1 = floatx80_div(fp1, fp0, status); // X' = -1/X

            fp0 = floatx80_mul(fp1, fp1, status); // Y = X'*X'
            fp1 = floatx80_mul(fp0, fp0, status); // Z = Y*Y
            fp3 = float64_to_floatx80(LIT64(0xBFB70BF398539E6A), status); // C5
            fp2 = float64_to_floatx80(LIT64(0x3FBC7187962D1D7D), status); // C4
            fp3 = floatx80_mul(fp3, fp1, status); // Z*C5
            fp2 = floatx80_mul(fp2, fp1, status); // Z*C4
            fp3 = floatx80_add(fp3, float64_to_floatx80(LIT64(0xBFC24924827107B8), status), status); // C3+Z*C5
            fp2 = floatx80_add(fp2, float64_to_floatx80(LIT64(0x3FC999999996263E), status), status); // C2+Z*C4
            fp1 = floatx80_mul(fp1, fp3, status); // Z*(C3+Z*C5)
            fp2 = floatx80_mul(fp2, fp0, status); // Y*(C2+Z*C4)
            fp1 = floatx80_add(fp1, float64_to_floatx80(LIT64(0xBFD5555555555536), status), status); // C1+Z*(C3+Z*C5)
            fp0 = floatx80_mul(fp0, xsave, status); // X'*Y
            fp1 = floatx80_add(fp1, fp2, status); // [Y*(C2+Z*C4)]+[C1+Z*(C3+Z*C5)]
            fp0 = floatx80_mul(fp0, fp1, status); // X'*Y*([B1+Z*(B3+Z*B5)]+[Y*(B2+Z*(B4+Z*B6))]) ??
            fp0 = floatx80_add(fp0, xsave, status);
            fp1 = packFloatx80(aSign, piby2_exp, pi_sig);

            a = floatx80_add(fp0, fp1, status);

            float_raise(float_flag_inexact, status);

        } else { // |X| < 1/16
            if (compact < 0x3FD78000) { // |X| < 2^(-40)
                /* atan(X) ~= X to full precision; just signal inexact. */
                a = floatx80_move(a, status);

                float_raise(float_flag_inexact, status);

            /* 2^-40 <= |X| < 1/16: odd polynomial in X (coefficients B1..B6). */
            fp0 = floatx80_mul(fp0, fp0, status); // Y = X*X
            fp1 = floatx80_mul(fp0, fp0, status); // Z = Y*Y
            fp2 = float64_to_floatx80(LIT64(0x3FB344447F876989), status); // B6
            fp3 = float64_to_floatx80(LIT64(0xBFB744EE7FAF45DB), status); // B5
            fp2 = floatx80_mul(fp2, fp1, status); // Z*B6
            fp3 = floatx80_mul(fp3, fp1, status); // Z*B5
            fp2 = floatx80_add(fp2, float64_to_floatx80(LIT64(0x3FBC71C646940220), status), status); // B4+Z*B6
            fp3 = floatx80_add(fp3, float64_to_floatx80(LIT64(0xBFC24924921872F9), status), status); // B3+Z*B5
            fp2 = floatx80_mul(fp2, fp1, status); // Z*(B4+Z*B6)
            fp1 = floatx80_mul(fp1, fp3, status); // Z*(B3+Z*B5)
            fp2 = floatx80_add(fp2, float64_to_floatx80(LIT64(0x3FC9999999998FA9), status), status); // B2+Z*(B4+Z*B6)
            fp1 = floatx80_add(fp1, float64_to_floatx80(LIT64(0xBFD5555555555555), status), status); // B1+Z*(B3+Z*B5)
            fp2 = floatx80_mul(fp2, fp0, status); // Y*(B2+Z*(B4+Z*B6))
            fp0 = floatx80_mul(fp0, xsave, status); // X*Y
            fp1 = floatx80_add(fp1, fp2, status); // [B1+Z*(B3+Z*B5)]+[Y*(B2+Z*(B4+Z*B6))]
            fp0 = floatx80_mul(fp0, fp1, status); // X*Y*([B1+Z*(B3+Z*B5)]+[Y*(B2+Z*(B4+Z*B6))])

            a = floatx80_add(fp0, xsave, status);

            float_raise(float_flag_inexact, status);

    /* 1/16 <= |X| < 16: split X into F (top 5 significand bits, rounded
       up by the OR below) and a small U, then atan(X) = atan(F)+atan(U). */
    aSig &= LIT64(0xF800000000000000);
    aSig |= LIT64(0x0400000000000000);
    xsave = packFloatx80(aSign, aExp, aSig); // F

    fp2 = packFloatx80(0, one_exp, one_sig); // 1
    fp1 = floatx80_mul(fp1, xsave, status); // X*F
    fp0 = floatx80_sub(fp0, xsave, status); // X-F
    fp1 = floatx80_add(fp1, fp2, status); // 1 + X*F
    fp0 = floatx80_div(fp0, fp1, status); // U = (X-F)/(1+X*F)

    /* Index atan_tbl by the exponent (octave) and top significand bits of X. */
    tbl_index &= 0x7FFF0000;
    tbl_index -= 0x3FFB0000;
    tbl_index += compact&0x00007800;

    fp3 = atan_tbl[tbl_index];

    fp3.high |= aSign ? 0x8000 : 0; // ATAN(F)

    fp1 = floatx80_mul(fp0, fp0, status); // V = U*U
    fp2 = float64_to_floatx80(LIT64(0xBFF6687E314987D8), status); // A3
    fp2 = floatx80_add(fp2, fp1, status); // A3+V
    fp2 = floatx80_mul(fp2, fp1, status); // V*(A3+V)
    fp1 = floatx80_mul(fp1, fp0, status); // U*V
    fp2 = floatx80_add(fp2, float64_to_floatx80(LIT64(0x4002AC6934A26DB3), status), status); // A2+V*(A3+V)
    fp1 = floatx80_mul(fp1, float64_to_floatx80(LIT64(0xBFC2476F4E1DA28E), status), status); // A1+U*V
    fp1 = floatx80_mul(fp1, fp2, status); // A1*U*V*(A2+V*(A3+V))
    fp0 = floatx80_add(fp0, fp1, status); // ATAN(U)

    a = floatx80_add(fp0, fp3, status); // ATAN(X)

    float_raise(float_flag_inexact, status);
335 /*----------------------------------------------------------------------------
336 | Hyperbolic arc tangent
337 *----------------------------------------------------------------------------*/
/*
 * Hyperbolic arc tangent, via
 *   atanh(X) = sign(X) * (1/2) * log1p(2|X| / (1 - |X|)).
 * Special cases handled here: NaN propagates; atanh(+/-0) = +/-0;
 * |X| == 1 raises divide-by-zero and returns +/-infinity; |X| > 1
 * raises invalid and returns the default NaN.
 */
floatx80 floatx80_atanh(floatx80 a, float_status *status)
    floatx80 fp0, fp1, fp2, one;

    aSig = extractFloatx80Frac(a);
    aExp = extractFloatx80Exp(a);
    aSign = extractFloatx80Sign(a);

    if (aExp == 0x7FFF && (uint64_t) (aSig<<1)) {
        return propagateFloatx80NaNOneArg(a, status);

    if (aExp == 0 && aSig == 0) {
        return packFloatx80(aSign, 0, 0);

    compact = floatx80_make_compact(aExp, aSig);

    if (compact >= 0x3FFF8000) { // |X| >= 1
        if (aExp == one_exp && aSig == one_sig) { // |X| == 1
            /* atanh(+/-1): pole of the function. */
            float_raise(float_flag_divbyzero, status);
            return packFloatx80(aSign, 0x7FFF, floatx80_default_infinity_low);
        /* |X| > 1 is outside the domain of atanh. */
        float_raise(float_flag_invalid, status);
        return floatx80_default_nan(status);

    one = packFloatx80(0, one_exp, one_sig);
    fp2 = packFloatx80(aSign, 0x3FFE, one_sig); // SIGN(X) * (1/2)
    fp0 = packFloatx80(0, aExp, aSig); // Y = |X|
    fp1 = packFloatx80(1, aExp, aSig); // -Y
    fp0 = floatx80_add(fp0, fp0, status); // 2Y
    fp1 = floatx80_add(fp1, one, status); // 1-Y
    fp0 = floatx80_div(fp0, fp1, status); // Z = 2Y/(1-Y)
    fp0 = floatx80_lognp1(fp0, status); // LOG1P(Z)

    a = floatx80_mul(fp0, fp2, status); // ATANH(X) = SIGN(X) * (1/2) * LOG1P(Z)

    float_raise(float_flag_inexact, status);
/*----------------------------------------------------------------------------
| Cosine
*----------------------------------------------------------------------------*/
/*
 * Cosine.  The argument is reduced modulo pi/2 to R with n = quadrant
 * count; (n + adjn) & 1 then selects a cosine or sine polynomial and the
 * next bit selects the result sign.  (adjn is set in code elided from
 * this excerpt -- presumably 0 for cos so the same tail can serve sin;
 * verify against the full source.)  Very large |X| (>= 15*pi) goes
 * through an iterative two-part (twopi1/twopi2) reduction first.
 * cos(+/-0) = 1 exactly; cos(+/-inf) raises invalid -> default NaN;
 * NaN propagates; |X| < 2^-40 returns 1 - 2^-126 (inexact).
 */
floatx80 floatx80_cos(floatx80 a, float_status *status)
    int32_t compact, l, n, j;
    floatx80 fp0, fp1, fp2, fp3, fp4, fp5, x, invtwopi, twopi1, twopi2;
    float32 posneg1, twoto63;

    aSig = extractFloatx80Frac(a);
    aExp = extractFloatx80Exp(a);
    aSign = extractFloatx80Sign(a);

    if (aExp == 0x7FFF) {
        if ((uint64_t) (aSig<<1)) return propagateFloatx80NaNOneArg(a, status);
        /* cos(+/-inf) is undefined. */
        float_raise(float_flag_invalid, status);
        return floatx80_default_nan(status);

    if (aExp == 0 && aSig == 0) {
        return packFloatx80(0, one_exp, one_sig);

    compact = floatx80_make_compact(aExp, aSig);

    if (compact < 0x3FD78000 || compact > 0x4004BC7E) { // 2^(-40) > |X| > 15 PI
        if (compact > 0x3FFF8000) { // |X| >= 15 PI
            /* REDUCEX: strip multiples of 2pi with a two-word (head/tail)
               representation until the argument is small enough. */
            fp1 = packFloatx80(0, 0, 0);

            if (compact == 0x7FFEFFFF) {
                /* Largest finite operand: pre-subtract one 2pi of the
                   opposite sign to avoid overflow in the loop below. */
                twopi1 = packFloatx80(aSign ^ 1, 0x7FFE, LIT64(0xC90FDAA200000000));
                twopi2 = packFloatx80(aSign ^ 1, 0x7FDC, LIT64(0x85A308D300000000));
                fp0 = floatx80_add(fp0, twopi1, status);
                fp0 = floatx80_add(fp0, twopi2, status);
                fp1 = floatx80_sub(fp1, fp0, status);
                fp1 = floatx80_add(fp1, twopi2, status);

            xSign = extractFloatx80Sign(fp0);
            xExp = extractFloatx80Exp(fp0);

            /* Scale the constants by 2^l so N = round(X * inv(2pi)) fits. */
            invtwopi = packFloatx80(0, 0x3FFE - l, LIT64(0xA2F9836E4E44152A)); // INVTWOPI
            twopi1 = packFloatx80(0, 0x3FFF + l, LIT64(0xC90FDAA200000000));
            twopi2 = packFloatx80(0, 0x3FDD + l, LIT64(0x85A308D300000000));

            twoto63 = 0x5F000000;
            twoto63 |= xSign ? 0x80000000 : 0x00000000; // SIGN(INARG)*2^63 IN SGL

            fp2 = floatx80_mul(fp0, invtwopi, status);
            /* Adding and subtracting sign*2^63 rounds fp2 to an integer. */
            fp2 = floatx80_add(fp2, float32_to_floatx80(twoto63, status), status); // THE FRACTIONAL PART OF FP2 IS ROUNDED
            fp2 = floatx80_sub(fp2, float32_to_floatx80(twoto63, status), status); // FP2 is N
            fp4 = floatx80_mul(twopi1, fp2, status); // W = N*P1
            fp5 = floatx80_mul(twopi2, fp2, status); // w = N*P2
            fp3 = floatx80_add(fp4, fp5, status); // FP3 is P
            fp4 = floatx80_sub(fp4, fp3, status); // W-P
            fp0 = floatx80_sub(fp0, fp3, status); // FP0 is A := R - P
            fp4 = floatx80_add(fp4, fp5, status); // FP4 is p = (W-P)+w
            fp3 = fp0; // FP3 is A
            fp1 = floatx80_sub(fp1, fp4, status); // FP1 is a := r - p
            fp0 = floatx80_add(fp0, fp1, status); // FP0 is R := A+a

            n = floatx80_to_int32(fp2, status);

            fp3 = floatx80_sub(fp3, fp0, status); // A-R
            fp1 = floatx80_add(fp1, fp3, status); // FP1 is r := (A-R)+a

        /* |X| < 2^-40: cos(X) ~= 1; return 1 - 2^-126 so the inexact
           result is rounded correctly in every rounding mode. */
        fp0 = float32_to_floatx80(0x3F800000, status); // 1

        a = floatx80_sub(fp0, float32_to_floatx80(0x00800000, status), status);

        a = floatx80_move(a, status);

        float_raise(float_flag_inexact, status);

    /* Main reduction: n = nearest int of X*2/pi; R = X - n*(pi/2) using a
       two-part pi/2 table (pi_tbl head + pi_tbl2 tail). */
    fp1 = floatx80_mul(fp0, float64_to_floatx80(LIT64(0x3FE45F306DC9C883), status), status); // X*2/PI

    n = floatx80_to_int32(fp1, status);

    fp0 = floatx80_sub(fp0, pi_tbl[j], status); // X-Y1
    fp0 = floatx80_sub(fp0, float32_to_floatx80(pi_tbl2[j], status), status); // FP0 IS R = (X-Y1)-Y2

    if ((n + adjn) & 1) {
        /* Odd quadrant: cos(X) = +/- cos(R), even cosine polynomial B1..B8. */
        fp0 = floatx80_mul(fp0, fp0, status); // FP0 IS S
        fp1 = floatx80_mul(fp0, fp0, status); // FP1 IS T
        fp2 = float64_to_floatx80(LIT64(0x3D2AC4D0D6011EE3), status); // B8
        fp3 = float64_to_floatx80(LIT64(0xBDA9396F9F45AC19), status); // B7

        xSign = extractFloatx80Sign(fp0); // X IS S
        xExp = extractFloatx80Exp(fp0);
        xSig = extractFloatx80Frac(fp0);

        if (((n + adjn) >> 1) & 1) {
            posneg1 = 0xBF800000; // -1
            posneg1 = 0x3F800000; // 1
        } // X IS NOW R'= SGN*R

        fp2 = floatx80_mul(fp2, fp1, status); // TB8
        fp3 = floatx80_mul(fp3, fp1, status); // TB7
        fp2 = floatx80_add(fp2, float64_to_floatx80(LIT64(0x3E21EED90612C972), status), status); // B6+TB8
        fp3 = floatx80_add(fp3, float64_to_floatx80(LIT64(0xBE927E4FB79D9FCF), status), status); // B5+TB7
        fp2 = floatx80_mul(fp2, fp1, status); // T(B6+TB8)
        fp3 = floatx80_mul(fp3, fp1, status); // T(B5+TB7)
        fp2 = floatx80_add(fp2, float64_to_floatx80(LIT64(0x3EFA01A01A01D423), status), status); // B4+T(B6+TB8)
        fp4 = packFloatx80(1, 0x3FF5, LIT64(0xB60B60B60B61D438));
        fp3 = floatx80_add(fp3, fp4, status); // B3+T(B5+TB7)
        fp2 = floatx80_mul(fp2, fp1, status); // T(B4+T(B6+TB8))
        fp1 = floatx80_mul(fp1, fp3, status); // T(B3+T(B5+TB7))
        fp4 = packFloatx80(0, 0x3FFA, LIT64(0xAAAAAAAAAAAAAB5E));
        fp2 = floatx80_add(fp2, fp4, status); // B2+T(B4+T(B6+TB8))
        fp1 = floatx80_add(fp1, float32_to_floatx80(0xBF000000, status), status); // B1+T(B3+T(B5+TB7))
        fp0 = floatx80_mul(fp0, fp2, status); // S(B2+T(B4+T(B6+TB8)))
        fp0 = floatx80_add(fp0, fp1, status); // [B1+T(B3+T(B5+TB7))]+[S(B2+T(B4+T(B6+TB8)))]

        x = packFloatx80(xSign, xExp, xSig);
        fp0 = floatx80_mul(fp0, x, status);

        a = floatx80_add(fp0, float32_to_floatx80(posneg1, status), status);

        float_raise(float_flag_inexact, status);

    /* Even quadrant: cos(X) = +/- sin(R), odd sine polynomial A1..A7. */
    xSign = extractFloatx80Sign(fp0); // X IS R
    xExp = extractFloatx80Exp(fp0);
    xSig = extractFloatx80Frac(fp0);

    xSign ^= ((n + adjn) >> 1) & 1; // X IS NOW R'= SGN*R

    fp0 = floatx80_mul(fp0, fp0, status); // FP0 IS S
    fp1 = floatx80_mul(fp0, fp0, status); // FP1 IS T
    fp3 = float64_to_floatx80(LIT64(0xBD6AAA77CCC994F5), status); // A7
    fp2 = float64_to_floatx80(LIT64(0x3DE612097AAE8DA1), status); // A6
    fp3 = floatx80_mul(fp3, fp1, status); // T*A7
    fp2 = floatx80_mul(fp2, fp1, status); // T*A6
    fp3 = floatx80_add(fp3, float64_to_floatx80(LIT64(0xBE5AE6452A118AE4), status), status); // A5+T*A7
    fp2 = floatx80_add(fp2, float64_to_floatx80(LIT64(0x3EC71DE3A5341531), status), status); // A4+T*A6
    fp3 = floatx80_mul(fp3, fp1, status); // T(A5+TA7)
    fp2 = floatx80_mul(fp2, fp1, status); // T(A4+TA6)
    fp3 = floatx80_add(fp3, float64_to_floatx80(LIT64(0xBF2A01A01A018B59), status), status); // A3+T(A5+TA7)
    fp4 = packFloatx80(0, 0x3FF8, LIT64(0x88888888888859AF));
    fp2 = floatx80_add(fp2, fp4, status); // A2+T(A4+TA6)
    fp1 = floatx80_mul(fp1, fp3, status); // T(A3+T(A5+TA7))
    fp2 = floatx80_mul(fp2, fp0, status); // S(A2+T(A4+TA6))
    fp4 = packFloatx80(1, 0x3FFC, LIT64(0xAAAAAAAAAAAAAA99));
    fp1 = floatx80_add(fp1, fp4, status); // A1+T(A3+T(A5+TA7))
    fp1 = floatx80_add(fp1, fp2, status); // [A1+T(A3+T(A5+TA7))]+[S(A2+T(A4+TA6))]

    x = packFloatx80(xSign, xExp, xSig);
    fp0 = floatx80_mul(fp0, x, status); // R'*S
    fp0 = floatx80_mul(fp0, fp1, status); // SIN(R')-R'

    a = floatx80_add(fp0, x, status);

    float_raise(float_flag_inexact, status);
/*----------------------------------------------------------------------------
| Hyperbolic cosine
*----------------------------------------------------------------------------*/
/*
 * Hyperbolic cosine: cosh(X) = exp(|X|)/2 + 1/(2*exp(|X|)).
 * The sign of X is irrelevant (cosh is even); cosh(+/-inf) = +inf;
 * cosh(+/-0) = 1 exactly; NaN propagates.  For large |X| (compact
 * > 0x400CB167, where exp(|X|) alone would overflow) the argument is
 * shifted down by a two-part constant (apparently ~16380*log2 -- TODO
 * confirm) and the result rescaled by 2^16380 via fp1; past the
 * overflow threshold (compact > 0x400CB2B3) a forced overflow is
 * produced by roundAndPackFloatx80 with a huge exponent.
 */
floatx80 floatx80_cosh(floatx80 a, float_status *status)
    aSig = extractFloatx80Frac(a);
    aExp = extractFloatx80Exp(a);
    // aSign = extractFloatx80Sign(a);

    if (aExp == 0x7FFF) {
        if ((uint64_t) (aSig<<1)) return propagateFloatx80NaNOneArg(a, status);
        return packFloatx80(0, aExp, aSig);

    if (aExp == 0 && aSig == 0) {
        return packFloatx80(0, one_exp, one_sig);

    compact = floatx80_make_compact(aExp, aSig);

    if (compact > 0x400CB167) {
        if (compact > 0x400CB2B3) {
            /* Guaranteed overflow: exponent 0x8000 forces it. */
            a = roundAndPackFloatx80(status->floatx80_rounding_precision, 0, 0x8000, one_sig, 0, status);
            float_raise(float_flag_inexact, status);

        /* Compute exp(|X| - C) and rescale by 2^16380 (fp1). */
        fp0 = packFloatx80(0, aExp, aSig);

        fp0 = floatx80_sub(fp0, float64_to_floatx80(LIT64(0x40C62D38D3D64634), status), status);
        fp0 = floatx80_sub(fp0, float64_to_floatx80(LIT64(0x3D6F90AEB1E75CC7), status), status);
        fp0 = floatx80_etox(fp0, status);
        fp1 = packFloatx80(0, 0x7FFB, one_sig);

        a = floatx80_mul(fp0, fp1, status);

        float_raise(float_flag_inexact, status);

    /* Moderate |X|: direct formula. */
    fp0 = packFloatx80(0, aExp, aSig); // |X|
    fp0 = floatx80_etox(fp0, status); // EXP(|X|)
    fp0 = floatx80_mul(fp0, float32_to_floatx80(0x3F000000, status), status); // (1/2)*EXP(|X|)
    fp1 = float32_to_floatx80(0x3E800000, status); // 1/4
    fp1 = floatx80_div(fp1, fp0, status); // 1/(2*EXP(|X|))

    a = floatx80_add(fp0, fp1, status);

    float_raise(float_flag_inexact, status);
/*----------------------------------------------------------------------------
| e to x
*----------------------------------------------------------------------------*/
/*
 * e^X, FPSP style.  Reduction: N = round(X * 64/log2), J = N mod 64,
 * M = N div 64, R = X - N*log2/64 (log2/64 split into L1 head + L2 tail);
 * then e^X = 2^M * 2^(J/64) * e^R, with e^R - 1 from a degree-5
 * polynomial (A1..A5) and 2^(J/64) from exp_tbl[J] + exp_tbl2[J].
 * Near the exponent-range limits (|X| >= 16380 log2) the scale is split
 * into two factors (m/m1, scale/adjscale) to avoid intermediate
 * over/underflow; beyond |X| >= 16480 log2 a forced overflow or
 * underflow result is produced directly.  e^(+inf)=+inf, e^(-inf)=+0,
 * e^(+/-0)=1 exactly, NaN propagates, |X| < 2^-65 returns 1+X.
 */
floatx80 floatx80_etox(floatx80 a, float_status *status)
    int32_t compact, n, j, k, m, m1;
    floatx80 fp0, fp1, fp2, fp3, l2, scale, adjscale;

    aSig = extractFloatx80Frac(a);
    aExp = extractFloatx80Exp(a);
    aSign = extractFloatx80Sign(a);

    if (aExp == 0x7FFF) {
        if ((uint64_t) (aSig<<1)) return propagateFloatx80NaNOneArg(a, status);
        if (aSign) return packFloatx80(0, 0, 0);

    if (aExp == 0 && aSig == 0) {
        return packFloatx80(0, one_exp, one_sig);

    if (aExp >= 0x3FBE) { // |X| >= 2^(-65)
        compact = floatx80_make_compact(aExp, aSig);

        if (compact < 0x400CB167) { // |X| < 16380 log2
            fp0 = floatx80_mul(fp0, float32_to_floatx80(0x42B8AA3B, status), status); // 64/log2 * X

            n = floatx80_to_int32(fp0, status); // int(64/log2*X)
            fp0 = int32_to_floatx80(n);

            j = n & 0x3F; // J = N mod 64
            m = n / 64; // NOTE: this is really arithmetic right shift by 6
            if (n < 0 && j) { // arithmetic right shift is division and round towards minus infinity

            m += 0x3FFF; // biased exponent of 2^(M)

            fp0 = floatx80_mul(fp0, float32_to_floatx80(0xBC317218, status), status); // N * L1, L1 = lead(-log2/64)
            l2 = packFloatx80(0, 0x3FDC, LIT64(0x82E308654361C4C6));
            fp2 = floatx80_mul(fp2, l2, status); // N * L2, L1+L2 = -log2/64
            fp0 = floatx80_add(fp0, fp1, status); // X + N*L1
            fp0 = floatx80_add(fp0, fp2, status); // R

            fp1 = floatx80_mul(fp0, fp0, status); // S = R*R
            fp2 = float32_to_floatx80(0x3AB60B70, status); // A5
            fp2 = floatx80_mul(fp2, fp1, status); // fp2 is S*A5
            fp3 = floatx80_mul(float32_to_floatx80(0x3C088895, status), fp1, status); // fp3 is S*A4
            fp2 = floatx80_add(fp2, float64_to_floatx80(LIT64(0x3FA5555555554431), status), status); // fp2 is A3+S*A5
            fp3 = floatx80_add(fp3, float64_to_floatx80(LIT64(0x3FC5555555554018), status), status); // fp3 is A2+S*A4
            fp2 = floatx80_mul(fp2, fp1, status); // fp2 is S*(A3+S*A5)
            fp3 = floatx80_mul(fp3, fp1, status); // fp3 is S*(A2+S*A4)
            fp2 = floatx80_add(fp2, float32_to_floatx80(0x3F000000, status), status); // fp2 is A1+S*(A3+S*A5)
            fp3 = floatx80_mul(fp3, fp0, status); // fp3 IS R*S*(A2+S*A4)
            fp2 = floatx80_mul(fp2, fp1, status); // fp2 IS S*(A1+S*(A3+S*A5))
            fp0 = floatx80_add(fp0, fp3, status); // fp0 IS R+R*S*(A2+S*A4)
            fp0 = floatx80_add(fp0, fp2, status); // fp0 IS EXP(R) - 1

            fp0 = floatx80_mul(fp0, fp1, status); // 2^(J/64)*(Exp(R)-1)
            fp0 = floatx80_add(fp0, float32_to_floatx80(exp_tbl2[j], status), status); // accurate 2^(J/64)
            fp0 = floatx80_add(fp0, fp1, status); // 2^(J/64) + 2^(J/64)*(Exp(R)-1)

            scale = packFloatx80(0, m, one_sig);

            /* Second scale factor; applied when m alone would over/underflow
               (the guarding condition is elided from this excerpt). */
            adjscale = packFloatx80(0, m1, one_sig);
            fp0 = floatx80_mul(fp0, adjscale, status);

            a = floatx80_mul(fp0, scale, status);

            float_raise(float_flag_inexact, status);

        } else { // |X| >= 16380 log2
            if (compact > 0x400CB27C) { // |X| >= 16480 log2
                /* Force underflow (tiny exponent) or overflow (huge one). */
                a = roundAndPackFloatx80(status->floatx80_rounding_precision, 0, -0x1000, aSig, 0, status);

                a = roundAndPackFloatx80(status->floatx80_rounding_precision, 0, 0x8000, aSig, 0, status);

                float_raise(float_flag_inexact, status);

            /* 16380 log2 <= |X| < 16480 log2: same reduction as above,
               but split the scale as 2^M = 2^M1 * 2^M1 * 2^(M-2*M1). */
            fp0 = floatx80_mul(fp0, float32_to_floatx80(0x42B8AA3B, status), status); // 64/log2 * X

            n = floatx80_to_int32(fp0, status); // int(64/log2*X)
            fp0 = int32_to_floatx80(n);

            j = n & 0x3F; // J = N mod 64
            k = n / 64; // NOTE: this is really arithmetic right shift by 6
            if (n < 0 && j) { // arithmetic right shift is division and round towards minus infinity
            m1 = k / 2; // NOTE: this is really arithmetic right shift by 1
            if (k < 0 && (k & 1)) { // arithmetic right shift is division and round towards minus infinity

            m1 += 0x3FFF; // biased exponent of 2^(M1)
            m += 0x3FFF; // biased exponent of 2^(M)

    } else { // |X| < 2^(-65)
        /* e^X ~= 1 + X to full precision. */
        a = floatx80_add(a, float32_to_floatx80(0x3F800000, status), status); // 1 + X

        float_raise(float_flag_inexact, status);
/*----------------------------------------------------------------------------
| e to x minus 1
*----------------------------------------------------------------------------*/
/*
 * e^X - 1, FPSP style.  Three ranges:
 *   - 1/4 <= |X| <= 70 log2: same N/J/M reduction as floatx80_etox, but
 *     the -1 is folded in as onebysc = -2^(-M) so that
 *     e^X - 1 = 2^M * (2^(J/64)*e^R - 2^(-M)); the three visible
 *     summation orders handle different M magnitudes to preserve
 *     accuracy (the branch conditions themselves are elided here).
 *   - |X| > 70 log2: negative X returns -1 + 2^-126 (inexact); positive
 *     X simply defers to floatx80_etox (same overflow behavior).
 *   - |X| < 1/4: direct degree-12 polynomial (B1..B12) in S = X*X,
 *     split into even/odd accumulators fp2/fp1; for |X| < 2^-65 the
 *     result is X itself, computed with scaling for the denormal range
 *     (|X| < 2^-16382) so the right flags are raised.
 * e^(+inf)-1 = +inf; e^(-inf)-1 = -1; NaN propagates; +/-0 returns +/-0.
 */
floatx80 floatx80_etoxm1(floatx80 a, float_status *status)
    int32_t compact, n, j, m, m1;
    floatx80 fp0, fp1, fp2, fp3, l2, sc, onebysc;

    aSig = extractFloatx80Frac(a);
    aExp = extractFloatx80Exp(a);
    aSign = extractFloatx80Sign(a);

    if (aExp == 0x7FFF) {
        if ((uint64_t) (aSig<<1)) return propagateFloatx80NaNOneArg(a, status);
        /* expm1(-inf) = -1. */
        if (aSign) return packFloatx80(aSign, one_exp, one_sig);

    if (aExp == 0 && aSig == 0) {
        return packFloatx80(aSign, 0, 0);

    if (aExp >= 0x3FFD) { // |X| >= 1/4
        compact = floatx80_make_compact(aExp, aSig);

        if (compact <= 0x4004C215) { // |X| <= 70 log2
            fp0 = floatx80_mul(fp0, float32_to_floatx80(0x42B8AA3B, status), status); // 64/log2 * X

            n = floatx80_to_int32(fp0, status); // int(64/log2*X)
            fp0 = int32_to_floatx80(n);

            j = n & 0x3F; // J = N mod 64
            m = n / 64; // NOTE: this is really arithmetic right shift by 6
            if (n < 0 && j) { // arithmetic right shift is division and round towards minus infinity

            //m += 0x3FFF; // biased exponent of 2^(M)
            //m1 += 0x3FFF; // biased exponent of -2^(-M)

            fp0 = floatx80_mul(fp0, float32_to_floatx80(0xBC317218, status), status); // N * L1, L1 = lead(-log2/64)
            l2 = packFloatx80(0, 0x3FDC, LIT64(0x82E308654361C4C6));
            fp2 = floatx80_mul(fp2, l2, status); // N * L2, L1+L2 = -log2/64
            fp0 = floatx80_add(fp0, fp1, status); // X + N*L1
            fp0 = floatx80_add(fp0, fp2, status); // R

            fp1 = floatx80_mul(fp0, fp0, status); // S = R*R
            fp2 = float32_to_floatx80(0x3950097B, status); // A6
            fp2 = floatx80_mul(fp2, fp1, status); // fp2 is S*A6
            fp3 = floatx80_mul(float32_to_floatx80(0x3AB60B6A, status), fp1, status); // fp3 is S*A5
            fp2 = floatx80_add(fp2, float64_to_floatx80(LIT64(0x3F81111111174385), status), status); // fp2 IS A4+S*A6
            fp3 = floatx80_add(fp3, float64_to_floatx80(LIT64(0x3FA5555555554F5A), status), status); // fp3 is A3+S*A5
            fp2 = floatx80_mul(fp2, fp1, status); // fp2 IS S*(A4+S*A6)
            fp3 = floatx80_mul(fp3, fp1, status); // fp3 IS S*(A3+S*A5)
            fp2 = floatx80_add(fp2, float64_to_floatx80(LIT64(0x3FC5555555555555), status), status); // fp2 IS A2+S*(A4+S*A6)
            fp3 = floatx80_add(fp3, float32_to_floatx80(0x3F000000, status), status); // fp3 IS A1+S*(A3+S*A5)
            fp2 = floatx80_mul(fp2, fp1, status); // fp2 IS S*(A2+S*(A4+S*A6))
            fp1 = floatx80_mul(fp1, fp3, status); // fp1 IS S*(A1+S*(A3+S*A5))
            fp2 = floatx80_mul(fp2, fp0, status); // fp2 IS R*S*(A2+S*(A4+S*A6))
            fp0 = floatx80_add(fp0, fp1, status); // fp0 IS R+S*(A1+S*(A3+S*A5))
            fp0 = floatx80_add(fp0, fp2, status); // fp0 IS EXP(R) - 1

            fp0 = floatx80_mul(fp0, exp_tbl[j], status); // 2^(J/64)*(Exp(R)-1)

            /* Fold the table tail and the -2^(-M) term in an order chosen
               by the magnitude of M (selection branches elided here). */
            fp1 = float32_to_floatx80(exp_tbl2[j], status);
            onebysc = packFloatx80(1, m1 + 0x3FFF, one_sig); // -2^(-M)
            fp1 = floatx80_add(fp1, onebysc, status);
            fp0 = floatx80_add(fp0, fp1, status);
            fp0 = floatx80_add(fp0, exp_tbl[j], status);

            fp0 = floatx80_add(fp0, float32_to_floatx80(exp_tbl2[j], status), status);
            fp0 = floatx80_add(fp0, exp_tbl[j], status);
            onebysc = packFloatx80(1, m1 + 0x3FFF, one_sig); // -2^(-M)
            fp0 = floatx80_add(fp0, onebysc, status);
            } else { // -3 <= m <= 63
            fp0 = floatx80_add(fp0, float32_to_floatx80(exp_tbl2[j], status), status);
            onebysc = packFloatx80(1, m1 + 0x3FFF, one_sig); // -2^(-M)
            fp1 = floatx80_add(fp1, onebysc, status);
            fp0 = floatx80_add(fp0, fp1, status);

            sc = packFloatx80(0, m + 0x3FFF, one_sig);

            a = floatx80_mul(fp0, sc, status);

            float_raise(float_flag_inexact, status);

        } else { // |X| > 70 log2
            fp0 = float32_to_floatx80(0xBF800000, status); // -1

            /* X very negative: expm1(X) ~= -1; the +2^-126 makes the
               result inexact and correctly rounded in all modes. */
            a = floatx80_add(fp0, float32_to_floatx80(0x00800000, status), status); // -1 + 2^(-126)

            float_raise(float_flag_inexact, status);

        /* X very positive: behaves exactly like e^X. */
        return floatx80_etox(a, status);

    } else { // |X| < 1/4
        if (aExp >= 0x3FBE) {
            fp0 = floatx80_mul(fp0, fp0, status); // S = X*X
            fp1 = float32_to_floatx80(0x2F30CAA8, status); // B12
            fp1 = floatx80_mul(fp1, fp0, status); // S * B12
            fp2 = float32_to_floatx80(0x310F8290, status); // B11
            fp1 = floatx80_add(fp1, float32_to_floatx80(0x32D73220, status), status); // B10

            fp2 = floatx80_mul(fp2, fp0, status);
            fp1 = floatx80_mul(fp1, fp0, status);

            fp2 = floatx80_add(fp2, float32_to_floatx80(0x3493F281, status), status); // B9
            fp1 = floatx80_add(fp1, float64_to_floatx80(LIT64(0x3EC71DE3A5774682), status), status); // B8

            fp2 = floatx80_mul(fp2, fp0, status);
            fp1 = floatx80_mul(fp1, fp0, status);

            fp2 = floatx80_add(fp2, float64_to_floatx80(LIT64(0x3EFA01A019D7CB68), status), status); // B7
            fp1 = floatx80_add(fp1, float64_to_floatx80(LIT64(0x3F2A01A01A019DF3), status), status); // B6

            fp2 = floatx80_mul(fp2, fp0, status);
            fp1 = floatx80_mul(fp1, fp0, status);

            fp2 = floatx80_add(fp2, float64_to_floatx80(LIT64(0x3F56C16C16C170E2), status), status); // B5
            fp1 = floatx80_add(fp1, float64_to_floatx80(LIT64(0x3F81111111111111), status), status); // B4

            fp2 = floatx80_mul(fp2, fp0, status);
            fp1 = floatx80_mul(fp1, fp0, status);

            fp2 = floatx80_add(fp2, float64_to_floatx80(LIT64(0x3FA5555555555555), status), status); // B3
            fp3 = packFloatx80(0, 0x3FFC, LIT64(0xAAAAAAAAAAAAAAAB));
            fp1 = floatx80_add(fp1, fp3, status); // B2

            fp2 = floatx80_mul(fp2, fp0, status);
            fp1 = floatx80_mul(fp1, fp0, status);

            fp2 = floatx80_mul(fp2, fp0, status);
            fp1 = floatx80_mul(fp1, a, status);

            fp0 = floatx80_mul(fp0, float32_to_floatx80(0x3F000000, status), status); // S*B1
            fp1 = floatx80_add(fp1, fp2, status); // Q
            fp0 = floatx80_add(fp0, fp1, status); // S*B1+Q

            a = floatx80_add(fp0, a, status);

            float_raise(float_flag_inexact, status);

        } else { // |X| < 2^(-65)
            sc = packFloatx80(1, 1, one_sig);

            if (aExp < 0x0033) { // |X| < 2^(-16382)
                /* Scale up by 2^140, add, then scale back down so that
                   underflow/inexact flags are set correctly. */
                fp0 = floatx80_mul(fp0, float64_to_floatx80(LIT64(0x48B0000000000000), status), status);
                fp0 = floatx80_add(fp0, sc, status);

                a = floatx80_mul(fp0, float64_to_floatx80(LIT64(0x3730000000000000), status), status);

            a = floatx80_add(fp0, sc, status);

            float_raise(float_flag_inexact, status);
/*----------------------------------------------------------------------------
| Log base 10
*----------------------------------------------------------------------------*/
/*
 * Base-10 logarithm: log10(X) = ln(X) * log10(e).
 * INV_L10 (0x3FFD:DE5BD8A937287195) is log10(e) ~= 0.4342944819.
 * Special cases visible here: NaN propagates; log10(+/-0) raises
 * divide-by-zero and returns -infinity; negative X raises invalid and
 * returns the default NaN.
 */
floatx80 floatx80_log10(floatx80 a, float_status *status)
    aSig = extractFloatx80Frac(a);
    aExp = extractFloatx80Exp(a);
    aSign = extractFloatx80Sign(a);

    if (aExp == 0x7FFF) {
        if ((uint64_t) (aSig<<1)) return propagateFloatx80NaNOneArg(a, status);

    if (aExp == 0 && aSig == 0) {
        /* log of zero: pole. */
        float_raise(float_flag_divbyzero, status);
        return packFloatx80(1, 0x7FFF, floatx80_default_infinity_low);

    /* Negative operand: log undefined. */
    float_raise(float_flag_invalid, status);
    return floatx80_default_nan(status);

    fp0 = floatx80_logn(a, status);
    fp1 = packFloatx80(0, 0x3FFD, LIT64(0xDE5BD8A937287195)); // INV_L10

    a = floatx80_mul(fp0, fp1, status); // LOGN(X)*INV_L10

    float_raise(float_flag_inexact, status);
/*----------------------------------------------------------------------------
| Log base 2
*----------------------------------------------------------------------------*/
/*
 * Base-2 logarithm: log2(X) = ln(X) * log2(e), with an exact shortcut
 * for X = 2^k (significand == one_sig) that returns the integer k.
 * INV_L2 (0x3FFF:B8AA3B295C17F0BC) is log2(e) ~= 1.4426950409.
 * Special cases visible here: NaN propagates; log2(+/-0) raises
 * divide-by-zero and returns -infinity; denormals are normalized first;
 * negative X raises invalid and returns the default NaN.
 */
floatx80 floatx80_log2(floatx80 a, float_status *status)
    aSig = extractFloatx80Frac(a);
    aExp = extractFloatx80Exp(a);
    aSign = extractFloatx80Sign(a);

    if (aExp == 0x7FFF) {
        if ((uint64_t) (aSig<<1)) return propagateFloatx80NaNOneArg(a, status);

    /* log of zero: pole. */
    float_raise(float_flag_divbyzero, status);
    return packFloatx80(1, 0x7FFF, floatx80_default_infinity_low);

    normalizeFloatx80Subnormal(aSig, &aExp, &aSig);

    /* Negative operand: log undefined. */
    float_raise(float_flag_invalid, status);
    return floatx80_default_nan(status);

    if (aSig == one_sig) { // X is 2^k
        /* Exact: log2(2^k) = k (unbiased exponent). */
        a = int32_to_floatx80(aExp-0x3FFF);

    fp0 = floatx80_logn(a, status);
    fp1 = packFloatx80(0, 0x3FFF, LIT64(0xB8AA3B295C17F0BC)); // INV_L2

    a = floatx80_mul(fp0, fp1, status); // LOGN(X)*INV_L2

    float_raise(float_flag_inexact, status);
/*----------------------------------------------------------------------------
| Log base e
*----------------------------------------------------------------------------*/
/*
 * Natural logarithm (FPSP "slogn" algorithm).
 * Away from 1, X is written as 2^k * Y with Y in [1,2); Y is split into a
 * table entry F (7 leading significand bits) plus a small U = (Y-F)/F, and
 * logn(X) = k*log2 + log(F) + log(1+U), with log(1+U) from a degree-6
 * polynomial in V = U*U.  Near 1 (15/16 < X < 17/16) an atanh-style series
 * in U = 2(X-1)/(X+1) is used instead.
 * NOTE(review): the extraction dropped interior lines (numbering gaps), so
 * some branch bodies, returns and the computation of k/adjk/fp3/saveu are
 * not visible in this fragment.
 */
1088 floatx80 floatx80_logn(floatx80 a, float_status *status)
1092 uint64_t aSig, fSig;
1094 int32_t compact, j, k, adjk;
1095 floatx80 fp0, fp1, fp2, fp3, f, logof2, klog2, saveu;
1097 aSig = extractFloatx80Frac(a);
1098 aExp = extractFloatx80Exp(a);
1099 aSign = extractFloatx80Sign(a);
// NaN propagates; the +infinity path falls in a dropped line.
1101 if (aExp == 0x7FFF) {
1102 if ((uint64_t) (aSig<<1)) return propagateFloatx80NaNOneArg(a, status);
// logn(0): divide-by-zero, result -infinity.
1110 if (aSig == 0) { // zero
1111 float_raise(float_flag_divbyzero, status);
1112 return packFloatx80(1, 0x7FFF, floatx80_default_infinity_low);
1115 if ((aSig & one_sig) == 0) { // denormal
1116 normalizeFloatx80Subnormal(aSig, &aExp, &aSig);
1119 a = packFloatx80(aSign, aExp, aSig);
1122 normalizeFloatx80Subnormal(aSig, &aExp, &aSig);
// Negative operand: invalid operation, default NaN.
1127 float_raise(float_flag_invalid, status);
1128 return floatx80_default_nan(status);
1133 compact = floatx80_make_compact(aExp, aSig);
// Main branch: X not close to 1 -> table-driven reduction.
1135 if (compact < 0x3FFEF07D || compact > 0x3FFF8841) { // |X| < 15/16 or |X| > 17/16
1138 fp1 = int32_to_floatx80(k);
// F keeps the top 7 fraction bits of Y, rounded up by 2^-7 (the OR term);
// j indexes log_tbl: entry j is 1/F, entry j+1 is log(F).
1140 fSig = (aSig & LIT64(0xFE00000000000000)) | LIT64(0x0100000000000000);
1141 j = (fSig >> 56) & 0x7E; // DISPLACEMENT FOR 1/F
1143 f = packFloatx80(0, 0x3FFF, fSig); // F
1144 fp0 = packFloatx80(0, 0x3FFF, aSig); // Y
1146 fp0 = floatx80_sub(fp0, f, status); // Y-F
1149 fp0 = floatx80_mul(fp0, log_tbl[j], status); // FP0 IS U = (Y-F)/F
1150 logof2 = packFloatx80(0, 0x3FFE, LIT64(0xB17217F7D1CF79AC));
1151 klog2 = floatx80_mul(fp1, logof2, status); // FP1 IS K*LOG2
1152 fp2 = floatx80_mul(fp0, fp0, status); // FP2 IS V=U*U
// Two interleaved Horner chains for log(1+U) ~ U + V*(A1 + V*(...)) + ...
// (fp3 is loaded with V in a dropped line).
1157 fp1 = floatx80_mul(fp1, float64_to_floatx80(LIT64(0x3FC2499AB5E4040B), status), status); // V*A6
1158 fp2 = floatx80_mul(fp2, float64_to_floatx80(LIT64(0xBFC555B5848CB7DB), status), status); // V*A5
1159 fp1 = floatx80_add(fp1, float64_to_floatx80(LIT64(0x3FC99999987D8730), status), status); // A4+V*A6
1160 fp2 = floatx80_add(fp2, float64_to_floatx80(LIT64(0xBFCFFFFFFF6F7E97), status), status); // A3+V*A5
1161 fp1 = floatx80_mul(fp1, fp3, status); // V*(A4+V*A6)
1162 fp2 = floatx80_mul(fp2, fp3, status); // V*(A3+V*A5)
1163 fp1 = floatx80_add(fp1, float64_to_floatx80(LIT64(0x3FD55555555555A4), status), status); // A2+V*(A4+V*A6)
1164 fp2 = floatx80_add(fp2, float64_to_floatx80(LIT64(0xBFE0000000000008), status), status); // A1+V*(A3+V*A5)
1165 fp1 = floatx80_mul(fp1, fp3, status); // V*(A2+V*(A4+V*A6))
1166 fp2 = floatx80_mul(fp2, fp3, status); // V*(A1+V*(A3+V*A5))
1167 fp1 = floatx80_mul(fp1, fp0, status); // U*V*(A2+V*(A4+V*A6))
1168 fp0 = floatx80_add(fp0, fp2, status); // U+V*(A1+V*(A3+V*A5))
1170 fp1 = floatx80_add(fp1, log_tbl[j+1], status); // LOG(F)+U*V*(A2+V*(A4+V*A6))
1171 fp0 = floatx80_add(fp0, fp1, status); // FP0 IS LOG(F) + LOG(1+U)
// Final assembly: add the k*log2 contribution.
1175 a = floatx80_add(fp0, klog2, status);
1177 float_raise(float_flag_inexact, status);
// Near-1 branch: series in U = 2(X-1)/(X+1) (atanh-style, odd powers).
1180 } else { // |X-1| >= 1/16
1183 fp1 = floatx80_sub(fp1, float32_to_floatx80(0x3F800000, status), status); // FP1 IS X-1
1184 fp0 = floatx80_add(fp0, float32_to_floatx80(0x3F800000, status), status); // FP0 IS X+1
1185 fp1 = floatx80_add(fp1, fp1, status); // FP1 IS 2(X-1)
1188 fp1 = floatx80_div(fp1, fp0, status); // U
1190 fp0 = floatx80_mul(fp1, fp1, status); // FP0 IS V = U*U
1191 fp1 = floatx80_mul(fp0, fp0, status); // FP1 IS W = V*V
1193 fp3 = float64_to_floatx80(LIT64(0x3F175496ADD7DAD6), status); // B5
1194 fp2 = float64_to_floatx80(LIT64(0x3F3C71C2FE80C7E0), status); // B4
1195 fp3 = floatx80_mul(fp3, fp1, status); // W*B5
1196 fp2 = floatx80_mul(fp2, fp1, status); // W*B4
1197 fp3 = floatx80_add(fp3, float64_to_floatx80(LIT64(0x3F624924928BCCFF), status), status); // B3+W*B5
1198 fp2 = floatx80_add(fp2, float64_to_floatx80(LIT64(0x3F899999999995EC), status), status); // B2+W*B4
1199 fp1 = floatx80_mul(fp1, fp3, status); // W*(B3+W*B5)
1200 fp2 = floatx80_mul(fp2, fp0, status); // V*(B2+W*B4)
1201 fp1 = floatx80_add(fp1, float64_to_floatx80(LIT64(0x3FB5555555555555), status), status); // B1+W*(B3+W*B5)
// saveu holds U from a dropped line; result is U + U*V*poly.
1203 fp0 = floatx80_mul(fp0, saveu, status); // FP0 IS U*V
1204 fp1 = floatx80_add(fp1, fp2, status); // B1+W*(B3+W*B5) + V*(B2+W*B4)
1205 fp0 = floatx80_mul(fp0, fp1, status); // U*V*( [B1+W*(B3+W*B5)] + [V*(B2+W*B4)] )
1209 a = floatx80_add(fp0, saveu, status);
1211 float_raise(float_flag_inexact, status);
1217 /*----------------------------------------------------------------------------
1218 | Log base e of x plus 1
1219 *----------------------------------------------------------------------------*/
/*
 * Natural logarithm of (1 + Z) (FPSP "slognp1" algorithm).
 * For |Z| tiny the result is Z itself (inexact).  Otherwise X = 1+Z is
 * formed and either the same table-driven reduction as floatx80_logn is
 * used (X far from 1), or, for X near 1, Y-F is computed directly from Z
 * to avoid the cancellation in (1+Z)-F, followed by the atanh-style series
 * in U = 2Z/(2+Z).
 * NOTE(review): the extraction dropped interior lines (numbering gaps);
 * some branch bodies, k, fp3 and saveu assignments are not visible here.
 */
1221 floatx80 floatx80_lognp1(floatx80 a, float_status *status)
1225 uint64_t aSig, fSig;
1227 int32_t compact, j, k;
1228 floatx80 fp0, fp1, fp2, fp3, f, logof2, klog2, saveu;
1230 aSig = extractFloatx80Frac(a);
1231 aExp = extractFloatx80Exp(a);
1232 aSign = extractFloatx80Sign(a);
// NaN propagates; -infinity (aSign set) is an invalid operand.
1234 if (aExp == 0x7FFF) {
1235 if ((uint64_t) (aSig<<1)) return propagateFloatx80NaNOneArg(a, status);
1237 float_raise(float_flag_invalid, status);
1238 return floatx80_default_nan(status);
// lognp1(+-0) = +-0, exact.
1243 if (aExp == 0 && aSig == 0) {
1244 return packFloatx80(aSign, 0, 0);
// Z <= -1: log of a non-positive number.
1247 if (aSign && aExp >= one_exp) {
// Z == -1 exactly: -infinity with divide-by-zero.
1248 if (aExp == one_exp && aSig == one_sig) {
1249 float_raise(float_flag_divbyzero, status);
1250 return packFloatx80(aSign, 0x7FFF, floatx80_default_infinity_low);
// Z < -1: invalid, default NaN.
1252 float_raise(float_flag_invalid, status);
1253 return floatx80_default_nan(status);
// |Z| below threshold: log(1+Z) ~ Z to working precision.
1256 if (aExp < 0x3f99 || (aExp == 0x3f99 && aSig == one_sig)) { // <= min threshold
1257 float_raise(float_flag_inexact, status);
1258 return floatx80_move(a, status);
1263 compact = floatx80_make_compact(aExp, aSig);
// Form X = 1 + Z and re-extract its fields for classification.
1268 fp0 = floatx80_add(fp0, float32_to_floatx80(0x3F800000, status), status); // X = (1+Z)
1270 aExp = extractFloatx80Exp(fp0);
1271 aSig = extractFloatx80Frac(fp0);
1273 compact = floatx80_make_compact(aExp, aSig);
// X far from 1: same table-driven path as floatx80_logn.
1275 if (compact < 0x3FFE8000 || compact > 0x3FFFC000) { // |X| < 1/2 or |X| > 3/2
1277 fp1 = int32_to_floatx80(k);
1279 fSig = (aSig & LIT64(0xFE00000000000000)) | LIT64(0x0100000000000000);
1280 j = (fSig >> 56) & 0x7E; // DISPLACEMENT FOR 1/F
1282 f = packFloatx80(0, 0x3FFF, fSig); // F
1283 fp0 = packFloatx80(0, 0x3FFF, aSig); // Y
1285 fp0 = floatx80_sub(fp0, f, status); // Y-F
1289 fp0 = floatx80_mul(fp0, log_tbl[j], status); // FP0 IS U = (Y-F)/F
1290 logof2 = packFloatx80(0, 0x3FFE, LIT64(0xB17217F7D1CF79AC));
1291 klog2 = floatx80_mul(fp1, logof2, status); // FP1 IS K*LOG2
1292 fp2 = floatx80_mul(fp0, fp0, status); // FP2 IS V=U*U
// Interleaved Horner evaluation of log(1+U) (fp3 = V via a dropped line).
1297 fp1 = floatx80_mul(fp1, float64_to_floatx80(LIT64(0x3FC2499AB5E4040B), status), status); // V*A6
1298 fp2 = floatx80_mul(fp2, float64_to_floatx80(LIT64(0xBFC555B5848CB7DB), status), status); // V*A5
1299 fp1 = floatx80_add(fp1, float64_to_floatx80(LIT64(0x3FC99999987D8730), status), status); // A4+V*A6
1300 fp2 = floatx80_add(fp2, float64_to_floatx80(LIT64(0xBFCFFFFFFF6F7E97), status), status); // A3+V*A5
1301 fp1 = floatx80_mul(fp1, fp3, status); // V*(A4+V*A6)
1302 fp2 = floatx80_mul(fp2, fp3, status); // V*(A3+V*A5)
1303 fp1 = floatx80_add(fp1, float64_to_floatx80(LIT64(0x3FD55555555555A4), status), status); // A2+V*(A4+V*A6)
1304 fp2 = floatx80_add(fp2, float64_to_floatx80(LIT64(0xBFE0000000000008), status), status); // A1+V*(A3+V*A5)
1305 fp1 = floatx80_mul(fp1, fp3, status); // V*(A2+V*(A4+V*A6))
1306 fp2 = floatx80_mul(fp2, fp3, status); // V*(A1+V*(A3+V*A5))
1307 fp1 = floatx80_mul(fp1, fp0, status); // U*V*(A2+V*(A4+V*A6))
1308 fp0 = floatx80_add(fp0, fp2, status); // U+V*(A1+V*(A3+V*A5))
1310 fp1 = floatx80_add(fp1, log_tbl[j+1], status); // LOG(F)+U*V*(A2+V*(A4+V*A6))
1311 fp0 = floatx80_add(fp0, fp1, status); // FP0 IS LOG(F) + LOG(1+U)
1315 a = floatx80_add(fp0, klog2, status);
1317 float_raise(float_flag_inexact, status);
// X moderately far from 1: compute Y-F without cancellation, from Z.
1320 } else if (compact < 0x3FFEF07D || compact > 0x3FFF8841) { // |X| < 1/16 or |X| > -1/16
1322 fSig = (aSig & LIT64(0xFE00000000000000)) | LIT64(0x0100000000000000);
1323 f = packFloatx80(0, 0x3FFF, fSig); // F
1324 j = (fSig >> 56) & 0x7E; // DISPLACEMENT FOR 1/F
1326 if (compact >= 0x3FFF8000) { // 1+Z >= 1
// Case k = 0: Y = 1+Z, so Y-F = (1-F)+Z exactly.
1328 fp0 = floatx80_sub(float32_to_floatx80(0x3F800000, status), f, status); // 1-F
1329 fp0 = floatx80_add(fp0, fp1, status); // FP0 IS Y-F = (1-F)+Z
1330 fp1 = packFloatx80(0, 0, 0); // K = 0
// Case k = -1: Y = 2(1+Z), so Y-F = (2-F)+2Z exactly.
1333 fp0 = floatx80_sub(float32_to_floatx80(0x40000000, status), f, status); // 2-F
1334 fp1 = floatx80_add(fp1, fp1, status); // 2Z
1335 fp0 = floatx80_add(fp0, fp1, status); // FP0 IS Y-F = (2-F)+2Z
1336 fp1 = packFloatx80(1, one_exp, one_sig); // K = -1
// X near 1: series branch with U = 2Z/(2+Z) = 2Z/(1+X).
1341 fp1 = floatx80_add(fp1, fp1, status); // FP1 IS 2Z
1342 fp0 = floatx80_add(fp0, float32_to_floatx80(0x3F800000, status), status); // FP0 IS 1+X
1345 fp1 = floatx80_div(fp1, fp0, status); // U
1347 fp0 = floatx80_mul(fp1, fp1, status); // FP0 IS V = U*U
1348 fp1 = floatx80_mul(fp0, fp0, status); // FP1 IS W = V*V
1350 fp3 = float64_to_floatx80(LIT64(0x3F175496ADD7DAD6), status); // B5
1351 fp2 = float64_to_floatx80(LIT64(0x3F3C71C2FE80C7E0), status); // B4
1352 fp3 = floatx80_mul(fp3, fp1, status); // W*B5
1353 fp2 = floatx80_mul(fp2, fp1, status); // W*B4
1354 fp3 = floatx80_add(fp3, float64_to_floatx80(LIT64(0x3F624924928BCCFF), status), status); // B3+W*B5
1355 fp2 = floatx80_add(fp2, float64_to_floatx80(LIT64(0x3F899999999995EC), status), status); // B2+W*B4
1356 fp1 = floatx80_mul(fp1, fp3, status); // W*(B3+W*B5)
1357 fp2 = floatx80_mul(fp2, fp0, status); // V*(B2+W*B4)
1358 fp1 = floatx80_add(fp1, float64_to_floatx80(LIT64(0x3FB5555555555555), status), status); // B1+W*(B3+W*B5)
// saveu (= U) is set in a dropped line; result is U + U*V*poly.
1360 fp0 = floatx80_mul(fp0, saveu, status); // FP0 IS U*V
1361 fp1 = floatx80_add(fp1, fp2, status); // B1+W*(B3+W*B5) + V*(B2+W*B4)
1362 fp0 = floatx80_mul(fp0, fp1, status); // U*V*( [B1+W*(B3+W*B5)] + [V*(B2+W*B4)] )
1366 a = floatx80_add(fp0, saveu, status);
1368 float_raise(float_flag_inexact, status);
1374 /*----------------------------------------------------------------------------
 | Sine
1376 *----------------------------------------------------------------------------*/
/*
 * Sine (FPSP "ssin" algorithm).
 * The argument is reduced to R with |R| <= pi/4 by subtracting the nearest
 * multiple N of pi/2 (split into head pi_tbl[j] and tail pi_tbl2[j]; very
 * large arguments use a two-piece 2pi in a reduction loop).  The quadrant
 * (N + adjn) then selects either a sine polynomial (even) or a cosine
 * polynomial (odd), with the sign taken from bit 1 of the quadrant.
 * NOTE(review): the extraction dropped interior lines (numbering gaps);
 * the declaration/initialization of adjn, several branch closers and the
 * rounding-mode save/restore are not visible in this fragment.
 */
1378 floatx80 floatx80_sin(floatx80 a, float_status *status)
1382 uint64_t aSig, xSig;
1384 int32_t compact, l, n, j;
1385 floatx80 fp0, fp1, fp2, fp3, fp4, fp5, x, invtwopi, twopi1, twopi2;
1386 float32 posneg1, twoto63;
1389 aSig = extractFloatx80Frac(a);
1390 aExp = extractFloatx80Exp(a);
1391 aSign = extractFloatx80Sign(a);
// NaN propagates; sin(+-inf) is invalid -> default NaN.
1393 if (aExp == 0x7FFF) {
1394 if ((uint64_t) (aSig<<1)) return propagateFloatx80NaNOneArg(a, status);
1395 float_raise(float_flag_invalid, status);
1396 return floatx80_default_nan(status);
// sin(+-0) = +-0, exact.
1399 if (aExp == 0 && aSig == 0) {
1400 return packFloatx80(aSign, 0, 0);
1407 compact = floatx80_make_compact(aExp, aSig);
1411 if (compact < 0x3FD78000 || compact > 0x4004BC7E) { // 2^(-40) > |X| > 15 PI
// Huge argument: reduce by two-piece 2pi until within range.
1412 if (compact > 0x3FFF8000) { // |X| >= 15 PI
1414 fp1 = packFloatx80(0, 0, 0);
// Overflow-proof first step for arguments near the format maximum.
1415 if (compact == 0x7FFEFFFF) {
1416 twopi1 = packFloatx80(aSign ^ 1, 0x7FFE, LIT64(0xC90FDAA200000000));
1417 twopi2 = packFloatx80(aSign ^ 1, 0x7FDC, LIT64(0x85A308D300000000));
1418 fp0 = floatx80_add(fp0, twopi1, status);
1420 fp0 = floatx80_add(fp0, twopi2, status);
1421 fp1 = floatx80_sub(fp1, fp0, status);
1422 fp1 = floatx80_add(fp1, twopi2, status);
1425 xSign = extractFloatx80Sign(fp0);
1426 xExp = extractFloatx80Exp(fp0);
// Scale 1/(2pi) and the 2pi pieces by 2^l to match the magnitude of X
// (l derived from xExp in dropped lines).
1435 invtwopi = packFloatx80(0, 0x3FFE - l, LIT64(0xA2F9836E4E44152A)); // INVTWOPI
1436 twopi1 = packFloatx80(0, 0x3FFF + l, LIT64(0xC90FDAA200000000));
1437 twopi2 = packFloatx80(0, 0x3FDD + l, LIT64(0x85A308D300000000));
// Round X/(2pi) to an integer by the add/subtract-2^63 trick.
1439 twoto63 = 0x5F000000;
1440 twoto63 |= xSign ? 0x80000000 : 0x00000000; // SIGN(INARG)*2^63 IN SGL
1442 fp2 = floatx80_mul(fp0, invtwopi, status);
1443 fp2 = floatx80_add(fp2, float32_to_floatx80(twoto63, status), status); // THE FRACTIONAL PART OF FP2 IS ROUNDED
1444 fp2 = floatx80_sub(fp2, float32_to_floatx80(twoto63, status), status); // FP2 is N
// Two-piece (head/tail) subtraction of N*2pi for an accurate remainder.
1445 fp4 = floatx80_mul(twopi1, fp2, status); // W = N*P1
1446 fp5 = floatx80_mul(twopi2, fp2, status); // w = N*P2
1447 fp3 = floatx80_add(fp4, fp5, status); // FP3 is P
1448 fp4 = floatx80_sub(fp4, fp3, status); // W-P
1449 fp0 = floatx80_sub(fp0, fp3, status); // FP0 is A := R - P
1450 fp4 = floatx80_add(fp4, fp5, status); // FP4 is p = (W-P)+w
1451 fp3 = fp0; // FP3 is A
1452 fp1 = floatx80_sub(fp1, fp4, status); // FP1 is a := r - p
1453 fp0 = floatx80_add(fp0, fp1, status); // FP0 is R := A+a
1456 n = floatx80_to_int32(fp2, status);
1459 fp3 = floatx80_sub(fp3, fp0, status); // A-R
1460 fp1 = floatx80_add(fp1, fp3, status); // FP1 is r := (A-R)+a
// Tiny argument: sin(X) ~ X; force the inexact flag via a dummy op.
1464 fp0 = float32_to_floatx80(0x3F800000, status); // 1
1470 a = floatx80_sub(fp0, float32_to_floatx80(0x00800000, status), status);
1473 a = floatx80_move(a, status);
1475 float_raise(float_flag_inexact, status);
// Normal range: N = nearest integer to X*2/pi, R = X - N*pi/2 (head+tail).
1480 fp1 = floatx80_mul(fp0, float64_to_floatx80(LIT64(0x3FE45F306DC9C883), status), status); // X*2/PI
1482 n = floatx80_to_int32(fp1, status);
1485 fp0 = floatx80_sub(fp0, pi_tbl[j], status); // X-Y1
1486 fp0 = floatx80_sub(fp0, float32_to_floatx80(pi_tbl2[j], status), status); // FP0 IS R = (X-Y1)-Y2
// Odd quadrant: sin reduces to +-cos(R); use the cosine polynomial.
1489 if ((n + adjn) & 1) {
1491 fp0 = floatx80_mul(fp0, fp0, status); // FP0 IS S
1492 fp1 = floatx80_mul(fp0, fp0, status); // FP1 IS T
1493 fp2 = float64_to_floatx80(LIT64(0x3D2AC4D0D6011EE3), status); // B8
1494 fp3 = float64_to_floatx80(LIT64(0xBDA9396F9F45AC19), status); // B7
1496 xSign = extractFloatx80Sign(fp0); // X IS S
1497 xExp = extractFloatx80Exp(fp0);
1498 xSig = extractFloatx80Frac(fp0);
// Bit 1 of the quadrant gives the sign of the cosine result.
1500 if (((n + adjn) >> 1) & 1) {
1502 posneg1 = 0xBF800000; // -1
1505 posneg1 = 0x3F800000; // 1
1506 } // X IS NOW R'= SGN*R
// cos(R) - 1 via interleaved Horner chains in S and T = S*S.
1508 fp2 = floatx80_mul(fp2, fp1, status); // TB8
1509 fp3 = floatx80_mul(fp3, fp1, status); // TB7
1510 fp2 = floatx80_add(fp2, float64_to_floatx80(LIT64(0x3E21EED90612C972), status), status); // B6+TB8
1511 fp3 = floatx80_add(fp3, float64_to_floatx80(LIT64(0xBE927E4FB79D9FCF), status), status); // B5+TB7
1512 fp2 = floatx80_mul(fp2, fp1, status); // T(B6+TB8)
1513 fp3 = floatx80_mul(fp3, fp1, status); // T(B5+TB7)
1514 fp2 = floatx80_add(fp2, float64_to_floatx80(LIT64(0x3EFA01A01A01D423), status), status); // B4+T(B6+TB8)
1515 fp4 = packFloatx80(1, 0x3FF5, LIT64(0xB60B60B60B61D438));
1516 fp3 = floatx80_add(fp3, fp4, status); // B3+T(B5+TB7)
1517 fp2 = floatx80_mul(fp2, fp1, status); // T(B4+T(B6+TB8))
1518 fp1 = floatx80_mul(fp1, fp3, status); // T(B3+T(B5+TB7))
1519 fp4 = packFloatx80(0, 0x3FFA, LIT64(0xAAAAAAAAAAAAAB5E));
1520 fp2 = floatx80_add(fp2, fp4, status); // B2+T(B4+T(B6+TB8))
1521 fp1 = floatx80_add(fp1, float32_to_floatx80(0xBF000000, status), status); // B1+T(B3+T(B5+TB7))
1522 fp0 = floatx80_mul(fp0, fp2, status); // S(B2+T(B4+T(B6+TB8)))
1523 fp0 = floatx80_add(fp0, fp1, status); // [B1+T(B3+T(B5+TB7))]+[S(B2+T(B4+T(B6+TB8)))]
1525 x = packFloatx80(xSign, xExp, xSig);
1526 fp0 = floatx80_mul(fp0, x, status);
// Add +-1 last so the final rounding happens in the user's mode.
1530 a = floatx80_add(fp0, float32_to_floatx80(posneg1, status), status);
1532 float_raise(float_flag_inexact, status);
// Even quadrant: sine polynomial on R' = sgn*R.
1537 xSign = extractFloatx80Sign(fp0); // X IS R
1538 xExp = extractFloatx80Exp(fp0);
1539 xSig = extractFloatx80Frac(fp0);
1541 xSign ^= ((n + adjn) >> 1) & 1; // X IS NOW R'= SGN*R
1543 fp0 = floatx80_mul(fp0, fp0, status); // FP0 IS S
1544 fp1 = floatx80_mul(fp0, fp0, status); // FP1 IS T
1545 fp3 = float64_to_floatx80(LIT64(0xBD6AAA77CCC994F5), status); // A7
1546 fp2 = float64_to_floatx80(LIT64(0x3DE612097AAE8DA1), status); // A6
1547 fp3 = floatx80_mul(fp3, fp1, status); // T*A7
1548 fp2 = floatx80_mul(fp2, fp1, status); // T*A6
1549 fp3 = floatx80_add(fp3, float64_to_floatx80(LIT64(0xBE5AE6452A118AE4), status), status); // A5+T*A7
1550 fp2 = floatx80_add(fp2, float64_to_floatx80(LIT64(0x3EC71DE3A5341531), status), status); // A4+T*A6
1551 fp3 = floatx80_mul(fp3, fp1, status); // T(A5+TA7)
1552 fp2 = floatx80_mul(fp2, fp1, status); // T(A4+TA6)
1553 fp3 = floatx80_add(fp3, float64_to_floatx80(LIT64(0xBF2A01A01A018B59), status), status); // A3+T(A5+TA7)
1554 fp4 = packFloatx80(0, 0x3FF8, LIT64(0x88888888888859AF));
1555 fp2 = floatx80_add(fp2, fp4, status); // A2+T(A4+TA6)
1556 fp1 = floatx80_mul(fp1, fp3, status); // T(A3+T(A5+TA7))
1557 fp2 = floatx80_mul(fp2, fp0, status); // S(A2+T(A4+TA6))
1558 fp4 = packFloatx80(1, 0x3FFC, LIT64(0xAAAAAAAAAAAAAA99));
1559 fp1 = floatx80_add(fp1, fp4, status); // A1+T(A3+T(A5+TA7))
1560 fp1 = floatx80_add(fp1, fp2, status); // [A1+T(A3+T(A5+TA7))]+[S(A2+T(A4+TA6))]
1562 x = packFloatx80(xSign, xExp, xSig);
1563 fp0 = floatx80_mul(fp0, x, status); // R'*S
1564 fp0 = floatx80_mul(fp0, fp1, status); // SIN(R')-R'
// Add R' last so the final rounding happens in the user's mode.
1568 a = floatx80_add(fp0, x, status);
1570 float_raise(float_flag_inexact, status);
1577 /*----------------------------------------------------------------------------
 | Hyperbolic sine
1579 *----------------------------------------------------------------------------*/
/*
 * Hyperbolic sine (FPSP "ssinh" algorithm), built on etox/etoxm1.
 * Moderate arguments use sinh(X) = sign * (Z + Z/(1+Z)) scaled, with
 * Z = expm1(|X|); very large arguments compute exp(|X| - 16381*log2) and
 * rescale by 2^16380 to avoid intermediate overflow; still larger inputs
 * overflow to the correctly rounded huge result.
 * NOTE(review): the extraction dropped interior lines (numbering gaps);
 * the declarations of compact/fact, the setting of fp2, and the final
 * scale constant are not visible in this fragment.
 */
1581 floatx80 floatx80_sinh(floatx80 a, float_status *status)
1588 floatx80 fp0, fp1, fp2;
1591 aSig = extractFloatx80Frac(a);
1592 aExp = extractFloatx80Exp(a);
1593 aSig n = extractFloatx80Sign(a);
1652 /*----------------------------------------------------------------------------
 | Tangent
1654 *----------------------------------------------------------------------------*/
/*
 * Tangent (FPSP "stan" algorithm).
 * The argument is reduced to R with |R| <= pi/4 by subtracting the nearest
 * multiple N of pi/2 (head/tail pi_tbl pair; huge inputs use a two-piece
 * 2pi reduction loop, identical to floatx80_sin's).  tan(R) is then a
 * rational approximation P(S)/Q(S) in S = R*R; for odd N the identity
 * tan(X) = -1/tan(R) is applied by swapping/negating numerator and
 * denominator.
 * NOTE(review): the extraction dropped interior lines (numbering gaps);
 * branch closers, the odd/even-N dispatch, the negation of fp1 and the
 * rounding-mode save/restore are not visible in this fragment.
 */
1656 floatx80 floatx80_tan(floatx80 a, float_status *status)
1660 uint64_t aSig, xSig;
1662 int32_t compact, l, n, j;
1663 floatx80 fp0, fp1, fp2, fp3, fp4, fp5, invtwopi, twopi1, twopi2;
1667 aSig = extractFloatx80Frac(a);
1668 aExp = extractFloatx80Exp(a);
1669 aSign = extractFloatx80Sign(a);
// NaN propagates; tan(+-inf) is invalid -> default NaN.
1671 if (aExp == 0x7FFF) {
1672 if ((uint64_t) (aSig<<1)) return propagateFloatx80NaNOneArg(a, status);
1673 float_raise(float_flag_invalid, status);
1674 return floatx80_default_nan(status);
// tan(+-0) = +-0, exact.
1677 if (aExp == 0 && aSig == 0) {
1678 return packFloatx80(aSign, 0, 0);
1683 compact = floatx80_make_compact(aExp, aSig);
1687 if (compact < 0x3FD78000 || compact > 0x4004BC7E) { // 2^(-40) > |X| > 15 PI
// Huge argument: reduce by two-piece 2pi until within range.
1688 if (compact > 0x3FFF8000) { // |X| >= 15 PI
1690 fp1 = packFloatx80(0, 0, 0);
// Overflow-proof first step for arguments near the format maximum.
1691 if (compact == 0x7FFEFFFF) {
1692 twopi1 = packFloatx80(aSign ^ 1, 0x7FFE, LIT64(0xC90FDAA200000000));
1693 twopi2 = packFloatx80(aSign ^ 1, 0x7FDC, LIT64(0x85A308D300000000));
1694 fp0 = floatx80_add(fp0, twopi1, status);
1696 fp0 = floatx80_add(fp0, twopi2, status);
1697 fp1 = floatx80_sub(fp1, fp0, status);
1698 fp1 = floatx80_add(fp1, twopi2, status);
1701 xSign = extractFloatx80Sign(fp0);
1702 xExp = extractFloatx80Exp(fp0);
// Scale 1/(2pi) and the 2pi pieces by 2^l to match the magnitude of X
// (l derived from xExp in dropped lines).
1711 invtwopi = packFloatx80(0, 0x3FFE - l, LIT64(0xA2F9836E4E44152A)); // INVTWOPI
1712 twopi1 = packFloatx80(0, 0x3FFF + l, LIT64(0xC90FDAA200000000));
1713 twopi2 = packFloatx80(0, 0x3FDD + l, LIT64(0x85A308D300000000));
// Round X/(2pi) to an integer via the add/subtract-2^63 trick.
1715 twoto63 = 0x5F000000;
1716 twoto63 |= xSign ? 0x80000000 : 0x00000000; // SIGN(INARG)*2^63 IN SGL
1718 fp2 = floatx80_mul(fp0, invtwopi, status);
1719 fp2 = floatx80_add(fp2, float32_to_floatx80(twoto63, status), status); // THE FRACTIONAL PART OF FP2 IS ROUNDED
1720 fp2 = floatx80_sub(fp2, float32_to_floatx80(twoto63, status), status); // FP2 is N
// Two-piece (head/tail) subtraction of N*2pi for an accurate remainder.
1721 fp4 = floatx80_mul(twopi1, fp2, status); // W = N*P1
1722 fp5 = floatx80_mul(twopi2, fp2, status); // w = N*P2
1723 fp3 = floatx80_add(fp4, fp5, status); // FP3 is P
1724 fp4 = floatx80_sub(fp4, fp3, status); // W-P
1725 fp0 = floatx80_sub(fp0, fp3, status); // FP0 is A := R - P
1726 fp4 = floatx80_add(fp4, fp5, status); // FP4 is p = (W-P)+w
1727 fp3 = fp0; // FP3 is A
1728 fp1 = floatx80_sub(fp1, fp4, status); // FP1 is a := r - p
1729 fp0 = floatx80_add(fp0, fp1, status); // FP0 is R := A+a
1732 n = floatx80_to_int32(fp2, status);
1735 fp3 = floatx80_sub(fp3, fp0, status); // A-R
1736 fp1 = floatx80_add(fp1, fp3, status); // FP1 is r := (A-R)+a
// Tiny argument: tan(X) ~ X; return X and flag inexact.
1741 a = floatx80_move(a, status);
1743 float_raise(float_flag_inexact, status);
// Normal range: N = nearest integer to X*2/pi, R = X - N*pi/2 (head+tail).
1748 fp1 = floatx80_mul(fp0, float64_to_floatx80(LIT64(0x3FE45F306DC9C883), status), status); // X*2/PI
1750 n = floatx80_to_int32(fp1, status);
1753 fp0 = floatx80_sub(fp0, pi_tbl[j], status); // X-Y1
1754 fp0 = floatx80_sub(fp0, float32_to_floatx80(pi_tbl2[j], status), status); // FP0 IS R = (X-Y1)-Y2
// Odd-N branch (dispatch line dropped): evaluate P and Q with the roles
// of numerator/denominator swapped to yield -1/tan(R).
1760 fp0 = floatx80_mul(fp0, fp0, status); // S = R*R
1761 fp3 = float64_to_floatx80(LIT64(0x3EA0B759F50F8688), status); // Q4
1762 fp2 = float64_to_floatx80(LIT64(0xBEF2BAA5A8924F04), status); // P3
1763 fp3 = floatx80_mul(fp3, fp0, status); // SQ4
1764 fp2 = floatx80_mul(fp2, fp0, status); // SP3
1765 fp3 = floatx80_add(fp3, float64_to_floatx80(LIT64(0xBF346F59B39BA65F), status), status); // Q3+SQ4
1766 fp4 = packFloatx80(0, 0x3FF6, LIT64(0xE073D3FC199C4A00));
1767 fp2 = floatx80_add(fp2, fp4, status); // P2+SP3
1768 fp3 = floatx80_mul(fp3, fp0, status); // S(Q3+SQ4)
1769 fp2 = floatx80_mul(fp2, fp0, status); // S(P2+SP3)
1770 fp4 = packFloatx80(0, 0x3FF9, LIT64(0xD23CD68415D95FA1));
1771 fp3 = floatx80_add(fp3, fp4, status); // Q2+S(Q3+SQ4)
1772 fp4 = packFloatx80(1, 0x3FFC, LIT64(0x8895A6C5FB423BCA));
1773 fp2 = floatx80_add(fp2, fp4, status); // P1+S(P2+SP3)
1774 fp3 = floatx80_mul(fp3, fp0, status); // S(Q2+S(Q3+SQ4))
1775 fp2 = floatx80_mul(fp2, fp0, status); // S(P1+S(P2+SP3))
1776 fp4 = packFloatx80(1, 0x3FFD, LIT64(0xEEF57E0DA84BC8CE));
1777 fp3 = floatx80_add(fp3, fp4, status); // Q1+S(Q2+S(Q3+SQ4))
1778 fp2 = floatx80_mul(fp2, fp1, status); // RS(P1+S(P2+SP3))
1779 fp0 = floatx80_mul(fp0, fp3, status); // S(Q1+S(Q2+S(Q3+SQ4)))
1780 fp1 = floatx80_add(fp1, fp2, status); // R+RS(P1+S(P2+SP3))
1781 fp0 = floatx80_add(fp0, float32_to_floatx80(0x3F800000, status), status); // 1+S(Q1+S(Q2+S(Q3+SQ4)))
// Repack the denominator (its sign is flipped in a dropped line).
1783 xSign = extractFloatx80Sign(fp1);
1784 xExp = extractFloatx80Exp(fp1);
1785 xSig = extractFloatx80Frac(fp1);
1787 fp1 = packFloatx80(xSign, xExp, xSig);
// -cot(R): divide Q-polynomial by the negated P-polynomial.
1791 a = floatx80_div(fp0, fp1, status);
1793 float_raise(float_flag_inexact, status);
// Even-N branch: tan(R) = [R+RS*P(S)] / [1+S*Q(S)].
1797 fp1 = floatx80_mul(fp0, fp0, status); // S = R*R
1798 fp3 = float64_to_floatx80(LIT64(0x3EA0B759F50F8688), status); // Q4
1799 fp2 = float64_to_floatx80(LIT64(0xBEF2BAA5A8924F04), status); // P3
1800 fp3 = floatx80_mul(fp3, fp1, status); // SQ4
1801 fp2 = floatx80_mul(fp2, fp1, status); // SP3
1802 fp3 = floatx80_add(fp3, float64_to_floatx80(LIT64(0xBF346F59B39BA65F), status), status); // Q3+SQ4
1803 fp4 = packFloatx80(0, 0x3FF6, LIT64(0xE073D3FC199C4A00));
1804 fp2 = floatx80_add(fp2, fp4, status); // P2+SP3
1805 fp3 = floatx80_mul(fp3, fp1, status); // S(Q3+SQ4)
1806 fp2 = floatx80_mul(fp2, fp1, status); // S(P2+SP3)
1807 fp4 = packFloatx80(0, 0x3FF9, LIT64(0xD23CD68415D95FA1));
1808 fp3 = floatx80_add(fp3, fp4, status); // Q2+S(Q3+SQ4)
1809 fp4 = packFloatx80(1, 0x3FFC, LIT64(0x8895A6C5FB423BCA));
1810 fp2 = floatx80_add(fp2, fp4, status); // P1+S(P2+SP3)
1811 fp3 = floatx80_mul(fp3, fp1, status); // S(Q2+S(Q3+SQ4))
1812 fp2 = floatx80_mul(fp2, fp1, status); // S(P1+S(P2+SP3))
1813 fp4 = packFloatx80(1, 0x3FFD, LIT64(0xEEF57E0DA84BC8CE));
1814 fp3 = floatx80_add(fp3, fp4, status); // Q1+S(Q2+S(Q3+SQ4))
1815 fp2 = floatx80_mul(fp2, fp0, status); // RS(P1+S(P2+SP3))
1816 fp1 = floatx80_mul(fp1, fp3, status); // S(Q1+S(Q2+S(Q3+SQ4)))
1817 fp0 = floatx80_add(fp0, fp2, status); // R+RS(P1+S(P2+SP3))
1818 fp1 = floatx80_add(fp1, float32_to_floatx80(0x3F800000, status), status); // 1+S(Q1+S(Q2+S(Q3+SQ4)))
// Final division rounds in the user's mode.
1822 a = floatx80_div(fp0, fp1, status);
1824 float_raise(float_flag_inexact, status);
1831 /*----------------------------------------------------------------------------
1832 | Hyperbolic tangent
1833 *----------------------------------------------------------------------------*/
/*
 * Hyperbolic tangent (FPSP "stanh" algorithm), built on etox/etoxm1.
 * Small |X| returns X (inexact); the main range uses
 * tanh(X) = sign * Z/(Z+2) with Z = expm1(2|X|); large |X| uses
 * sign * (1 - 2/(exp(2|X|)+1)); for |X| beyond saturation the result is
 * +-1 nudged by -sign*eps so rounding and inexact behave correctly.
 * NOTE(review): the extraction dropped interior lines (numbering gaps);
 * the declarations of compact/vExp/sign/fp0/fp1 and some branch closers
 * are not visible in this fragment.
 */
1835 floatx80 floatx80_tanh(floatx80 a, float_status *status)
1839 uint64_t aSig, vSig;
1845 aSig = extractFloatx80Frac(a);
1846 aExp = extractFloatx80Exp(a);
1847 aSign = extractFloatx80Sign(a);
// NaN propagates; tanh(+-inf) = +-1 exactly.
1849 if (aExp == 0x7FFF) {
1850 if ((uint64_t) (aSig<<1)) return propagateFloatx80NaNOneArg(a, status);
1851 return packFloatx80(aSign, one_exp, one_sig);
// tanh(+-0) = +-0, exact.
1854 if (aExp == 0 && aSig == 0) {
1855 return packFloatx80(aSign, 0, 0);
1860 compact = floatx80_make_compact(aExp, aSig);
1862 if (compact < 0x3FD78000 || compact > 0x3FFFDDCE) {
// |X| < 2^(-40): tanh(X) ~ X, inexact.
1864 if (compact < 0x3FFF8000) {
1868 a = floatx80_move(a, status);
1870 float_raise(float_flag_inexact, status);
// Saturated range: return +-1 minus a signed epsilon so the result
// rounds correctly and raises inexact.
1874 if (compact > 0x40048AA1) {
1877 sign |= aSign ? 0x80000000 : 0x00000000;
1878 fp0 = float32_to_floatx80(sign, status);
1880 sign ^= 0x80800000; // -SIGN(X)*EPS
1884 a = floatx80_add(fp0, float32_to_floatx80(sign, status), status);
1886 float_raise(float_flag_inexact, status);
// Large range: tanh(X) = sign*(1 - 2/(exp(2|X|)+1)).
1890 fp0 = packFloatx80(0, aExp+1, aSig); // Y = 2|X|
1891 fp0 = floatx80_etox(fp0, status); // FP0 IS EXP(Y)
1892 fp0 = floatx80_add(fp0, float32_to_floatx80(0x3F800000, status), status); // EXP(Y)+1
1893 sign = aSign ? 0x80000000 : 0x00000000;
1894 fp1 = floatx80_div(float32_to_floatx80(sign^0xC0000000, status), fp0, status); // -SIGN(X)*2 / [EXP(Y)+1]
1895 fp0 = float32_to_floatx80(sign | 0x3F800000, status); // SIGN
1899 a = floatx80_add(fp1, fp0, status);
1901 float_raise(float_flag_inexact, status);
// Main range: tanh(X) = sign * Z/(Z+2) with Z = expm1(2|X|).
1906 } else { // 2**(-40) < |X| < (5/2)LOG2
1907 fp0 = packFloatx80(0, aExp+1, aSig); // Y = 2|X|
1908 fp0 = floatx80_etoxm1(fp0, status); // FP0 IS Z = EXPM1(Y)
1909 fp1 = floatx80_add(fp0, float32_to_floatx80(0x40000000, status), status); // Z+2
// Fold the input's sign into the denominator so the quotient carries it.
1911 vSign = extractFloatx80Sign(fp1);
1912 vExp = extractFloatx80Exp(fp1);
1913 vSig = extractFloatx80Frac(fp1);
1915 fp1 = packFloatx80(vSign ^ aSign, vExp, vSig);
1919 a = floatx80_div(fp0, fp1, status);
1921 float_raise(float_flag_inexact, status);
1927 /*----------------------------------------------------------------------------
 | 10 to x
1929 *----------------------------------------------------------------------------*/
/*
 * 10 to the power x (FPSP "stentox" algorithm).
 * X is reduced via N = round(X * 64*log10/log2), so 10^X = 2^(N/64) * 10^R
 * with a tiny remainder R; 2^(N/64) is assembled from exp2_tbl[j] (head)
 * and exp2_tbl2[j] (tail correction) times 2^(M') (adjfact); 10^R - 1 is a
 * degree-5 polynomial in S = R*R applied to R = (X - N*L)*log(10), where
 * L = log2/(64*log10) is split into lead and trail parts for accuracy.
 * NOTE(review): the extraction dropped interior lines (numbering gaps);
 * overflow/underflow raises, the j/m1 computations, the fp2 remainder
 * setup and the final return are not visible in this fragment.
 */
1931 floatx80 floatx80_tentox(floatx80 a, float_status *status)
1937 int32_t compact, n, j, l, m, m1;
1938 floatx80 fp0, fp1, fp2, fp3, adjfact, fact1, fact2;
1940 aSig = extractFloatx80Frac(a);
1941 aExp = extractFloatx80Exp(a);
1942 aSign = extractFloatx80Sign(a);
// NaN propagates; 10^(-inf) = +0 (the +inf case falls in a dropped line).
1944 if (aExp == 0x7FFF) {
1945 if ((uint64_t) (aSig<<1)) return propagateFloatx80NaNOneArg(a, status);
1946 if (aSign) return packFloatx80(0, 0, 0);
// 10^(+-0) = 1, exact.
1950 if (aExp == 0 && aSig == 0) {
1951 return packFloatx80(0, one_exp, one_sig);
1958 compact = floatx80_make_compact(aExp, aSig);
1960 if (compact < 0x3FB98000 || compact > 0x400B9B07) { // |X| > 16480 LOG2/LOG10 or |X| < 2^(-70)
// Out of range above: force overflow (X > 0) or underflow (X < 0)
// through roundAndPackFloatx80 with an out-of-range exponent.
1961 if (compact > 0x3FFF8000) { // |X| > 16480
1965 return roundAndPackFloatx80(status->floatx80_rounding_precision, 0, -0x1000, aSig, 0, status);
1967 return roundAndPackFloatx80(status->floatx80_rounding_precision, 0, 0x8000, aSig, 0, status);
// Tiny |X|: 10^X ~ 1 + X, inexact.
1969 } else { // |X| < 2^(-70)
1972 a = floatx80_add(fp0, float32_to_floatx80(0x3F800000, status), status); // 1 + X
1974 float_raise(float_flag_inexact, status);
1978 } else { // 2^(-70) <= |X| <= 16480 LOG 2 / LOG 10
// N = round(X * 64*log10/log2); split N = 64*L + j, M'= L>>1 (+ fixups
// for negative N so the shifts round toward minus infinity).
1980 fp1 = floatx80_mul(fp1, float64_to_floatx80(LIT64(0x406A934F0979A371), status), status); // X*64*LOG10/LOG2
1981 n = floatx80_to_int32(fp1, status); // N=INT(X*64*LOG10/LOG2)
1982 fp1 = int32_to_floatx80(n);
1985 l = n / 64; // NOTE: this is really arithmetic right shift by 6
1986 if (n < 0 && j) { // arithmetic right shift is division and round towards minus infinity
1989 m = l / 2; // NOTE: this is really arithmetic right shift by 1
1990 if (l < 0 && (l & 1)) { // arithmetic right shift is division and round towards minus infinity
1994 m1 += 0x3FFF; // ADJFACT IS 2^(M')
// Table lookup for 2^(j/64): head fact1 plus 32-bit tail in fact2.
1996 adjfact = packFloatx80(0, m1, one_sig);
1997 fact1 = exp2_tbl[j];
1999 fact2.high = exp2_tbl2[j]>>16;
2001 fact2.low = (uint64_t)(exp2_tbl2[j] & 0xFFFF);
// R = (X - N*L_lead - N*L_trail) * log(10), with L split for accuracy.
2005 fp1 = floatx80_mul(fp1, float64_to_floatx80(LIT64(0x3F734413509F8000), status), status); // N*(LOG2/64LOG10)_LEAD
2006 fp3 = packFloatx80(1, 0x3FCD, LIT64(0xC0219DC1DA994FD2));
2007 fp2 = floatx80_mul(fp2, fp3, status); // N*(LOG2/64LOG10)_TRAIL
2008 fp0 = floatx80_sub(fp0, fp1, status); // X - N L_LEAD
2009 fp0 = floatx80_sub(fp0, fp2, status); // X - N L_TRAIL
2010 fp2 = packFloatx80(0, 0x4000, LIT64(0x935D8DDDAAA8AC17)); // LOG10
2011 fp0 = floatx80_mul(fp0, fp2, status); // R
// exp(R) - 1 via a degree-5 polynomial in S = R*R (interleaved Horner).
2014 fp1 = floatx80_mul(fp0, fp0, status); // S = R*R
2015 fp2 = float64_to_floatx80(LIT64(0x3F56C16D6F7BD0B2), status); // A5
2016 fp3 = float64_to_floatx80(LIT64(0x3F811112302C712C), status); // A4
2017 fp2 = floatx80_mul(fp2, fp1, status); // S*A5
2018 fp3 = floatx80_mul(fp3, fp1, status); // S*A4
2019 fp2 = floatx80_add(fp2, float64_to_floatx80(LIT64(0x3FA5555555554CC1), status), status); // A3+S*A5
2020 fp3 = floatx80_add(fp3, float64_to_floatx80(LIT64(0x3FC5555555554A54), status), status); // A2+S*A4
2021 fp2 = floatx80_mul(fp2, fp1, status); // S*(A3+S*A5)
2022 fp3 = floatx80_mul(fp3, fp1, status); // S*(A2+S*A4)
2023 fp2 = floatx80_add(fp2, float64_to_floatx80(LIT64(0x3FE0000000000000), status), status); // A1+S*(A3+S*A5)
2024 fp3 = floatx80_mul(fp3, fp0, status); // R*S*(A2+S*A4)
2026 fp2 = floatx80_mul(fp2, fp1, status); // S*(A1+S*(A3+S*A5))
2027 fp0 = floatx80_add(fp0, fp3, status); // R+R*S*(A2+S*A4)
2028 fp0 = floatx80_add(fp0, fp2, status); // EXP(R) - 1
// Reconstruct: (expm1(R))*fact1 + fact2 + fact1, then scale by 2^(M').
2030 fp0 = floatx80_mul(fp0, fact1, status);
2031 fp0 = floatx80_add(fp0, fact2, status);
2032 fp0 = floatx80_add(fp0, fact1, status);
2036 a = floatx80_mul(fp0, adjfact, status);
2038 float_raise(float_flag_inexact, status);
2044 /*----------------------------------------------------------------------------
 | 2 to x
2046 *----------------------------------------------------------------------------*/
2048 floatx80 floatx80_twotox(floatx80 a, float_status *status)
2054 int32_t compact, n, j, l, m, m1;
2055 floatx80 fp0, fp1, fp2, fp3, adjfact, fact1, fact2;
2057 aSig = extractFloatx80Frac(a);
2058 aExp = extractFloatx80Exp(a);
2059 aSign = extractFloatx80Sign(a);
2061 if (aExp == 0x7FFF) {
2062 if ((uint64_t) (aSig<<1)) return propagateFloatx80NaNOneArg(a, status);
2063 if (aSign) return packFloatx80(0, 0, 0);
2067 if (aExp == 0 && aSig == 0) {
2068 return packFloatx80(0, one_exp, one_sig);
2075 compact = floatx80_make_compact(aExp, aSig);
2077 if (compact < 0x3FB98000 || compact > 0x400D80C0) { // |X| > 16480 or |X| < 2^(-70)
2078 if (compact > 0x3FFF8000) { // |X| > 16480
2082 return roundAndPackFloatx80(status->floatx80_rounding_precision, 0, -0x1000, aSig, 0, status);
2084 return roundAndPackFloatx80(status->floatx80_rounding_precision, 0, 0x8000, aSig, 0, status);
2086 } else { // |X| < 2^(-70)
2089 a = floatx80_add(fp0, float32_to_floatx80(0x3F800000, status), status); // 1 + X
2091 float_raise(float_flag_inexact, status);
2095 } else { // 2^(-70) <= |X| <= 16480
2097 fp1 = floatx80_mul(fp1, float32_to_floatx80(0x42800000, status), status); // X * 64
2098 n = floatx80_to_int32(fp1, status);
2099 fp1 = int32_to_floatx80(n);
2101 l = n / 64; // NOTE: this is really arithmetic right shift by 6
2102 if (n < 0 && j) { // arithmetic right shift is division and round towards minus infinity
2105 m = l / 2; // NOTE: this is really arithmetic right shift by 1
2106 if (l < 0 && (l & 1)) { // arithmetic right shift is division and round towards minus infinity
2110 m1 += 0x3FFF; // ADJFACT IS 2^(M')
2112 adjfact = packFloatx80(0, m1, one_sig);
2113 fact1 = exp2_tbl[j];
2115 fact2.high = exp2_tbl2[j]>>16;
2117 fact2.low = (uint64_t)(exp2_tbl2[j] & 0xFFFF);
2120 fp1 = floatx80_mul(fp1, float32_to_floatx80(0x3C800000, status), status); // (1/64)*N
2121 fp0 = floatx80_sub(fp0, fp1, status); // X - (1/64)*INT(64 X)
2122 fp2 = packFloatx80(0, 0x3FFE, LIT64(0xB17217F7D1CF79AC)); // LOG2
2123 fp0 = floatx80_mul(fp0, fp2, status); // R
2126 fp1 = floatx80_mul(fp0, fp0, status); // S = R*R
2127 fp2 = float64_to_floatx80(LIT64(0x3F56C16D6F7BD0B2), status); // A5
2128 fp3 = float64_to_floatx80(LIT64(0x3F811112302C712C), status); // A4
2129 fp2 = floatx80_mul(fp2, fp1, status); // S*A5
2130 fp3 = floatx80_mul(fp3, fp1, status); // S*A4
2131 fp2 = floatx80_add(fp2, float64_to_floatx80(LIT64(0x3FA5555555554CC1), status), status); // A3+S*A5
2132 fp3 = floatx80_add(fp3, float64_to_floatx80(LIT64(0x3FC5555555554A54), status), status); // A2+S*A4
2133 fp2 = floatx80_mul(fp2, fp1, status); // S*(A3+S*A5)
2134 fp3 = floatx80_mul(fp3, fp1, status); // S*(A2+S*A4)
2135 fp2 = floatx80_add(fp2, float64_to_floatx80(LIT64(0x3FE0000000000000), status), status); // A1+S*(A3+S*A5)
2136 fp3 = floatx80_mul(fp3, fp0, status); // R*S*(A2+S*A4)
2138 fp2 = floatx80_mul(fp2, fp1, status); // S*(A1+S*(A3+S*A5))
2139 fp0 = floatx80_add(fp0, fp3, status); // R+R*S*(A2+S*A4)
2140 fp0 = floatx80_add(fp0, fp2, status); // EXP(R) - 1
2142 fp0 = floatx80_mul(fp0, fact1, status);
2143 fp0 = floatx80_add(fp0, fact2, status);
2144 fp0 = floatx80_add(fp0, fact1, status);
2148 a = floatx80_mul(fp0, adjfact, status);
2150 float_raise(float_flag_inexact, status);