/*
 * Copyright (c) 2006 Michael Niedermayer <michaelni@gmx.at>
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */
21 #ifndef AVUTIL_SOFTFLOAT_H
22 #define AVUTIL_SOFTFLOAT_H
#include <limits.h>
#include <stdint.h>
#include <stdlib.h>

#include "softfloat_tables.h"
#define MIN_EXP -126  ///< smallest exponent a SoftFloat may carry (canonical zero exponent)
#define MAX_EXP  126  ///< largest exponent a SoftFloat may carry
#define ONE_BITS 29   ///< 1.0 is represented as mant = 1 << ONE_BITS with exp = 1

/**
 * Software floating point value.
 * Represented value is mant * 2^(exp - ONE_BITS - 1); see av_sf2double().
 * A normalized SoftFloat keeps |mant| in [2^29, 2^30).
 */
typedef struct SoftFloat{
    int32_t mant;   ///< signed mantissa
    int32_t  exp;   ///< exponent, clamped to [MIN_EXP, MAX_EXP]
}SoftFloat;
39 static const SoftFloat FLOAT_0 = { 0, MIN_EXP};
40 static const SoftFloat FLOAT_05 = { 0x20000000, 0};
41 static const SoftFloat FLOAT_1 = { 0x20000000, 1};
42 static const SoftFloat FLOAT_EPSILON = { 0x29F16B12, -16};
43 static const SoftFloat FLOAT_1584893192 = { 0x32B771ED, 1};
44 static const SoftFloat FLOAT_100000 = { 0x30D40000, 17};
45 static const SoftFloat FLOAT_0999999 = { 0x3FFFFBCE, 0};
47 static inline av_const double av_sf2double(SoftFloat v) {
49 if(v.exp > 0) return (double)v.mant * (double)(1 << v.exp);
50 else return (double)v.mant / (double)(1 << (-v.exp));
53 static av_const SoftFloat av_normalize_sf(SoftFloat a){
56 while((a.mant + 0x1FFFFFFFU)<0x3FFFFFFFU){
61 int s=ONE_BITS - av_log2(FFABS(a.mant));
75 static inline av_const SoftFloat av_normalize1_sf(SoftFloat a){
77 if((int32_t)(a.mant + 0x40000000U) <= 0){
81 av_assert2(a.mant < 0x40000000 && a.mant > -0x40000000);
82 av_assert2(a.exp <= MAX_EXP);
85 int t= a.mant + 0x40000000 < 0;
86 return (SoftFloat){ a.mant>>t, a.exp+t};
88 int t= (a.mant + 0x3FFFFFFFU)>>31;
89 return (SoftFloat){a.mant>>t, a.exp+t};
94 * @return Will not be more denormalized than a*b. So if either input is
95 * normalized, then the output will not be worse then the other input.
96 * If both are normalized, then the output will be normalized.
98 static inline av_const SoftFloat av_mul_sf(SoftFloat a, SoftFloat b){
100 av_assert2((int32_t)((a.mant * (int64_t)b.mant) >> ONE_BITS) == (a.mant * (int64_t)b.mant) >> ONE_BITS);
101 a.mant = (a.mant * (int64_t)b.mant) >> ONE_BITS;
102 a = av_normalize1_sf((SoftFloat){a.mant, a.exp - 1});
103 if (!a.mant || a.exp < MIN_EXP)
109 * b has to be normalized and not zero.
110 * @return Will not be more denormalized than a.
112 static inline av_const SoftFloat av_div_sf(SoftFloat a, SoftFloat b){
114 a.mant = ((int64_t)a.mant<<(ONE_BITS+1)) / b.mant;
115 a = av_normalize1_sf(a);
116 if (!a.mant || a.exp < MIN_EXP)
121 static inline av_const int av_cmp_sf(SoftFloat a, SoftFloat b){
122 int t= a.exp - b.exp;
123 if (t <-31) return - b.mant ;
124 else if (t < 0) return (a.mant >> (-t)) - b.mant ;
125 else if (t < 32) return a.mant - (b.mant >> t);
129 static inline av_const int av_gt_sf(SoftFloat a, SoftFloat b)
131 int t= a.exp - b.exp;
132 if (t <-31) return 0 > b.mant ;
133 else if (t < 0) return (a.mant >> (-t)) > b.mant ;
134 else if (t < 32) return a.mant > (b.mant >> t);
135 else return a.mant > 0 ;
138 static inline av_const SoftFloat av_add_sf(SoftFloat a, SoftFloat b){
139 int t= a.exp - b.exp;
140 if (t <-31) return b;
141 else if (t < 0) return av_normalize_sf(av_normalize1_sf((SoftFloat){ b.mant + (a.mant >> (-t)), b.exp}));
142 else if (t < 32) return av_normalize_sf(av_normalize1_sf((SoftFloat){ a.mant + (b.mant >> t ), a.exp}));
146 static inline av_const SoftFloat av_sub_sf(SoftFloat a, SoftFloat b){
147 return av_add_sf(a, (SoftFloat){ -b.mant, b.exp});
150 //FIXME log, exp, pow
153 * Converts a mantisse and exponent to a SoftFloat
154 * @returns a SoftFloat with value v * 2^frac_bits
156 static inline av_const SoftFloat av_int2sf(int v, int frac_bits){
162 return av_normalize_sf(av_normalize1_sf((SoftFloat){v, ONE_BITS + 1 - frac_bits + exp_offset}));
166 * Rounding is to -inf.
168 static inline av_const int av_sf2int(SoftFloat v, int frac_bits){
169 v.exp += frac_bits - (ONE_BITS + 1);
170 if(v.exp >= 0) return v.mant << v.exp ;
171 else return v.mant >>(-v.exp);
175 * Rounding-to-nearest used.
177 static av_always_inline SoftFloat av_sqrt_sf(SoftFloat val)
183 else if (val.mant < 0)
187 tabIndex = (val.mant - 0x20000000) >> 20;
189 rem = val.mant & 0xFFFFF;
190 val.mant = (int)(((int64_t)av_sqrttbl_sf[tabIndex] * (0x100000 - rem) +
191 (int64_t)av_sqrttbl_sf[tabIndex + 1] * rem +
193 val.mant = (int)(((int64_t)av_sqr_exp_multbl_sf[val.exp & 1] * val.mant +
196 if (val.mant < 0x40000000)
201 val.exp = (val.exp >> 1) + 1;
208 * Rounding-to-nearest used.
210 static av_unused void av_sincos_sf(int a, int *s, int *c)
217 sign = (idx << 27) >> 31;
218 cv = av_costbl_1_sf[idx & 0xf];
219 cv = (cv ^ sign) - sign;
222 sign = (idx << 27) >> 31;
223 sv = av_costbl_1_sf[idx & 0xf];
224 sv = (sv ^ sign) - sign;
227 ct = av_costbl_2_sf[idx & 0x1f];
228 st = av_sintbl_2_sf[idx & 0x1f];
230 idx = (int)(((int64_t)cv * ct - (int64_t)sv * st + 0x20000000) >> 30);
232 sv = (int)(((int64_t)cv * st + (int64_t)sv * ct + 0x20000000) >> 30);
237 ct = av_costbl_3_sf[idx & 0x1f];
238 st = av_sintbl_3_sf[idx & 0x1f];
240 idx = (int)(((int64_t)cv * ct - (int64_t)sv * st + 0x20000000) >> 30);
242 sv = (int)(((int64_t)cv * st + (int64_t)sv * ct + 0x20000000) >> 30);
247 ct = (int)(((int64_t)av_costbl_4_sf[idx & 0x1f] * (0x800 - (a & 0x7ff)) +
248 (int64_t)av_costbl_4_sf[(idx & 0x1f)+1]*(a & 0x7ff) +
250 st = (int)(((int64_t)av_sintbl_4_sf[idx & 0x1f] * (0x800 - (a & 0x7ff)) +
251 (int64_t)av_sintbl_4_sf[(idx & 0x1f) + 1] * (a & 0x7ff) +
254 *c = (int)(((int64_t)cv * ct + (int64_t)sv * st + 0x20000000) >> 30);
256 *s = (int)(((int64_t)cv * st + (int64_t)sv * ct + 0x20000000) >> 30);
259 #endif /* AVUTIL_SOFTFLOAT_H */