git.sesse.net Git - movit/commitdiff
Make all fp16 routines work with fp32 as input instead of fp64, since that is what...
author Steinar H. Gunderson <sgunderson@bigfoot.com>
Fri, 15 Jan 2016 23:57:00 +0000 (00:57 +0100)
committer Steinar H. Gunderson <sgunderson@bigfoot.com>
Sat, 16 Jan 2016 00:10:56 +0000 (01:10 +0100)
fft_input.cpp
fft_pass_effect.cpp
fp16.cpp
fp16.h
fp16_test.cpp
resample_effect.cpp
util.cpp

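After this commit, the half-float helpers take and return fp32 (float) rather than fp64 (double). For reference, the renamed API round-trips like this (a minimal sketch, not part of the commit, using the values asserted in FP16Test.Simple below; assumes fp16.h is on the include path):

#include <assert.h>
#include "fp16.h"

void roundtrip_sketch()
{
        using namespace movit;
        fp16_int_t h = fp32_to_fp16(1.0f / 3.0f);  // nearest binary16 to 1/3
        assert(h.val == 0x3555);
        float back = fp16_to_fp32(h);
        assert(back == 0.333251953125f);           // 1365/4096, exact in fp32
}
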
diff --git a/fft_input.cpp b/fft_input.cpp
index b37d52039b77c7b15aa4a13242af328d6a54d4f2..f9339df24779fab2cb37fb00baba51e88f613d4b 100644
--- a/fft_input.cpp
+++ b/fft_input.cpp
@@ -67,8 +67,8 @@ void FFTInput::set_gl_state(GLuint glsl_program_num, const string& prefix, unsig
                // Convert to fp16.
                fp16_int_t *kernel = new fp16_int_t[fft_width * fft_height * 2];
                for (int i = 0; i < fft_width * fft_height; ++i) {
-                       kernel[i * 2 + 0] = fp64_to_fp16(out[i][0]);
-                       kernel[i * 2 + 1] = fp64_to_fp16(out[i][1]);
+                       kernel[i * 2 + 0] = fp32_to_fp16(out[i][0]);
+                       kernel[i * 2 + 1] = fp32_to_fp16(out[i][1]);
                }
 
                // (Re-)upload the texture.
diff --git a/fft_pass_effect.cpp b/fft_pass_effect.cpp
index 434ce22a79d7baf7877e61a0bb3b00bce3414421..b0006087cd2779282af2ec3750d5ba7761d084f5 100644
--- a/fft_pass_effect.cpp
+++ b/fft_pass_effect.cpp
@@ -167,10 +167,10 @@ void FFTPassEffect::generate_support_texture()
                        support_texture_index = subfft_size - support_texture_index - 1;
                        sign = -1.0;
                }
-               tmp[support_texture_index * 4 + 0] = fp64_to_fp16(sign * (src1 - i * stride) / double(input_size));
-               tmp[support_texture_index * 4 + 1] = fp64_to_fp16(sign * (src2 - i * stride) / double(input_size));
-               tmp[support_texture_index * 4 + 2] = fp64_to_fp16(twiddle_real);
-               tmp[support_texture_index * 4 + 3] = fp64_to_fp16(twiddle_imag);
+               tmp[support_texture_index * 4 + 0] = fp32_to_fp16(sign * (src1 - i * stride) / double(input_size));
+               tmp[support_texture_index * 4 + 1] = fp32_to_fp16(sign * (src2 - i * stride) / double(input_size));
+               tmp[support_texture_index * 4 + 2] = fp32_to_fp16(twiddle_real);
+               tmp[support_texture_index * 4 + 3] = fp32_to_fp16(twiddle_imag);
        }
 
        // Supposedly FFTs are very sensitive to inaccuracies in the twiddle factors,
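The truncated comment above carries the precision argument: twiddle factors feed every butterfly, so they are computed in double and only narrowed on the way into the fp16 support texture (the double expression converts implicitly to float at the fp32_to_fp16() call, then to half). A sketch of that pattern, not part of the commit; the cos/sin origin of twiddle_real is an assumption, since its computation sits outside this hunk:

#include <math.h>
#include "fp16.h"

movit::fp16_int_t pack_twiddle_re(int k, int subfft_size)
{
        // Assumed form of the twiddle factor; kept in double precision
        // until it is packed into the fp16 support texture.
        double twiddle_real = cos(-2.0 * M_PI * k / subfft_size);
        return movit::fp32_to_fp16(twiddle_real);
}
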
diff --git a/fp16.cpp b/fp16.cpp
index e8993f9eb153f508516dec4cbd6a2642965d9597..d83871bd358841efd564eaaed4ed2477245be239 100644
--- a/fp16.cpp
+++ b/fp16.cpp
@@ -3,23 +3,23 @@
 namespace movit {
 namespace {
 
-union fp64 {
-       double f;
-       unsigned long long ll;
+union fp32 {
+       float f;
+       unsigned int u;
 };
 
 template<class FP16_INT_T,
          int FP16_BIAS, int FP16_MANTISSA_BITS, int FP16_EXPONENT_BITS, int FP16_MAX_EXPONENT,
-         int FP64_BIAS, int FP64_MANTISSA_BITS, int FP64_EXPONENT_BITS, int FP64_MAX_EXPONENT>
-inline double fp_upconvert(FP16_INT_T x)
+         int FP32_BIAS, int FP32_MANTISSA_BITS, int FP32_EXPONENT_BITS, int FP32_MAX_EXPONENT>
+inline float fp_upconvert(FP16_INT_T x)
 {
        int sign = x.val >> (FP16_MANTISSA_BITS + FP16_EXPONENT_BITS);
-       int exponent = (x.val & ((1ULL << (FP16_MANTISSA_BITS + FP16_EXPONENT_BITS)) - 1)) >> FP16_MANTISSA_BITS;
-       unsigned long long mantissa = x.val & ((1ULL << FP16_MANTISSA_BITS) - 1);
+       int exponent = (x.val & ((1U << (FP16_MANTISSA_BITS + FP16_EXPONENT_BITS)) - 1)) >> FP16_MANTISSA_BITS;
+       unsigned int mantissa = x.val & ((1U << FP16_MANTISSA_BITS) - 1);
 
-       int sign64;
-       int exponent64;
-       unsigned long long mantissa64;
+       int sign32;
+       int exponent32;
+       unsigned int mantissa32;
 
        if (exponent == 0) {
                /* 
@@ -27,22 +27,22 @@ inline double fp_upconvert(FP16_INT_T x)
                 * ordinary numbers.
                 */
                if (mantissa == 0) {
-                       sign64 = sign;
-                       exponent64 = 0;
-                       mantissa64 = 0;
+                       sign32 = sign;
+                       exponent32 = 0;
+                       mantissa32 = 0;
                } else {
-                       sign64 = sign;
-                       exponent64 = FP64_BIAS - FP16_BIAS;
-                       mantissa64 = mantissa << (FP64_MANTISSA_BITS - FP16_MANTISSA_BITS + 1);
+                       sign32 = sign;
+                       exponent32 = FP32_BIAS - FP16_BIAS;
+                       mantissa32 = mantissa << (FP32_MANTISSA_BITS - FP16_MANTISSA_BITS + 1);
 
                        /* Normalize the number. */
-                       while ((mantissa64 & (1ULL << FP64_MANTISSA_BITS)) == 0) {
-                               --exponent64;
-                               mantissa64 <<= 1;
+                       while ((mantissa32 & (1U << FP32_MANTISSA_BITS)) == 0) {
+                               --exponent32;
+                               mantissa32 <<= 1;
                        }
 
                        /* Clear the now-implicit one-bit. */
-                       mantissa64 &= ~(1ULL << FP64_MANTISSA_BITS);
+                       mantissa32 &= ~(1U << FP32_MANTISSA_BITS);
                }
        } else if (exponent == FP16_MAX_EXPONENT) {
                /*
@@ -51,44 +51,44 @@ inline double fp_upconvert(FP16_INT_T x)
                 * keep the first bit (which signals signalling/non-signalling
                 * in many implementations).
                 */
-               sign64 = sign;
-               exponent64 = FP64_MAX_EXPONENT;
-               mantissa64 = mantissa << (FP64_MANTISSA_BITS - FP16_MANTISSA_BITS);
+               sign32 = sign;
+               exponent32 = FP32_MAX_EXPONENT;
+               mantissa32 = mantissa << (FP32_MANTISSA_BITS - FP16_MANTISSA_BITS);
        } else {
-               sign64 = sign;
+               sign32 = sign;
 
                /* Up-conversion is simple. Just re-bias the exponent... */
-               exponent64 = exponent + FP64_BIAS - FP16_BIAS;
+               exponent32 = exponent + FP32_BIAS - FP16_BIAS;
 
                /* ...and convert the mantissa. */
-               mantissa64 = mantissa << (FP64_MANTISSA_BITS - FP16_MANTISSA_BITS);
+               mantissa32 = mantissa << (FP32_MANTISSA_BITS - FP16_MANTISSA_BITS);
        }
 
-       union fp64 nx;
-       nx.ll = ((unsigned long long)sign64 << (FP64_MANTISSA_BITS + FP64_EXPONENT_BITS))
-           | ((unsigned long long)exponent64 << FP64_MANTISSA_BITS)
-           | mantissa64;
+       union fp32 nx;
+       nx.u = ((unsigned int)sign32 << (FP32_MANTISSA_BITS + FP32_EXPONENT_BITS))
+           | ((unsigned int)exponent32 << FP32_MANTISSA_BITS)
+           | mantissa32;
        return nx.f;
 }
-               
-unsigned long long shift_right_with_round(unsigned long long x, unsigned shift)
+
+unsigned int shift_right_with_round(unsigned int x, unsigned shift)
 {
-       /* shifts >= 64 need to be special-cased */
-       if (shift > 64) {
+       /* shifts >= 32 need to be special-cased */
+       if (shift > 32) {
                return 0;
-       } else if (shift == 64) {
-               if (x > (1ULL << 63)) {
+       } else if (shift == 32) {
+               if (x > (1U << 31)) {
                        return 1;
                } else {
                        return 0;
                }
        }
 
-       unsigned long long round_part = x & ((1ULL << shift) - 1);
-       if (round_part < (1ULL << (shift - 1))) {
+       unsigned int round_part = x & ((1U << shift) - 1);
+       if (round_part < (1U << (shift - 1))) {
                /* round down */
                x >>= shift;
-       } else if (round_part > (1ULL << (shift - 1))) {
+       } else if (round_part > (1U << (shift - 1))) {
                /* round up */
                x >>= shift;
                ++x;
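
shift_right_with_round() implements round-to-nearest; the tie branch (round_part exactly half) falls outside this hunk's context, but the RoundToNearestEven test below pins it to ties-to-even. A self-contained sketch, not part of the commit, with the inferred tie handling:

unsigned int shift_right_with_round_sketch(unsigned int x, unsigned shift)
{
        if (shift == 0) return x;
        if (shift > 32) return 0;
        if (shift == 32) return (x > (1U << 31)) ? 1 : 0;

        unsigned int round_part = x & ((1U << shift) - 1);
        unsigned int half = 1U << (shift - 1);
        x >>= shift;
        if (round_part > half || (round_part == half && (x & 1))) {
                ++x;  // round up, or break a tie toward the even value
        }
        return x;
}

// Example: x = 22 (10110b), shift = 2: round_part = 2 is exactly half,
// and 22 >> 2 = 5 is odd, so the tie rounds up to the even value 6.
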
@@ -104,23 +104,23 @@ unsigned long long shift_right_with_round(unsigned long long x, unsigned shift)
 
 template<class FP16_INT_T,
          int FP16_BIAS, int FP16_MANTISSA_BITS, int FP16_EXPONENT_BITS, int FP16_MAX_EXPONENT,
-         int FP64_BIAS, int FP64_MANTISSA_BITS, int FP64_EXPONENT_BITS, int FP64_MAX_EXPONENT>
-inline FP16_INT_T fp_downconvert(double x)
+         int FP32_BIAS, int FP32_MANTISSA_BITS, int FP32_EXPONENT_BITS, int FP32_MAX_EXPONENT>
+inline FP16_INT_T fp_downconvert(float x)
 {
-       union fp64 nx;
+       union fp32 nx;
        nx.f = x;
-       unsigned long long f = nx.ll;
-       int sign = f >> (FP64_MANTISSA_BITS + FP64_EXPONENT_BITS);
-       int exponent = (f & ((1ULL << (FP64_MANTISSA_BITS + FP64_EXPONENT_BITS)) - 1)) >> FP64_MANTISSA_BITS;
-       unsigned long long mantissa = f & ((1ULL << FP64_MANTISSA_BITS) - 1);
+       unsigned int f = nx.u;
+       int sign = f >> (FP32_MANTISSA_BITS + FP32_EXPONENT_BITS);
+       int exponent = (f & ((1U << (FP32_MANTISSA_BITS + FP32_EXPONENT_BITS)) - 1)) >> FP32_MANTISSA_BITS;
+       unsigned int mantissa = f & ((1U << FP32_MANTISSA_BITS) - 1);
 
        int sign16;
        int exponent16;
-       unsigned long long mantissa16;
+       unsigned int mantissa16;
 
        if (exponent == 0) {
                /*
-                * Denormals, or zero. The largest possible 64-bit
+                * Denormals, or zero. The largest possible 32-bit
-                * denormal is about +- 2^-1022, and the smallest possible
+                * denormal is about +- 2^-126, and the smallest possible
                 * 16-bit denormal is +- 2^-24. Thus, we can safely
                 * just set all of these to zero (but keep the sign bit).
@@ -128,7 +128,7 @@ inline FP16_INT_T fp_downconvert(double x)
                sign16 = sign;
                exponent16 = 0;
                mantissa16 = 0;
-       } else if (exponent == FP64_MAX_EXPONENT) {
+       } else if (exponent == FP32_MAX_EXPONENT) {
                /*
                 * Infinities or NaN (mantissa=0 => infinity, otherwise NaN).
                 * We don't care much about NaNs, so let us just keep the first
@@ -142,25 +142,25 @@ inline FP16_INT_T fp_downconvert(double x)
                } else {
                        sign16 = sign;  /* undefined */
                        exponent16 = FP16_MAX_EXPONENT;
-                       mantissa16 = mantissa >> (FP64_MANTISSA_BITS - FP16_MANTISSA_BITS);
+                       mantissa16 = mantissa >> (FP32_MANTISSA_BITS - FP16_MANTISSA_BITS);
                        if (mantissa16 == 0) {
                                mantissa16 = 1;
                        }
                }
        } else {
                /* Re-bias the exponent, and check if we will create a denormal. */
-               exponent16 = exponent + FP16_BIAS - FP64_BIAS;
+               exponent16 = exponent + FP16_BIAS - FP32_BIAS;
                if (exponent16 <= 0) {
-                       int shift_amount = FP64_MANTISSA_BITS - FP16_MANTISSA_BITS - exponent16 + 1;
+                       int shift_amount = FP32_MANTISSA_BITS - FP16_MANTISSA_BITS - exponent16 + 1;
                        sign16 = sign;
                        exponent16 = 0;
-                       mantissa16 = shift_right_with_round(mantissa | (1ULL << FP64_MANTISSA_BITS), shift_amount);
+                       mantissa16 = shift_right_with_round(mantissa | (1U << FP32_MANTISSA_BITS), shift_amount);
 
                        /*
                         * We could actually have rounded back into the lowest possible non-denormal
                         * here, so check for that.
                         */
-                       if (mantissa16 == (1ULL << FP16_MANTISSA_BITS)) {
+                       if (mantissa16 == (1U << FP16_MANTISSA_BITS)) {
                                exponent16 = 1;
                                mantissa16 = 0;
                        }
@@ -171,10 +171,10 @@ inline FP16_INT_T fp_downconvert(double x)
                         * mode.
                         */
                        sign16 = sign;
-                       mantissa16 = shift_right_with_round(mantissa, FP64_MANTISSA_BITS - FP16_MANTISSA_BITS);
+                       mantissa16 = shift_right_with_round(mantissa, FP32_MANTISSA_BITS - FP16_MANTISSA_BITS);
 
                        /* Check if we overflowed and need to increase the exponent. */
-                       if (mantissa16 == (1ULL << FP16_MANTISSA_BITS)) {
+                       if (mantissa16 == (1U << FP16_MANTISSA_BITS)) {
                                ++exponent16;
                                mantissa16 = 0;
                        }
@@ -213,34 +213,20 @@ const int FP16_MAX_EXPONENT = (1 << FP16_EXPONENT_BITS) - 1;
 
 #ifndef __F16C__
 
-double fp16_to_fp64(fp16_int_t x)
+float fp16_to_fp32(fp16_int_t x)
 {
        return fp_upconvert<fp16_int_t,
               FP16_BIAS, FP16_MANTISSA_BITS, FP16_EXPONENT_BITS, FP16_MAX_EXPONENT,
-              FP64_BIAS, FP64_MANTISSA_BITS, FP64_EXPONENT_BITS, FP64_MAX_EXPONENT>(x);
+              FP32_BIAS, FP32_MANTISSA_BITS, FP32_EXPONENT_BITS, FP32_MAX_EXPONENT>(x);
 }
 
-fp16_int_t fp64_to_fp16(double x)
+fp16_int_t fp32_to_fp16(float x)
 {
        return fp_downconvert<fp16_int_t,
               FP16_BIAS, FP16_MANTISSA_BITS, FP16_EXPONENT_BITS, FP16_MAX_EXPONENT,
-              FP64_BIAS, FP64_MANTISSA_BITS, FP64_EXPONENT_BITS, FP64_MAX_EXPONENT>(x);
+              FP32_BIAS, FP32_MANTISSA_BITS, FP32_EXPONENT_BITS, FP32_MAX_EXPONENT>(x);
 }
 
 #endif
 
-double fp32_to_fp64(fp32_int_t x)
-{
-       return fp_upconvert<fp32_int_t,
-              FP32_BIAS, FP32_MANTISSA_BITS, FP32_EXPONENT_BITS, FP32_MAX_EXPONENT,
-              FP64_BIAS, FP64_MANTISSA_BITS, FP64_EXPONENT_BITS, FP64_MAX_EXPONENT>(x);
-}
-
-fp32_int_t fp64_to_fp32(double x)
-{
-       return fp_downconvert<fp32_int_t,
-              FP32_BIAS, FP32_MANTISSA_BITS, FP32_EXPONENT_BITS, FP32_MAX_EXPONENT,
-              FP64_BIAS, FP64_MANTISSA_BITS, FP64_EXPONENT_BITS, FP64_MAX_EXPONENT>(x);
-}
-
 }  // namespace
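
The FP16_* constants referenced above presumably encode the standard IEEE 754 binary16 layout (their values are not shown in this excerpt): 1 sign bit, 5 exponent bits with bias 15, 10 mantissa bits. That makes the test values easy to check by hand; a sketch decoding 0x3555, the 1/3 value from FP16Test.Simple, normal numbers only:

#include <math.h>

float decode_fp16_normal(unsigned short v)
{
        int sign     = v >> 15;
        int exponent = (v >> 10) & 0x1f;  // biased by 15
        int mantissa = v & 0x3ff;
        float f = ldexpf(1.0f + mantissa / 1024.0f, exponent - 15);
        return sign ? -f : f;
}

// 0x3555: sign 0, biased exponent 13, mantissa 341:
// (1 + 341/1024) * 2^-2 = 1365/4096 = 0.333251953125, matching the test.
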
diff --git a/fp16.h b/fp16.h
index c21153b3466fe71424283063cb7cf9b775a89153..49828bb62af922b6069c3884f765c2ad64c96094 100644
--- a/fp16.h
+++ b/fp16.h
@@ -26,15 +26,13 @@ struct fp16_int_t {
 
 // Use the f16c instructions from Haswell if available (and we know that they
 // are at compile time).
-static inline double fp16_to_fp64(fp16_int_t x)
+static inline float fp16_to_fp32(fp16_int_t x)
 {
        return _cvtsh_ss(x.val);
 }
 
-static inline fp16_int_t fp64_to_fp16(double x)
+static inline fp16_int_t fp32_to_fp16(float x)
 {
-       // NOTE: Strictly speaking, there are some select values where this isn't correct,
-       // since we first round to fp32 and then to fp16.
        fp16_int_t ret;
        ret.val = _cvtss_sh(x, 0);
        return ret;
@@ -42,29 +40,23 @@ static inline fp16_int_t fp64_to_fp16(double x)
 
 #else
 
-double fp16_to_fp64(fp16_int_t x);
-fp16_int_t fp64_to_fp16(double x);
+float fp16_to_fp32(fp16_int_t x);
+fp16_int_t fp32_to_fp16(float x);
 
 #endif
 
-// These are not very useful by themselves, but are implemented using the same
-// code as the fp16 ones (just with different constants), so they are useful
-// for verifying against the FPU in unit tests.
-double fp32_to_fp64(fp32_int_t x);
-fp32_int_t fp64_to_fp32(double x);
-
 // Overloads for use in templates.
-static inline double to_fp64(double x) { return x; }
-static inline double to_fp64(float x) { return x; }
-static inline double to_fp64(fp16_int_t x) { return fp16_to_fp64(x); }
+static inline float to_fp32(double x) { return x; }
+static inline float to_fp32(float x) { return x; }
+static inline float to_fp32(fp16_int_t x) { return fp16_to_fp32(x); }
 
-template<class T> inline T from_fp64(double x);
-template<> inline double from_fp64<double>(double x) { return x; }
-template<> inline float from_fp64<float>(double x) { return x; }
-template<> inline fp16_int_t from_fp64<fp16_int_t>(double x) { return fp64_to_fp16(x); }
+template<class T> inline T from_fp32(float x);
+template<> inline double from_fp32<double>(float x) { return x; }
+template<> inline float from_fp32<float>(float x) { return x; }
+template<> inline fp16_int_t from_fp32<fp16_int_t>(float x) { return fp32_to_fp16(x); }
 
 template<class From, class To>
-inline To convert_float(From x) { return from_fp64<To>(to_fp64(x)); }
+inline To convert_float(From x) { return from_fp32<To>(to_fp32(x)); }
 
 template<class Same>
 inline Same convert_float(Same x) { return x; }
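
With these overloads, convert_float<From, To> funnels every conversion through one float hop. Note that to_fp32(double) narrows to float before from_fp32<fp16_int_t>() packs to half, so a double input is still rounded twice; presumably acceptable now that callers work in fp32, which is what the deleted NOTE in the F16C path was about. A small sketch of the instantiations, not part of the commit:

#include "fp16.h"
using namespace movit;

void convert_float_sketch()
{
        // double -> float -> fp16 (two rounding steps)
        fp16_int_t h = convert_float<double, fp16_int_t>(0.25);
        // fp16 -> float (exact: every fp16 value is representable in fp32)
        float f = convert_float<fp16_int_t, float>(h);
        (void)f;
}
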
diff --git a/fp16_test.cpp b/fp16_test.cpp
index e0920e9ce60bf499bb636ac4c7ab8eac9b2b1b5c..d95e5c6b6d01813dfa367169c9ee15f35d6bc41a 100644
--- a/fp16_test.cpp
+++ b/fp16_test.cpp
@@ -13,39 +13,32 @@ fp16_int_t make_fp16(unsigned short x)
        return ret;
 }
 
-fp32_int_t make_fp32(unsigned int x)
-{
-       fp32_int_t ret;
-       ret.val = x;
-       return ret;
-}
-
 }  // namespace
 
 TEST(FP16Test, Simple) {
-       EXPECT_EQ(0x0000, fp64_to_fp16(0.0).val);
-       EXPECT_DOUBLE_EQ(0.0, fp16_to_fp64(make_fp16(0x0000)));
+       EXPECT_EQ(0x0000, fp32_to_fp16(0.0).val);
+       EXPECT_DOUBLE_EQ(0.0, fp16_to_fp32(make_fp16(0x0000)));
 
-       EXPECT_EQ(0x3c00, fp64_to_fp16(1.0).val);
-       EXPECT_DOUBLE_EQ(1.0, fp16_to_fp64(make_fp16(0x3c00)));
+       EXPECT_EQ(0x3c00, fp32_to_fp16(1.0).val);
+       EXPECT_DOUBLE_EQ(1.0, fp16_to_fp32(make_fp16(0x3c00)));
 
-       EXPECT_EQ(0x3555, fp64_to_fp16(1.0 / 3.0).val);
-       EXPECT_DOUBLE_EQ(0.333251953125, fp16_to_fp64(make_fp16(0x3555)));
+       EXPECT_EQ(0x3555, fp32_to_fp16(1.0 / 3.0).val);
+       EXPECT_DOUBLE_EQ(0.333251953125, fp16_to_fp32(make_fp16(0x3555)));
 }
 
 TEST(FP16Test, RoundToNearestEven) {
-       ASSERT_DOUBLE_EQ(1.0, fp16_to_fp64(make_fp16(0x3c00)));
-
-       double x0 = fp16_to_fp64(make_fp16(0x3c00));
-       double x1 = fp16_to_fp64(make_fp16(0x3c01));
-       double x2 = fp16_to_fp64(make_fp16(0x3c02));
-       double x3 = fp16_to_fp64(make_fp16(0x3c03));
-       double x4 = fp16_to_fp64(make_fp16(0x3c04));
-
-       EXPECT_EQ(0x3c00, fp64_to_fp16(0.5 * (x0 + x1)).val);
-       EXPECT_EQ(0x3c02, fp64_to_fp16(0.5 * (x1 + x2)).val);
-       EXPECT_EQ(0x3c02, fp64_to_fp16(0.5 * (x2 + x3)).val);
-       EXPECT_EQ(0x3c04, fp64_to_fp16(0.5 * (x3 + x4)).val);
+       ASSERT_DOUBLE_EQ(1.0, fp16_to_fp32(make_fp16(0x3c00)));
+
+       double x0 = fp16_to_fp32(make_fp16(0x3c00));
+       double x1 = fp16_to_fp32(make_fp16(0x3c01));
+       double x2 = fp16_to_fp32(make_fp16(0x3c02));
+       double x3 = fp16_to_fp32(make_fp16(0x3c03));
+       double x4 = fp16_to_fp32(make_fp16(0x3c04));
+
+       EXPECT_EQ(0x3c00, fp32_to_fp16(0.5 * (x0 + x1)).val);
+       EXPECT_EQ(0x3c02, fp32_to_fp16(0.5 * (x1 + x2)).val);
+       EXPECT_EQ(0x3c02, fp32_to_fp16(0.5 * (x2 + x3)).val);
+       EXPECT_EQ(0x3c04, fp32_to_fp16(0.5 * (x3 + x4)).val);
 }
 
 union fp64 {
@@ -59,13 +52,13 @@ union fp32 {
 
 TEST(FP16Test, NaN) {
        // Ignore the sign bit.
-       EXPECT_EQ(0x7e00, fp64_to_fp16(0.0 / 0.0).val & 0x7fff);
-       EXPECT_TRUE(isnan(fp16_to_fp64(make_fp16(0xfe00))));
+       EXPECT_EQ(0x7e00, fp32_to_fp16(0.0 / 0.0).val & 0x7fff);
+       EXPECT_TRUE(isnan(fp16_to_fp32(make_fp16(0xfe00))));
 
-       fp64 borderline_inf;
-       borderline_inf.ll = 0x7ff0000000000000ull;
-       fp64 borderline_nan;
-       borderline_nan.ll = 0x7ff0000000000001ull;
+       fp32 borderline_inf;
+       borderline_inf.u = 0x7f800000ull;
+       fp32 borderline_nan;
+       borderline_nan.u = 0x7f800001ull;
 
        ASSERT_FALSE(isfinite(borderline_inf.f));
        ASSERT_FALSE(isnan(borderline_inf.f));
@@ -73,8 +66,8 @@ TEST(FP16Test, NaN) {
        ASSERT_FALSE(isfinite(borderline_nan.f));
        ASSERT_TRUE(isnan(borderline_nan.f));
 
-       double borderline_inf_roundtrip = fp16_to_fp64(fp64_to_fp16(borderline_inf.f));
-       double borderline_nan_roundtrip = fp16_to_fp64(fp64_to_fp16(borderline_nan.f));
+       double borderline_inf_roundtrip = fp16_to_fp32(fp32_to_fp16(borderline_inf.f));
+       double borderline_nan_roundtrip = fp16_to_fp32(fp32_to_fp16(borderline_nan.f));
 
        EXPECT_FALSE(isfinite(borderline_inf_roundtrip));
        EXPECT_FALSE(isnan(borderline_inf_roundtrip));
@@ -85,62 +78,15 @@ TEST(FP16Test, NaN) {
 
 TEST(FP16Test, Denormals) {
        const double smallest_fp16_denormal = 5.9604644775390625e-08;
-       EXPECT_EQ(0x0001, fp64_to_fp16(smallest_fp16_denormal).val);
-       EXPECT_EQ(0x0000, fp64_to_fp16(0.5 * smallest_fp16_denormal).val);  // Round-to-even.
-       EXPECT_EQ(0x0001, fp64_to_fp16(0.51 * smallest_fp16_denormal).val);
-       EXPECT_EQ(0x0002, fp64_to_fp16(1.5 * smallest_fp16_denormal).val);
+       EXPECT_EQ(0x0001, fp32_to_fp16(smallest_fp16_denormal).val);
+       EXPECT_EQ(0x0000, fp32_to_fp16(0.5 * smallest_fp16_denormal).val);  // Round-to-even.
+       EXPECT_EQ(0x0001, fp32_to_fp16(0.51 * smallest_fp16_denormal).val);
+       EXPECT_EQ(0x0002, fp32_to_fp16(1.5 * smallest_fp16_denormal).val);
 
        const double smallest_fp16_non_denormal = 6.103515625e-05;
-       EXPECT_EQ(0x0400, fp64_to_fp16(smallest_fp16_non_denormal).val);
-       EXPECT_EQ(0x0400, fp64_to_fp16(smallest_fp16_non_denormal - 0.5 * smallest_fp16_denormal).val);  // Round-to-even.
-       EXPECT_EQ(0x03ff, fp64_to_fp16(smallest_fp16_non_denormal - smallest_fp16_denormal).val);
-}
-
-// Randomly test a large number of fp64 -> fp32 conversions, comparing
-// against the FPU.
-TEST(FP16Test, FP32ReferenceDownconvert) {
-       srand(12345);
-
-       for (int i = 0; i < 1000000; ++i) {
-               unsigned r1 = rand();
-               unsigned r2 = rand();
-               unsigned r3 = rand();
-               union fp64 src;
-               union fp32 reference, result;
-
-               src.ll = (((unsigned long long)r1) << 33) ^ ((unsigned long long)r2 << 16) ^ r3;
-               reference.f = float(src.f);
-               result.u = fp64_to_fp32(src.f).val;
-
-               EXPECT_EQ(isnan(result.f), isnan(reference.f));
-               if (!isnan(result.f)) {
-                       EXPECT_EQ(result.u, reference.u)
-                           << src.f << " got rounded to " << result.u << " (" << result.f << ")";
-               }
-       }
-}
-
-// Randomly test a large number of fp32 -> fp64 conversions, comparing
-// against the FPU.
-TEST(FP16Test, FP32ReferenceUpconvert) {
-       srand(12345);
-
-       for (int i = 0; i < 1000000; ++i) {
-               unsigned r1 = rand();
-               unsigned r2 = rand();
-               union fp32 src;
-               union fp64 reference, result;
-
-               src.u = ((unsigned long long)r1 << 16) ^ r2;
-               reference.f = double(src.f);
-               result.f = fp32_to_fp64(make_fp32(src.u));
-
-               EXPECT_EQ(isnan(result.f), isnan(reference.f));
-               if (!isnan(result.f)) {
-                       EXPECT_EQ(result.ll, reference.ll)
-                           << src.f << " got converted to " << result.ll << " (" << result.f << ")";
-               }
-       }
+       EXPECT_EQ(0x0400, fp32_to_fp16(smallest_fp16_non_denormal).val);
+       EXPECT_EQ(0x0400, fp32_to_fp16(smallest_fp16_non_denormal - 0.5 * smallest_fp16_denormal).val);  // Round-to-even.
+       EXPECT_EQ(0x03ff, fp32_to_fp16(smallest_fp16_non_denormal - smallest_fp16_denormal).val);
 }
 
 }  // namespace movit
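
The two deleted fuzz tests exercised the removed fp32<->fp64 helpers against the FPU. The same approach could still cross-check the portable fp32_to_fp16() against the F16C instruction it mirrors; a sketch, not part of the commit, assuming a build in which the software implementation is the one compiled yet F16C is available to the test (as written, the tree compiles only one of the two paths):

#include <assert.h>
#include <immintrin.h>
#include <math.h>
#include <stdlib.h>
#include "fp16.h"

void fuzz_against_f16c()
{
        srand(12345);
        for (int i = 0; i < 1000000; ++i) {
                union { float f; unsigned int u; } src;
                src.u = ((unsigned int)rand() << 16) ^ (unsigned int)rand();
                if (isnan(src.f)) continue;  // NaN payloads may legitimately differ
                unsigned short reference = _cvtss_sh(src.f, 0);
                unsigned short result = movit::fp32_to_fp16(src.f).val;
                assert(result == reference);
        }
}
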
diff --git a/resample_effect.cpp b/resample_effect.cpp
index a7315689532e85a98a9c2b7884ec52b85f2ed7db..9c8caf3bb77ed58bf2d143dcc2b3b837b81a001c 100644
--- a/resample_effect.cpp
+++ b/resample_effect.cpp
@@ -192,13 +192,13 @@ template<class T>
 void normalize_sum(Tap<T>* vals, unsigned num)
 {
        for (int normalize_pass = 0; normalize_pass < 2; ++normalize_pass) {
-               double sum = 0.0;
+               float sum = 0.0;
                for (unsigned i = 0; i < num; ++i) {
-                       sum += to_fp64(vals[i].weight);
+                       sum += to_fp32(vals[i].weight);
                }
-               double inv_sum = 1.0 / sum;
+               float inv_sum = 1.0 / sum;
                for (unsigned i = 0; i < num; ++i) {
-                       vals[i].weight = from_fp64<T>(to_fp64(vals[i].weight) * inv_sum);
+                       vals[i].weight = from_fp32<T>(to_fp32(vals[i].weight) * inv_sum);
                }
        }
 }
@@ -255,8 +255,8 @@ double compute_sum_sq_error(const Tap<float>* weights, unsigned num_weights,
        // Find the effective range of the bilinear-optimized kernel.
        // Due to rounding of the positions, this is not necessarily the same
        // as the intended range (ie., the range of the original weights).
-       int lower_pos = int(floor(to_fp64(bilinear_weights[0].pos) * size - 0.5));
-       int upper_pos = int(ceil(to_fp64(bilinear_weights[num_bilinear_weights - 1].pos) * size - 0.5)) + 2;
+       int lower_pos = int(floor(to_fp32(bilinear_weights[0].pos) * size - 0.5));
+       int upper_pos = int(ceil(to_fp32(bilinear_weights[num_bilinear_weights - 1].pos) * size - 0.5)) + 2;
        lower_pos = min<int>(lower_pos, lrintf(weights[0].pos * size - 0.5));
        upper_pos = max<int>(upper_pos, lrintf(weights[num_weights - 1].pos * size - 0.5) + 1);
 
@@ -267,7 +267,7 @@ double compute_sum_sq_error(const Tap<float>* weights, unsigned num_weights,
 
        // Now find the effective weights that result from this sampling.
        for (unsigned i = 0; i < num_bilinear_weights; ++i) {
-               const float pixel_pos = to_fp64(bilinear_weights[i].pos) * size - 0.5f;
+               const float pixel_pos = to_fp32(bilinear_weights[i].pos) * size - 0.5f;
                const int x0 = int(floor(pixel_pos)) - lower_pos;
                const int x1 = x0 + 1;
                const float f = lrintf((pixel_pos - (x0 + lower_pos)) / movit_texel_subpixel_precision) * movit_texel_subpixel_precision;
@@ -277,8 +277,8 @@ double compute_sum_sq_error(const Tap<float>* weights, unsigned num_weights,
                assert(x0 < upper_pos - lower_pos);
                assert(x1 < upper_pos - lower_pos);
 
-               effective_weights[x0] += to_fp64(bilinear_weights[i].weight) * (1.0 - f);
-               effective_weights[x1] += to_fp64(bilinear_weights[i].weight) * f;
+               effective_weights[x0] += to_fp32(bilinear_weights[i].weight) * (1.0 - f);
+               effective_weights[x1] += to_fp32(bilinear_weights[i].weight) * f;
        }
 
        // Subtract the desired weights to get the error.
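
normalize_sum() makes two passes because the first store back into T (possibly fp16) re-quantizes the weights, so they no longer sum exactly to one; the second pass renormalizes against that quantized sum. A sketch, not part of the commit, inlining the loop for T = fp16_int_t with made-up weights to show the drift:

#include <stdio.h>
#include "fp16.h"
using namespace movit;

int main()
{
        fp16_int_t w[3] = { fp32_to_fp16(0.3f), fp32_to_fp16(0.3f), fp32_to_fp16(0.4f) };
        for (int pass = 0; pass < 2; ++pass) {
                float sum = 0.0f;
                for (int i = 0; i < 3; ++i) sum += to_fp32(w[i]);
                printf("sum before pass %d: %.9g\n", pass, sum);  // ~1 only after renormalizing
                for (int i = 0; i < 3; ++i)
                        w[i] = fp32_to_fp16(to_fp32(w[i]) / sum);
        }
        return 0;
}
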
diff --git a/util.cpp b/util.cpp
index d50039993bafc08853fe1278803f053212521f71..29428aad3e09ed0a1a66a660125433a7db6a3313 100644
--- a/util.cpp
+++ b/util.cpp
@@ -233,8 +233,8 @@ void combine_two_samples(float w1, float w2, float pos1, float pos2, float num_s
        }
 
        // Round to the desired precision. Note that this might take z outside the 0..1 range.
-       *offset = from_fp64<DestFloat>(pos1 + z * (pos2 - pos1));
-       z = (to_fp64(*offset) - pos1) / (pos2 - pos1);
+       *offset = from_fp32<DestFloat>(pos1 + z * (pos2 - pos1));
+       z = (to_fp32(*offset) - pos1) / (pos2 - pos1);
 
        // Round to the minimum number of bits we have measured earlier.
        // The card will do this for us anyway, but if we know what the real z
@@ -254,11 +254,11 @@ void combine_two_samples(float w1, float w2, float pos1, float pos2, float num_s
        //   w = (a(1-z) + bz) / ((1-z)² + z²)
        //
        // If z had infinite precision, this would simply reduce to w = w1 + w2.
-       *total_weight = from_fp64<DestFloat>((w1 + z * (w2 - w1)) / (z * z + (1 - z) * (1 - z)));
+       *total_weight = from_fp32<DestFloat>((w1 + z * (w2 - w1)) / (z * z + (1 - z) * (1 - z)));
 
        if (sum_sq_error != NULL) {
-               float err1 = to_fp64(*total_weight) * (1 - z) - w1;
-               float err2 = to_fp64(*total_weight) * z - w2;
+               float err1 = to_fp32(*total_weight) * (1 - z) - w1;
+               float err2 = to_fp32(*total_weight) * z - w2;
                *sum_sq_error = err1 * err1 + err2 * err2;
        }
 }
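
The weight formula in the comment above is the least-squares optimum for replacing two taps (weights w1, w2) with one bilinear tap contributing w(1-z) and wz: minimizing (w(1-z) - w1)^2 + (wz - w2)^2 over w gives w = (w1(1-z) + w2*z) / ((1-z)^2 + z^2), and with an exact z = w2/(w1 + w2) this collapses to w1 + w2, as the comment says. A quick numeric check, not part of the commit, with illustrative values:

#include <assert.h>
#include <math.h>
#include <stdio.h>

int main()
{
        double w1 = 0.3, w2 = 0.5;
        double z = w2 / (w1 + w2);  // the split point an exact z would take
        double w = (w1 + z * (w2 - w1)) / (z * z + (1 - z) * (1 - z));
        assert(fabs(w - (w1 + w2)) < 1e-12);  // reduces to w1 + w2
        printf("w = %.17g, w1 + w2 = %.17g\n", w, w1 + w2);
        return 0;
}
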