rm msvc project files and related ifdefs

diff --git a/common/mc.c b/common/mc.c
index 9fc7a343c8f44130c61dedb69bee187fed02d86b..41cf5938727a527eed6d940fc6f536a11d66a76d 100644
--- a/common/mc.c
+++ b/common/mc.c
@@ -29,6 +29,9 @@
 #ifdef ARCH_PPC
 #include "ppc/mc.h"
 #endif
+#ifdef ARCH_ARM
+#include "arm/mc.h"
+#endif
 
 
 static inline void pixel_avg( uint8_t *dst,  int i_dst_stride,
@@ -64,25 +67,6 @@ static inline void pixel_avg_wxh( uint8_t *dst, int i_dst, uint8_t *src1, int i_
     }
 }
 
-#define PIXEL_AVG_C( name, width, height ) \
-static void name( uint8_t *pix1, int i_stride_pix1, \
-                  uint8_t *pix2, int i_stride_pix2, \
-                  uint8_t *pix3, int i_stride_pix3 ) \
-{ \
-    pixel_avg_wxh( pix1, i_stride_pix1, pix2, i_stride_pix2, pix3, i_stride_pix3, width, height ); \
-}
-PIXEL_AVG_C( pixel_avg_16x16, 16, 16 )
-PIXEL_AVG_C( pixel_avg_16x8,  16, 8 )
-PIXEL_AVG_C( pixel_avg_8x16,  8, 16 )
-PIXEL_AVG_C( pixel_avg_8x8,   8, 8 )
-PIXEL_AVG_C( pixel_avg_8x4,   8, 4 )
-PIXEL_AVG_C( pixel_avg_4x8,   4, 8 )
-PIXEL_AVG_C( pixel_avg_4x4,   4, 4 )
-PIXEL_AVG_C( pixel_avg_4x2,   4, 2 )
-PIXEL_AVG_C( pixel_avg_2x4,   2, 4 )
-PIXEL_AVG_C( pixel_avg_2x2,   2, 2 )
-
-
 /* Implicit weighted bipred only:
  * assumes log2_denom = 5, offset = 0, weight1 + weight2 = 64 */
 #define op_scale2(x) dst[x] = x264_clip_uint8( (src1[x]*i_weight1 + src2[x]*i_weight2 + (1<<5)) >> 6 )
@@ -113,28 +97,28 @@ static inline void pixel_avg_weight_wxh( uint8_t *dst, int i_dst, uint8_t *src1,
         op_scale2(15);
     }
 }
+#undef op_scale2
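(Annotation, not part of the patch.) op_scale2 is the implicit weighted bipred kernel: with log2_denom = 5, offset = 0 and weight1 + weight2 = 64, the equal-weight case collapses to a plain rounded average, which is what lets the merged dispatch below route weight == 32 to the cheaper pixel_avg_wxh path. A minimal sanity check of that identity:

    #include <assert.h>

    int main( void )
    {
        for( int s1 = 0; s1 < 256; s1++ )
            for( int s2 = 0; s2 < 256; s2++ )
                /* op_scale2 with w1 = w2 = 32 vs. pixel_avg rounding */
                assert( ((s1*32 + s2*32 + (1<<5)) >> 6) == ((s1 + s2 + 1) >> 1) );
        return 0;
    }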
 
-#define PIXEL_AVG_WEIGHT_C( width, height ) \
-static void pixel_avg_weight_##width##x##height( \
-                uint8_t *pix1, int i_stride_pix1, \
-                uint8_t *pix2, int i_stride_pix2, \
-                uint8_t *pix3, int i_stride_pix3, int i_weight1 ) \
+#define PIXEL_AVG_C( name, width, height ) \
+static void name( uint8_t *pix1, int i_stride_pix1, \
+                  uint8_t *pix2, int i_stride_pix2, \
+                  uint8_t *pix3, int i_stride_pix3, int weight ) \
 { \
-    pixel_avg_weight_wxh( pix1, i_stride_pix1, pix2, i_stride_pix2, pix3, i_stride_pix3, width, height, i_weight1 ); \
+    if( weight == 32 )\
+        pixel_avg_wxh( pix1, i_stride_pix1, pix2, i_stride_pix2, pix3, i_stride_pix3, width, height ); \
+    else\
+        pixel_avg_weight_wxh( pix1, i_stride_pix1, pix2, i_stride_pix2, pix3, i_stride_pix3, width, height, weight ); \
 }
-
-PIXEL_AVG_WEIGHT_C(16,16)
-PIXEL_AVG_WEIGHT_C(16,8)
-PIXEL_AVG_WEIGHT_C(8,16)
-PIXEL_AVG_WEIGHT_C(8,8)
-PIXEL_AVG_WEIGHT_C(8,4)
-PIXEL_AVG_WEIGHT_C(4,8)
-PIXEL_AVG_WEIGHT_C(4,4)
-PIXEL_AVG_WEIGHT_C(4,2)
-PIXEL_AVG_WEIGHT_C(2,4)
-PIXEL_AVG_WEIGHT_C(2,2)
-#undef op_scale2
-#undef PIXEL_AVG_WEIGHT_C
+PIXEL_AVG_C( pixel_avg_16x16, 16, 16 )
+PIXEL_AVG_C( pixel_avg_16x8,  16, 8 )
+PIXEL_AVG_C( pixel_avg_8x16,  8, 16 )
+PIXEL_AVG_C( pixel_avg_8x8,   8, 8 )
+PIXEL_AVG_C( pixel_avg_8x4,   8, 4 )
+PIXEL_AVG_C( pixel_avg_4x8,   4, 8 )
+PIXEL_AVG_C( pixel_avg_4x4,   4, 4 )
+PIXEL_AVG_C( pixel_avg_4x2,   4, 2 )
+PIXEL_AVG_C( pixel_avg_2x4,   2, 4 )
+PIXEL_AVG_C( pixel_avg_2x2,   2, 2 )
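With the separate avg_weight table gone (see the init changes further down), every caller now goes through pf->avg and passes a weight explicitly; 32 means "unweighted". A hypothetical call site, with placeholder buffer names:

    /* Equal weights: dispatches to pixel_avg_wxh internally. */
    pf->avg[PIXEL_16x16]( dst, i_dst, ref0, i_ref0, ref1, i_ref1, 32 );
    /* Implicit bipred with weight1 = 40 (weight2 = 64 - 40 = 24). */
    pf->avg[PIXEL_16x16]( dst, i_dst, ref0, i_ref0, ref1, i_ref1, 40 );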
 
 static void mc_copy( uint8_t *src, int i_src_stride, uint8_t *dst, int i_dst_stride, int i_width, int i_height )
 {
@@ -151,9 +135,8 @@ static void mc_copy( uint8_t *src, int i_src_stride, uint8_t *dst, int i_dst_str
 
 #define TAPFILTER(pix, d) ((pix)[x-2*d] + (pix)[x+3*d] - 5*((pix)[x-d] + (pix)[x+2*d]) + 20*((pix)[x] + (pix)[x+d]))
 static void hpel_filter( uint8_t *dsth, uint8_t *dstv, uint8_t *dstc, uint8_t *src,
-                         int stride, int width, int height )
+                         int stride, int width, int height, int16_t *buf )
 {
-    int16_t *buf = x264_malloc((width+5)*sizeof(int16_t));
     int x, y;
     for( y=0; y<height; y++ )
     {
@@ -172,7 +155,6 @@ static void hpel_filter( uint8_t *dsth, uint8_t *dstv, uint8_t *dstc, uint8_t *s
         dstc += stride;
         src += stride;
     }
-    x264_free(buf);
 }
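(Annotation.) TAPFILTER is the standard H.264 6-tap half-pel kernel (1, -5, 20, 20, -5, 1); the taps sum to 32, so the elided loop body renormalizes each filtered sample with a rounding shift of 5 and a clip, roughly:

    int v = TAPFILTER( src, 1 );                 /* 6-tap sum; taps total 32 */
    dsth[x] = x264_clip_uint8( (v + 16) >> 5 );  /* round to nearest, clamp:
                                                    the raw sum can go negative
                                                    or exceed 32*255 */

The signature change pairs with the call-site change further down: the row buffer now comes from h->scratch_buffer instead of a malloc/free on every call.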
 
 static const int hpel_ref0[16] = {0,1,1,1,0,1,1,1,2,3,3,3,0,1,1,1};
@@ -288,6 +270,42 @@ static void memzero_aligned( void * dst, int n )
     memset( dst, 0, n );
 }
 
+static void integral_init4h( uint16_t *sum, uint8_t *pix, int stride )
+{
+    int x, v = pix[0]+pix[1]+pix[2]+pix[3];
+    for( x=0; x<stride-4; x++ )
+    {
+        sum[x] = v + sum[x-stride];
+        v += pix[x+4] - pix[x];
+    }
+}
+
+static void integral_init8h( uint16_t *sum, uint8_t *pix, int stride )
+{
+    int x, v = pix[0]+pix[1]+pix[2]+pix[3]+pix[4]+pix[5]+pix[6]+pix[7];
+    for( x=0; x<stride-8; x++ )
+    {
+        sum[x] = v + sum[x-stride];
+        v += pix[x+8] - pix[x];
+    }
+}
+
+static void integral_init4v( uint16_t *sum8, uint16_t *sum4, int stride )
+{
+    int x;
+    for( x=0; x<stride-8; x++ )
+        sum4[x] = sum8[x+4*stride] - sum8[x];
+    for( x=0; x<stride-8; x++ )
+        sum8[x] = sum8[x+8*stride] + sum8[x+8*stride+4] - sum8[x] - sum8[x+4];
+}
+
+static void integral_init8v( uint16_t *sum8, int stride )
+{
+    int x;
+    for( x=0; x<stride-8; x++ )
+        sum8[x] = sum8[x+8*stride] - sum8[x];
+}
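(Annotation.) Read the two passes together: each *h call fills one row with column-cumulative 4- or 8-wide horizontal sums — the sum[x-stride] term chains each row onto the one above — and the *v calls then difference rows 4 or 8 apart, leaving finished box sums indexed by the block's top-left corner (modulo the one-row offset convention of the integral plane, visible in the caller below). A brute-force reference for what sum4 ends up holding:

    /* sum4[y*stride + x] should match this direct 4x4 block sum: */
    static uint16_t sum4x4_ref( const uint8_t *pix, int stride, int x, int y )
    {
        int s = 0;
        for( int j = 0; j < 4; j++ )
            for( int i = 0; i < 4; i++ )
                s += pix[(y+j)*stride + x+i];
        return (uint16_t)s;
    }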
+
 void x264_frame_init_lowres( x264_t *h, x264_frame_t *frame )
 {
     uint8_t *src = frame->plane[0];
@@ -299,7 +317,7 @@ void x264_frame_init_lowres( x264_t *h, x264_frame_t *frame )
     // duplicate last row and column so that their interpolation doesn't have to be special-cased
     for( y=0; y<i_height; y++ )
         src[i_width+y*i_stride] = src[i_width-1+y*i_stride];
-    h->mc.memcpy_aligned( src+i_stride*i_height, src+i_stride*(i_height-1), i_width );
+    memcpy( src+i_stride*i_height, src+i_stride*(i_height-1), i_width );
     h->mc.frame_init_lowres_core( src, frame->lowres[0], frame->lowres[1], frame->lowres[2], frame->lowres[3],
                                   i_stride, frame->i_stride_lowres, frame->i_width_lowres, frame->i_lines_lowres );
     x264_frame_expand_border_lowres( frame );
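(Annotation.) The duplicated row and column exist because the four lowres planes are generated at half-pel phases, so the core reads one pixel to the right of and below every source position; padding the edge once is cheaper than clamping every read with a hypothetical helper like:

    static inline uint8_t pix_clamped( const uint8_t *src, int stride,
                                       int x, int y, int w, int h )
    {
        return src[ X264_MIN(y, h-1) * stride + X264_MIN(x, w-1) ];
    }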
@@ -341,6 +359,33 @@ static void frame_init_lowres_core( uint8_t *src0, uint8_t *dst0, uint8_t *dsth,
     }
 }
 
+#if defined(__GNUC__) && (defined(ARCH_X86) || defined(ARCH_X86_64))
+// gcc isn't smart enough to use the "idiv" instruction
+static ALWAYS_INLINE int32_t div_64_32(int64_t x, int32_t y) {
+    int32_t quotient, remainder;
+    asm("idiv %4"
+        :"=a"(quotient), "=d"(remainder)
+        :"a"((uint32_t)x), "d"((int32_t)(x>>32)), "r"(y)
+    );
+    return quotient;
+}
+#else
+#define div_64_32(x,y) ((x)/(y))
+#endif
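(Annotation.) The asm splits the 64-bit dividend across EDX:EAX (the "d" and "a" inputs) and reads the quotient back from EAX. The caveat versus the plain C fallback: idiv raises a divide error if the quotient doesn't fit in 32 bits, so this helper is only safe where the caller can bound the result — which mbtree_propagate_cost below can, since |intra_costs[i] - inter_costs[i]| is at most on the order of intra_costs[i], keeping the quotient near propagate_amount:

    int32_t q = div_64_32( (int64_t)3000000000LL * 2, 4 );  /* 1500000000: fits */
    /* div_64_32( (int64_t)1 << 40, 2 ) would fault on the asm path:
       the quotient 1 << 39 overflows 32 bits. */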
+
+/* Estimate the total amount of influence on future quality that could be had if we
+ * were to improve the reference samples used to inter predict any given macroblock. */
+static void mbtree_propagate_cost( int *dst, uint16_t *propagate_in, uint16_t *intra_costs,
+                                   uint16_t *inter_costs, uint16_t *inv_qscales, int len )
+{
+    int i;
+    for( i=0; i<len; i++ )
+    {
+        int propagate_amount = propagate_in[i] + ((intra_costs[i] * inv_qscales[i] + 128)>>8);
+        dst[i] = div_64_32((int64_t)propagate_amount * (intra_costs[i] - inter_costs[i]), intra_costs[i]);
+    }
+}
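(Annotation.) In words: a block's accumulated importance is propagate_in plus its own intra cost scaled by inv_qscale (Q8 fixed point, so 256 means 1.0), and the fraction (intra - inter) / intra of that importance is credited back to the blocks it predicts from. A worked instance:

    /* propagate_in = 100, intra = 500, inter = 400, inv_qscale = 256:
     *   propagate_amount = 100 + ((500*256 + 128) >> 8) = 100 + 500 = 600
     *   dst              = 600 * (500 - 400) / 500      = 120
     * The references explain 20% of this block's cost, so 20% of its
     * importance (120 of 600) flows back to them. */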
+
 void x264_mc_init( int cpu, x264_mc_functions_t *pf )
 {
     pf->mc_luma   = mc_luma;
@@ -358,17 +403,7 @@ void x264_mc_init( int cpu, x264_mc_functions_t *pf )
     pf->avg[PIXEL_2x4]  = pixel_avg_2x4;
     pf->avg[PIXEL_2x2]  = pixel_avg_2x2;
 
-    pf->avg_weight[PIXEL_16x16]= pixel_avg_weight_16x16;
-    pf->avg_weight[PIXEL_16x8] = pixel_avg_weight_16x8;
-    pf->avg_weight[PIXEL_8x16] = pixel_avg_weight_8x16;
-    pf->avg_weight[PIXEL_8x8]  = pixel_avg_weight_8x8;
-    pf->avg_weight[PIXEL_8x4]  = pixel_avg_weight_8x4;
-    pf->avg_weight[PIXEL_4x8]  = pixel_avg_weight_4x8;
-    pf->avg_weight[PIXEL_4x4]  = pixel_avg_weight_4x4;
-    pf->avg_weight[PIXEL_4x2]  = pixel_avg_weight_4x2;
-    pf->avg_weight[PIXEL_2x4]  = pixel_avg_weight_2x4;
-    pf->avg_weight[PIXEL_2x2]  = pixel_avg_weight_2x2;
-
+    pf->copy_16x16_unaligned = mc_copy_w16;
     pf->copy[PIXEL_16x16] = mc_copy_w16;
     pf->copy[PIXEL_8x8]   = mc_copy_w8;
     pf->copy[PIXEL_4x4]   = mc_copy_w4;
@@ -382,6 +417,13 @@ void x264_mc_init( int cpu, x264_mc_functions_t *pf )
     pf->memzero_aligned = memzero_aligned;
     pf->frame_init_lowres_core = frame_init_lowres_core;
 
+    pf->integral_init4h = integral_init4h;
+    pf->integral_init8h = integral_init8h;
+    pf->integral_init4v = integral_init4v;
+    pf->integral_init8v = integral_init8v;
+
+    pf->mbtree_propagate_cost = mbtree_propagate_cost;
+
 #ifdef HAVE_MMX
     x264_mc_init_mmx( cpu, pf );
 #endif
@@ -389,6 +431,9 @@ void x264_mc_init( int cpu, x264_mc_functions_t *pf )
     if( cpu&X264_CPU_ALTIVEC )
         x264_mc_altivec_init( pf );
 #endif
+#ifdef HAVE_ARMV6
+    x264_mc_init_arm( cpu, pf );
+#endif
 }
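x264_mc_init follows x264's usual dispatch pattern: the table is filled with the C references first, and each platform init (now including the new ARM hook) overwrites only the entries it accelerates. A hypothetical caller:

    x264_mc_functions_t mc;
    x264_mc_init( x264_cpu_detect(), &mc );
    /* Call sites go through the table, so SIMD substitution is invisible: */
    mc.copy[PIXEL_16x16]( dst, i_dst, src, i_src, 16 );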
 
 void x264_frame_filter( x264_t *h, x264_frame_t *frame, int mb_y, int b_end )
@@ -399,7 +444,7 @@ void x264_frame_filter( x264_t *h, x264_frame_t *frame, int mb_y, int b_end )
     int start = (mb_y*16 >> b_interlaced) - 8; // buffer = 4 for deblock + 3 for 6tap, rounded to 8
     int height = ((b_end ? frame->i_lines[0] : mb_y*16) >> b_interlaced) + 8;
     int offs = start*stride - 8; // buffer = 3 for 6tap, aligned to 8 for simd
-    int x, y;
+    int y;
 
     if( mb_y & b_interlaced )
         return;
@@ -411,7 +456,8 @@ void x264_frame_filter( x264_t *h, x264_frame_t *frame, int mb_y, int b_end )
             frame->filtered[2] + offs,
             frame->filtered[3] + offs,
             frame->plane[0] + offs,
-            stride, width + 16, height - start );
+            stride, width + 16, height - start,
+            h->scratch_buffer );
     }
 
     /* generate integral image:
@@ -427,23 +473,25 @@ void x264_frame_filter( x264_t *h, x264_frame_t *frame, int mb_y, int b_end )
             start = -PADV;
         }
         if( b_end )
-            height += PADV-8;
+            height += PADV-9;
         for( y = start; y < height; y++ )
         {
-            uint8_t  *ref  = frame->plane[0] + y * stride - PADH;
-            uint16_t *line = frame->integral + (y+1) * stride - PADH + 1;
-            uint16_t v = line[0] = 0;
-            for( x = 1; x < stride-1; x++ )
-                line[x] = v += ref[x] + line[x-stride] - line[x-stride-1];
-            line -= 8*stride;
-            if( y >= 9-PADV )
+            uint8_t  *pix  = frame->plane[0] + y * stride - PADH;
+            uint16_t *sum8 = frame->integral + (y+1) * stride - PADH;
+            uint16_t *sum4;
+            if( h->frames.b_have_sub8x8_esa )
+            {
+                h->mc.integral_init4h( sum8, pix, stride );
+                sum8 -= 8*stride;
+                sum4 = sum8 + stride * (frame->i_lines[0] + PADV*2);
+                if( y >= 8-PADV )
+                    h->mc.integral_init4v( sum8, sum4, stride );
+            }
+            else
             {
-                uint16_t *sum4 = line + stride * (frame->i_lines[0] + PADV*2);
-                for( x = 1; x < stride-8; x++, line++, sum4++ )
-                {
-                    sum4[0] =  line[4+4*stride] - line[4] - line[4*stride] + line[0];
-                    line[0] += line[8+8*stride] - line[8] - line[8*stride];
-                }
+                h->mc.integral_init8h( sum8, pix, stride );
+                if( y >= 8-PADV )
+                    h->mc.integral_init8v( sum8-8*stride, stride );
             }
         }
     }
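(Annotation.) The memory layout this hunk assumes: frame->integral is a single allocation holding the 8x8 plane, with the 4x4 plane stacked after it when sub-8x8 exhaustive search (b_have_sub8x8_esa) is enabled:

    uint16_t *sum8 = frame->integral;                               /* 8x8 box sums */
    uint16_t *sum4 = sum8 + stride * (frame->i_lines[0] + 2*PADV);  /* 4x4 box sums */

In the sub-8x8 case integral_init4h fills cumulative 4-wide rows into the first plane, and integral_init4v then both extracts the finished 4x4 sums into the second plane and collapses the first into 8x8 sums; otherwise the 8-wide helpers fill the single plane directly.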