1 /*****************************************************************************
2 * mc.c: h264 encoder library (Motion Compensation)
3 *****************************************************************************
4 * Copyright (C) 2003 Laurent Aimar
5 * $Id: mc.c,v 1.1 2004/06/03 19:27:07 fenrir Exp $
7 * Authors: Laurent Aimar <fenrir@via.ecp.fr>
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License as published by
11 * the Free Software Foundation; either version 2 of the License, or
12 * (at your option) any later version.
14 * This program is distributed in the hope that it will be useful,
15 * but WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 * GNU General Public License for more details.
19 * You should have received a copy of the GNU General Public License
20 * along with this program; if not, write to the Free Software
21 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111, USA.
22 *****************************************************************************/
/* 6-tap Wiener filter (1,-5,20,20,-5,1) along the direction selected by
 * i_pix_next (1 = horizontal, stride = vertical). Result is unscaled. */
static inline int x264_tapfilter( uint8_t *pix, int i_pix_next )
{
    const int a = pix[-2*i_pix_next];
    const int b = pix[-1*i_pix_next];
    const int c = pix[ 0];
    const int d = pix[ 1*i_pix_next];
    const int e = pix[ 2*i_pix_next];
    const int f = pix[ 3*i_pix_next];
    return a - 5*b + 20*(c + d) - 5*e + f;
}
/* Horizontal specialization of the 6-tap filter (pixel step fixed to 1). */
static inline int x264_tapfilter1( uint8_t *pix )
{
    const int a = pix[-2], b = pix[-1], c = pix[0];
    const int d = pix[ 1], e = pix[ 2], f = pix[3];
    return a - 5*b + 20*(c + d) - 5*e + f;
}
/* Rounded average of two i_width x i_height blocks:
 * dst[x] = (src1[x] + src2[x] + 1) >> 1 for every pixel. */
static inline void pixel_avg( uint8_t *dst, int i_dst_stride,
                              uint8_t *src1, int i_src1_stride,
                              uint8_t *src2, int i_src2_stride,
                              int i_width, int i_height )
{
    int row, col;
    for( row = 0; row < i_height; row++,
         dst += i_dst_stride, src1 += i_src1_stride, src2 += i_src2_stride )
        for( col = 0; col < i_width; col++ )
            dst[col] = ( src1[col] + src2[col] + 1 ) >> 1;
}
/* In-place rounded average over a width x height block:
 * dst[x] = (dst[x] + src[x] + 1) >> 1. */
static inline void pixel_avg_wxh( uint8_t *dst, int i_dst, uint8_t *src, int i_src, int width, int height )
{
    int row, col;
    for( row = 0; row < height; row++, dst += i_dst, src += i_src )
        for( col = 0; col < width; col++ )
            dst[col] = ( dst[col] + src[col] + 1 ) >> 1;
}
/* Stamp out the fixed-size bi-prediction averaging functions used in the
 * pf->avg[] table; each simply forwards to pixel_avg_wxh. */
#define PIXEL_AVG_C( name, width, height ) \
static void name( uint8_t *pix1, int i_stride_pix1, \
                  uint8_t *pix2, int i_stride_pix2 ) \
{ \
    pixel_avg_wxh( pix1, i_stride_pix1, pix2, i_stride_pix2, width, height ); \
}
PIXEL_AVG_C( pixel_avg_16x16, 16, 16 )
PIXEL_AVG_C( pixel_avg_16x8,  16, 8 )
PIXEL_AVG_C( pixel_avg_8x16,  8, 16 )
PIXEL_AVG_C( pixel_avg_8x8,   8, 8 )
PIXEL_AVG_C( pixel_avg_8x4,   8, 4 )
PIXEL_AVG_C( pixel_avg_4x8,   4, 8 )
PIXEL_AVG_C( pixel_avg_4x4,   4, 4 )
PIXEL_AVG_C( pixel_avg_4x2,   4, 2 )
PIXEL_AVG_C( pixel_avg_2x4,   2, 4 )
PIXEL_AVG_C( pixel_avg_2x2,   2, 2 )
/* Implicit weighted bipred only:
 * assumes log2_denom = 5, offset = 0, weight1 + weight2 = 64 */
static inline void pixel_avg_weight_wxh( uint8_t *dst, int i_dst, uint8_t *src, int i_src, int width, int height, int i_weight1 )
{
    const int i_weight2 = 64 - i_weight1;
    int x, y;
    /* dst = clip( (dst*w1 + src*w2 + 32) >> 6 ) per pixel */
    for( y = 0; y < height; y++, dst += i_dst, src += i_src )
        for( x = 0; x < width; x++ )
            dst[x] = x264_clip_uint8( ( dst[x]*i_weight1 + src[x]*i_weight2 + (1<<5) ) >> 6 );
}
/* Stamp out the fixed-size implicit-weighted averaging functions used in the
 * pf->avg_weight[] table; each forwards to pixel_avg_weight_wxh. */
#define PIXEL_AVG_WEIGHT_C( width, height ) \
static void pixel_avg_weight_##width##x##height( \
                uint8_t *pix1, int i_stride_pix1, \
                uint8_t *pix2, int i_stride_pix2, int i_weight1 ) \
{ \
    pixel_avg_weight_wxh( pix1, i_stride_pix1, pix2, i_stride_pix2, width, height, i_weight1 ); \
}
PIXEL_AVG_WEIGHT_C(16,16)
PIXEL_AVG_WEIGHT_C(16,8)
PIXEL_AVG_WEIGHT_C(8,16)
PIXEL_AVG_WEIGHT_C(8,8)
PIXEL_AVG_WEIGHT_C(8,4)
PIXEL_AVG_WEIGHT_C(4,8)
PIXEL_AVG_WEIGHT_C(4,4)
PIXEL_AVG_WEIGHT_C(4,2)
PIXEL_AVG_WEIGHT_C(2,4)
PIXEL_AVG_WEIGHT_C(2,2)
#undef PIXEL_AVG_WEIGHT_C
147 typedef void (*pf_mc_t)(uint8_t *src, int i_src_stride, uint8_t *dst, int i_dst_stride, int i_width, int i_height );
/* Plain block copy: i_width bytes per row, i_height rows. */
static void mc_copy( uint8_t *src, int i_src_stride, uint8_t *dst, int i_dst_stride, int i_width, int i_height )
{
    while( i_height-- > 0 )
    {
        memcpy( dst, src, i_width );
        src += i_src_stride;
        dst += i_dst_stride;
    }
}
/* Horizontal half-pel filter: 6-tap across each row, rounded (+16)>>5
 * and clipped to [0,255]. */
static inline void mc_hh( uint8_t *src, int i_src_stride, uint8_t *dst, int i_dst_stride, int i_width, int i_height )
{
    int row, col;
    for( row = 0; row < i_height; row++, src += i_src_stride, dst += i_dst_stride )
        for( col = 0; col < i_width; col++ )
            dst[col] = x264_mc_clip1( ( x264_tapfilter1( &src[col] ) + 16 ) >> 5 );
}
/* Vertical half-pel filter: 6-tap down each column, rounded (+16)>>5
 * and clipped to [0,255]. */
static inline void mc_hv( uint8_t *src, int i_src_stride, uint8_t *dst, int i_dst_stride, int i_width, int i_height )
{
    int row, col;
    for( row = 0; row < i_height; row++, src += i_src_stride, dst += i_dst_stride )
        for( col = 0; col < i_width; col++ )
            dst[col] = x264_mc_clip1( ( x264_tapfilter( &src[col], i_src_stride ) + 16 ) >> 5 );
}
/* Centre half-pel filter: 6-tap horizontally, then 6-tap vertically over the
 * intermediate (unscaled) values, rounded (+512)>>10 and clipped. */
static inline void mc_hc( uint8_t *src, int i_src_stride, uint8_t *dst, int i_dst_stride, int i_width, int i_height )
{
    int x, y, i;

    /* column-major: for each column, seed the first five horizontal results,
     * then slide the 6-tap vertical window down one row at a time. */
    for( x = 0; x < i_width; x++ )
    {
        uint8_t *pix = &src[x];
        uint8_t *out = &dst[x];
        int tap[6];

        for( i = 0; i < 5; i++ )
            tap[i] = x264_tapfilter1( &pix[(i-2)*i_src_stride] );

        for( y = 0; y < i_height; y++ )
        {
            tap[5] = x264_tapfilter1( &pix[ 3*i_src_stride] );
            *out = x264_mc_clip1( ( tap[0] - 5*tap[1] + 20*tap[2] + 20*tap[3] - 5*tap[4] + tap[5] + 512 ) >> 10 );
            /* advance one row: shift the window down */
            for( i = 0; i < 5; i++ )
                tap[i] = tap[i+1];
            pix += i_src_stride;
            out += i_dst_stride;
        }
    }
}
/* Which pair of half-pel planes to average for each quarter-pel position.
 * Index is ((mvy&3)<<2)+(mvx&3); values select src[0..3] — presumably
 * full-pel, H half-pel, V half-pel, centre half-pel planes (see mc_luma);
 * TODO confirm plane order against x264_frame_filter's filtered[] layout. */
static const int hpel_ref0[16] = {0,1,1,1,0,1,1,1,2,3,3,3,0,1,1,1};
static const int hpel_ref1[16] = {0,0,0,0,2,2,3,2,2,2,3,2,2,2,3,2};
229 static void mc_luma( uint8_t *src[4], int i_src_stride,
230 uint8_t *dst, int i_dst_stride,
232 int i_width, int i_height )
234 int qpel_idx = ((mvy&3)<<2) + (mvx&3);
235 int offset = (mvy>>2)*i_src_stride + (mvx>>2);
236 uint8_t *src1 = src[hpel_ref0[qpel_idx]] + offset + ((mvy&3) == 3) * i_src_stride;
238 if( qpel_idx & 5 ) /* qpel interpolation needed */
240 uint8_t *src2 = src[hpel_ref1[qpel_idx]] + offset + ((mvx&3) == 3);
241 pixel_avg( dst, i_dst_stride, src1, i_src_stride,
242 src2, i_src_stride, i_width, i_height );
246 mc_copy( src1, i_src_stride, dst, i_dst_stride, i_width, i_height );
250 static uint8_t *get_ref( uint8_t *src[4], int i_src_stride,
251 uint8_t *dst, int *i_dst_stride,
253 int i_width, int i_height )
255 int qpel_idx = ((mvy&3)<<2) + (mvx&3);
256 int offset = (mvy>>2)*i_src_stride + (mvx>>2);
257 uint8_t *src1 = src[hpel_ref0[qpel_idx]] + offset + ((mvy&3) == 3) * i_src_stride;
259 if( qpel_idx & 5 ) /* qpel interpolation needed */
261 uint8_t *src2 = src[hpel_ref1[qpel_idx]] + offset + ((mvx&3) == 3);
262 pixel_avg( dst, *i_dst_stride, src1, i_src_stride,
263 src2, i_src_stride, i_width, i_height );
268 *i_dst_stride = i_src_stride;
/* full chroma mc (ie until 1/8 pixel): bilinear interpolation of the four
 * neighbouring full-pel samples, with weights derived from mv&7. */
static void motion_compensation_chroma( uint8_t *src, int i_src_stride,
                                        uint8_t *dst, int i_dst_stride,
                                        int mvx, int mvy,
                                        int i_width, int i_height )
{
    const int d8x = mvx&0x07;
    const int d8y = mvy&0x07;

    /* bilinear weights: cA+cB+cC+cD == 64 */
    const int cA = (8-d8x)*(8-d8y);
    const int cB = d8x    *(8-d8y);
    const int cC = (8-d8x)*d8y;
    const int cD = d8x    *d8y;

    uint8_t *srcp;
    int x, y;

    src += (mvy >> 3) * i_src_stride + (mvx >> 3);
    srcp = src + i_src_stride;

    for( y = 0; y < i_height; y++ )
    {
        for( x = 0; x < i_width; x++ )
            dst[x] = ( cA*src[x]  + cB*src[x+1] +
                       cC*srcp[x] + cD*srcp[x+1] + 32 ) >> 6;
        dst += i_dst_stride;
        /* the next row's top line is this row's bottom line */
        src  = srcp;
        srcp += i_src_stride;
    }
}
/* Fixed-width copy wrappers for the pf->copy[] table.
 * NOTE: argument order is (dst, src) — the reverse of mc_copy.
 * Defect fixed: the macro header, closing brace and the MC_COPY(16/8/4)
 * instantiations were missing, leaving mc_copy_w16/w8/w4 (referenced by
 * x264_mc_init) undefined. */
#define MC_COPY( W ) \
static void mc_copy_w##W( uint8_t *dst, int i_dst, uint8_t *src, int i_src, int i_height ) \
{ \
    mc_copy( src, i_src, dst, i_dst, W, i_height ); \
}
MC_COPY( 16 )
MC_COPY( 8 )
MC_COPY( 4 )
/* Copy a w x h pixel plane row by row (source and dest strides may differ). */
static void plane_copy( uint8_t *dst, int i_dst,
                        uint8_t *src, int i_src, int w, int h)
{
    while( h-- > 0 )
    {
        memcpy( dst, src, w );
        dst += i_dst;
        src += i_src;
    }
}
/* No-op prefetch stub for the frame being encoded (C fallback; asm
 * implementations may replace it in x264_mc_init). */
void prefetch_fenc_null( uint8_t *pix_y, int stride_y,
                         uint8_t *pix_uv, int stride_uv, int mb_x )
{
}
331 void prefetch_ref_null( uint8_t *pix, int stride, int parity )
/* Fill the motion-compensation function table with the C reference
 * implementations, then let CPU-specific init routines override individual
 * entries with optimized versions. */
void x264_mc_init( int cpu, x264_mc_functions_t *pf )
    /* NOTE(review): the opening brace, the closing brace of the MMXEXT block,
     * and any HAVE_* preprocessor guards around the CPU-specific sections are
     * not visible in this listing — confirm against the upstream file. */
    pf->mc_luma = mc_luma;
    pf->get_ref = get_ref;
    pf->mc_chroma = motion_compensation_chroma;
    /* bi-prediction averages, indexed by partition size */
    pf->avg[PIXEL_16x16]= pixel_avg_16x16;
    pf->avg[PIXEL_16x8] = pixel_avg_16x8;
    pf->avg[PIXEL_8x16] = pixel_avg_8x16;
    pf->avg[PIXEL_8x8] = pixel_avg_8x8;
    pf->avg[PIXEL_8x4] = pixel_avg_8x4;
    pf->avg[PIXEL_4x8] = pixel_avg_4x8;
    pf->avg[PIXEL_4x4] = pixel_avg_4x4;
    pf->avg[PIXEL_4x2] = pixel_avg_4x2;
    pf->avg[PIXEL_2x4] = pixel_avg_2x4;
    pf->avg[PIXEL_2x2] = pixel_avg_2x2;
    /* implicit weighted bi-prediction (weight1 + weight2 == 64) */
    pf->avg_weight[PIXEL_16x16]= pixel_avg_weight_16x16;
    pf->avg_weight[PIXEL_16x8] = pixel_avg_weight_16x8;
    pf->avg_weight[PIXEL_8x16] = pixel_avg_weight_8x16;
    pf->avg_weight[PIXEL_8x8] = pixel_avg_weight_8x8;
    pf->avg_weight[PIXEL_8x4] = pixel_avg_weight_8x4;
    pf->avg_weight[PIXEL_4x8] = pixel_avg_weight_4x8;
    pf->avg_weight[PIXEL_4x4] = pixel_avg_weight_4x4;
    pf->avg_weight[PIXEL_4x2] = pixel_avg_weight_4x2;
    pf->avg_weight[PIXEL_2x4] = pixel_avg_weight_2x4;
    pf->avg_weight[PIXEL_2x2] = pixel_avg_weight_2x2;
    /* block copies for the partition sizes the encoder copies directly */
    pf->copy[PIXEL_16x16] = mc_copy_w16;
    pf->copy[PIXEL_8x8] = mc_copy_w8;
    pf->copy[PIXEL_4x4] = mc_copy_w4;
    pf->plane_copy = plane_copy;
    /* prefetches default to no-ops; asm versions may replace them */
    pf->prefetch_fenc = prefetch_fenc_null;
    pf->prefetch_ref = prefetch_ref_null;
    /* CPU-specific overrides */
    if( cpu&X264_CPU_MMXEXT ) {
        x264_mc_mmxext_init( pf );
        pf->mc_chroma = x264_mc_chroma_mmxext;
    if( cpu&X264_CPU_SSE2 )
        x264_mc_sse2_init( pf );
    if( cpu&X264_CPU_ALTIVEC )
        x264_mc_altivec_init( pf );
/* MMXEXT half-pel filter: fills the H, V and centre filtered planes from src
 * in one pass — presumably defined in the x86 asm sources; TODO confirm the
 * prototype matches the asm implementation. */
extern void x264_hpel_filter_mmxext( uint8_t *dsth, uint8_t *dstv, uint8_t *dstc, uint8_t *src,
                                     int i_stride, int i_width, int i_height );
/* Generate the half-pel filtered planes (filtered[1..3] = H, V, centre) for
 * the rows of `frame` covered by macroblock row mb_y plus an 8-pixel border,
 * and — once the whole frame is done (b_end) — build the integral image used
 * by motion analysis. */
void x264_frame_filter( int cpu, x264_frame_t *frame, int b_interlaced, int mb_y, int b_end )
    const int x_inc = 16, y_inc = 16;
    /* interlaced mode filters half-height fields at doubled stride */
    const int stride = frame->i_stride[0] << b_interlaced;
    const int start = (mb_y*16 >> b_interlaced) - 8;
    const int height = ((b_end ? frame->i_lines[0] : mb_y*16) >> b_interlaced) + 8;
    /* NOTE(review): braces, local declarations (x, y), the else branch between
     * the MMXEXT and C paths, and the per-tile pointer increments are not
     * visible in this listing — confirm against the upstream file. */
    if( mb_y & b_interlaced )
    mb_y >>= b_interlaced;
    /* fast path: one asm call filters the whole row span */
    if ( cpu & X264_CPU_MMXEXT )
    // buffer = 4 for deblock + 3 for 6tap, rounded to 8
    int offs = start*stride - 8;
    x264_hpel_filter_mmxext(
        frame->filtered[1] + offs,
        frame->filtered[2] + offs,
        frame->filtered[3] + offs,
        frame->plane[0] + offs,
        stride, stride - 48, height - start );
    /* C fallback: filter 16x16 tiles with the scalar 6-tap routines */
    for( y = start; y < height; y += y_inc )
        uint8_t *p_in = frame->plane[0] + y * stride - 8;
        uint8_t *p_h = frame->filtered[1] + y * stride - 8;
        uint8_t *p_v = frame->filtered[2] + y * stride - 8;
        uint8_t *p_c = frame->filtered[3] + y * stride - 8;
        for( x = -8; x < stride - 64 + 8; x += x_inc )
            mc_hh( p_in, stride, p_h, stride, x_inc, y_inc );
            mc_hv( p_in, stride, p_v, stride, x_inc, y_inc );
            mc_hc( p_in, stride, p_c, stride, x_inc, y_inc );
    /* generate integral image:
     * frame->integral contains 2 planes. in the upper plane, each element is
     * the sum of an 8x8 pixel region with top-left corner on that point.
     * in the lower plane, 4x4 sums (needed only with --analyse p4x4). */
    if( frame->integral && b_end )
        /* zero the top border row of the prefix-sum plane */
        memset( frame->integral - 32 * stride - 32, 0, stride * sizeof(uint16_t) );
        for( y = -32; y < frame->i_lines[0] + 31; y++ )
            uint8_t *ref = frame->plane[0] + y * stride - 32;
            uint16_t *line = frame->integral + (y+1) * stride - 31;
            uint16_t v = line[0] = 0;
            /* running row sum + previous row => 2-D prefix sum (mod 2^16) */
            for( x = 0; x < stride-1; x++ )
                line[x] = v += ref[x] + line[x-stride] - line[x-stride-1];
        /* convert prefix sums into 8x8 (upper plane) and 4x4 (lower plane) box sums */
        for( y = -31; y < frame->i_lines[0] + 24; y++ )
            uint16_t *line = frame->integral + y * stride - 31;
            uint16_t *sum4 = line + frame->i_stride[0] * (frame->i_lines[0] + 64);
            for( x = -31; x < stride - 40; x++, line++, sum4++ )
                sum4[0] = line[4+4*stride] - line[4] - line[4*stride] + line[0];
                line[0] += line[8+8*stride] - line[8] - line[8*stride];
/* Build the four half-resolution planes (lowres[0..3]: full-pel, H, V and
 * centre half-pel phases) used by lookahead motion estimation, invalidate the
 * cached cost estimates, and pad the lowres borders. */
void x264_frame_init_lowres( int cpu, x264_frame_t *frame )
    const int i_stride = frame->i_stride[0];
    const int i_stride2 = frame->i_stride_lowres;
    const int i_width2 = i_stride2 - 64;
    /* NOTE(review): braces and the declarations of x, y, i are not visible in
     * this listing — confirm against the upstream file. */
    for( y = 0; y < frame->i_lines_lowres - 1; y++ )
        /* three consecutive full-res rows feed one row of each lowres plane */
        uint8_t *src0 = &frame->plane[0][2*y*i_stride];
        uint8_t *src1 = src0+i_stride;
        uint8_t *src2 = src1+i_stride;
        uint8_t *dst0 = &frame->lowres[0][y*i_stride2];
        uint8_t *dsth = &frame->lowres[1][y*i_stride2];
        uint8_t *dstv = &frame->lowres[2][y*i_stride2];
        uint8_t *dstc = &frame->lowres[3][y*i_stride2];
        for( x = 0; x < i_width2 - 1; x++ )
            /* 2x2 box filters at the four half-pel phases */
            dst0[x] = (src0[2*x ] + src0[2*x+1] + src1[2*x ] + src1[2*x+1] + 2) >> 2;
            dsth[x] = (src0[2*x+1] + src0[2*x+2] + src1[2*x+1] + src1[2*x+2] + 2) >> 2;
            dstv[x] = (src1[2*x ] + src1[2*x+1] + src2[2*x ] + src2[2*x+1] + 2) >> 2;
            dstc[x] = (src1[2*x+1] + src1[2*x+2] + src2[2*x+1] + src2[2*x+2] + 2) >> 2;
        /* last column: no 2*x+2 sample available, use a 2-tap average */
        dst0[x] = (src0[2*x ] + src0[2*x+1] + src1[2*x ] + src1[2*x+1] + 2) >> 2;
        dstv[x] = (src1[2*x ] + src1[2*x+1] + src2[2*x ] + src2[2*x+1] + 2) >> 2;
        dsth[x] = (src0[2*x+1] + src1[2*x+1] + 1) >> 1;
        dstc[x] = (src1[2*x+1] + src2[2*x+1] + 1) >> 1;
    /* presumably duplicates the previous row into the final lowres row
     * (y left at i_lines_lowres-1 by the loop) — TODO confirm loop scope */
    for( i = 0; i < 4; i++ )
        memcpy( &frame->lowres[i][y*i_stride2], &frame->lowres[i][(y-1)*i_stride2], i_width2 );
    /* invalidate cached lookahead cost estimates */
    for( y = 0; y < 16; y++ )
        for( x = 0; x < 16; x++ )
            frame->i_cost_est[x][y] = -1;
    x264_frame_expand_border_lowres( frame );