1 /*****************************************************************************
2 * mc.c: h264 encoder library (Motion Compensation)
3 *****************************************************************************
4 * Copyright (C) 2003 Laurent Aimar
5 * $Id: mc.c,v 1.1 2004/06/03 19:27:07 fenrir Exp $
7 * Authors: Laurent Aimar <fenrir@via.ecp.fr>
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License as published by
11 * the Free Software Foundation; either version 2 of the License, or
12 * (at your option) any later version.
14 * This program is distributed in the hope that it will be useful,
15 * but WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 * GNU General Public License for more details.
19 * You should have received a copy of the GNU General Public License
20 * along with this program; if not, write to the Free Software
21 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111, USA.
22 *****************************************************************************/
/* H.264 6-tap half-pel filter (1,-5,20,20,-5,1), unnormalized.
 * i_pix_next is the distance between successive taps:
 * 1 for horizontal filtering, the plane stride for vertical. */
static inline int x264_tapfilter( uint8_t *pix, int i_pix_next )
{
    return pix[-2*i_pix_next] - 5*pix[-1*i_pix_next] + 20*(pix[0] + pix[1*i_pix_next]) - 5*pix[ 2*i_pix_next] + pix[ 3*i_pix_next];
}
/* Horizontal-only specialization of the 6-tap filter (tap distance 1). */
static inline int x264_tapfilter1( uint8_t *pix )
{
    return pix[-2] - 5*pix[-1] + 20*(pix[0] + pix[1]) - 5*pix[ 2] + pix[ 3];
}
/* Rounded average of two prediction blocks into dst.
 * Used to build quarter-pel predictions from two half-pel planes. */
static inline void pixel_avg( uint8_t *dst,  int i_dst_stride,
                              uint8_t *src1, int i_src1_stride,
                              uint8_t *src2, int i_src2_stride,
                              int i_width, int i_height )
{
    int x, y;
    for( y = 0; y < i_height; y++ )
    {
        for( x = 0; x < i_width; x++ )
        {
            dst[x] = ( src1[x] + src2[x] + 1 ) >> 1;
        }
        dst  += i_dst_stride;
        src1 += i_src1_stride;
        src2 += i_src2_stride;
    }
}
/* In-place rounded average: dst = avg(dst, src), for B-frame bipred. */
static inline void pixel_avg_wxh( uint8_t *dst, int i_dst, uint8_t *src, int i_src, int width, int height )
{
    int x, y;
    for( y = 0; y < height; y++ )
    {
        for( x = 0; x < width; x++ )
        {
            dst[x] = ( dst[x] + src[x] + 1 ) >> 1;
        }
        dst += i_dst;
        src += i_src;
    }
}
/* Generate one fixed-size in-place averaging function per partition size. */
#define PIXEL_AVG_C( name, width, height ) \
static void name( uint8_t *pix1, int i_stride_pix1, \
                  uint8_t *pix2, int i_stride_pix2 ) \
{ \
    pixel_avg_wxh( pix1, i_stride_pix1, pix2, i_stride_pix2, width, height ); \
}
PIXEL_AVG_C( pixel_avg_16x16, 16, 16 )
PIXEL_AVG_C( pixel_avg_16x8,  16, 8 )
PIXEL_AVG_C( pixel_avg_8x16,  8, 16 )
PIXEL_AVG_C( pixel_avg_8x8,   8, 8 )
PIXEL_AVG_C( pixel_avg_8x4,   8, 4 )
PIXEL_AVG_C( pixel_avg_4x8,   4, 8 )
PIXEL_AVG_C( pixel_avg_4x4,   4, 4 )
PIXEL_AVG_C( pixel_avg_4x2,   4, 2 )
PIXEL_AVG_C( pixel_avg_2x4,   2, 4 )
PIXEL_AVG_C( pixel_avg_2x2,   2, 2 )
/* Implicit weighted bipred only:
 * assumes log2_denom = 5, offset = 0, weight1 + weight2 = 64 */
#define op_scale2(x) dst[x] = x264_clip_uint8( (dst[x]*i_weight1 + src[x]*i_weight2 + (1<<5)) >> 6 )
/* In-place weighted average: dst = clip((dst*w1 + src*w2 + 32) >> 6).
 * The row is fully unrolled; `continue` stops after the columns that
 * exist for the requested width (2, 4, 8 or 16). */
static inline void pixel_avg_weight_wxh( uint8_t *dst, int i_dst, uint8_t *src, int i_src, int width, int height, int i_weight1 ){
    int y;
    const int i_weight2 = 64 - i_weight1;
    for(y=0; y<height; y++, dst += i_dst, src += i_src){
        op_scale2(0);
        op_scale2(1);
        if(width==2) continue;
        op_scale2(2);
        op_scale2(3);
        if(width==4) continue;
        op_scale2(4);
        op_scale2(5);
        op_scale2(6);
        op_scale2(7);
        if(width==8) continue;
        op_scale2(8);
        op_scale2(9);
        op_scale2(10);
        op_scale2(11);
        op_scale2(12);
        op_scale2(13);
        op_scale2(14);
        op_scale2(15);
    }
}
#undef op_scale2
/* Generate one fixed-size weighted-average function per partition size. */
#define PIXEL_AVG_WEIGHT_C( width, height ) \
static void pixel_avg_weight_##width##x##height( \
                uint8_t *pix1, int i_stride_pix1, \
                uint8_t *pix2, int i_stride_pix2, int i_weight1 ) \
{ \
    pixel_avg_weight_wxh( pix1, i_stride_pix1, pix2, i_stride_pix2, width, height, i_weight1 ); \
}

PIXEL_AVG_WEIGHT_C(16,16)
PIXEL_AVG_WEIGHT_C(16,8)
PIXEL_AVG_WEIGHT_C(8,16)
PIXEL_AVG_WEIGHT_C(8,8)
PIXEL_AVG_WEIGHT_C(8,4)
PIXEL_AVG_WEIGHT_C(4,8)
PIXEL_AVG_WEIGHT_C(4,4)
PIXEL_AVG_WEIGHT_C(4,2)
PIXEL_AVG_WEIGHT_C(2,4)
PIXEL_AVG_WEIGHT_C(2,2)
#undef PIXEL_AVG_WEIGHT_C
typedef void (*pf_mc_t)(uint8_t *src, int i_src_stride, uint8_t *dst, int i_dst_stride, int i_width, int i_height );

/* Plain block copy between two strided buffers, one row at a time. */
static void mc_copy( uint8_t *src, int i_src_stride, uint8_t *dst, int i_dst_stride, int i_width, int i_height )
{
    int y;

    for( y = 0; y < i_height; y++ )
    {
        memcpy( dst, src, i_width );

        src += i_src_stride;
        dst += i_dst_stride;
    }
}
/* Horizontal half-pel filter: 6-tap in x, rounded and clipped to 8 bits. */
static inline void mc_hh( uint8_t *src, int i_src_stride, uint8_t *dst, int i_dst_stride, int i_width, int i_height )
{
    int x, y;

    for( y = 0; y < i_height; y++ )
    {
        for( x = 0; x < i_width; x++ )
        {
            dst[x] = x264_mc_clip1( ( x264_tapfilter1( &src[x] ) + 16 ) >> 5 );
        }
        src += i_src_stride;
        dst += i_dst_stride;
    }
}
/* Vertical half-pel filter: 6-tap in y, rounded and clipped to 8 bits. */
static inline void mc_hv( uint8_t *src, int i_src_stride, uint8_t *dst, int i_dst_stride, int i_width, int i_height )
{
    int x, y;

    for( y = 0; y < i_height; y++ )
    {
        for( x = 0; x < i_width; x++ )
        {
            dst[x] = x264_mc_clip1( ( x264_tapfilter( &src[x], i_src_stride ) + 16 ) >> 5 );
        }
        src += i_src_stride;
        dst += i_dst_stride;
    }
}
/* Center half-pel filter: 6-tap horizontally, then 6-tap vertically on the
 * intermediate values (kept at full precision, hence the >> 10 with 512
 * rounding).  The six horizontal results are cached in tap[] and shifted
 * down by one row per iteration so each row costs only one new filter. */
static inline void mc_hc( uint8_t *src, int i_src_stride, uint8_t *dst, int i_dst_stride, int i_width, int i_height )
{
    uint8_t *out;
    uint8_t *pix;
    int x, y;

    for( x = 0; x < i_width; x++ )
    {
        int tap[6];

        pix = &src[x];
        out = &dst[x];

        /* prime the pipeline with the 5 rows above the first output row */
        tap[0] = x264_tapfilter1( &pix[-2*i_src_stride] );
        tap[1] = x264_tapfilter1( &pix[-1*i_src_stride] );
        tap[2] = x264_tapfilter1( &pix[ 0*i_src_stride] );
        tap[3] = x264_tapfilter1( &pix[ 1*i_src_stride] );
        tap[4] = x264_tapfilter1( &pix[ 2*i_src_stride] );

        for( y = 0; y < i_height; y++ )
        {
            tap[5] = x264_tapfilter1( &pix[ 3*i_src_stride] );

            *out = x264_mc_clip1( ( tap[0] - 5*tap[1] + 20 * tap[2] + 20 * tap[3] -5*tap[4] + tap[5] + 512 ) >> 10 );

            /* Next line */
            pix += i_src_stride;
            out += i_dst_stride;
            tap[0] = tap[1];
            tap[1] = tap[2];
            tap[2] = tap[3];
            tap[3] = tap[4];
            tap[4] = tap[5];
        }
    }
}
/* For each quarter-pel position idx = ((mvy&3)<<2) + (mvx&3), these tables
 * select which of the four planes in src[4] to read.  ref0 is the primary
 * plane (copied directly when no averaging is needed); ref1 is the second
 * plane averaged in when qpel interpolation is required.  Plane indices
 * presumably map to 0=fullpel, 1=H, 2=V, 3=center half-pel — matching the
 * filtered[1..3] planes produced by mc_hh/mc_hv/mc_hc; confirm in frame.c. */
static const int hpel_ref0[16] = {0,1,1,1,0,1,1,1,2,3,3,3,0,1,1,1};
static const int hpel_ref1[16] = {0,0,0,0,2,2,3,2,2,2,3,2,2,2,3,2};
229 static void mc_luma( uint8_t *src[4], int i_src_stride,
230 uint8_t *dst, int i_dst_stride,
232 int i_width, int i_height )
234 int qpel_idx = ((mvy&3)<<2) + (mvx&3);
235 int offset = (mvy>>2)*i_src_stride + (mvx>>2);
236 uint8_t *src1 = src[hpel_ref0[qpel_idx]] + offset + ((mvy&3) == 3) * i_src_stride;
238 if( qpel_idx & 5 ) /* qpel interpolation needed */
240 uint8_t *src2 = src[hpel_ref1[qpel_idx]] + offset + ((mvx&3) == 3);
242 pixel_avg( dst, i_dst_stride, src1, i_src_stride,
243 src2, i_src_stride, i_width, i_height );
247 mc_copy( src1, i_src_stride, dst, i_dst_stride, i_width, i_height );
251 static uint8_t *get_ref( uint8_t *src[4], int i_src_stride,
252 uint8_t *dst, int * i_dst_stride,
254 int i_width, int i_height )
256 int qpel_idx = ((mvy&3)<<2) + (mvx&3);
257 int offset = (mvy>>2)*i_src_stride + (mvx>>2);
258 uint8_t *src1 = src[hpel_ref0[qpel_idx]] + offset + ((mvy&3) == 3) * i_src_stride;
260 if( qpel_idx & 5 ) /* qpel interpolation needed */
262 uint8_t *src2 = src[hpel_ref1[qpel_idx]] + offset + ((mvx&3) == 3);
264 pixel_avg( dst, *i_dst_stride, src1, i_src_stride,
265 src2, i_src_stride, i_width, i_height );
271 *i_dst_stride = i_src_stride;
/* full chroma mc (ie until 1/8 pixel)*/
/* Bilinear interpolation of the four neighboring fullpel samples, with
 * 1/8-pel weights cA..cD (they always sum to 64, hence the >> 6). */
static void motion_compensation_chroma( uint8_t *src, int i_src_stride,
                                        uint8_t *dst, int i_dst_stride,
                                        int mvx, int mvy,
                                        int i_width, int i_height )
{
    uint8_t *srcp;
    int x, y;

    const int d8x = mvx&0x07;
    const int d8y = mvy&0x07;

    const int cA = (8-d8x)*(8-d8y);
    const int cB = d8x    *(8-d8y);
    const int cC = (8-d8x)*d8y;
    const int cD = d8x    *d8y;

    src  += (mvy >> 3) * i_src_stride + (mvx >> 3);
    srcp  = &src[i_src_stride];

    for( y = 0; y < i_height; y++ )
    {
        for( x = 0; x < i_width; x++ )
        {
            dst[x] = ( cA*src[x]  + cB*src[x+1] +
                       cC*srcp[x] + cD*srcp[x+1] + 32 ) >> 6;
        }
        dst  += i_dst_stride;

        src   = srcp;
        srcp += i_src_stride;
    }
}
/* Generate fixed-width copy wrappers used by the pf->copy[] table.
 * Note the (dst, src) argument order, opposite to mc_copy's (src, dst). */
#define MC_COPY( W ) \
static void mc_copy_w##W( uint8_t *dst, int i_dst, uint8_t *src, int i_src, int i_height ) \
{ \
    mc_copy( src, i_src, dst, i_dst, W, i_height ); \
}
MC_COPY( 16 )
MC_COPY( 8 )
MC_COPY( 4 )
/* Copy a w x h pixel plane between buffers with independent strides. */
static void plane_copy( uint8_t *dst, int i_dst,
                        uint8_t *src, int i_src, int w, int h)
{
    while( h-- )
    {
        memcpy( dst, src, w );
        dst += i_dst;
        src += i_src;
    }
}
/* No-op stub for the encode-frame prefetch hook (used when no
 * architecture-specific prefetch implementation is selected). */
void prefetch_fenc_null( uint8_t *pix_y, int stride_y,
                         uint8_t *pix_uv, int stride_uv, int mb_x )
{}
/* No-op stub for the reference-frame prefetch hook. */
void prefetch_ref_null( uint8_t *pix, int stride, int parity )
{}
337 void x264_mc_init( int cpu, x264_mc_functions_t *pf )
339 pf->mc_luma = mc_luma;
340 pf->get_ref = get_ref;
341 pf->mc_chroma = motion_compensation_chroma;
343 pf->avg[PIXEL_16x16]= pixel_avg_16x16;
344 pf->avg[PIXEL_16x8] = pixel_avg_16x8;
345 pf->avg[PIXEL_8x16] = pixel_avg_8x16;
346 pf->avg[PIXEL_8x8] = pixel_avg_8x8;
347 pf->avg[PIXEL_8x4] = pixel_avg_8x4;
348 pf->avg[PIXEL_4x8] = pixel_avg_4x8;
349 pf->avg[PIXEL_4x4] = pixel_avg_4x4;
350 pf->avg[PIXEL_4x2] = pixel_avg_4x2;
351 pf->avg[PIXEL_2x4] = pixel_avg_2x4;
352 pf->avg[PIXEL_2x2] = pixel_avg_2x2;
354 pf->avg_weight[PIXEL_16x16]= pixel_avg_weight_16x16;
355 pf->avg_weight[PIXEL_16x8] = pixel_avg_weight_16x8;
356 pf->avg_weight[PIXEL_8x16] = pixel_avg_weight_8x16;
357 pf->avg_weight[PIXEL_8x8] = pixel_avg_weight_8x8;
358 pf->avg_weight[PIXEL_8x4] = pixel_avg_weight_8x4;
359 pf->avg_weight[PIXEL_4x8] = pixel_avg_weight_4x8;
360 pf->avg_weight[PIXEL_4x4] = pixel_avg_weight_4x4;
361 pf->avg_weight[PIXEL_4x2] = pixel_avg_weight_4x2;
362 pf->avg_weight[PIXEL_2x4] = pixel_avg_weight_2x4;
363 pf->avg_weight[PIXEL_2x2] = pixel_avg_weight_2x2;
365 pf->copy[PIXEL_16x16] = mc_copy_w16;
366 pf->copy[PIXEL_8x8] = mc_copy_w8;
367 pf->copy[PIXEL_4x4] = mc_copy_w4;
369 pf->plane_copy = plane_copy;
371 pf->prefetch_fenc = prefetch_fenc_null;
372 pf->prefetch_ref = prefetch_ref_null;
375 if( cpu&X264_CPU_MMXEXT ) {
376 x264_mc_mmxext_init( pf );
377 pf->mc_chroma = x264_mc_chroma_mmxext;
381 if( cpu&X264_CPU_SSE2 )
382 x264_mc_sse2_init( pf );
385 if( cpu&X264_CPU_ALTIVEC )
386 x264_mc_altivec_init( pf );
390 extern void x264_hpel_filter_mmxext( uint8_t *dsth, uint8_t *dstv, uint8_t *dstc, uint8_t *src,
391 int i_stride, int i_width, int i_height );
393 void x264_frame_filter( int cpu, x264_frame_t *frame, int b_interlaced, int mb_y, int b_end )
395 const int x_inc = 16, y_inc = 16;
396 const int stride = frame->i_stride[0] << b_interlaced;
397 const int start = (mb_y*16 >> b_interlaced) - 8;
398 const int height = ((b_end ? frame->i_lines[0] : mb_y*16) >> b_interlaced) + 8;
401 if( mb_y & b_interlaced )
403 mb_y >>= b_interlaced;
406 if ( cpu & X264_CPU_MMXEXT )
408 // buffer = 4 for deblock + 3 for 6tap, rounded to 8
409 int offs = start*stride - 8;
410 x264_hpel_filter_mmxext(
411 frame->filtered[1] + offs,
412 frame->filtered[2] + offs,
413 frame->filtered[3] + offs,
414 frame->plane[0] + offs,
415 stride, stride - 48, height - start );
420 for( y = start; y < height; y += y_inc )
422 uint8_t *p_in = frame->plane[0] + y * stride - 8;
423 uint8_t *p_h = frame->filtered[1] + y * stride - 8;
424 uint8_t *p_v = frame->filtered[2] + y * stride - 8;
425 uint8_t *p_c = frame->filtered[3] + y * stride - 8;
426 for( x = -8; x < stride - 64 + 8; x += x_inc )
428 mc_hh( p_in, stride, p_h, stride, x_inc, y_inc );
429 mc_hv( p_in, stride, p_v, stride, x_inc, y_inc );
430 mc_hc( p_in, stride, p_c, stride, x_inc, y_inc );
440 /* generate integral image:
441 * frame->integral contains 2 planes. in the upper plane, each element is
442 * the sum of an 8x8 pixel region with top-left corner on that point.
443 * in the lower plane, 4x4 sums (needed only with --analyse p4x4). */
445 if( frame->integral && b_end )
448 memset( frame->integral - 32 * stride - 32, 0, stride * sizeof(uint16_t) );
449 for( y = -32; y < frame->i_lines[0] + 31; y++ )
451 uint8_t *ref = frame->plane[0] + y * stride - 32;
452 uint16_t *line = frame->integral + (y+1) * stride - 31;
453 uint16_t v = line[0] = 0;
454 for( x = 0; x < stride-1; x++ )
455 line[x] = v += ref[x] + line[x-stride] - line[x-stride-1];
457 for( y = -31; y < frame->i_lines[0] + 24; y++ )
459 uint16_t *line = frame->integral + y * stride - 31;
460 uint16_t *sum4 = line + frame->i_stride[0] * (frame->i_lines[0] + 64);
461 for( x = -31; x < stride - 40; x++, line++, sum4++ )
463 sum4[0] = line[4+4*stride] - line[4] - line[4*stride] + line[0];
464 line[0] += line[8+8*stride] - line[8] - line[8*stride];
470 void x264_frame_init_lowres( int cpu, x264_frame_t *frame )
473 const int i_stride = frame->i_stride[0];
474 const int i_stride2 = frame->i_stride_lowres;
475 const int i_width2 = i_stride2 - 64;
477 for( y = 0; y < frame->i_lines_lowres - 1; y++ )
479 uint8_t *src0 = &frame->plane[0][2*y*i_stride];
480 uint8_t *src1 = src0+i_stride;
481 uint8_t *src2 = src1+i_stride;
482 uint8_t *dst0 = &frame->lowres[0][y*i_stride2];
483 uint8_t *dsth = &frame->lowres[1][y*i_stride2];
484 uint8_t *dstv = &frame->lowres[2][y*i_stride2];
485 uint8_t *dstc = &frame->lowres[3][y*i_stride2];
486 for( x = 0; x < i_width2 - 1; x++ )
488 dst0[x] = (src0[2*x ] + src0[2*x+1] + src1[2*x ] + src1[2*x+1] + 2) >> 2;
489 dsth[x] = (src0[2*x+1] + src0[2*x+2] + src1[2*x+1] + src1[2*x+2] + 2) >> 2;
490 dstv[x] = (src1[2*x ] + src1[2*x+1] + src2[2*x ] + src2[2*x+1] + 2) >> 2;
491 dstc[x] = (src1[2*x+1] + src1[2*x+2] + src2[2*x+1] + src2[2*x+2] + 2) >> 2;
493 dst0[x] = (src0[2*x ] + src0[2*x+1] + src1[2*x ] + src1[2*x+1] + 2) >> 2;
494 dstv[x] = (src1[2*x ] + src1[2*x+1] + src2[2*x ] + src2[2*x+1] + 2) >> 2;
495 dsth[x] = (src0[2*x+1] + src1[2*x+1] + 1) >> 1;
496 dstc[x] = (src1[2*x+1] + src2[2*x+1] + 1) >> 1;
498 for( i = 0; i < 4; i++ )
499 memcpy( &frame->lowres[i][y*i_stride2], &frame->lowres[i][(y-1)*i_stride2], i_width2 );
501 for( y = 0; y < 16; y++ )
502 for( x = 0; x < 16; x++ )
503 frame->i_cost_est[x][y] = -1;
505 x264_frame_expand_border_lowres( frame );