1 /*****************************************************************************
2 * mc.c: h264 encoder library (Motion Compensation)
3 *****************************************************************************
4 * Copyright (C) 2003 Laurent Aimar
5 * $Id: mc.c,v 1.1 2004/06/03 19:27:07 fenrir Exp $
7 * Authors: Laurent Aimar <fenrir@via.ecp.fr>
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License as published by
11 * the Free Software Foundation; either version 2 of the License, or
12 * (at your option) any later version.
14 * This program is distributed in the hope that it will be useful,
15 * but WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 * GNU General Public License for more details.
19 * You should have received a copy of the GNU General Public License
20 * along with this program; if not, write to the Free Software
21 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111, USA.
22 *****************************************************************************/
35 static inline int x264_tapfilter( uint8_t *pix, int i_pix_next )
37 return pix[-2*i_pix_next] - 5*pix[-1*i_pix_next] + 20*(pix[0] + pix[1*i_pix_next]) - 5*pix[ 2*i_pix_next] + pix[ 3*i_pix_next];
39 static inline int x264_tapfilter1( uint8_t *pix )
41 return pix[-2] - 5*pix[-1] + 20*(pix[0] + pix[1]) - 5*pix[ 2] + pix[ 3];
44 static inline void pixel_avg( uint8_t *dst, int i_dst_stride,
45 uint8_t *src1, int i_src1_stride,
46 uint8_t *src2, int i_src2_stride,
47 int i_width, int i_height )
50 for( y = 0; y < i_height; y++ )
52 for( x = 0; x < i_width; x++ )
54 dst[x] = ( src1[x] + src2[x] + 1 ) >> 1;
57 src1 += i_src1_stride;
58 src2 += i_src2_stride;
62 static inline void pixel_avg_wxh( uint8_t *dst, int i_dst, uint8_t *src, int i_src, int width, int height )
65 for( y = 0; y < height; y++ )
67 for( x = 0; x < width; x++ )
69 dst[x] = ( dst[x] + src[x] + 1 ) >> 1;
76 #define PIXEL_AVG_C( name, width, height ) \
77 static void name( uint8_t *pix1, int i_stride_pix1, \
78 uint8_t *pix2, int i_stride_pix2 ) \
80 pixel_avg_wxh( pix1, i_stride_pix1, pix2, i_stride_pix2, width, height ); \
82 PIXEL_AVG_C( pixel_avg_16x16, 16, 16 )
83 PIXEL_AVG_C( pixel_avg_16x8, 16, 8 )
84 PIXEL_AVG_C( pixel_avg_8x16, 8, 16 )
85 PIXEL_AVG_C( pixel_avg_8x8, 8, 8 )
86 PIXEL_AVG_C( pixel_avg_8x4, 8, 4 )
87 PIXEL_AVG_C( pixel_avg_4x8, 4, 8 )
88 PIXEL_AVG_C( pixel_avg_4x4, 4, 4 )
89 PIXEL_AVG_C( pixel_avg_4x2, 4, 2 )
90 PIXEL_AVG_C( pixel_avg_2x4, 2, 4 )
91 PIXEL_AVG_C( pixel_avg_2x2, 2, 2 )
94 /* Implicit weighted bipred only:
95 * assumes log2_denom = 5, offset = 0, weight1 + weight2 = 64 */
96 #define op_scale2(x) dst[x] = x264_clip_uint8( (dst[x]*i_weight1 + src[x]*i_weight2 + (1<<5)) >> 6 )
97 static inline void pixel_avg_weight_wxh( uint8_t *dst, int i_dst, uint8_t *src, int i_src, int width, int height, int i_weight1 ){
99 const int i_weight2 = 64 - i_weight1;
100 for(y=0; y<height; y++, dst += i_dst, src += i_src){
103 if(width==2) continue;
106 if(width==4) continue;
111 if(width==8) continue;
123 #define PIXEL_AVG_WEIGHT_C( width, height ) \
124 static void pixel_avg_weight_##width##x##height( \
125 uint8_t *pix1, int i_stride_pix1, \
126 uint8_t *pix2, int i_stride_pix2, int i_weight1 ) \
128 pixel_avg_weight_wxh( pix1, i_stride_pix1, pix2, i_stride_pix2, width, height, i_weight1 ); \
131 PIXEL_AVG_WEIGHT_C(16,16)
132 PIXEL_AVG_WEIGHT_C(16,8)
133 PIXEL_AVG_WEIGHT_C(8,16)
134 PIXEL_AVG_WEIGHT_C(8,8)
135 PIXEL_AVG_WEIGHT_C(8,4)
136 PIXEL_AVG_WEIGHT_C(4,8)
137 PIXEL_AVG_WEIGHT_C(4,4)
138 PIXEL_AVG_WEIGHT_C(4,2)
139 PIXEL_AVG_WEIGHT_C(2,4)
140 PIXEL_AVG_WEIGHT_C(2,2)
142 #undef PIXEL_AVG_WEIGHT_C
144 typedef void (*pf_mc_t)(uint8_t *src, int i_src_stride, uint8_t *dst, int i_dst_stride, int i_width, int i_height );
146 static void mc_copy( uint8_t *src, int i_src_stride, uint8_t *dst, int i_dst_stride, int i_width, int i_height )
150 for( y = 0; y < i_height; y++ )
152 memcpy( dst, src, i_width );
158 static inline void mc_hh( uint8_t *src, int i_src_stride, uint8_t *dst, int i_dst_stride, int i_width, int i_height )
162 for( y = 0; y < i_height; y++ )
164 for( x = 0; x < i_width; x++ )
166 dst[x] = x264_mc_clip1( ( x264_tapfilter1( &src[x] ) + 16 ) >> 5 );
172 static inline void mc_hv( uint8_t *src, int i_src_stride, uint8_t *dst, int i_dst_stride, int i_width, int i_height )
176 for( y = 0; y < i_height; y++ )
178 for( x = 0; x < i_width; x++ )
180 dst[x] = x264_mc_clip1( ( x264_tapfilter( &src[x], i_src_stride ) + 16 ) >> 5 );
186 static inline void mc_hc( uint8_t *src, int i_src_stride, uint8_t *dst, int i_dst_stride, int i_width, int i_height )
192 for( x = 0; x < i_width; x++ )
199 tap[0] = x264_tapfilter1( &pix[-2*i_src_stride] );
200 tap[1] = x264_tapfilter1( &pix[-1*i_src_stride] );
201 tap[2] = x264_tapfilter1( &pix[ 0*i_src_stride] );
202 tap[3] = x264_tapfilter1( &pix[ 1*i_src_stride] );
203 tap[4] = x264_tapfilter1( &pix[ 2*i_src_stride] );
205 for( y = 0; y < i_height; y++ )
207 tap[5] = x264_tapfilter1( &pix[ 3*i_src_stride] );
209 *out = x264_mc_clip1( ( tap[0] - 5*tap[1] + 20 * tap[2] + 20 * tap[3] -5*tap[4] + tap[5] + 512 ) >> 10 );
/* For each quarter-pel position (qpel_idx = (mvy&3)*4 + (mvx&3)), which of
 * the four filtered planes (0=full, 1=H half-pel, 2=V half-pel, 3=center)
 * to average to form the prediction. hpel_ref0 is the first source plane,
 * hpel_ref1 the second (used only when interpolation is needed). */
static const int hpel_ref0[16] = {0,1,1,1,0,1,1,1,2,3,3,3,0,1,1,1};
static const int hpel_ref1[16] = {0,0,0,0,2,2,3,2,2,2,3,2,2,2,3,2};
226 static void mc_luma( uint8_t *src[4], int i_src_stride,
227 uint8_t *dst, int i_dst_stride,
229 int i_width, int i_height )
231 int qpel_idx = ((mvy&3)<<2) + (mvx&3);
232 int offset = (mvy>>2)*i_src_stride + (mvx>>2);
233 uint8_t *src1 = src[hpel_ref0[qpel_idx]] + offset + ((mvy&3) == 3) * i_src_stride;
235 if( qpel_idx & 5 ) /* qpel interpolation needed */
237 uint8_t *src2 = src[hpel_ref1[qpel_idx]] + offset + ((mvx&3) == 3);
238 pixel_avg( dst, i_dst_stride, src1, i_src_stride,
239 src2, i_src_stride, i_width, i_height );
243 mc_copy( src1, i_src_stride, dst, i_dst_stride, i_width, i_height );
247 static uint8_t *get_ref( uint8_t *src[4], int i_src_stride,
248 uint8_t *dst, int *i_dst_stride,
250 int i_width, int i_height )
252 int qpel_idx = ((mvy&3)<<2) + (mvx&3);
253 int offset = (mvy>>2)*i_src_stride + (mvx>>2);
254 uint8_t *src1 = src[hpel_ref0[qpel_idx]] + offset + ((mvy&3) == 3) * i_src_stride;
256 if( qpel_idx & 5 ) /* qpel interpolation needed */
258 uint8_t *src2 = src[hpel_ref1[qpel_idx]] + offset + ((mvx&3) == 3);
259 pixel_avg( dst, *i_dst_stride, src1, i_src_stride,
260 src2, i_src_stride, i_width, i_height );
265 *i_dst_stride = i_src_stride;
270 /* full chroma mc (ie until 1/8 pixel)*/
271 static void motion_compensation_chroma( uint8_t *src, int i_src_stride,
272 uint8_t *dst, int i_dst_stride,
274 int i_width, int i_height )
279 const int d8x = mvx&0x07;
280 const int d8y = mvy&0x07;
282 const int cA = (8-d8x)*(8-d8y);
283 const int cB = d8x *(8-d8y);
284 const int cC = (8-d8x)*d8y;
285 const int cD = d8x *d8y;
287 src += (mvy >> 3) * i_src_stride + (mvx >> 3);
288 srcp = &src[i_src_stride];
290 for( y = 0; y < i_height; y++ )
292 for( x = 0; x < i_width; x++ )
294 dst[x] = ( cA*src[x] + cB*src[x+1] +
295 cC*srcp[x] + cD*srcp[x+1] + 32 ) >> 6;
300 srcp += i_src_stride;
305 static void mc_copy_w##W( uint8_t *dst, int i_dst, uint8_t *src, int i_src, int i_height ) \
307 mc_copy( src, i_src, dst, i_dst, W, i_height ); \
313 static void plane_copy( uint8_t *dst, int i_dst,
314 uint8_t *src, int i_src, int w, int h)
318 memcpy( dst, src, w );
324 void prefetch_fenc_null( uint8_t *pix_y, int stride_y,
325 uint8_t *pix_uv, int stride_uv, int mb_x )
328 void prefetch_ref_null( uint8_t *pix, int stride, int parity )
331 void x264_mc_init( int cpu, x264_mc_functions_t *pf )
333 pf->mc_luma = mc_luma;
334 pf->get_ref = get_ref;
335 pf->mc_chroma = motion_compensation_chroma;
337 pf->avg[PIXEL_16x16]= pixel_avg_16x16;
338 pf->avg[PIXEL_16x8] = pixel_avg_16x8;
339 pf->avg[PIXEL_8x16] = pixel_avg_8x16;
340 pf->avg[PIXEL_8x8] = pixel_avg_8x8;
341 pf->avg[PIXEL_8x4] = pixel_avg_8x4;
342 pf->avg[PIXEL_4x8] = pixel_avg_4x8;
343 pf->avg[PIXEL_4x4] = pixel_avg_4x4;
344 pf->avg[PIXEL_4x2] = pixel_avg_4x2;
345 pf->avg[PIXEL_2x4] = pixel_avg_2x4;
346 pf->avg[PIXEL_2x2] = pixel_avg_2x2;
348 pf->avg_weight[PIXEL_16x16]= pixel_avg_weight_16x16;
349 pf->avg_weight[PIXEL_16x8] = pixel_avg_weight_16x8;
350 pf->avg_weight[PIXEL_8x16] = pixel_avg_weight_8x16;
351 pf->avg_weight[PIXEL_8x8] = pixel_avg_weight_8x8;
352 pf->avg_weight[PIXEL_8x4] = pixel_avg_weight_8x4;
353 pf->avg_weight[PIXEL_4x8] = pixel_avg_weight_4x8;
354 pf->avg_weight[PIXEL_4x4] = pixel_avg_weight_4x4;
355 pf->avg_weight[PIXEL_4x2] = pixel_avg_weight_4x2;
356 pf->avg_weight[PIXEL_2x4] = pixel_avg_weight_2x4;
357 pf->avg_weight[PIXEL_2x2] = pixel_avg_weight_2x2;
359 pf->copy[PIXEL_16x16] = mc_copy_w16;
360 pf->copy[PIXEL_8x8] = mc_copy_w8;
361 pf->copy[PIXEL_4x4] = mc_copy_w4;
363 pf->plane_copy = plane_copy;
365 pf->prefetch_fenc = prefetch_fenc_null;
366 pf->prefetch_ref = prefetch_ref_null;
369 if( cpu&X264_CPU_MMXEXT ) {
370 x264_mc_mmxext_init( pf );
371 pf->mc_chroma = x264_mc_chroma_mmxext;
373 if( cpu&X264_CPU_SSE2 )
374 x264_mc_sse2_init( pf );
377 if( cpu&X264_CPU_ALTIVEC )
378 x264_mc_altivec_init( pf );
382 extern void x264_hpel_filter_mmxext( uint8_t *dsth, uint8_t *dstv, uint8_t *dstc, uint8_t *src,
383 int i_stride, int i_width, int i_height );
385 void x264_frame_filter( x264_t *h, x264_frame_t *frame, int mb_y, int b_end )
387 const int b_interlaced = h->sh.b_mbaff;
388 const int x_inc = 16, y_inc = 16;
389 const int stride = frame->i_stride[0] << b_interlaced;
390 const int width = stride + frame->i_width[0] - frame->i_stride[0];
391 int start = (mb_y*16 >> b_interlaced) - 8;
392 int height = ((b_end ? frame->i_lines[0] : mb_y*16) >> b_interlaced) + 8;
395 if( mb_y & b_interlaced )
397 mb_y >>= b_interlaced;
400 if( h->param.cpu & X264_CPU_MMXEXT )
402 // buffer = 4 for deblock + 3 for 6tap, rounded to 8
403 int offs = start*stride - 8;
404 x264_hpel_filter_mmxext(
405 frame->filtered[1] + offs,
406 frame->filtered[2] + offs,
407 frame->filtered[3] + offs,
408 frame->plane[0] + offs,
409 stride, width + 16, height - start );
414 for( y = start; y < height; y += y_inc )
416 uint8_t *p_in = frame->plane[0] + y * stride - 8;
417 uint8_t *p_h = frame->filtered[1] + y * stride - 8;
418 uint8_t *p_v = frame->filtered[2] + y * stride - 8;
419 uint8_t *p_c = frame->filtered[3] + y * stride - 8;
420 for( x = -8; x < width + 8; x += x_inc )
422 mc_hh( p_in, stride, p_h, stride, x_inc, y_inc );
423 mc_hv( p_in, stride, p_v, stride, x_inc, y_inc );
424 mc_hc( p_in, stride, p_c, stride, x_inc, y_inc );
434 /* generate integral image:
435 * frame->integral contains 2 planes. in the upper plane, each element is
436 * the sum of an 8x8 pixel region with top-left corner on that point.
437 * in the lower plane, 4x4 sums (needed only with --partitions p4x4). */
439 if( frame->integral )
443 memset( frame->integral - 32 * stride - 32, 0, stride * sizeof(uint16_t) );
448 for( y = start; y < height; y++ )
450 uint8_t *ref = frame->plane[0] + y * stride - 32;
451 uint16_t *line = frame->integral + (y+1) * stride - 31;
452 uint16_t v = line[0] = 0;
453 for( x = 0; x < stride-1; x++ )
454 line[x] = v += ref[x] + line[x-stride] - line[x-stride-1];
458 uint16_t *sum4 = line + frame->i_stride[0] * (frame->i_lines[0] + 64);
459 for( x = 1; x < stride-8; x++, line++, sum4++ )
461 sum4[0] = line[4+4*stride] - line[4] - line[4*stride] + line[0];
462 line[0] += line[8+8*stride] - line[8] - line[8*stride];
469 void x264_frame_init_lowres( x264_t *h, x264_frame_t *frame )
472 const int i_stride = frame->i_stride[0];
473 const int i_stride2 = frame->i_stride_lowres;
474 const int i_width2 = frame->i_width_lowres;
476 for( y = 0; y < frame->i_lines_lowres - 1; y++ )
478 uint8_t *src0 = &frame->plane[0][2*y*i_stride];
479 uint8_t *src1 = src0+i_stride;
480 uint8_t *src2 = src1+i_stride;
481 uint8_t *dst0 = &frame->lowres[0][y*i_stride2];
482 uint8_t *dsth = &frame->lowres[1][y*i_stride2];
483 uint8_t *dstv = &frame->lowres[2][y*i_stride2];
484 uint8_t *dstc = &frame->lowres[3][y*i_stride2];
485 for( x = 0; x < i_width2 - 1; x++ )
487 dst0[x] = (src0[2*x ] + src0[2*x+1] + src1[2*x ] + src1[2*x+1] + 2) >> 2;
488 dsth[x] = (src0[2*x+1] + src0[2*x+2] + src1[2*x+1] + src1[2*x+2] + 2) >> 2;
489 dstv[x] = (src1[2*x ] + src1[2*x+1] + src2[2*x ] + src2[2*x+1] + 2) >> 2;
490 dstc[x] = (src1[2*x+1] + src1[2*x+2] + src2[2*x+1] + src2[2*x+2] + 2) >> 2;
492 dst0[x] = (src0[2*x ] + src0[2*x+1] + src1[2*x ] + src1[2*x+1] + 2) >> 2;
493 dstv[x] = (src1[2*x ] + src1[2*x+1] + src2[2*x ] + src2[2*x+1] + 2) >> 2;
494 dsth[x] = (src0[2*x+1] + src1[2*x+1] + 1) >> 1;
495 dstc[x] = (src1[2*x+1] + src2[2*x+1] + 1) >> 1;
497 for( i = 0; i < 4; i++ )
498 memcpy( &frame->lowres[i][y*i_stride2], &frame->lowres[i][(y-1)*i_stride2], i_width2 );
500 for( y = 0; y < 16; y++ )
501 for( x = 0; x < 16; x++ )
502 frame->i_cost_est[x][y] = -1;
504 x264_frame_expand_border_lowres( frame );