/*****************************************************************************
 * mc.c: motion compensation
 *****************************************************************************
 * Copyright (C) 2003-2015 x264 project
 *
 * Authors: Laurent Aimar <fenrir@via.ecp.fr>
 *          Loren Merritt <lorenm@u.washington.edu>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02111, USA.
 *
 * This program is also available under a commercial proprietary license.
 * For more information, contact us at licensing@x264.com.
 *****************************************************************************/

#include "common.h"

#if HAVE_MMX
#include "x86/mc.h"
#endif
#if ARCH_PPC
#include "ppc/mc.h"
#endif
#if ARCH_ARM
#include "arm/mc.h"
#endif
#if ARCH_AARCH64
#include "aarch64/mc.h"
#endif

static inline void pixel_avg( pixel *dst,  intptr_t i_dst_stride,
                              pixel *src1, intptr_t i_src1_stride,
                              pixel *src2, intptr_t i_src2_stride, int i_width, int i_height )
{
    for( int y = 0; y < i_height; y++ )
    {
        for( int x = 0; x < i_width; x++ )
            dst[x] = ( src1[x] + src2[x] + 1 ) >> 1;
        dst  += i_dst_stride;
        src1 += i_src1_stride;
        src2 += i_src2_stride;
    }
}

static inline void pixel_avg_wxh( pixel *dst,  intptr_t i_dst,
                                  pixel *src1, intptr_t i_src1,
                                  pixel *src2, intptr_t i_src2, int width, int height )
{
    for( int y = 0; y < height; y++, dst += i_dst, src1 += i_src1, src2 += i_src2 )
        for( int x = 0; x < width; x++ )
            dst[x] = ( src1[x] + src2[x] + 1 ) >> 1;
}

/* Implicit weighted bipred only:
 * assumes log2_denom = 5, offset = 0, weight1 + weight2 = 64 */
static inline void pixel_avg_weight_wxh( pixel *dst,  intptr_t i_dst,
                                         pixel *src1, intptr_t i_src1,
                                         pixel *src2, intptr_t i_src2, int width, int height, int i_weight1 )
{
    int i_weight2 = 64 - i_weight1;
    for( int y = 0; y<height; y++, dst += i_dst, src1 += i_src1, src2 += i_src2 )
        for( int x = 0; x<width; x++ )
            dst[x] = x264_clip_pixel( (src1[x]*i_weight1 + src2[x]*i_weight2 + (1<<5)) >> 6 );
}

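/* Worked example of the blend above (illustrative values, not from the original
 * source): with i_weight1 = 40 (so i_weight2 = 24), src1[x] = 100 and src2[x] = 50,
 *   (100*40 + 50*24 + 32) >> 6 = (4000 + 1200 + 32) >> 6 = 81,
 * i.e. a 40/64 vs 24/64 weighted average rounded to nearest, then clipped to the
 * valid pixel range. */
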
#define PIXEL_AVG_C( name, width, height ) \
static void name( pixel *pix1, intptr_t i_stride_pix1, \
                  pixel *pix2, intptr_t i_stride_pix2, \
                  pixel *pix3, intptr_t i_stride_pix3, int weight ) \
{ \
    if( weight == 32 ) \
        pixel_avg_wxh( pix1, i_stride_pix1, pix2, i_stride_pix2, pix3, i_stride_pix3, width, height ); \
    else \
        pixel_avg_weight_wxh( pix1, i_stride_pix1, pix2, i_stride_pix2, pix3, i_stride_pix3, width, height, weight ); \
}
PIXEL_AVG_C( pixel_avg_16x16, 16, 16 )
PIXEL_AVG_C( pixel_avg_16x8,  16, 8 )
PIXEL_AVG_C( pixel_avg_8x16,  8, 16 )
PIXEL_AVG_C( pixel_avg_8x8,   8, 8 )
PIXEL_AVG_C( pixel_avg_8x4,   8, 4 )
PIXEL_AVG_C( pixel_avg_4x16,  4, 16 )
PIXEL_AVG_C( pixel_avg_4x8,   4, 8 )
PIXEL_AVG_C( pixel_avg_4x4,   4, 4 )
PIXEL_AVG_C( pixel_avg_4x2,   4, 2 )
PIXEL_AVG_C( pixel_avg_2x8,   2, 8 )
PIXEL_AVG_C( pixel_avg_2x4,   2, 4 )
PIXEL_AVG_C( pixel_avg_2x2,   2, 2 )

static void x264_weight_cache( x264_t *h, x264_weight_t *w )
{
    w->weightfn = h->mc.weight;
}

#define opscale(x) dst[x] = x264_clip_pixel( ((src[x] * scale + (1<<(denom - 1))) >> denom) + offset )
#define opscale_noden(x) dst[x] = x264_clip_pixel( src[x] * scale + offset )
static void mc_weight( pixel *dst, intptr_t i_dst_stride, pixel *src, intptr_t i_src_stride,
                       const x264_weight_t *weight, int i_width, int i_height )
{
    int offset = weight->i_offset << (BIT_DEPTH-8);
    int scale = weight->i_scale;
    int denom = weight->i_denom;
    if( denom >= 1 )
    {
        for( int y = 0; y < i_height; y++, dst += i_dst_stride, src += i_src_stride )
            for( int x = 0; x < i_width; x++ )
                opscale( x );
    }
    else
    {
        for( int y = 0; y < i_height; y++, dst += i_dst_stride, src += i_src_stride )
            for( int x = 0; x < i_width; x++ )
                opscale_noden( x );
    }
}

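/* Hedged usage sketch (illustration only, not part of the original file): driving
 * the explicit-weight path above on a 16x16 block.  Only the i_scale/i_denom/i_offset
 * fields that mc_weight() reads are filled in; the numeric values are arbitrary.
 * With scale=48, denom=6, offset=2, a source sample of 100 becomes
 *   ((100*48 + 32) >> 6) + 2 = 75 + 2 = 77 (before clipping). */
#if 0
static void example_mc_weight( pixel *dst, pixel *src, intptr_t stride )
{
    x264_weight_t w = { .i_scale = 48, .i_denom = 6, .i_offset = 2 };
    mc_weight( dst, stride, src, stride, &w, 16, 16 );
}
#endif
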
#define MC_WEIGHT_C( name, width ) \
    static void name( pixel *dst, intptr_t i_dst_stride, pixel *src, intptr_t i_src_stride, const x264_weight_t *weight, int height ) \
{ \
    mc_weight( dst, i_dst_stride, src, i_src_stride, weight, width, height );\
}

MC_WEIGHT_C( mc_weight_w20, 20 )
MC_WEIGHT_C( mc_weight_w16, 16 )
MC_WEIGHT_C( mc_weight_w12, 12 )
MC_WEIGHT_C( mc_weight_w8,   8 )
MC_WEIGHT_C( mc_weight_w4,   4 )
MC_WEIGHT_C( mc_weight_w2,   2 )

static weight_fn_t x264_mc_weight_wtab[6] =
{
    mc_weight_w2,
    mc_weight_w4,
    mc_weight_w8,
    mc_weight_w12,
    mc_weight_w16,
    mc_weight_w20,
};
const x264_weight_t x264_weight_none[3] = { {{0}} };

static void mc_copy( pixel *src, intptr_t i_src_stride, pixel *dst, intptr_t i_dst_stride, int i_width, int i_height )
{
    for( int y = 0; y < i_height; y++, src += i_src_stride, dst += i_dst_stride )
        memcpy( dst, src, i_width * sizeof(pixel) );
}

#define TAPFILTER(pix, d) ((pix)[x-2*d] + (pix)[x+3*d] - 5*((pix)[x-d] + (pix)[x+2*d]) + 20*((pix)[x] + (pix)[x+d]))
static void hpel_filter( pixel *dsth, pixel *dstv, pixel *dstc, pixel *src,
                         intptr_t stride, int width, int height, int16_t *buf )
{
    const int pad = (BIT_DEPTH > 9) ? (-10 * PIXEL_MAX) : 0;
    for( int y = 0; y < height; y++ )
    {
        for( int x = -2; x < width+3; x++ )
        {
            int v = TAPFILTER(src,stride);
            dstv[x] = x264_clip_pixel( (v + 16) >> 5 );
            /* transform v for storage in a 16-bit integer */
            buf[x+2] = v + pad;
        }
        for( int x = 0; x < width; x++ )
            dstc[x] = x264_clip_pixel( (TAPFILTER(buf+2,1) - 32*pad + 512) >> 10 );
        for( int x = 0; x < width; x++ )
            dsth[x] = x264_clip_pixel( (TAPFILTER(src,1) + 16) >> 5 );
        dsth += stride;
        dstv += stride;
        dstc += stride;
        src += stride;
    }
}

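/* Notes on the filter above: TAPFILTER is the H.264 6-tap kernel (1,-5,20,20,-5,1),
 * whose weights sum to 32, so the half-pel outputs dsth/dstv are normalized with
 * +16 >> 5.  The centre plane dstc applies the horizontal tap to the *unclipped*
 * vertical sums, so its divisor is 32*32 = 1024 (+512 >> 10).  `buf` stores those
 * intermediate sums biased by `pad` so they still fit in an int16_t at bit depths
 * above 9; the bias is cancelled again by the -32*pad term. */
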
/* Map a quarter-pel index ((mvy&3)<<2 | (mvx&3)) to the one or two half-pel planes
 * (0 = full-pel, 1 = horizontal, 2 = vertical, 3 = centre) used to form the
 * prediction. */
const uint8_t x264_hpel_ref0[16] = {0,1,1,1,0,1,1,1,2,3,3,3,0,1,1,1};
const uint8_t x264_hpel_ref1[16] = {0,0,1,0,2,2,3,2,2,2,3,2,2,2,3,2};

static void mc_luma( pixel *dst,    intptr_t i_dst_stride,
                     pixel *src[4], intptr_t i_src_stride,
                     int mvx, int mvy,
                     int i_width, int i_height, const x264_weight_t *weight )
{
    int qpel_idx = ((mvy&3)<<2) + (mvx&3);
    int offset = (mvy>>2)*i_src_stride + (mvx>>2);
    pixel *src1 = src[x264_hpel_ref0[qpel_idx]] + offset + ((mvy&3) == 3) * i_src_stride;

    if( qpel_idx & 5 ) /* qpel interpolation needed */
    {
        pixel *src2 = src[x264_hpel_ref1[qpel_idx]] + offset + ((mvx&3) == 3);
        pixel_avg( dst, i_dst_stride, src1, i_src_stride,
                   src2, i_src_stride, i_width, i_height );
        if( weight->weightfn )
            mc_weight( dst, i_dst_stride, dst, i_dst_stride, weight, i_width, i_height );
    }
    else if( weight->weightfn )
        mc_weight( dst, i_dst_stride, src1, i_src_stride, weight, i_width, i_height );
    else
        mc_copy( src1, i_src_stride, dst, i_dst_stride, i_width, i_height );
}

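/* Hedged usage sketch (illustration only, not part of the original file): `ref`
 * stands for a hypothetical array of the four filtered planes (full/h/v/centre)
 * sharing one stride.  For mvx=2, mvy=3 in quarter-pel units, qpel_idx = (3<<2)+2
 * = 14, so src1 is taken from plane 1 shifted down one row (because (mvy&3)==3)
 * and src2 from plane 3, and the two are averaged; when qpel_idx & 5 is zero the
 * MV already lies on the half-pel grid and a single plane is copied or weighted. */
#if 0
static void example_mc_luma( pixel *dst, pixel *ref[4], intptr_t stride )
{
    mc_luma( dst, 16, ref, stride, /*mvx*/2, /*mvy*/3, 16, 16, x264_weight_none );
}
#endif
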
static pixel *get_ref( pixel *dst,   intptr_t *i_dst_stride,
                       pixel *src[4], intptr_t i_src_stride,
                       int mvx, int mvy,
                       int i_width, int i_height, const x264_weight_t *weight )
{
    int qpel_idx = ((mvy&3)<<2) + (mvx&3);
    int offset = (mvy>>2)*i_src_stride + (mvx>>2);
    pixel *src1 = src[x264_hpel_ref0[qpel_idx]] + offset + ((mvy&3) == 3) * i_src_stride;

    if( qpel_idx & 5 ) /* qpel interpolation needed */
    {
        pixel *src2 = src[x264_hpel_ref1[qpel_idx]] + offset + ((mvx&3) == 3);
        pixel_avg( dst, *i_dst_stride, src1, i_src_stride,
                   src2, i_src_stride, i_width, i_height );
        if( weight->weightfn )
            mc_weight( dst, *i_dst_stride, dst, *i_dst_stride, weight, i_width, i_height );
        return dst;
    }
    else if( weight->weightfn )
    {
        mc_weight( dst, *i_dst_stride, src1, i_src_stride, weight, i_width, i_height );
        return dst;
    }
    else
    {
        *i_dst_stride = i_src_stride;
        return src1;
    }
}

/* full chroma mc (ie until 1/8 pixel)*/
static void mc_chroma( pixel *dstu, pixel *dstv, intptr_t i_dst_stride,
                       pixel *src, intptr_t i_src_stride,
                       int mvx, int mvy,
                       int i_width, int i_height )
{
    pixel *srcp;

    int d8x = mvx&0x07;
    int d8y = mvy&0x07;
    int cA = (8-d8x)*(8-d8y);
    int cB = d8x    *(8-d8y);
    int cC = (8-d8x)*d8y;
    int cD = d8x    *d8y;

    src += (mvy >> 3) * i_src_stride + (mvx >> 3)*2;
    srcp = &src[i_src_stride];

    for( int y = 0; y < i_height; y++ )
    {
        for( int x = 0; x < i_width; x++ )
        {
            dstu[x] = ( cA*src[2*x]  + cB*src[2*x+2] +
                        cC*srcp[2*x] + cD*srcp[2*x+2] + 32 ) >> 6;
            dstv[x] = ( cA*src[2*x+1]  + cB*src[2*x+3] +
                        cC*srcp[2*x+1] + cD*srcp[2*x+3] + 32 ) >> 6;
        }
        dstu += i_dst_stride;
        dstv += i_dst_stride;
        src   = srcp;
        srcp += i_src_stride;
    }
}

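/* The four bilinear weights above always sum to 64, which the +32 >> 6 normalizes
 * away.  Example (illustrative): an eighth-pel offset of d8x=3, d8y=2 gives
 * cA = 5*6 = 30, cB = 3*6 = 18, cC = 5*2 = 10, cD = 3*2 = 6 (total 64), i.e. a
 * weighted average of the four neighbouring samples of the interleaved (U,V)
 * plane -- which is why the source is indexed with 2*x and the horizontal
 * neighbour sits at 2*x+2. */
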
#define MC_COPY(W) \
static void mc_copy_w##W( pixel *dst, intptr_t i_dst, pixel *src, intptr_t i_src, int i_height ) \
{ \
    mc_copy( src, i_src, dst, i_dst, W, i_height ); \
}

MC_COPY( 16 )
MC_COPY( 8 )
MC_COPY( 4 )

void x264_plane_copy_c( pixel *dst, intptr_t i_dst,
                        pixel *src, intptr_t i_src, int w, int h )
{
    while( h-- )
    {
        memcpy( dst, src, w * sizeof(pixel) );
        dst += i_dst;
        src += i_src;
    }
}

void x264_plane_copy_interleave_c( pixel *dst,  intptr_t i_dst,
                                   pixel *srcu, intptr_t i_srcu,
                                   pixel *srcv, intptr_t i_srcv, int w, int h )
{
    for( int y=0; y<h; y++, dst+=i_dst, srcu+=i_srcu, srcv+=i_srcv )
        for( int x=0; x<w; x++ )
        {
            dst[2*x]   = srcu[x];
            dst[2*x+1] = srcv[x];
        }
}

static void x264_plane_copy_deinterleave_c( pixel *dstu, intptr_t i_dstu,
                                            pixel *dstv, intptr_t i_dstv,
                                            pixel *src,  intptr_t i_src, int w, int h )
{
    for( int y=0; y<h; y++, dstu+=i_dstu, dstv+=i_dstv, src+=i_src )
        for( int x=0; x<w; x++ )
        {
            dstu[x] = src[2*x];
            dstv[x] = src[2*x+1];
        }
}

static void x264_plane_copy_deinterleave_rgb_c( pixel *dsta, intptr_t i_dsta,
                                                pixel *dstb, intptr_t i_dstb,
                                                pixel *dstc, intptr_t i_dstc,
                                                pixel *src,  intptr_t i_src, int pw, int w, int h )
{
    for( int y=0; y<h; y++, dsta+=i_dsta, dstb+=i_dstb, dstc+=i_dstc, src+=i_src )
    {
        for( int x=0; x<w; x++ )
        {
            dsta[x] = src[x*pw];
            dstb[x] = src[x*pw+1];
            dstc[x] = src[x*pw+2];
        }
    }
}

void x264_plane_copy_deinterleave_v210_c( pixel *dsty, intptr_t i_dsty,
                                          pixel *dstc, intptr_t i_dstc,
                                          uint32_t *src, intptr_t i_src, int w, int h )
{
    for( int l = 0; l < h; l++ )
    {
        pixel *dsty0 = dsty;
        pixel *dstc0 = dstc;
        uint32_t *src0 = src;

        for( int n = 0; n < w; n += 3 )
        {
            *(dstc0++) = *src0 & 0x03FF;
            *(dsty0++) = ( *src0 >> 10 ) & 0x03FF;
            *(dstc0++) = ( *src0 >> 20 ) & 0x03FF;
            src0++;
            *(dsty0++) = *src0 & 0x03FF;
            *(dstc0++) = ( *src0 >> 10 ) & 0x03FF;
            *(dsty0++) = ( *src0 >> 20 ) & 0x03FF;
            src0++;
        }

        dsty += i_dsty;
        dstc += i_dstc;
        src  += i_src;
    }
}

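/* v210 background (summary, not from the original source): each 32-bit word carries
 * three 10-bit components in bits 0-9, 10-19 and 20-29, and consecutive words
 * alternate between (chroma, luma, chroma) and (luma, chroma, luma) ordering.  Each
 * loop iteration above therefore consumes two words and emits three luma samples
 * plus three interleaved Cb/Cr samples. */
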
static void store_interleave_chroma( pixel *dst, intptr_t i_dst, pixel *srcu, pixel *srcv, int height )
{
    for( int y=0; y<height; y++, dst+=i_dst, srcu+=FDEC_STRIDE, srcv+=FDEC_STRIDE )
        for( int x=0; x<8; x++ )
        {
            dst[2*x]   = srcu[x];
            dst[2*x+1] = srcv[x];
        }
}

static void load_deinterleave_chroma_fenc( pixel *dst, pixel *src, intptr_t i_src, int height )
{
    x264_plane_copy_deinterleave_c( dst, FENC_STRIDE, dst+FENC_STRIDE/2, FENC_STRIDE, src, i_src, 8, height );
}

static void load_deinterleave_chroma_fdec( pixel *dst, pixel *src, intptr_t i_src, int height )
{
    x264_plane_copy_deinterleave_c( dst, FDEC_STRIDE, dst+FDEC_STRIDE/2, FDEC_STRIDE, src, i_src, 8, height );
}

static void prefetch_fenc_null( pixel *pix_y,  intptr_t stride_y,
                                pixel *pix_uv, intptr_t stride_uv, int mb_x )
{}

static void prefetch_ref_null( pixel *pix, intptr_t stride, int parity )
{}

static void memzero_aligned( void * dst, size_t n )
{
    memset( dst, 0, n );
}

static void integral_init4h( uint16_t *sum, pixel *pix, intptr_t stride )
{
    int v = pix[0]+pix[1]+pix[2]+pix[3];
    for( int x = 0; x < stride-4; x++ )
    {
        sum[x] = v + sum[x-stride];
        v += pix[x+4] - pix[x];
    }
}

static void integral_init8h( uint16_t *sum, pixel *pix, intptr_t stride )
{
    int v = pix[0]+pix[1]+pix[2]+pix[3]+pix[4]+pix[5]+pix[6]+pix[7];
    for( int x = 0; x < stride-8; x++ )
    {
        sum[x] = v + sum[x-stride];
        v += pix[x+8] - pix[x];
    }
}

static void integral_init4v( uint16_t *sum8, uint16_t *sum4, intptr_t stride )
{
    for( int x = 0; x < stride-8; x++ )
        sum4[x] = sum8[x+4*stride] - sum8[x];
    for( int x = 0; x < stride-8; x++ )
        sum8[x] = sum8[x+8*stride] + sum8[x+8*stride+4] - sum8[x] - sum8[x+4];
}

static void integral_init8v( uint16_t *sum8, intptr_t stride )
{
    for( int x = 0; x < stride-8; x++ )
        sum8[x] = sum8[x+8*stride] - sum8[x];
}

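/* Usage note (derived from x264_frame_filter() further down): integral_init8h is run
 * row by row to accumulate 8-wide horizontal sums down each column, and
 * integral_init8v then differences entries 8 rows apart, so each finished entry of
 * frame->integral holds the sum of the 8x8 pixel block whose top-left corner lies on
 * that point.  integral_init4h/4v build the equivalent 4x4-sum plane, needed only
 * for sub-8x8 exhaustive search (--partitions p4x4). */
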
void x264_frame_init_lowres( x264_t *h, x264_frame_t *frame )
{
    pixel *src = frame->plane[0];
    int i_stride = frame->i_stride[0];
    int i_height = frame->i_lines[0];
    int i_width  = frame->i_width[0];

    // duplicate last row and column so that their interpolation doesn't have to be special-cased
    for( int y = 0; y < i_height; y++ )
        src[i_width+y*i_stride] = src[i_width-1+y*i_stride];
    memcpy( src+i_stride*i_height, src+i_stride*(i_height-1), (i_width+1) * sizeof(pixel) );
    h->mc.frame_init_lowres_core( src, frame->lowres[0], frame->lowres[1], frame->lowres[2], frame->lowres[3],
                                  i_stride, frame->i_stride_lowres, frame->i_width_lowres, frame->i_lines_lowres );
    x264_frame_expand_border_lowres( frame );

    memset( frame->i_cost_est, -1, sizeof(frame->i_cost_est) );

    for( int y = 0; y < h->param.i_bframe + 2; y++ )
        for( int x = 0; x < h->param.i_bframe + 2; x++ )
            frame->i_row_satds[y][x][0] = -1;

    for( int y = 0; y <= !!h->param.i_bframe; y++ )
        for( int x = 0; x <= h->param.i_bframe; x++ )
            frame->lowres_mvs[y][x][0][0] = 0x7FFF;
}

static void frame_init_lowres_core( pixel *src0, pixel *dst0, pixel *dsth, pixel *dstv, pixel *dstc,
                                    intptr_t src_stride, intptr_t dst_stride, int width, int height )
{
    for( int y = 0; y < height; y++ )
    {
        pixel *src1 = src0+src_stride;
        pixel *src2 = src1+src_stride;
        for( int x = 0; x<width; x++ )
        {
            // slower than naive bilinear, but matches asm
#define FILTER(a,b,c,d) ((((a+b+1)>>1)+((c+d+1)>>1)+1)>>1)
            dst0[x] = FILTER(src0[2*x  ], src1[2*x  ], src0[2*x+1], src1[2*x+1]);
            dsth[x] = FILTER(src0[2*x+1], src1[2*x+1], src0[2*x+2], src1[2*x+2]);
            dstv[x] = FILTER(src1[2*x  ], src2[2*x  ], src1[2*x+1], src2[2*x+1]);
            dstc[x] = FILTER(src1[2*x+1], src2[2*x+1], src1[2*x+2], src2[2*x+2]);
#undef FILTER
        }
        src0 += src_stride*2;
        dst0 += dst_stride;
        dsth += dst_stride;
        dstv += dst_stride;
        dstc += dst_stride;
    }
}

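/* The two-stage rounding in FILTER matches the pavg-style rounding of the SIMD
 * implementations rather than a plain (a+b+c+d+2)>>2 average.  Example: a=b=c=0,
 * d=1 gives ((0+0+1)>>1 + (0+1+1)>>1 + 1)>>1 = 1, whereas the naive average would
 * give (0+0+0+1+2)>>2 = 0.  The four outputs are the co-sited and half-pel-offset
 * half-resolution planes (lowres[0..3]) consumed by the lookahead. */
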
/* Estimate the total amount of influence on future quality that could be had if we
 * were to improve the reference samples used to inter predict any given macroblock. */
static void mbtree_propagate_cost( int16_t *dst, uint16_t *propagate_in, uint16_t *intra_costs,
                                   uint16_t *inter_costs, uint16_t *inv_qscales, float *fps_factor, int len )
{
    float fps = *fps_factor;
    for( int i = 0; i < len; i++ )
    {
        int intra_cost = intra_costs[i];
        int inter_cost = X264_MIN(intra_costs[i], inter_costs[i] & LOWRES_COST_MASK);
        float propagate_intra  = intra_cost * inv_qscales[i];
        float propagate_amount = propagate_in[i] + propagate_intra*fps;
        float propagate_num    = intra_cost - inter_cost;
        float propagate_denom  = intra_cost;
        dst[i] = X264_MIN((int)(propagate_amount * propagate_num / propagate_denom + 0.5f), 32767);
    }
}

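/* Worked example with illustrative numbers: propagate_in = 400, intra_cost = 800,
 * inter_cost = 200, inv_qscale = 1, fps = 0.5 gives
 *   propagate_amount = 400 + 800*1*0.5 = 800
 *   dst = (int)(800 * (800-200)/800 + 0.5) = 600,
 * i.e. the share of this block's accumulated future importance that is inherited
 * through inter prediction ((intra-inter)/intra) rather than coded independently. */
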
static void mbtree_propagate_list( x264_t *h, uint16_t *ref_costs, int16_t (*mvs)[2],
                                   int16_t *propagate_amount, uint16_t *lowres_costs,
                                   int bipred_weight, int mb_y, int len, int list )
{
    unsigned stride = h->mb.i_mb_stride;
    unsigned width = h->mb.i_mb_width;
    unsigned height = h->mb.i_mb_height;

    for( unsigned i = 0; i < len; i++ )
    {
#define CLIP_ADD(s,x) (s) = X264_MIN((s)+(x),(1<<15)-1)
        int lists_used = lowres_costs[i]>>LOWRES_COST_SHIFT;

        if( !(lists_used & (1 << list)) )
            continue;

        int listamount = propagate_amount[i];
        /* Apply bipred weighting. */
        if( lists_used == 3 )
            listamount = (listamount * bipred_weight + 32) >> 6;

        /* Early termination for simple case of mv0. */
        if( !M32( mvs[i] ) )
        {
            CLIP_ADD( ref_costs[mb_y*stride + i], listamount );
            continue;
        }

        int x = mvs[i][0];
        int y = mvs[i][1];
        unsigned mbx = (x>>5)+i;
        unsigned mby = (y>>5)+mb_y;
        unsigned idx0 = mbx + mby * stride;
        unsigned idx2 = idx0 + stride;
        x &= 31;
        y &= 31;
        int idx0weight = (32-y)*(32-x);
        int idx1weight = (32-y)*x;
        int idx2weight = y*(32-x);
        int idx3weight = y*x;
        idx0weight = (idx0weight * listamount + 512) >> 10;
        idx1weight = (idx1weight * listamount + 512) >> 10;
        idx2weight = (idx2weight * listamount + 512) >> 10;
        idx3weight = (idx3weight * listamount + 512) >> 10;

        if( mbx < width-1 && mby < height-1 )
        {
            CLIP_ADD( ref_costs[idx0+0], idx0weight );
            CLIP_ADD( ref_costs[idx0+1], idx1weight );
            CLIP_ADD( ref_costs[idx2+0], idx2weight );
            CLIP_ADD( ref_costs[idx2+1], idx3weight );
        }
        else
        {
            /* Note: this takes advantage of unsigned representation to
             * catch negative mbx/mby. */
            if( mby < height )
            {
                if( mbx < width )
                    CLIP_ADD( ref_costs[idx0+0], idx0weight );
                if( mbx+1 < width )
                    CLIP_ADD( ref_costs[idx0+1], idx1weight );
            }
            if( mby+1 < height )
            {
                if( mbx < width )
                    CLIP_ADD( ref_costs[idx2+0], idx2weight );
                if( mbx+1 < width )
                    CLIP_ADD( ref_costs[idx2+1], idx3weight );
            }
        }
    }
}

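/* The lowres MVs here are in quarter-pel units of the half-resolution plane, so 32
 * units span one macroblock: x>>5 / y>>5 pick the top-left macroblock covered by the
 * propagation target and x&31 / y&31 its sub-MB position.  The four
 * (32-x)*(32-y)-style factors are bilinear coverage weights that always sum to
 * 32*32 = 1024, hence the +512 >> 10 normalization before splatting onto up to four
 * ref_costs entries (each clamped to 2^15-1 via CLIP_ADD). */
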
void x264_mc_init( int cpu, x264_mc_functions_t *pf, int cpu_independent )
{
    pf->mc_luma   = mc_luma;
    pf->get_ref   = get_ref;

    pf->mc_chroma = mc_chroma;

    pf->avg[PIXEL_16x16]= pixel_avg_16x16;
    pf->avg[PIXEL_16x8] = pixel_avg_16x8;
    pf->avg[PIXEL_8x16] = pixel_avg_8x16;
    pf->avg[PIXEL_8x8]  = pixel_avg_8x8;
    pf->avg[PIXEL_8x4]  = pixel_avg_8x4;
    pf->avg[PIXEL_4x16] = pixel_avg_4x16;
    pf->avg[PIXEL_4x8]  = pixel_avg_4x8;
    pf->avg[PIXEL_4x4]  = pixel_avg_4x4;
    pf->avg[PIXEL_4x2]  = pixel_avg_4x2;
    pf->avg[PIXEL_2x8]  = pixel_avg_2x8;
    pf->avg[PIXEL_2x4]  = pixel_avg_2x4;
    pf->avg[PIXEL_2x2]  = pixel_avg_2x2;

    pf->weight       = x264_mc_weight_wtab;
    pf->offsetadd    = x264_mc_weight_wtab;
    pf->offsetsub    = x264_mc_weight_wtab;
    pf->weight_cache = x264_weight_cache;

    pf->copy_16x16_unaligned = mc_copy_w16;
    pf->copy[PIXEL_16x16] = mc_copy_w16;
    pf->copy[PIXEL_8x8]   = mc_copy_w8;
    pf->copy[PIXEL_4x4]   = mc_copy_w4;

    pf->store_interleave_chroma       = store_interleave_chroma;
    pf->load_deinterleave_chroma_fenc = load_deinterleave_chroma_fenc;
    pf->load_deinterleave_chroma_fdec = load_deinterleave_chroma_fdec;

    pf->plane_copy = x264_plane_copy_c;
    pf->plane_copy_interleave = x264_plane_copy_interleave_c;
    pf->plane_copy_deinterleave = x264_plane_copy_deinterleave_c;
    pf->plane_copy_deinterleave_rgb = x264_plane_copy_deinterleave_rgb_c;
    pf->plane_copy_deinterleave_v210 = x264_plane_copy_deinterleave_v210_c;

    pf->hpel_filter = hpel_filter;

    pf->prefetch_fenc_420 = prefetch_fenc_null;
    pf->prefetch_fenc_422 = prefetch_fenc_null;
    pf->prefetch_ref  = prefetch_ref_null;
    pf->memcpy_aligned = memcpy;
    pf->memzero_aligned = memzero_aligned;
    pf->frame_init_lowres_core = frame_init_lowres_core;

    pf->integral_init4h = integral_init4h;
    pf->integral_init8h = integral_init8h;
    pf->integral_init4v = integral_init4v;
    pf->integral_init8v = integral_init8v;

    pf->mbtree_propagate_cost = mbtree_propagate_cost;
    pf->mbtree_propagate_list = mbtree_propagate_list;

#if HAVE_MMX
    x264_mc_init_mmx( cpu, pf );
#endif
#if HAVE_ALTIVEC
    if( cpu&X264_CPU_ALTIVEC )
        x264_mc_altivec_init( pf );
#endif
#if HAVE_ARMV6
    x264_mc_init_arm( cpu, pf );
#endif
#if ARCH_AARCH64
    x264_mc_init_aarch64( cpu, pf );
#endif

    if( cpu_independent )
    {
        pf->mbtree_propagate_cost = mbtree_propagate_cost;
        pf->mbtree_propagate_list = mbtree_propagate_list;
    }
}

void x264_frame_filter( x264_t *h, x264_frame_t *frame, int mb_y, int b_end )
{
    const int b_interlaced = PARAM_INTERLACED;
    int start = mb_y*16 - 8; // buffer = 4 for deblock + 3 for 6tap, rounded to 8
    int height = (b_end ? frame->i_lines[0] + 16*PARAM_INTERLACED : (mb_y+b_interlaced)*16) + 8;

    if( mb_y & b_interlaced )
        return;

    for( int p = 0; p < (CHROMA444 ? 3 : 1); p++ )
    {
        int stride = frame->i_stride[p];
        const int width = frame->i_width[p];
        int offs = start*stride - 8; // buffer = 3 for 6tap, aligned to 8 for simd

        if( !b_interlaced || h->mb.b_adaptive_mbaff )
            h->mc.hpel_filter(
                frame->filtered[p][1] + offs,
                frame->filtered[p][2] + offs,
                frame->filtered[p][3] + offs,
                frame->plane[p] + offs,
                stride, width + 16, height - start,
                h->scratch_buffer );

        if( b_interlaced )
        {
            /* MC must happen between pixels in the same field. */
            stride = frame->i_stride[p] << 1;
            start = (mb_y*16 >> 1) - 8;
            int height_fld = ((b_end ? frame->i_lines[p] : mb_y*16) >> 1) + 8;
            offs = start*stride - 8;
            for( int i = 0; i < 2; i++, offs += frame->i_stride[p] )
            {
                h->mc.hpel_filter(
                    frame->filtered_fld[p][1] + offs,
                    frame->filtered_fld[p][2] + offs,
                    frame->filtered_fld[p][3] + offs,
                    frame->plane_fld[p] + offs,
                    stride, width + 16, height_fld - start,
                    h->scratch_buffer );
            }
        }
    }

    /* generate integral image:
     * frame->integral contains 2 planes. in the upper plane, each element is
     * the sum of an 8x8 pixel region with top-left corner on that point.
     * in the lower plane, 4x4 sums (needed only with --partitions p4x4). */

    if( frame->integral )
    {
        int stride = frame->i_stride[0];
        if( start < 0 )
        {
            memset( frame->integral - PADV * stride - PADH, 0, stride * sizeof(uint16_t) );
            start = -PADV;
        }
        if( b_end )
            height += PADV-9;
        for( int y = start; y < height; y++ )
        {
            pixel    *pix  = frame->plane[0] + y * stride - PADH;
            uint16_t *sum8 = frame->integral + (y+1) * stride - PADH;
            uint16_t *sum4;
            if( h->frames.b_have_sub8x8_esa )
            {
                h->mc.integral_init4h( sum8, pix, stride );
                sum8 -= 8*stride;
                sum4 = sum8 + stride * (frame->i_lines[0] + PADV*2);
                if( y >= 8-PADV )
                    h->mc.integral_init4v( sum8, sum4, stride );
            }
            else
            {
                h->mc.integral_init8h( sum8, pix, stride );
                if( y >= 8-PADV )
                    h->mc.integral_init8v( sum8-8*stride, stride );
            }
        }
    }
}