1 /*****************************************************************************
2 * deblock.c: deblocking
3 *****************************************************************************
4 * Copyright (C) 2003-2011 x264 project
6 * Authors: Laurent Aimar <fenrir@via.ecp.fr>
7 * Loren Merritt <lorenm@u.washington.edu>
8 * Fiona Glaser <fiona@x264.com>
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of the GNU General Public License as published by
12 * the Free Software Foundation; either version 2 of the License, or
13 * (at your option) any later version.
15 * This program is distributed in the hope that it will be useful,
16 * but WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
18 * GNU General Public License for more details.
20 * You should have received a copy of the GNU General Public License
21 * along with this program; if not, write to the Free Software
22 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02111, USA.
24 * This program is also available under a commercial proprietary license.
25 * For more information, contact us at licensing@x264.com.
26 *****************************************************************************/
30 /* Deblocking filter */
/* Alpha (edge-activity) thresholds indexed by clipped qp+offset (H.264 Table 8-16).
 * 24 padding entries lead and 12 trail so out-of-range indices need no clamp.
 * Fix: the table was missing the qp50/qp51 entries (255,255) and its braces —
 * only 86 of the declared 88 values were present. */
static const uint8_t i_alpha_table[52+12*3] =
{
     0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
     0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
     0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
     0, 0, 0, 0, 0, 0, 4, 4, 5, 6,
     7, 8, 9, 10, 12, 13, 15, 17, 20, 22,
    25, 28, 32, 36, 40, 45, 50, 56, 63, 71,
    80, 90,101,113,127,144,162,182,203,226,
   255,255,
   255,255,255,255,255,255,255,255,255,255,255,255,
};
/* Beta (internal-activity) thresholds indexed by clipped qp+offset (H.264 Table 8-16).
 * Same 24-lead/12-trail padding layout as i_alpha_table.
 * Fix: the table was missing the qp50/qp51 entries (18,18) and its braces —
 * only 86 of the declared 88 values were present. */
static const uint8_t i_beta_table[52+12*3] =
{
     0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
     0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
     0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
     0, 0, 0, 0, 0, 0, 2, 2, 2, 3,
     3, 3, 3, 4, 4, 4, 6, 6, 7, 7,
     8, 8, 9, 9, 10, 10, 11, 11, 12, 12,
    13, 13, 14, 14, 15, 15, 16, 16, 17, 17,
    18, 18,
    18, 18, 18, 18, 18, 18, 18, 18, 18, 18, 18, 18,
};
/* Clipping values tc0 for the normal filter, indexed by [clipped qp+offset][bS]
 * (H.264 Table 8-17). Column 0 (-1) corresponds to bS==0, i.e. edge untouched;
 * the deblock_edge() callers index columns with bS in 0..3.
 * Fix: restored the missing surrounding braces (data itself was complete). */
static const int8_t i_tc0_table[52+12*3][4] =
{
    {-1, 0, 0, 0 }, {-1, 0, 0, 0 }, {-1, 0, 0, 0 }, {-1, 0, 0, 0 }, {-1, 0, 0, 0 }, {-1, 0, 0, 0 },
    {-1, 0, 0, 0 }, {-1, 0, 0, 0 }, {-1, 0, 0, 0 }, {-1, 0, 0, 0 }, {-1, 0, 0, 0 }, {-1, 0, 0, 0 },
    {-1, 0, 0, 0 }, {-1, 0, 0, 0 }, {-1, 0, 0, 0 }, {-1, 0, 0, 0 }, {-1, 0, 0, 0 }, {-1, 0, 0, 0 },
    {-1, 0, 0, 0 }, {-1, 0, 0, 0 }, {-1, 0, 0, 0 }, {-1, 0, 0, 0 }, {-1, 0, 0, 0 }, {-1, 0, 0, 0 },
    {-1, 0, 0, 0 }, {-1, 0, 0, 0 }, {-1, 0, 0, 0 }, {-1, 0, 0, 0 }, {-1, 0, 0, 0 }, {-1, 0, 0, 0 },
    {-1, 0, 0, 0 }, {-1, 0, 0, 0 }, {-1, 0, 0, 0 }, {-1, 0, 0, 0 }, {-1, 0, 0, 0 }, {-1, 0, 0, 0 },
    {-1, 0, 0, 0 }, {-1, 0, 0, 0 }, {-1, 0, 0, 0 }, {-1, 0, 0, 0 }, {-1, 0, 0, 0 }, {-1, 0, 0, 1 },
    {-1, 0, 0, 1 }, {-1, 0, 0, 1 }, {-1, 0, 0, 1 }, {-1, 0, 1, 1 }, {-1, 0, 1, 1 }, {-1, 1, 1, 1 },
    {-1, 1, 1, 1 }, {-1, 1, 1, 1 }, {-1, 1, 1, 1 }, {-1, 1, 1, 2 }, {-1, 1, 1, 2 }, {-1, 1, 1, 2 },
    {-1, 1, 1, 2 }, {-1, 1, 2, 3 }, {-1, 1, 2, 3 }, {-1, 2, 2, 3 }, {-1, 2, 2, 4 }, {-1, 2, 3, 4 },
    {-1, 2, 3, 4 }, {-1, 3, 3, 5 }, {-1, 3, 4, 6 }, {-1, 3, 4, 6 }, {-1, 4, 5, 7 }, {-1, 4, 5, 8 },
    {-1, 4, 6, 9 }, {-1, 5, 7,10 }, {-1, 6, 8,11 }, {-1, 6, 8,13 }, {-1, 7,10,14 }, {-1, 8,11,16 },
    {-1, 9,12,18 }, {-1,10,13,20 }, {-1,11,15,23 }, {-1,13,17,25 },
    {-1,13,17,25 }, {-1,13,17,25 }, {-1,13,17,25 }, {-1,13,17,25 }, {-1,13,17,25 }, {-1,13,17,25 },
    {-1,13,17,25 }, {-1,13,17,25 }, {-1,13,17,25 }, {-1,13,17,25 }, {-1,13,17,25 }, {-1,13,17,25 },
};
/* Accessors for the threshold tables above. Each table declares 52+12*3
 * entries: 12*2 = 24 padding entries before the real qp range and 12 after,
 * so an index of qp + alpha/beta offset (which may leave 0..51) can be used
 * directly; the +24 recenters onto the table data. */
#define alpha_table(x) i_alpha_table[(x)+24]
#define beta_table(x) i_beta_table[(x)+24]
#define tc0_table(x) i_tc0_table[(x)+24]
78 static inline void deblock_luma_c( pixel *pix, int xstride, int ystride, int alpha, int beta, int8_t *tc0 )
80 for( int i = 0; i < 4; i++ )
87 for( int d = 0; d < 4; d++ )
89 int p2 = pix[-3*xstride];
90 int p1 = pix[-2*xstride];
91 int p0 = pix[-1*xstride];
92 int q0 = pix[ 0*xstride];
93 int q1 = pix[ 1*xstride];
94 int q2 = pix[ 2*xstride];
96 if( abs( p0 - q0 ) < alpha && abs( p1 - p0 ) < beta && abs( q1 - q0 ) < beta )
100 if( abs( p2 - p0 ) < beta )
103 pix[-2*xstride] = p1 + x264_clip3( (( p2 + ((p0 + q0 + 1) >> 1)) >> 1) - p1, -tc0[i], tc0[i] );
106 if( abs( q2 - q0 ) < beta )
109 pix[ 1*xstride] = q1 + x264_clip3( (( q2 + ((p0 + q0 + 1) >> 1)) >> 1) - q1, -tc0[i], tc0[i] );
113 delta = x264_clip3( (((q0 - p0 ) << 2) + (p1 - q1) + 4) >> 3, -tc, tc );
114 pix[-1*xstride] = x264_clip_pixel( p0 + delta ); /* p0' */
115 pix[ 0*xstride] = x264_clip_pixel( q0 - delta ); /* q0' */
121 static void deblock_v_luma_c( pixel *pix, int stride, int alpha, int beta, int8_t *tc0 )
123 deblock_luma_c( pix, stride, 1, alpha, beta, tc0 );
125 static void deblock_h_luma_c( pixel *pix, int stride, int alpha, int beta, int8_t *tc0 )
127 deblock_luma_c( pix, 1, stride, alpha, beta, tc0 );
130 static inline void deblock_chroma_c( pixel *pix, int xstride, int ystride, int alpha, int beta, int8_t *tc0 )
132 for( int i = 0; i < 4; i++ )
140 for( int d = 0; d < 2; d++, pix += ystride-2 )
141 for( int e = 0; e < 2; e++, pix++ )
143 int p1 = pix[-2*xstride];
144 int p0 = pix[-1*xstride];
145 int q0 = pix[ 0*xstride];
146 int q1 = pix[ 1*xstride];
148 if( abs( p0 - q0 ) < alpha && abs( p1 - p0 ) < beta && abs( q1 - q0 ) < beta )
150 int delta = x264_clip3( (((q0 - p0 ) << 2) + (p1 - q1) + 4) >> 3, -tc, tc );
151 pix[-1*xstride] = x264_clip_pixel( p0 + delta ); /* p0' */
152 pix[ 0*xstride] = x264_clip_pixel( q0 - delta ); /* q0' */
157 static void deblock_v_chroma_c( pixel *pix, int stride, int alpha, int beta, int8_t *tc0 )
159 deblock_chroma_c( pix, stride, 2, alpha, beta, tc0 );
161 static void deblock_h_chroma_c( pixel *pix, int stride, int alpha, int beta, int8_t *tc0 )
163 deblock_chroma_c( pix, 2, stride, alpha, beta, tc0 );
166 static inline void deblock_luma_intra_c( pixel *pix, int xstride, int ystride, int alpha, int beta )
168 for( int d = 0; d < 16; d++ )
170 int p2 = pix[-3*xstride];
171 int p1 = pix[-2*xstride];
172 int p0 = pix[-1*xstride];
173 int q0 = pix[ 0*xstride];
174 int q1 = pix[ 1*xstride];
175 int q2 = pix[ 2*xstride];
177 if( abs( p0 - q0 ) < alpha && abs( p1 - p0 ) < beta && abs( q1 - q0 ) < beta )
179 if(abs( p0 - q0 ) < ((alpha >> 2) + 2) )
181 if( abs( p2 - p0 ) < beta ) /* p0', p1', p2' */
183 const int p3 = pix[-4*xstride];
184 pix[-1*xstride] = ( p2 + 2*p1 + 2*p0 + 2*q0 + q1 + 4 ) >> 3;
185 pix[-2*xstride] = ( p2 + p1 + p0 + q0 + 2 ) >> 2;
186 pix[-3*xstride] = ( 2*p3 + 3*p2 + p1 + p0 + q0 + 4 ) >> 3;
189 pix[-1*xstride] = ( 2*p1 + p0 + q1 + 2 ) >> 2;
190 if( abs( q2 - q0 ) < beta ) /* q0', q1', q2' */
192 const int q3 = pix[3*xstride];
193 pix[0*xstride] = ( p1 + 2*p0 + 2*q0 + 2*q1 + q2 + 4 ) >> 3;
194 pix[1*xstride] = ( p0 + q0 + q1 + q2 + 2 ) >> 2;
195 pix[2*xstride] = ( 2*q3 + 3*q2 + q1 + q0 + p0 + 4 ) >> 3;
198 pix[0*xstride] = ( 2*q1 + q0 + p1 + 2 ) >> 2;
202 pix[-1*xstride] = ( 2*p1 + p0 + q1 + 2 ) >> 2;
203 pix[ 0*xstride] = ( 2*q1 + q0 + p1 + 2 ) >> 2;
209 static void deblock_v_luma_intra_c( pixel *pix, int stride, int alpha, int beta )
211 deblock_luma_intra_c( pix, stride, 1, alpha, beta );
213 static void deblock_h_luma_intra_c( pixel *pix, int stride, int alpha, int beta )
215 deblock_luma_intra_c( pix, 1, stride, alpha, beta );
218 static inline void deblock_chroma_intra_c( pixel *pix, int xstride, int ystride, int alpha, int beta, int dir )
220 for( int d = 0; d < (dir?16:8); d++, pix += ystride-2 )
221 for( int e = 0; e < (dir?1:2); e++, pix++ )
223 int p1 = pix[-2*xstride];
224 int p0 = pix[-1*xstride];
225 int q0 = pix[ 0*xstride];
226 int q1 = pix[ 1*xstride];
228 if( abs( p0 - q0 ) < alpha && abs( p1 - p0 ) < beta && abs( q1 - q0 ) < beta )
230 pix[-1*xstride] = (2*p1 + p0 + q1 + 2) >> 2; /* p0' */
231 pix[ 0*xstride] = (2*q1 + q0 + p1 + 2) >> 2; /* q0' */
235 static void deblock_v_chroma_intra_c( pixel *pix, int stride, int alpha, int beta )
237 deblock_chroma_intra_c( pix, stride, 2, alpha, beta, 1 );
239 static void deblock_h_chroma_intra_c( pixel *pix, int stride, int alpha, int beta )
241 deblock_chroma_intra_c( pix, 2, stride, alpha, beta, 0 );
244 static void deblock_strength_c( uint8_t nnz[X264_SCAN8_SIZE], int8_t ref[2][X264_SCAN8_LUMA_SIZE],
245 int16_t mv[2][X264_SCAN8_LUMA_SIZE][2], uint8_t bs[2][4][4], int mvy_limit,
246 int bframe, x264_t *h )
248 for( int dir = 0; dir < 2; dir++ )
250 int s1 = dir ? 1 : 8;
251 int s2 = dir ? 8 : 1;
252 for( int edge = 0; edge < 4; edge++ )
253 for( int i = 0, loc = X264_SCAN8_0+edge*s2; i < 4; i++, loc += s1 )
256 if( nnz[loc] || nnz[locn] )
257 bs[dir][edge][i] = 2;
258 else if( ref[0][loc] != ref[0][locn] ||
259 abs( mv[0][loc][0] - mv[0][locn][0] ) >= 4 ||
260 abs( mv[0][loc][1] - mv[0][locn][1] ) >= mvy_limit ||
261 (bframe && (ref[1][loc] != ref[1][locn] ||
262 abs( mv[1][loc][0] - mv[1][locn][0] ) >= 4 ||
263 abs( mv[1][loc][1] - mv[1][locn][1] ) >= mvy_limit )))
265 bs[dir][edge][i] = 1;
268 bs[dir][edge][i] = 0;
272 void deblock_strength_mbaff_c( uint8_t nnz_cache[X264_SCAN8_SIZE], int8_t ref[2][X264_SCAN8_LUMA_SIZE],
273 int16_t mv[2][X264_SCAN8_LUMA_SIZE][2], uint8_t bs[2][4][4],
274 int mvy_limit, int bframe, x264_t *h )
276 int neighbour_field[2];
277 neighbour_field[0] = h->mb.i_mb_left_xy[0] >= 0 && h->mb.field[h->mb.i_mb_left_xy[0]];
278 neighbour_field[1] = h->mb.i_mb_top_xy >= 0 && h->mb.field[h->mb.i_mb_top_xy];
279 int intra_cur = IS_INTRA( h->mb.i_type );
283 for( int dir = 0; dir < 2; dir++ )
285 int edge_stride = dir ? 8 : 1;
286 int part_stride = dir ? 1 : 8;
287 for( int edge = 0; edge < 4; edge++ )
289 for( int i = 0, q = X264_SCAN8_0+edge*edge_stride; i < 4; i++, q += part_stride )
291 int p = q - edge_stride;
292 if( nnz_cache[q] || nnz_cache[p] )
294 bs[dir][edge][i] = 2;
296 else if( (edge == 0 && h->mb.b_interlaced != neighbour_field[dir]) ||
297 ref[0][q] != ref[0][p] ||
298 abs( mv[0][q][0] - mv[0][p][0] ) >= 4 ||
299 abs( mv[0][q][1] - mv[0][p][1] ) >= mvy_limit ||
300 (bframe && (ref[1][q] != ref[1][p] ||
301 abs( mv[1][q][0] - mv[1][p][0] ) >= 4 ||
302 abs( mv[1][q][1] - mv[1][p][1] ) >= mvy_limit )) )
304 bs[dir][edge][i] = 1;
307 bs[dir][edge][i] = 0;
314 static inline void deblock_edge( x264_t *h, pixel *pix, int i_stride, uint8_t bS[4], int i_qp, int b_chroma, x264_deblock_inter_t pf_inter )
316 int index_a = i_qp-QP_BD_OFFSET + h->sh.i_alpha_c0_offset;
317 int index_b = i_qp-QP_BD_OFFSET + h->sh.i_beta_offset;
318 int alpha = alpha_table(index_a) << (BIT_DEPTH-8);
319 int beta = beta_table(index_b) << (BIT_DEPTH-8);
322 if( !M32(bS) || !alpha || !beta )
325 tc[0] = (tc0_table(index_a)[bS[0]] << (BIT_DEPTH-8)) + b_chroma;
326 tc[1] = (tc0_table(index_a)[bS[1]] << (BIT_DEPTH-8)) + b_chroma;
327 tc[2] = (tc0_table(index_a)[bS[2]] << (BIT_DEPTH-8)) + b_chroma;
328 tc[3] = (tc0_table(index_a)[bS[3]] << (BIT_DEPTH-8)) + b_chroma;
330 pf_inter( pix, i_stride, alpha, beta, tc );
333 static inline void deblock_edge_intra( x264_t *h, pixel *pix, int i_stride, uint8_t bS[4], int i_qp, int b_chroma, x264_deblock_intra_t pf_intra )
335 int index_a = i_qp-QP_BD_OFFSET + h->sh.i_alpha_c0_offset;
336 int index_b = i_qp-QP_BD_OFFSET + h->sh.i_beta_offset;
337 int alpha = alpha_table(index_a) << (BIT_DEPTH-8);
338 int beta = beta_table(index_b) << (BIT_DEPTH-8);
340 if( !alpha || !beta )
343 pf_intra( pix, i_stride, alpha, beta );
346 void x264_frame_deblock_row( x264_t *h, int mb_y )
348 int b_interlaced = h->sh.b_mbaff;
349 int qp_thresh = 15 - X264_MIN( h->sh.i_alpha_c0_offset, h->sh.i_beta_offset ) - X264_MAX( 0, h->pps->i_chroma_qp_index_offset );
350 int stridey = h->fdec->i_stride[0];
351 int stride2y = stridey << b_interlaced;
352 int strideuv = h->fdec->i_stride[1];
353 int stride2uv = strideuv << b_interlaced;
355 for( int mb_x = 0; mb_x < h->mb.i_mb_width; mb_x += (~b_interlaced | mb_y)&1, mb_y ^= b_interlaced )
357 x264_prefetch_fenc( h, h->fdec, mb_x, mb_y );
358 x264_macroblock_cache_load_neighbours_deblock( h, mb_x, mb_y );
360 int mb_xy = h->mb.i_mb_xy;
361 int transform_8x8 = h->mb.mb_transform_size[h->mb.i_mb_xy];
362 int intra_cur = IS_INTRA( h->mb.type[mb_xy] );
363 uint8_t (*bs)[4][4] = h->deblock_strength[mb_y&1][mb_x];
365 pixel *pixy = h->fdec->plane[0] + 16*mb_y*stridey + 16*mb_x;
366 pixel *pixuv = h->fdec->plane[1] + 8*mb_y*strideuv + 16*mb_x;
367 if( mb_y & b_interlaced )
373 int qp = h->mb.qp[mb_xy];
374 int qpc = h->chroma_qp_table[qp];
375 int first_edge_only = h->mb.type[mb_xy] == P_SKIP || qp <= qp_thresh;
377 #define FILTER( intra, dir, edge, qp, chroma_qp )\
380 deblock_edge##intra( h, pixy + 4*edge*(dir?stride2y:1),\
381 stride2y, bs[dir][edge], qp, 0,\
382 h->loopf.deblock_luma##intra[dir] );\
384 deblock_edge##intra( h, pixuv + 2*edge*(dir?stride2uv:2),\
385 stride2uv, bs[dir][edge], chroma_qp, 1,\
386 h->loopf.deblock_chroma##intra[dir] );\
389 if( h->mb.i_neighbour & MB_LEFT )
391 int qpl = h->mb.qp[h->mb.i_mb_left_xy[0]];
392 int qp_left = (qp + qpl + 1) >> 1;
393 int qpc_left = (h->chroma_qp_table[qp] + h->chroma_qp_table[qpl] + 1) >> 1;
394 int intra_left = IS_INTRA( h->mb.type[h->mb.i_mb_left_xy[0]] );
395 if( intra_cur || intra_left )
396 FILTER( _intra, 0, 0, qp_left, qpc_left );
398 FILTER( , 0, 0, qp_left, qpc_left );
401 if( !first_edge_only )
403 if( !transform_8x8 ) FILTER( , 0, 1, qp, qpc );
404 FILTER( , 0, 2, qp, qpc );
405 if( !transform_8x8 ) FILTER( , 0, 3, qp, qpc );
408 if( h->mb.i_neighbour & MB_TOP )
410 int qpt = h->mb.qp[h->mb.i_mb_top_xy];
411 int qp_top = (qp + qpt + 1) >> 1;
412 int qpc_top = (h->chroma_qp_table[qp] + h->chroma_qp_table[qpt] + 1) >> 1;
413 int intra_top = IS_INTRA( h->mb.type[h->mb.i_mb_top_xy] );
414 if( ~b_interlaced & (intra_cur | intra_top) )
415 FILTER( _intra, 1, 0, qp_top, qpc_top );
419 M32( bs[1][0] ) = 0x03030303;
420 FILTER( , 1, 0, qp_top, qpc_top );
424 if( !first_edge_only )
426 if( !transform_8x8 ) FILTER( , 1, 1, qp, qpc );
427 FILTER( , 1, 2, qp, qpc );
428 if( !transform_8x8 ) FILTER( , 1, 3, qp, qpc );
/* For deblock-aware RD.
 * TODO:
 *  deblock macroblock edges
 *  support analysis partitions smaller than 16x16
 *  handle duplicate refs correctly
 *  handle cavlc+8x8dct correctly
 */
443 void x264_macroblock_deblock( x264_t *h )
445 int qp_thresh = 15 - X264_MIN( h->sh.i_alpha_c0_offset, h->sh.i_beta_offset ) - X264_MAX( 0, h->pps->i_chroma_qp_index_offset );
447 if( qp <= qp_thresh || h->mb.i_type == P_SKIP )
450 uint8_t (*bs)[4][4] = h->deblock_strength[h->mb.i_mb_y&1][h->mb.i_mb_x];
451 if( IS_INTRA( h->mb.i_type ) )
452 memset( bs, 3, 2*4*4*sizeof(uint8_t) );
454 h->loopf.deblock_strength( h->mb.cache.non_zero_count, h->mb.cache.ref, h->mb.cache.mv,
455 bs, 4 >> h->sh.b_mbaff, h->sh.i_type == SLICE_TYPE_B, h );
457 int transform_8x8 = h->mb.b_transform_8x8;
458 pixel *fdec = h->mb.pic.p_fdec[0];
460 #define FILTER( dir, edge )\
463 deblock_edge( h, fdec + 4*edge*(dir?FDEC_STRIDE:1),\
464 FDEC_STRIDE, bs[dir][edge], qp, 0,\
465 h->loopf.deblock_luma[dir] );\
468 if( !transform_8x8 ) FILTER( 0, 1 );
470 if( !transform_8x8 ) FILTER( 0, 3 );
472 if( !transform_8x8 ) FILTER( 1, 1 );
474 if( !transform_8x8 ) FILTER( 1, 3 );
480 void x264_deblock_v_luma_sse2( pixel *pix, int stride, int alpha, int beta, int8_t *tc0 );
481 void x264_deblock_v_luma_avx ( pixel *pix, int stride, int alpha, int beta, int8_t *tc0 );
482 void x264_deblock_h_luma_sse2( pixel *pix, int stride, int alpha, int beta, int8_t *tc0 );
483 void x264_deblock_h_luma_avx ( pixel *pix, int stride, int alpha, int beta, int8_t *tc0 );
484 void x264_deblock_v_chroma_sse2( pixel *pix, int stride, int alpha, int beta, int8_t *tc0 );
485 void x264_deblock_v_chroma_avx ( pixel *pix, int stride, int alpha, int beta, int8_t *tc0 );
486 void x264_deblock_h_chroma_sse2( pixel *pix, int stride, int alpha, int beta, int8_t *tc0 );
487 void x264_deblock_h_chroma_avx ( pixel *pix, int stride, int alpha, int beta, int8_t *tc0 );
488 void x264_deblock_v_luma_intra_sse2( pixel *pix, int stride, int alpha, int beta );
489 void x264_deblock_v_luma_intra_avx ( pixel *pix, int stride, int alpha, int beta );
490 void x264_deblock_h_luma_intra_sse2( pixel *pix, int stride, int alpha, int beta );
491 void x264_deblock_h_luma_intra_avx ( pixel *pix, int stride, int alpha, int beta );
492 void x264_deblock_v_chroma_intra_sse2( pixel *pix, int stride, int alpha, int beta );
493 void x264_deblock_v_chroma_intra_avx ( pixel *pix, int stride, int alpha, int beta );
494 void x264_deblock_h_chroma_intra_sse2( pixel *pix, int stride, int alpha, int beta );
495 void x264_deblock_h_chroma_intra_avx ( pixel *pix, int stride, int alpha, int beta );
496 void x264_deblock_strength_mmxext( uint8_t nnz[X264_SCAN8_SIZE], int8_t ref[2][X264_SCAN8_LUMA_SIZE],
497 int16_t mv[2][X264_SCAN8_LUMA_SIZE][2], uint8_t bs[2][4][4],
498 int mvy_limit, int bframe, x264_t *h );
499 void x264_deblock_strength_sse2 ( uint8_t nnz[X264_SCAN8_SIZE], int8_t ref[2][X264_SCAN8_LUMA_SIZE],
500 int16_t mv[2][X264_SCAN8_LUMA_SIZE][2], uint8_t bs[2][4][4],
501 int mvy_limit, int bframe, x264_t *h );
502 void x264_deblock_strength_ssse3 ( uint8_t nnz[X264_SCAN8_SIZE], int8_t ref[2][X264_SCAN8_LUMA_SIZE],
503 int16_t mv[2][X264_SCAN8_LUMA_SIZE][2], uint8_t bs[2][4][4],
504 int mvy_limit, int bframe, x264_t *h );
505 void x264_deblock_strength_avx ( uint8_t nnz[X264_SCAN8_SIZE], int8_t ref[2][X264_SCAN8_LUMA_SIZE],
506 int16_t mv[2][X264_SCAN8_LUMA_SIZE][2], uint8_t bs[2][4][4],
507 int mvy_limit, int bframe, x264_t *h );
509 void x264_deblock_h_luma_mmxext( pixel *pix, int stride, int alpha, int beta, int8_t *tc0 );
510 void x264_deblock_v8_luma_mmxext( uint8_t *pix, int stride, int alpha, int beta, int8_t *tc0 );
511 void x264_deblock_v_chroma_mmxext( pixel *pix, int stride, int alpha, int beta, int8_t *tc0 );
512 void x264_deblock_h_chroma_mmxext( pixel *pix, int stride, int alpha, int beta, int8_t *tc0 );
513 void x264_deblock_h_luma_intra_mmxext( pixel *pix, int stride, int alpha, int beta );
514 void x264_deblock_v8_luma_intra_mmxext( uint8_t *pix, int stride, int alpha, int beta );
515 void x264_deblock_v_chroma_intra_mmxext( pixel *pix, int stride, int alpha, int beta );
516 void x264_deblock_h_chroma_intra_mmxext( pixel *pix, int stride, int alpha, int beta );
519 void x264_deblock_v_luma_mmxext( pixel *pix, int stride, int alpha, int beta, int8_t *tc0 );
520 void x264_deblock_v_luma_intra_mmxext( pixel *pix, int stride, int alpha, int beta );
522 // FIXME this wrapper has a significant cpu cost
/* Run the 8-pixel MMX kernel twice to cover a 16-pixel luma edge; the second
 * half uses the last two tc0 segments. Fix: restored missing braces. */
static void x264_deblock_v_luma_mmxext( uint8_t *pix, int stride, int alpha, int beta, int8_t *tc0 )
{
    x264_deblock_v8_luma_mmxext( pix,   stride, alpha, beta, tc0   );
    x264_deblock_v8_luma_mmxext( pix+8, stride, alpha, beta, tc0+2 );
}
/* Intra (strong) variant of the 8+8 MMX wrapper above.
 * Fix: restored missing braces. */
static void x264_deblock_v_luma_intra_mmxext( uint8_t *pix, int stride, int alpha, int beta )
{
    x264_deblock_v8_luma_intra_mmxext( pix,   stride, alpha, beta );
    x264_deblock_v8_luma_intra_mmxext( pix+8, stride, alpha, beta );
}
533 #endif // HIGH_BIT_DEPTH
538 void x264_deblock_v_luma_altivec( uint8_t *pix, int stride, int alpha, int beta, int8_t *tc0 );
539 void x264_deblock_h_luma_altivec( uint8_t *pix, int stride, int alpha, int beta, int8_t *tc0 );
543 void x264_deblock_v_luma_neon( uint8_t *, int, int, int, int8_t * );
544 void x264_deblock_h_luma_neon( uint8_t *, int, int, int, int8_t * );
545 void x264_deblock_v_chroma_neon( uint8_t *, int, int, int, int8_t * );
546 void x264_deblock_h_chroma_neon( uint8_t *, int, int, int, int8_t * );
549 void x264_deblock_init( int cpu, x264_deblock_function_t *pf, int b_mbaff )
551 pf->deblock_luma[1] = deblock_v_luma_c;
552 pf->deblock_luma[0] = deblock_h_luma_c;
553 pf->deblock_chroma[1] = deblock_v_chroma_c;
554 pf->deblock_chroma[0] = deblock_h_chroma_c;
555 pf->deblock_luma_intra[1] = deblock_v_luma_intra_c;
556 pf->deblock_luma_intra[0] = deblock_h_luma_intra_c;
557 pf->deblock_chroma_intra[1] = deblock_v_chroma_intra_c;
558 pf->deblock_chroma_intra[0] = deblock_h_chroma_intra_c;
559 pf->deblock_strength = deblock_strength_c;
562 if( cpu&X264_CPU_MMXEXT )
565 pf->deblock_luma[1] = x264_deblock_v_luma_mmxext;
566 pf->deblock_luma[0] = x264_deblock_h_luma_mmxext;
567 pf->deblock_chroma[1] = x264_deblock_v_chroma_mmxext;
568 pf->deblock_chroma[0] = x264_deblock_h_chroma_mmxext;
569 pf->deblock_luma_intra[1] = x264_deblock_v_luma_intra_mmxext;
570 pf->deblock_luma_intra[0] = x264_deblock_h_luma_intra_mmxext;
571 pf->deblock_chroma_intra[1] = x264_deblock_v_chroma_intra_mmxext;
572 pf->deblock_chroma_intra[0] = x264_deblock_h_chroma_intra_mmxext;
574 pf->deblock_strength = x264_deblock_strength_mmxext;
575 if( cpu&X264_CPU_SSE2 )
577 pf->deblock_strength = x264_deblock_strength_sse2;
578 if( !(cpu&X264_CPU_STACK_MOD4) )
580 pf->deblock_luma[1] = x264_deblock_v_luma_sse2;
581 pf->deblock_luma[0] = x264_deblock_h_luma_sse2;
582 pf->deblock_chroma[1] = x264_deblock_v_chroma_sse2;
583 pf->deblock_chroma[0] = x264_deblock_h_chroma_sse2;
584 pf->deblock_luma_intra[1] = x264_deblock_v_luma_intra_sse2;
585 pf->deblock_luma_intra[0] = x264_deblock_h_luma_intra_sse2;
586 pf->deblock_chroma_intra[1] = x264_deblock_v_chroma_intra_sse2;
587 pf->deblock_chroma_intra[0] = x264_deblock_h_chroma_intra_sse2;
590 if( cpu&X264_CPU_SSSE3 )
591 pf->deblock_strength = x264_deblock_strength_ssse3;
592 if( cpu&X264_CPU_AVX )
594 pf->deblock_strength = x264_deblock_strength_avx;
595 if( !(cpu&X264_CPU_STACK_MOD4) )
597 pf->deblock_luma[1] = x264_deblock_v_luma_avx;
598 pf->deblock_luma[0] = x264_deblock_h_luma_avx;
599 pf->deblock_chroma[1] = x264_deblock_v_chroma_avx;
600 pf->deblock_chroma[0] = x264_deblock_h_chroma_avx;
601 pf->deblock_luma_intra[1] = x264_deblock_v_luma_intra_avx;
602 pf->deblock_luma_intra[0] = x264_deblock_h_luma_intra_avx;
603 pf->deblock_chroma_intra[1] = x264_deblock_v_chroma_intra_avx;
604 pf->deblock_chroma_intra[0] = x264_deblock_h_chroma_intra_avx;
612 if( cpu&X264_CPU_ALTIVEC )
614 pf->deblock_luma[1] = x264_deblock_v_luma_altivec;
615 pf->deblock_luma[0] = x264_deblock_h_luma_altivec;
617 #endif // HAVE_ALTIVEC
620 if( cpu&X264_CPU_NEON )
622 pf->deblock_luma[1] = x264_deblock_v_luma_neon;
623 pf->deblock_luma[0] = x264_deblock_h_luma_neon;
624 // pf->deblock_chroma[1] = x264_deblock_v_chroma_neon;
625 // pf->deblock_chroma[0] = x264_deblock_h_chroma_neon;
628 #endif // !HIGH_BIT_DEPTH
630 if( b_mbaff ) pf->deblock_strength = deblock_strength_mbaff_c;