1 /*****************************************************************************
2 * deblock.c: deblocking
3 *****************************************************************************
4 * Copyright (C) 2003-2011 x264 project
6 * Authors: Laurent Aimar <fenrir@via.ecp.fr>
7 * Loren Merritt <lorenm@u.washington.edu>
8 * Fiona Glaser <fiona@x264.com>
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of the GNU General Public License as published by
12 * the Free Software Foundation; either version 2 of the License, or
13 * (at your option) any later version.
15 * This program is distributed in the hope that it will be useful,
16 * but WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
18 * GNU General Public License for more details.
20 * You should have received a copy of the GNU General Public License
21 * along with this program; if not, write to the Free Software
22 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02111, USA.
24 * This program is also available under a commercial proprietary license.
25 * For more information, contact us at licensing@x264.com.
26 *****************************************************************************/
30 /* Deblocking filter */
/* Alpha (edge-activity) threshold per H.264 Table 8-16, indexed via
 * alpha_table(): the table is padded by 12*3 entries (24 leading, 12 trailing)
 * so that qp + alpha_c0_offset with bit-depth QP offsets cannot index out of
 * bounds.  The visible listing was missing the "255,255," row, leaving the
 * last two valid indexA entries (50,51) zero-initialized — restored here. */
static const uint8_t i_alpha_table[52+12*3] =
{
      0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,
      0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,
      0,  0,  0,  0,  0,  0,  0,  0,  0,  0,
      0,  0,  0,  0,  0,  0,  4,  4,  5,  6,
      7,  8,  9, 10, 12, 13, 15, 17, 20, 22,
     25, 28, 32, 36, 40, 45, 50, 56, 63, 71,
     80, 90,101,113,127,144,162,182,203,226,
    255,255,
    255,255,255,255,255,255,255,255,255,255,255,255,
};
/* Beta (local-gradient) threshold per H.264 Table 8-16, same +24 index bias
 * and padding as i_alpha_table.  The "18, 18," row for indexB 50,51 was
 * missing from the listing and is restored here (array is [88], so the two
 * trailing entries would otherwise read as 0 and disable filtering). */
static const uint8_t i_beta_table[52+12*3] =
{
      0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,
      0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,
      0,  0,  0,  0,  0,  0,  0,  0,  0,  0,
      0,  0,  0,  0,  0,  0,  2,  2,  2,  3,
      3,  3,  3,  4,  4,  4,  6,  6,  7,  7,
      8,  8,  9,  9, 10, 10, 11, 11, 12, 12,
     13, 13, 14, 14, 15, 15, 16, 16, 17, 17,
     18, 18,
     18, 18, 18, 18, 18, 18, 18, 18, 18, 18, 18, 18,
};
/* tC0 clipping values per H.264 Table 8-17, one row per (biased) indexA.
 * Row layout is [bS]: entry 0 is -1 (sentinel meaning "bS==0, don't filter"),
 * entries 1..3 are tC0 for bS 1..3.  Same +24 bias/padding as the tables
 * above; only the stripped initializer braces are restored here. */
static const int8_t i_tc0_table[52+12*3][4] =
{
    {-1, 0, 0, 0 }, {-1, 0, 0, 0 }, {-1, 0, 0, 0 }, {-1, 0, 0, 0 }, {-1, 0, 0, 0 }, {-1, 0, 0, 0 },
    {-1, 0, 0, 0 }, {-1, 0, 0, 0 }, {-1, 0, 0, 0 }, {-1, 0, 0, 0 }, {-1, 0, 0, 0 }, {-1, 0, 0, 0 },
    {-1, 0, 0, 0 }, {-1, 0, 0, 0 }, {-1, 0, 0, 0 }, {-1, 0, 0, 0 }, {-1, 0, 0, 0 }, {-1, 0, 0, 0 },
    {-1, 0, 0, 0 }, {-1, 0, 0, 0 }, {-1, 0, 0, 0 }, {-1, 0, 0, 0 }, {-1, 0, 0, 0 }, {-1, 0, 0, 0 },
    {-1, 0, 0, 0 }, {-1, 0, 0, 0 }, {-1, 0, 0, 0 }, {-1, 0, 0, 0 }, {-1, 0, 0, 0 }, {-1, 0, 0, 0 },
    {-1, 0, 0, 0 }, {-1, 0, 0, 0 }, {-1, 0, 0, 0 }, {-1, 0, 0, 0 }, {-1, 0, 0, 0 }, {-1, 0, 0, 0 },
    {-1, 0, 0, 0 }, {-1, 0, 0, 0 }, {-1, 0, 0, 0 }, {-1, 0, 0, 0 }, {-1, 0, 0, 0 }, {-1, 0, 0, 1 },
    {-1, 0, 0, 1 }, {-1, 0, 0, 1 }, {-1, 0, 0, 1 }, {-1, 0, 1, 1 }, {-1, 0, 1, 1 }, {-1, 1, 1, 1 },
    {-1, 1, 1, 1 }, {-1, 1, 1, 1 }, {-1, 1, 1, 1 }, {-1, 1, 1, 2 }, {-1, 1, 1, 2 }, {-1, 1, 1, 2 },
    {-1, 1, 1, 2 }, {-1, 1, 2, 3 }, {-1, 1, 2, 3 }, {-1, 2, 2, 3 }, {-1, 2, 2, 4 }, {-1, 2, 3, 4 },
    {-1, 2, 3, 4 }, {-1, 3, 3, 5 }, {-1, 3, 4, 6 }, {-1, 3, 4, 6 }, {-1, 4, 5, 7 }, {-1, 4, 5, 8 },
    {-1, 4, 6, 9 }, {-1, 5, 7,10 }, {-1, 6, 8,11 }, {-1, 6, 8,13 }, {-1, 7,10,14 }, {-1, 8,11,16 },
    {-1, 9,12,18 }, {-1,10,13,20 }, {-1,11,15,23 }, {-1,13,17,25 },
    {-1,13,17,25 }, {-1,13,17,25 }, {-1,13,17,25 }, {-1,13,17,25 }, {-1,13,17,25 }, {-1,13,17,25 },
    {-1,13,17,25 }, {-1,13,17,25 }, {-1,13,17,25 }, {-1,13,17,25 }, {-1,13,17,25 }, {-1,13,17,25 },
};
/* The strength tables above carry a +24 index bias so that a negative
 * index (qp plus alpha/beta offsets minus QP_BD_OFFSET can go below 0)
 * still lands inside the padded region instead of underflowing. */
#define alpha_table(x) i_alpha_table[(x)+24]
#define beta_table(x) i_beta_table[(x)+24]
#define tc0_table(x) i_tc0_table[(x)+24]
78 static ALWAYS_INLINE void deblock_edge_luma_c( pixel *pix, int xstride, int alpha, int beta, int8_t tc0 )
80 int p2 = pix[-3*xstride];
81 int p1 = pix[-2*xstride];
82 int p0 = pix[-1*xstride];
83 int q0 = pix[ 0*xstride];
84 int q1 = pix[ 1*xstride];
85 int q2 = pix[ 2*xstride];
87 if( abs( p0 - q0 ) < alpha && abs( p1 - p0 ) < beta && abs( q1 - q0 ) < beta )
91 if( abs( p2 - p0 ) < beta )
94 pix[-2*xstride] = p1 + x264_clip3( (( p2 + ((p0 + q0 + 1) >> 1)) >> 1) - p1, -tc0, tc0 );
97 if( abs( q2 - q0 ) < beta )
100 pix[ 1*xstride] = q1 + x264_clip3( (( q2 + ((p0 + q0 + 1) >> 1)) >> 1) - q1, -tc0, tc0 );
104 delta = x264_clip3( (((q0 - p0 ) << 2) + (p1 - q1) + 4) >> 3, -tc, tc );
105 pix[-1*xstride] = x264_clip_pixel( p0 + delta ); /* p0' */
106 pix[ 0*xstride] = x264_clip_pixel( q0 - delta ); /* q0' */
109 static inline void deblock_luma_c( pixel *pix, int xstride, int ystride, int alpha, int beta, int8_t *tc0 )
111 for( int i = 0; i < 4; i++ )
118 for( int d = 0; d < 4; d++, pix += ystride )
119 deblock_edge_luma_c( pix, xstride, alpha, beta, tc0[i] );
122 static inline void deblock_v_luma_mbaff_c( pixel *pix, int stride, int alpha, int beta, int8_t *tc0 )
124 for( int d = 0; d < 8; d++, pix += stride )
125 deblock_edge_luma_c( pix, 1, alpha, beta, tc0[d>>1] );
127 static void deblock_v_luma_c( pixel *pix, int stride, int alpha, int beta, int8_t *tc0 )
129 deblock_luma_c( pix, stride, 1, alpha, beta, tc0 );
131 static void deblock_h_luma_c( pixel *pix, int stride, int alpha, int beta, int8_t *tc0 )
133 deblock_luma_c( pix, 1, stride, alpha, beta, tc0 );
136 static ALWAYS_INLINE void deblock_edge_chroma_c( pixel *pix, int xstride, int alpha, int beta, int8_t tc )
138 int p1 = pix[-2*xstride];
139 int p0 = pix[-1*xstride];
140 int q0 = pix[ 0*xstride];
141 int q1 = pix[ 1*xstride];
143 if( abs( p0 - q0 ) < alpha && abs( p1 - p0 ) < beta && abs( q1 - q0 ) < beta )
145 int delta = x264_clip3( (((q0 - p0 ) << 2) + (p1 - q1) + 4) >> 3, -tc, tc );
146 pix[-1*xstride] = x264_clip_pixel( p0 + delta ); /* p0' */
147 pix[ 0*xstride] = x264_clip_pixel( q0 - delta ); /* q0' */
150 static inline void deblock_chroma_c( pixel *pix, int xstride, int ystride, int alpha, int beta, int8_t *tc0 )
152 for( int i = 0; i < 4; i++ )
160 for( int d = 0; d < 2; d++, pix += ystride-2 )
161 for( int e = 0; e < 2; e++, pix++ )
162 deblock_edge_chroma_c( pix, xstride, alpha, beta, tc0[i] );
165 static inline void deblock_v_chroma_mbaff_c( pixel *pix, int stride, int alpha, int beta, int8_t *tc0 )
167 for( int i = 0; i < 4; i++, pix += stride )
168 deblock_edge_chroma_c( pix, 2, alpha, beta, tc0[i] );
170 static void deblock_v_chroma_c( pixel *pix, int stride, int alpha, int beta, int8_t *tc0 )
172 deblock_chroma_c( pix, stride, 2, alpha, beta, tc0 );
174 static void deblock_h_chroma_c( pixel *pix, int stride, int alpha, int beta, int8_t *tc0 )
176 deblock_chroma_c( pix, 2, stride, alpha, beta, tc0 );
179 static ALWAYS_INLINE void deblock_edge_luma_intra_c( pixel *pix, int xstride, int alpha, int beta )
181 int p2 = pix[-3*xstride];
182 int p1 = pix[-2*xstride];
183 int p0 = pix[-1*xstride];
184 int q0 = pix[ 0*xstride];
185 int q1 = pix[ 1*xstride];
186 int q2 = pix[ 2*xstride];
188 if( abs( p0 - q0 ) < alpha && abs( p1 - p0 ) < beta && abs( q1 - q0 ) < beta )
190 if( abs( p0 - q0 ) < ((alpha >> 2) + 2) )
192 if( abs( p2 - p0 ) < beta ) /* p0', p1', p2' */
194 const int p3 = pix[-4*xstride];
195 pix[-1*xstride] = ( p2 + 2*p1 + 2*p0 + 2*q0 + q1 + 4 ) >> 3;
196 pix[-2*xstride] = ( p2 + p1 + p0 + q0 + 2 ) >> 2;
197 pix[-3*xstride] = ( 2*p3 + 3*p2 + p1 + p0 + q0 + 4 ) >> 3;
200 pix[-1*xstride] = ( 2*p1 + p0 + q1 + 2 ) >> 2;
201 if( abs( q2 - q0 ) < beta ) /* q0', q1', q2' */
203 const int q3 = pix[3*xstride];
204 pix[0*xstride] = ( p1 + 2*p0 + 2*q0 + 2*q1 + q2 + 4 ) >> 3;
205 pix[1*xstride] = ( p0 + q0 + q1 + q2 + 2 ) >> 2;
206 pix[2*xstride] = ( 2*q3 + 3*q2 + q1 + q0 + p0 + 4 ) >> 3;
209 pix[0*xstride] = ( 2*q1 + q0 + p1 + 2 ) >> 2;
213 pix[-1*xstride] = ( 2*p1 + p0 + q1 + 2 ) >> 2;
214 pix[ 0*xstride] = ( 2*q1 + q0 + p1 + 2 ) >> 2;
218 static inline void deblock_luma_intra_c( pixel *pix, int xstride, int ystride, int alpha, int beta )
220 for( int d = 0; d < 16; d++, pix += ystride )
221 deblock_edge_luma_intra_c( pix, xstride, alpha, beta );
223 static inline void deblock_v_luma_intra_mbaff_c( pixel *pix, int ystride, int alpha, int beta )
225 for( int d = 0; d < 8; d++, pix += ystride )
226 deblock_edge_luma_intra_c( pix, 1, alpha, beta );
228 static void deblock_v_luma_intra_c( pixel *pix, int stride, int alpha, int beta )
230 deblock_luma_intra_c( pix, stride, 1, alpha, beta );
232 static void deblock_h_luma_intra_c( pixel *pix, int stride, int alpha, int beta )
234 deblock_luma_intra_c( pix, 1, stride, alpha, beta );
237 static ALWAYS_INLINE void deblock_edge_chroma_intra_c( pixel *pix, int xstride, int alpha, int beta )
239 int p1 = pix[-2*xstride];
240 int p0 = pix[-1*xstride];
241 int q0 = pix[ 0*xstride];
242 int q1 = pix[ 1*xstride];
244 if( abs( p0 - q0 ) < alpha && abs( p1 - p0 ) < beta && abs( q1 - q0 ) < beta )
246 pix[-1*xstride] = (2*p1 + p0 + q1 + 2) >> 2; /* p0' */
247 pix[ 0*xstride] = (2*q1 + q0 + p1 + 2) >> 2; /* q0' */
250 static inline void deblock_chroma_intra_c( pixel *pix, int xstride, int ystride, int alpha, int beta, int dir )
252 for( int d = 0; d < (dir?16:8); d++, pix += ystride-2 )
253 for( int e = 0; e < (dir?1:2); e++, pix++ )
254 deblock_edge_chroma_intra_c( pix, xstride, alpha, beta );
256 static inline void deblock_v_chroma_intra_mbaff_c( pixel *pix, int stride, int alpha, int beta )
258 for( int i = 0; i < 4; i++, pix += stride )
259 deblock_edge_chroma_intra_c( pix, 2, alpha, beta );
261 static void deblock_v_chroma_intra_c( pixel *pix, int stride, int alpha, int beta )
263 deblock_chroma_intra_c( pix, stride, 2, alpha, beta, 1 );
265 static void deblock_h_chroma_intra_c( pixel *pix, int stride, int alpha, int beta )
267 deblock_chroma_intra_c( pix, 2, stride, alpha, beta, 0 );
270 static void deblock_strength_c( uint8_t nnz[X264_SCAN8_SIZE], int8_t ref[2][X264_SCAN8_LUMA_SIZE],
271 int16_t mv[2][X264_SCAN8_LUMA_SIZE][2], uint8_t bs[2][8][4], int mvy_limit,
272 int bframe, x264_t *h )
274 for( int dir = 0; dir < 2; dir++ )
276 int s1 = dir ? 1 : 8;
277 int s2 = dir ? 8 : 1;
278 for( int edge = 0; edge < 4; edge++ )
279 for( int i = 0, loc = X264_SCAN8_0+edge*s2; i < 4; i++, loc += s1 )
282 if( nnz[loc] || nnz[locn] )
283 bs[dir][edge][i] = 2;
284 else if( ref[0][loc] != ref[0][locn] ||
285 abs( mv[0][loc][0] - mv[0][locn][0] ) >= 4 ||
286 abs( mv[0][loc][1] - mv[0][locn][1] ) >= mvy_limit ||
287 (bframe && (ref[1][loc] != ref[1][locn] ||
288 abs( mv[1][loc][0] - mv[1][locn][0] ) >= 4 ||
289 abs( mv[1][loc][1] - mv[1][locn][1] ) >= mvy_limit )))
291 bs[dir][edge][i] = 1;
294 bs[dir][edge][i] = 0;
299 void deblock_strength_mbaff_c( uint8_t nnz_cache[X264_SCAN8_SIZE], int8_t ref[2][X264_SCAN8_LUMA_SIZE],
300 int16_t mv[2][X264_SCAN8_LUMA_SIZE][2], uint8_t bs[2][8][4],
301 int mvy_limit, int bframe, x264_t *h )
303 int neighbour_field[2];
304 neighbour_field[0] = h->mb.i_mb_left_xy[0] >= 0 && h->mb.field[h->mb.i_mb_left_xy[0]];
305 neighbour_field[1] = h->mb.i_mb_top_xy >= 0 && h->mb.field[h->mb.i_mb_top_xy];
306 int intra_cur = IS_INTRA( h->mb.i_type );
310 for( int dir = 0; dir < 2; dir++ )
312 int edge_stride = dir ? 8 : 1;
313 int part_stride = dir ? 1 : 8;
314 for( int edge = 0; edge < 4; edge++ )
316 for( int i = 0, q = X264_SCAN8_0+edge*edge_stride; i < 4; i++, q += part_stride )
318 int p = q - edge_stride;
319 if( nnz_cache[q] || nnz_cache[p] )
321 bs[dir][edge][i] = 2;
323 else if( (edge == 0 && MB_INTERLACED != neighbour_field[dir]) ||
324 ref[0][q] != ref[0][p] ||
325 abs( mv[0][q][0] - mv[0][p][0] ) >= 4 ||
326 abs( mv[0][q][1] - mv[0][p][1] ) >= mvy_limit ||
327 (bframe && (ref[1][q] != ref[1][p] ||
328 abs( mv[1][q][0] - mv[1][p][0] ) >= 4 ||
329 abs( mv[1][q][1] - mv[1][p][1] ) >= mvy_limit )) )
331 bs[dir][edge][i] = 1;
334 bs[dir][edge][i] = 0;
340 if( h->mb.i_neighbour & MB_LEFT )
342 if( h->mb.field[h->mb.i_mb_left_xy[0]] != MB_INTERLACED )
344 static const uint8_t offset[2][2][8] = {
345 { { 0, 0, 0, 0, 1, 1, 1, 1 },
346 { 2, 2, 2, 2, 3, 3, 3, 3 }, },
347 { { 0, 1, 2, 3, 0, 1, 2, 3 },
348 { 0, 1, 2, 3, 0, 1, 2, 3 }, }
356 const uint8_t *off = offset[MB_INTERLACED][h->mb.i_mb_y&1];
357 uint8_t (*nnz)[48] = h->mb.non_zero_count;
359 for( int i = 0; i < 8; i++ )
361 int left = h->mb.i_mb_left_xy[MB_INTERLACED ? i>>2 : i&1];
362 int nnz_this = h->mb.cache.non_zero_count[x264_scan8[0]+8*(i>>1)];
363 int nnz_left = nnz[left][3 + 4*off[i]];
364 if( !h->param.b_cabac && h->pps->b_transform_8x8_mode )
367 if( h->mb.mb_transform_size[left] )
368 nnz_left = !!(M16( &nnz[left][2+4*j] ) | M16( &nnz[left][2+4*(1+j)] ));
370 if( IS_INTRA( h->mb.type[left] ) )
372 else if( nnz_left || nnz_this )
374 else // As left is different interlaced.
381 for( int i = 0; i < 4; i++ ) bs[0][0][i] = bS[i];
382 for( int i = 0; i < 4; i++ ) bs[0][4][i] = bS[4+i];
386 for( int i = 0; i < 4; i++ ) bs[0][0][i] = bS[2*i];
387 for( int i = 0; i < 4; i++ ) bs[0][4][i] = bS[1+2*i];
392 if( h->mb.i_neighbour & MB_TOP )
394 if( !(h->mb.i_mb_y&1) && !MB_INTERLACED && h->mb.field[h->mb.i_mb_top_xy] )
396 /* Need to filter both fields (even for frame macroblocks).
397 * Filter top two rows using the top macroblock of the above
398 * pair and then the bottom one. */
399 int mbn_xy = h->mb.i_mb_xy - 2 * h->mb.i_mb_stride;
401 nnz_cur[0] = h->mb.cache.non_zero_count[x264_scan8[0]+0];
402 nnz_cur[1] = h->mb.cache.non_zero_count[x264_scan8[0]+1];
403 nnz_cur[2] = h->mb.cache.non_zero_count[x264_scan8[0]+2];
404 nnz_cur[3] = h->mb.cache.non_zero_count[x264_scan8[0]+3];
405 /* Munge NNZ for cavlc + 8x8dct */
406 if( !h->param.b_cabac && h->pps->b_transform_8x8_mode &&
407 h->mb.mb_transform_size[h->mb.i_mb_xy] )
409 int nnz0 = M16( &h->mb.cache.non_zero_count[x264_scan8[ 0]] ) | M16( &h->mb.cache.non_zero_count[x264_scan8[ 2]] );
410 int nnz1 = M16( &h->mb.cache.non_zero_count[x264_scan8[ 4]] ) | M16( &h->mb.cache.non_zero_count[x264_scan8[ 6]] );
411 nnz_cur[0] = nnz_cur[1] = !!nnz0;
412 nnz_cur[2] = nnz_cur[3] = !!nnz1;
415 for( int j = 0; j < 2; j++, mbn_xy += h->mb.i_mb_stride )
417 int mbn_intra = IS_INTRA( h->mb.type[mbn_xy] );
418 uint8_t (*nnz)[48] = h->mb.non_zero_count;
421 nnz_top[0] = nnz[mbn_xy][3*4+0];
422 nnz_top[1] = nnz[mbn_xy][3*4+1];
423 nnz_top[2] = nnz[mbn_xy][3*4+2];
424 nnz_top[3] = nnz[mbn_xy][3*4+3];
426 if( !h->param.b_cabac && h->pps->b_transform_8x8_mode &&
427 (h->mb.i_neighbour & MB_TOP) && h->mb.mb_transform_size[mbn_xy] )
429 int nnz_top0 = M16( &nnz[mbn_xy][8] ) | M16( &nnz[mbn_xy][12] );
430 int nnz_top1 = M16( &nnz[mbn_xy][10] ) | M16( &nnz[mbn_xy][14] );
431 nnz_top[0] = nnz_top[1] = nnz_top0 ? 0x0101 : 0;
432 nnz_top[2] = nnz_top[3] = nnz_top1 ? 0x0101 : 0;
436 if( intra_cur || mbn_intra )
437 M32( bS ) = 0x03030303;
440 for( int i = 0; i < 4; i++ )
442 if( nnz_cur[i] || nnz_top[i] )
448 for( int i = 0; i < 4; i++ )
449 bs[1][4*j][i] = bS[i];
455 static ALWAYS_INLINE void deblock_edge( x264_t *h, pixel *pix, int i_stride, uint8_t bS[4], int i_qp, int a, int b, int b_chroma, x264_deblock_inter_t pf_inter )
457 int index_a = i_qp + a;
458 int index_b = i_qp + b;
459 int alpha = alpha_table(index_a) << (BIT_DEPTH-8);
460 int beta = beta_table(index_b) << (BIT_DEPTH-8);
463 if( !M32(bS) || !alpha || !beta )
466 tc[0] = (tc0_table(index_a)[bS[0]] << (BIT_DEPTH-8)) + b_chroma;
467 tc[1] = (tc0_table(index_a)[bS[1]] << (BIT_DEPTH-8)) + b_chroma;
468 tc[2] = (tc0_table(index_a)[bS[2]] << (BIT_DEPTH-8)) + b_chroma;
469 tc[3] = (tc0_table(index_a)[bS[3]] << (BIT_DEPTH-8)) + b_chroma;
471 pf_inter( pix, i_stride, alpha, beta, tc );
474 static ALWAYS_INLINE void deblock_edge_intra( x264_t *h, pixel *pix, int i_stride, uint8_t bS[4], int i_qp, int a, int b, int b_chroma, x264_deblock_intra_t pf_intra )
476 int index_a = i_qp + a;
477 int index_b = i_qp + b;
478 int alpha = alpha_table(index_a) << (BIT_DEPTH-8);
479 int beta = beta_table(index_b) << (BIT_DEPTH-8);
481 if( !alpha || !beta )
484 pf_intra( pix, i_stride, alpha, beta );
487 void x264_frame_deblock_row( x264_t *h, int mb_y )
489 int b_interlaced = SLICE_MBAFF;
490 int a = h->sh.i_alpha_c0_offset - QP_BD_OFFSET;
491 int b = h->sh.i_beta_offset - QP_BD_OFFSET;
492 int qp_thresh = 15 - X264_MIN( a, b ) - X264_MAX( 0, h->pps->i_chroma_qp_index_offset );
493 int stridey = h->fdec->i_stride[0];
494 int strideuv = h->fdec->i_stride[1];
495 int chroma444 = CHROMA444;
496 intptr_t uvdiff = chroma444 ? h->fdec->plane[2] - h->fdec->plane[1] : 1;
498 for( int mb_x = 0; mb_x < h->mb.i_mb_width; mb_x += (~b_interlaced | mb_y)&1, mb_y ^= b_interlaced )
500 x264_prefetch_fenc( h, h->fdec, mb_x, mb_y );
501 x264_macroblock_cache_load_neighbours_deblock( h, mb_x, mb_y );
503 int mb_xy = h->mb.i_mb_xy;
504 int transform_8x8 = h->mb.mb_transform_size[h->mb.i_mb_xy];
505 int intra_cur = IS_INTRA( h->mb.type[mb_xy] );
506 uint8_t (*bs)[8][4] = h->deblock_strength[mb_y&1][mb_x];
508 pixel *pixy = h->fdec->plane[0] + 16*mb_y*stridey + 16*mb_x;
509 pixel *pixuv = h->fdec->plane[1] + (8<<chroma444)*mb_y*strideuv + 16*mb_x;
511 if( mb_y & MB_INTERLACED )
514 pixuv -= ((8<<chroma444)-1)*strideuv;
517 int stride2y = stridey << MB_INTERLACED;
518 int stride2uv = strideuv << MB_INTERLACED;
519 int qp = h->mb.qp[mb_xy];
520 int qpc = h->chroma_qp_table[qp];
521 int first_edge_only = (h->mb.partition[mb_xy] == D_16x16 && !h->mb.cbp[mb_xy] && !intra_cur) || qp <= qp_thresh;
523 #define FILTER( intra, dir, edge, qp, chroma_qp )\
526 deblock_edge##intra( h, pixy + 4*edge*(dir?stride2y:1),\
527 stride2y, bs[dir][edge], qp, a, b, 0,\
528 h->loopf.deblock_luma##intra[dir] );\
531 deblock_edge##intra( h, pixuv + 4*edge*(dir?stride2uv:1),\
532 stride2uv, bs[dir][edge], chroma_qp, a, b, 0,\
533 h->loopf.deblock_luma##intra[dir] );\
534 deblock_edge##intra( h, pixuv + uvdiff + 4*edge*(dir?stride2uv:1),\
535 stride2uv, bs[dir][edge], chroma_qp, a, b, 0,\
536 h->loopf.deblock_luma##intra[dir] );\
538 else if( !(edge & 1) )\
539 deblock_edge##intra( h, pixuv + 2*edge*(dir?stride2uv:2),\
540 stride2uv, bs[dir][edge], chroma_qp, a, b, 1,\
541 h->loopf.deblock_chroma##intra[dir] );\
544 if( h->mb.i_neighbour & MB_LEFT )
546 if( b_interlaced && h->mb.field[h->mb.i_mb_left_xy[0]] != MB_INTERLACED )
551 x264_deblock_inter_t luma_deblock = deblock_v_luma_mbaff_c;
552 x264_deblock_inter_t chroma_deblock = chroma444 ? deblock_v_luma_mbaff_c : deblock_v_chroma_mbaff_c;
553 x264_deblock_intra_t luma_intra_deblock = deblock_v_luma_intra_mbaff_c;
554 x264_deblock_intra_t chroma_intra_deblock = chroma444 ? deblock_v_luma_intra_mbaff_c : deblock_v_chroma_intra_mbaff_c;
555 int c = chroma444 ? 0 : 1;
557 left_qp[0] = h->mb.qp[h->mb.i_mb_left_xy[0]];
558 luma_qp[0] = (qp + left_qp[0] + 1) >> 1;
559 chroma_qp[0] = (qpc + h->chroma_qp_table[left_qp[0]] + 1) >> 1;
560 if( bs[0][0][0] == 4 )
562 deblock_edge_intra( h, pixy, 2*stridey, bs[0][0], luma_qp[0], a, b, 0, luma_intra_deblock );
563 deblock_edge_intra( h, pixuv, 2*strideuv, bs[0][0], chroma_qp[0], a, b, c, chroma_intra_deblock );
564 deblock_edge_intra( h, pixuv + uvdiff, 2*strideuv, bs[0][0], chroma_qp[0], a, b, c, chroma_intra_deblock );
568 deblock_edge( h, pixy, 2*stridey, bs[0][0], luma_qp[0], a, b, 0, luma_deblock );
569 deblock_edge( h, pixuv, 2*strideuv, bs[0][0], chroma_qp[0], a, b, c, chroma_deblock );
570 deblock_edge( h, pixuv + uvdiff, 2*strideuv, bs[0][0], chroma_qp[0], a, b, c, chroma_deblock );
573 int offy = MB_INTERLACED ? 4 : 0;
574 int offuv = MB_INTERLACED ? 3 : 0;
575 if( chroma444 ) offuv = offy;
576 left_qp[1] = h->mb.qp[h->mb.i_mb_left_xy[1]];
577 luma_qp[1] = (qp + left_qp[1] + 1) >> 1;
578 chroma_qp[1] = (qpc + h->chroma_qp_table[left_qp[1]] + 1) >> 1;
579 if( bs[0][4][0] == 4)
581 deblock_edge_intra( h, pixy + (stridey<<offy), 2*stridey, bs[0][4], luma_qp[1], a, b, 0, luma_intra_deblock );
582 deblock_edge_intra( h, pixuv + (strideuv<<offuv), 2*strideuv, bs[0][4], chroma_qp[1], a, b, c, chroma_intra_deblock );
583 deblock_edge_intra( h, pixuv + uvdiff + (strideuv<<offuv), 2*strideuv, bs[0][4], chroma_qp[1], a, b, c, chroma_intra_deblock );
587 deblock_edge( h, pixy + (stridey<<offy), 2*stridey, bs[0][4], luma_qp[1], a, b, 0, luma_deblock );
588 deblock_edge( h, pixuv + (strideuv<<offuv), 2*strideuv, bs[0][4], chroma_qp[1], a, b, c, chroma_deblock );
589 deblock_edge( h, pixuv + uvdiff + (strideuv<<offuv), 2*strideuv, bs[0][4], chroma_qp[1], a, b, c, chroma_deblock );
594 int qpl = h->mb.qp[h->mb.i_mb_xy-1];
595 int qp_left = (qp + qpl + 1) >> 1;
596 int qpc_left = (qpc + h->chroma_qp_table[qpl] + 1) >> 1;
597 int intra_left = IS_INTRA( h->mb.type[h->mb.i_mb_xy-1] );
599 if( intra_cur || intra_left )
600 FILTER( _intra, 0, 0, qp_left, qpc_left );
602 FILTER( , 0, 0, qp_left, qpc_left );
605 if( !first_edge_only )
607 if( !transform_8x8 ) FILTER( , 0, 1, qp, qpc );
608 FILTER( , 0, 2, qp, qpc );
609 if( !transform_8x8 ) FILTER( , 0, 3, qp, qpc );
612 if( h->mb.i_neighbour & MB_TOP )
614 if( b_interlaced && !(mb_y&1) && !MB_INTERLACED && h->mb.field[h->mb.i_mb_top_xy] )
616 int mbn_xy = mb_xy - 2 * h->mb.i_mb_stride;
618 for(int j=0; j<2; j++, mbn_xy += h->mb.i_mb_stride)
620 int qpt = h->mb.qp[mbn_xy];
621 int qp_top = (qp + qpt + 1) >> 1;
622 int qpc_top = (qpc + h->chroma_qp_table[qpt] + 1) >> 1;
624 // deblock the first horizontal edge of the even rows, then the first horizontal edge of the odd rows
625 deblock_edge( h, pixy + j*stridey, 2* stridey, bs[1][4*j], qp_top, a, b, 0, deblock_v_luma_c );
628 deblock_edge( h, pixuv + j*strideuv, 2*strideuv, bs[1][4*j], qpc_top, a, b, 0, deblock_v_luma_c );
629 deblock_edge( h, pixuv + uvdiff + j*strideuv, 2*strideuv, bs[1][4*j], qpc_top, a, b, 0, deblock_v_luma_c );
632 deblock_edge( h, pixuv + j*strideuv, 2*strideuv, bs[1][4*j], qpc_top, a, b, 1, deblock_v_chroma_c );
637 int qpt = h->mb.qp[h->mb.i_mb_top_xy];
638 int qp_top = (qp + qpt + 1) >> 1;
639 int qpc_top = (qpc + h->chroma_qp_table[qpt] + 1) >> 1;
640 int intra_top = IS_INTRA( h->mb.type[h->mb.i_mb_top_xy] );
642 if( (!b_interlaced || (!MB_INTERLACED && !h->mb.field[h->mb.i_mb_top_xy]))
643 && (intra_cur || intra_top) )
645 FILTER( _intra, 1, 0, qp_top, qpc_top );
650 M32( bs[1][0] ) = 0x03030303;
651 FILTER( , 1, 0, qp_top, qpc_top );
656 if( !first_edge_only )
658 if( !transform_8x8 ) FILTER( , 1, 1, qp, qpc );
659 FILTER( , 1, 2, qp, qpc );
660 if( !transform_8x8 ) FILTER( , 1, 3, qp, qpc );
667 /* For deblock-aware RD.
669 * deblock macroblock edges
670 * support analysis partitions smaller than 16x16
671 * deblock chroma for 4:2:0
672 * handle duplicate refs correctly
673 * handle cavlc+8x8dct correctly
675 void x264_macroblock_deblock( x264_t *h )
677 int a = h->sh.i_alpha_c0_offset - QP_BD_OFFSET;
678 int b = h->sh.i_beta_offset - QP_BD_OFFSET;
679 int qp_thresh = 15 - X264_MIN( a, b ) - X264_MAX( 0, h->pps->i_chroma_qp_index_offset );
680 int intra_cur = IS_INTRA( h->mb.i_type );
682 int qpc = h->mb.i_chroma_qp;
683 if( (h->mb.i_partition == D_16x16 && !h->mb.i_cbp_luma && !intra_cur) || qp <= qp_thresh )
686 uint8_t (*bs)[8][4] = h->deblock_strength[h->mb.i_mb_y&1][h->mb.i_mb_x];
689 memset( &bs[0][1], 3, 3*4*sizeof(uint8_t) );
690 memset( &bs[1][1], 3, 3*4*sizeof(uint8_t) );
693 h->loopf.deblock_strength( h->mb.cache.non_zero_count, h->mb.cache.ref, h->mb.cache.mv,
694 bs, 4 >> SLICE_MBAFF, h->sh.i_type == SLICE_TYPE_B, h );
696 int transform_8x8 = h->mb.b_transform_8x8;
698 #define FILTER( dir, edge )\
701 deblock_edge( h, h->mb.pic.p_fdec[0] + 4*edge*(dir?FDEC_STRIDE:1),\
702 FDEC_STRIDE, bs[dir][edge], qp, a, b, 0,\
703 h->loopf.deblock_luma[dir] );\
706 deblock_edge( h, h->mb.pic.p_fdec[1] + 4*edge*(dir?FDEC_STRIDE:1),\
707 FDEC_STRIDE, bs[dir][edge], qpc, a, b, 0,\
708 h->loopf.deblock_luma[dir] );\
709 deblock_edge( h, h->mb.pic.p_fdec[2] + 4*edge*(dir?FDEC_STRIDE:1),\
710 FDEC_STRIDE, bs[dir][edge], qpc, a, b, 0,\
711 h->loopf.deblock_luma[dir] );\
715 if( !transform_8x8 ) FILTER( 0, 1 );
717 if( !transform_8x8 ) FILTER( 0, 3 );
719 if( !transform_8x8 ) FILTER( 1, 1 );
721 if( !transform_8x8 ) FILTER( 1, 3 );
/* x86 assembly prototypes (implemented in common/x86/deblock-a.asm).
 * NOTE(review): the #if HAVE_MMX / #if HIGH_BIT_DEPTH guards that normally
 * surround these declarations are not visible in this chunk — confirm
 * against the full file. */
void x264_deblock_v_luma_sse2( pixel *pix, int stride, int alpha, int beta, int8_t *tc0 );
void x264_deblock_v_luma_avx ( pixel *pix, int stride, int alpha, int beta, int8_t *tc0 );
void x264_deblock_h_luma_sse2( pixel *pix, int stride, int alpha, int beta, int8_t *tc0 );
void x264_deblock_h_luma_avx ( pixel *pix, int stride, int alpha, int beta, int8_t *tc0 );
void x264_deblock_v_chroma_sse2( pixel *pix, int stride, int alpha, int beta, int8_t *tc0 );
void x264_deblock_v_chroma_avx ( pixel *pix, int stride, int alpha, int beta, int8_t *tc0 );
void x264_deblock_h_chroma_sse2( pixel *pix, int stride, int alpha, int beta, int8_t *tc0 );
void x264_deblock_h_chroma_avx ( pixel *pix, int stride, int alpha, int beta, int8_t *tc0 );
void x264_deblock_v_luma_intra_sse2( pixel *pix, int stride, int alpha, int beta );
void x264_deblock_v_luma_intra_avx ( pixel *pix, int stride, int alpha, int beta );
void x264_deblock_h_luma_intra_sse2( pixel *pix, int stride, int alpha, int beta );
void x264_deblock_h_luma_intra_avx ( pixel *pix, int stride, int alpha, int beta );
void x264_deblock_v_chroma_intra_sse2( pixel *pix, int stride, int alpha, int beta );
void x264_deblock_v_chroma_intra_avx ( pixel *pix, int stride, int alpha, int beta );
void x264_deblock_h_chroma_intra_sse2( pixel *pix, int stride, int alpha, int beta );
void x264_deblock_h_chroma_intra_avx ( pixel *pix, int stride, int alpha, int beta );
void x264_deblock_strength_mmxext( uint8_t nnz[X264_SCAN8_SIZE], int8_t ref[2][X264_SCAN8_LUMA_SIZE],
                                   int16_t mv[2][X264_SCAN8_LUMA_SIZE][2], uint8_t bs[2][8][4],
                                   int mvy_limit, int bframe, x264_t *h );
void x264_deblock_strength_sse2  ( uint8_t nnz[X264_SCAN8_SIZE], int8_t ref[2][X264_SCAN8_LUMA_SIZE],
                                   int16_t mv[2][X264_SCAN8_LUMA_SIZE][2], uint8_t bs[2][8][4],
                                   int mvy_limit, int bframe, x264_t *h );
void x264_deblock_strength_ssse3 ( uint8_t nnz[X264_SCAN8_SIZE], int8_t ref[2][X264_SCAN8_LUMA_SIZE],
                                   int16_t mv[2][X264_SCAN8_LUMA_SIZE][2], uint8_t bs[2][8][4],
                                   int mvy_limit, int bframe, x264_t *h );
void x264_deblock_strength_avx   ( uint8_t nnz[X264_SCAN8_SIZE], int8_t ref[2][X264_SCAN8_LUMA_SIZE],
                                   int16_t mv[2][X264_SCAN8_LUMA_SIZE][2], uint8_t bs[2][8][4],
                                   int mvy_limit, int bframe, x264_t *h );
/* mmxext variants: the v8 functions operate on 8-pixel halves (8-bit only). */
void x264_deblock_h_luma_mmxext( pixel *pix, int stride, int alpha, int beta, int8_t *tc0 );
void x264_deblock_v8_luma_mmxext( uint8_t *pix, int stride, int alpha, int beta, int8_t *tc0 );
void x264_deblock_v_chroma_mmxext( pixel *pix, int stride, int alpha, int beta, int8_t *tc0 );
void x264_deblock_h_chroma_mmxext( pixel *pix, int stride, int alpha, int beta, int8_t *tc0 );
void x264_deblock_h_luma_intra_mmxext( pixel *pix, int stride, int alpha, int beta );
void x264_deblock_v8_luma_intra_mmxext( uint8_t *pix, int stride, int alpha, int beta );
void x264_deblock_v_chroma_intra_mmxext( pixel *pix, int stride, int alpha, int beta );
void x264_deblock_h_chroma_intra_mmxext( pixel *pix, int stride, int alpha, int beta );
/* High-bit-depth builds get full-width asm directly (no v8 wrapper needed). */
void x264_deblock_v_luma_mmxext( pixel *pix, int stride, int alpha, int beta, int8_t *tc0 );
void x264_deblock_v_luma_intra_mmxext( pixel *pix, int stride, int alpha, int beta );
// FIXME this wrapper has a significant cpu cost
/* 8-bit only: the mmxext luma asm handles 8 pixels per call, so split the
 * 16-pixel edge into two halves; tc0 advances by 2 groups for the second. */
static void x264_deblock_v_luma_mmxext( uint8_t *pix, int stride, int alpha, int beta, int8_t *tc0 )
{
    x264_deblock_v8_luma_mmxext( pix,   stride, alpha, beta, tc0 );
    x264_deblock_v8_luma_mmxext( pix+8, stride, alpha, beta, tc0+2 );
}
static void x264_deblock_v_luma_intra_mmxext( uint8_t *pix, int stride, int alpha, int beta )
{
    x264_deblock_v8_luma_intra_mmxext( pix,   stride, alpha, beta );
    x264_deblock_v8_luma_intra_mmxext( pix+8, stride, alpha, beta );
}
780 #endif // HIGH_BIT_DEPTH
/* PPC AltiVec and ARM NEON assembly prototypes (8-bit depth only).
 * NOTE(review): the #if HAVE_ALTIVEC / HAVE_ARMV6 guards around these are
 * not visible in this chunk — confirm against the full file. */
void x264_deblock_v_luma_altivec( uint8_t *pix, int stride, int alpha, int beta, int8_t *tc0 );
void x264_deblock_h_luma_altivec( uint8_t *pix, int stride, int alpha, int beta, int8_t *tc0 );
/* NEON (chroma variants declared but not yet wired up in init below). */
void x264_deblock_v_luma_neon( uint8_t *, int, int, int, int8_t * );
void x264_deblock_h_luma_neon( uint8_t *, int, int, int, int8_t * );
void x264_deblock_v_chroma_neon( uint8_t *, int, int, int, int8_t * );
void x264_deblock_h_chroma_neon( uint8_t *, int, int, int, int8_t * );
796 void x264_deblock_init( int cpu, x264_deblock_function_t *pf, int b_mbaff )
798 pf->deblock_luma[1] = deblock_v_luma_c;
799 pf->deblock_luma[0] = deblock_h_luma_c;
800 pf->deblock_chroma[1] = deblock_v_chroma_c;
801 pf->deblock_chroma[0] = deblock_h_chroma_c;
802 pf->deblock_luma_intra[1] = deblock_v_luma_intra_c;
803 pf->deblock_luma_intra[0] = deblock_h_luma_intra_c;
804 pf->deblock_chroma_intra[1] = deblock_v_chroma_intra_c;
805 pf->deblock_chroma_intra[0] = deblock_h_chroma_intra_c;
806 pf->deblock_strength = deblock_strength_c;
809 if( cpu&X264_CPU_MMXEXT )
812 pf->deblock_luma[1] = x264_deblock_v_luma_mmxext;
813 pf->deblock_luma[0] = x264_deblock_h_luma_mmxext;
814 pf->deblock_chroma[1] = x264_deblock_v_chroma_mmxext;
815 pf->deblock_chroma[0] = x264_deblock_h_chroma_mmxext;
816 pf->deblock_luma_intra[1] = x264_deblock_v_luma_intra_mmxext;
817 pf->deblock_luma_intra[0] = x264_deblock_h_luma_intra_mmxext;
818 pf->deblock_chroma_intra[1] = x264_deblock_v_chroma_intra_mmxext;
819 pf->deblock_chroma_intra[0] = x264_deblock_h_chroma_intra_mmxext;
821 pf->deblock_strength = x264_deblock_strength_mmxext;
822 if( cpu&X264_CPU_SSE2 )
824 pf->deblock_strength = x264_deblock_strength_sse2;
825 if( !(cpu&X264_CPU_STACK_MOD4) )
827 pf->deblock_luma[1] = x264_deblock_v_luma_sse2;
828 pf->deblock_luma[0] = x264_deblock_h_luma_sse2;
829 pf->deblock_chroma[1] = x264_deblock_v_chroma_sse2;
830 pf->deblock_chroma[0] = x264_deblock_h_chroma_sse2;
831 pf->deblock_luma_intra[1] = x264_deblock_v_luma_intra_sse2;
832 pf->deblock_luma_intra[0] = x264_deblock_h_luma_intra_sse2;
833 pf->deblock_chroma_intra[1] = x264_deblock_v_chroma_intra_sse2;
834 pf->deblock_chroma_intra[0] = x264_deblock_h_chroma_intra_sse2;
837 if( cpu&X264_CPU_SSSE3 )
838 pf->deblock_strength = x264_deblock_strength_ssse3;
839 if( cpu&X264_CPU_AVX )
841 pf->deblock_strength = x264_deblock_strength_avx;
842 if( !(cpu&X264_CPU_STACK_MOD4) )
844 pf->deblock_luma[1] = x264_deblock_v_luma_avx;
845 pf->deblock_luma[0] = x264_deblock_h_luma_avx;
846 pf->deblock_chroma[1] = x264_deblock_v_chroma_avx;
847 pf->deblock_chroma[0] = x264_deblock_h_chroma_avx;
848 pf->deblock_luma_intra[1] = x264_deblock_v_luma_intra_avx;
849 pf->deblock_luma_intra[0] = x264_deblock_h_luma_intra_avx;
850 pf->deblock_chroma_intra[1] = x264_deblock_v_chroma_intra_avx;
851 pf->deblock_chroma_intra[0] = x264_deblock_h_chroma_intra_avx;
859 if( cpu&X264_CPU_ALTIVEC )
861 pf->deblock_luma[1] = x264_deblock_v_luma_altivec;
862 pf->deblock_luma[0] = x264_deblock_h_luma_altivec;
864 #endif // HAVE_ALTIVEC
867 if( cpu&X264_CPU_NEON )
869 pf->deblock_luma[1] = x264_deblock_v_luma_neon;
870 pf->deblock_luma[0] = x264_deblock_h_luma_neon;
871 // pf->deblock_chroma[1] = x264_deblock_v_chroma_neon;
872 // pf->deblock_chroma[0] = x264_deblock_h_chroma_neon;
875 #endif // !HIGH_BIT_DEPTH
877 if( b_mbaff ) pf->deblock_strength = deblock_strength_mbaff_c;