1 /*****************************************************************************
2 * deblock.c: deblocking
3 *****************************************************************************
4 * Copyright (C) 2003-2011 x264 project
6 * Authors: Laurent Aimar <fenrir@via.ecp.fr>
7 * Loren Merritt <lorenm@u.washington.edu>
8 * Fiona Glaser <fiona@x264.com>
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of the GNU General Public License as published by
12 * the Free Software Foundation; either version 2 of the License, or
13 * (at your option) any later version.
15 * This program is distributed in the hope that it will be useful,
16 * but WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
18 * GNU General Public License for more details.
20 * You should have received a copy of the GNU General Public License
21 * along with this program; if not, write to the Free Software
22 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02111, USA.
24 * This program is also available under a commercial proprietary license.
25 * For more information, contact us at licensing@x264.com.
26 *****************************************************************************/
30 /* Deblocking filter */
/* H.264 deblocking alpha threshold, indexed by clipped QP (spec Table 8-16).
 * Padded with 12 entries on each side of the legal 0..51 range so that
 * QP + alpha_c0_offset indices (biased by +24, see alpha_table()) cannot
 * run off the ends. */
static const uint8_t i_alpha_table[52+12*3] =
{
     0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,
     0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,
     0,  0,  0,  0,  0,  0,  0,  0,  0,  0,
     0,  0,  0,  0,  0,  0,  4,  4,  5,  6,
     7,  8,  9, 10, 12, 13, 15, 17, 20, 22,
    25, 28, 32, 36, 40, 45, 50, 56, 63, 71,
    80, 90,101,113,127,144,162,182,203,226,
   255,255,
   255,255,255,255,255,255,255,255,255,255,255,255,
};
/* H.264 deblocking beta threshold, indexed by clipped QP (spec Table 8-16).
 * Same +/-12 padding scheme as i_alpha_table. */
static const uint8_t i_beta_table[52+12*3] =
{
     0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,
     0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,
     0,  0,  0,  0,  0,  0,  0,  0,  0,  0,
     0,  0,  0,  0,  0,  0,  2,  2,  2,  3,
     3,  3,  3,  4,  4,  4,  6,  6,  7,  7,
     8,  8,  9,  9, 10, 10, 11, 11, 12, 12,
    13, 13, 14, 14, 15, 15, 16, 16, 17, 17,
    18, 18,
    18, 18, 18, 18, 18, 18, 18, 18, 18, 18, 18, 18,
};
/* H.264 tc0 clipping table (spec Table 8-17), indexed by clipped QP and
 * boundary strength bS (columns 1..3; column 0 is a -1 sentinel used for
 * "no filtering", see deblock_luma_c).  Same +/-12 QP padding as above. */
static const int8_t i_tc0_table[52+12*3][4] =
{
    {-1, 0, 0, 0 }, {-1, 0, 0, 0 }, {-1, 0, 0, 0 }, {-1, 0, 0, 0 }, {-1, 0, 0, 0 }, {-1, 0, 0, 0 },
    {-1, 0, 0, 0 }, {-1, 0, 0, 0 }, {-1, 0, 0, 0 }, {-1, 0, 0, 0 }, {-1, 0, 0, 0 }, {-1, 0, 0, 0 },
    {-1, 0, 0, 0 }, {-1, 0, 0, 0 }, {-1, 0, 0, 0 }, {-1, 0, 0, 0 }, {-1, 0, 0, 0 }, {-1, 0, 0, 0 },
    {-1, 0, 0, 0 }, {-1, 0, 0, 0 }, {-1, 0, 0, 0 }, {-1, 0, 0, 0 }, {-1, 0, 0, 0 }, {-1, 0, 0, 0 },
    {-1, 0, 0, 0 }, {-1, 0, 0, 0 }, {-1, 0, 0, 0 }, {-1, 0, 0, 0 }, {-1, 0, 0, 0 }, {-1, 0, 0, 0 },
    {-1, 0, 0, 0 }, {-1, 0, 0, 0 }, {-1, 0, 0, 0 }, {-1, 0, 0, 0 }, {-1, 0, 0, 0 }, {-1, 0, 0, 0 },
    {-1, 0, 0, 0 }, {-1, 0, 0, 0 }, {-1, 0, 0, 0 }, {-1, 0, 0, 0 }, {-1, 0, 0, 0 }, {-1, 0, 0, 1 },
    {-1, 0, 0, 1 }, {-1, 0, 0, 1 }, {-1, 0, 0, 1 }, {-1, 0, 1, 1 }, {-1, 0, 1, 1 }, {-1, 1, 1, 1 },
    {-1, 1, 1, 1 }, {-1, 1, 1, 1 }, {-1, 1, 1, 1 }, {-1, 1, 1, 2 }, {-1, 1, 1, 2 }, {-1, 1, 1, 2 },
    {-1, 1, 1, 2 }, {-1, 1, 2, 3 }, {-1, 1, 2, 3 }, {-1, 2, 2, 3 }, {-1, 2, 2, 4 }, {-1, 2, 3, 4 },
    {-1, 2, 3, 4 }, {-1, 3, 3, 5 }, {-1, 3, 4, 6 }, {-1, 3, 4, 6 }, {-1, 4, 5, 7 }, {-1, 4, 5, 8 },
    {-1, 4, 6, 9 }, {-1, 5, 7,10 }, {-1, 6, 8,11 }, {-1, 6, 8,13 }, {-1, 7,10,14 }, {-1, 8,11,16 },
    {-1, 9,12,18 }, {-1,10,13,20 }, {-1,11,15,23 }, {-1,13,17,25 },
    {-1,13,17,25 }, {-1,13,17,25 }, {-1,13,17,25 }, {-1,13,17,25 }, {-1,13,17,25 }, {-1,13,17,25 },
    {-1,13,17,25 }, {-1,13,17,25 }, {-1,13,17,25 }, {-1,13,17,25 }, {-1,13,17,25 }, {-1,13,17,25 },
};
/* QP-indexed lookups into the strength tables above.  The +24 bias lets
 * callers index with qp + alpha/beta offset, which may fall outside 0..51;
 * the padded table ends absorb such indices. */
#define alpha_table(x) i_alpha_table[(x)+24]
#define beta_table(x) i_beta_table[(x)+24]
#define tc0_table(x) i_tc0_table[(x)+24]
78 static ALWAYS_INLINE void deblock_edge_luma_c( pixel *pix, int xstride, int alpha, int beta, int8_t tc0 )
80 int p2 = pix[-3*xstride];
81 int p1 = pix[-2*xstride];
82 int p0 = pix[-1*xstride];
83 int q0 = pix[ 0*xstride];
84 int q1 = pix[ 1*xstride];
85 int q2 = pix[ 2*xstride];
87 if( abs( p0 - q0 ) < alpha && abs( p1 - p0 ) < beta && abs( q1 - q0 ) < beta )
91 if( abs( p2 - p0 ) < beta )
94 pix[-2*xstride] = p1 + x264_clip3( (( p2 + ((p0 + q0 + 1) >> 1)) >> 1) - p1, -tc0, tc0 );
97 if( abs( q2 - q0 ) < beta )
100 pix[ 1*xstride] = q1 + x264_clip3( (( q2 + ((p0 + q0 + 1) >> 1)) >> 1) - q1, -tc0, tc0 );
104 delta = x264_clip3( (((q0 - p0 ) << 2) + (p1 - q1) + 4) >> 3, -tc, tc );
105 pix[-1*xstride] = x264_clip_pixel( p0 + delta ); /* p0' */
106 pix[ 0*xstride] = x264_clip_pixel( q0 - delta ); /* q0' */
109 static inline void deblock_luma_c( pixel *pix, int xstride, int ystride, int alpha, int beta, int8_t *tc0 )
111 for( int i = 0; i < 4; i++ )
118 for( int d = 0; d < 4; d++, pix += ystride )
119 deblock_edge_luma_c( pix, xstride, alpha, beta, tc0[i] );
122 static void deblock_v_luma_mbaff_c( pixel *pix, int stride, int alpha, int beta, int8_t *tc0 )
124 for( int d = 0; d < 8; d++, pix += stride )
125 deblock_edge_luma_c( pix, 1, alpha, beta, tc0[d>>1] );
127 static void deblock_v_luma_c( pixel *pix, int stride, int alpha, int beta, int8_t *tc0 )
129 deblock_luma_c( pix, stride, 1, alpha, beta, tc0 );
131 static void deblock_h_luma_c( pixel *pix, int stride, int alpha, int beta, int8_t *tc0 )
133 deblock_luma_c( pix, 1, stride, alpha, beta, tc0 );
136 static ALWAYS_INLINE void deblock_edge_chroma_c( pixel *pix, int xstride, int alpha, int beta, int8_t tc )
138 int p1 = pix[-2*xstride];
139 int p0 = pix[-1*xstride];
140 int q0 = pix[ 0*xstride];
141 int q1 = pix[ 1*xstride];
143 if( abs( p0 - q0 ) < alpha && abs( p1 - p0 ) < beta && abs( q1 - q0 ) < beta )
145 int delta = x264_clip3( (((q0 - p0 ) << 2) + (p1 - q1) + 4) >> 3, -tc, tc );
146 pix[-1*xstride] = x264_clip_pixel( p0 + delta ); /* p0' */
147 pix[ 0*xstride] = x264_clip_pixel( q0 - delta ); /* q0' */
150 static inline void deblock_chroma_c( pixel *pix, int xstride, int ystride, int alpha, int beta, int8_t *tc0 )
152 for( int i = 0; i < 4; i++ )
160 for( int d = 0; d < 2; d++, pix += ystride-2 )
161 for( int e = 0; e < 2; e++, pix++ )
162 deblock_edge_chroma_c( pix, xstride, alpha, beta, tc0[i] );
165 static void deblock_v_chroma_mbaff_c( pixel *pix, int stride, int alpha, int beta, int8_t *tc0 )
167 for( int i = 0; i < 4; i++, pix += stride )
168 deblock_edge_chroma_c( pix, 2, alpha, beta, tc0[i] );
170 static void deblock_v_chroma_c( pixel *pix, int stride, int alpha, int beta, int8_t *tc0 )
172 deblock_chroma_c( pix, stride, 2, alpha, beta, tc0 );
174 static void deblock_h_chroma_c( pixel *pix, int stride, int alpha, int beta, int8_t *tc0 )
176 deblock_chroma_c( pix, 2, stride, alpha, beta, tc0 );
179 static ALWAYS_INLINE void deblock_edge_luma_intra_c( pixel *pix, int xstride, int alpha, int beta )
181 int p2 = pix[-3*xstride];
182 int p1 = pix[-2*xstride];
183 int p0 = pix[-1*xstride];
184 int q0 = pix[ 0*xstride];
185 int q1 = pix[ 1*xstride];
186 int q2 = pix[ 2*xstride];
188 if( abs( p0 - q0 ) < alpha && abs( p1 - p0 ) < beta && abs( q1 - q0 ) < beta )
190 if( abs( p0 - q0 ) < ((alpha >> 2) + 2) )
192 if( abs( p2 - p0 ) < beta ) /* p0', p1', p2' */
194 const int p3 = pix[-4*xstride];
195 pix[-1*xstride] = ( p2 + 2*p1 + 2*p0 + 2*q0 + q1 + 4 ) >> 3;
196 pix[-2*xstride] = ( p2 + p1 + p0 + q0 + 2 ) >> 2;
197 pix[-3*xstride] = ( 2*p3 + 3*p2 + p1 + p0 + q0 + 4 ) >> 3;
200 pix[-1*xstride] = ( 2*p1 + p0 + q1 + 2 ) >> 2;
201 if( abs( q2 - q0 ) < beta ) /* q0', q1', q2' */
203 const int q3 = pix[3*xstride];
204 pix[0*xstride] = ( p1 + 2*p0 + 2*q0 + 2*q1 + q2 + 4 ) >> 3;
205 pix[1*xstride] = ( p0 + q0 + q1 + q2 + 2 ) >> 2;
206 pix[2*xstride] = ( 2*q3 + 3*q2 + q1 + q0 + p0 + 4 ) >> 3;
209 pix[0*xstride] = ( 2*q1 + q0 + p1 + 2 ) >> 2;
213 pix[-1*xstride] = ( 2*p1 + p0 + q1 + 2 ) >> 2;
214 pix[ 0*xstride] = ( 2*q1 + q0 + p1 + 2 ) >> 2;
218 static inline void deblock_luma_intra_c( pixel *pix, int xstride, int ystride, int alpha, int beta )
220 for( int d = 0; d < 16; d++, pix += ystride )
221 deblock_edge_luma_intra_c( pix, xstride, alpha, beta );
223 static void deblock_v_luma_intra_mbaff_c( pixel *pix, int ystride, int alpha, int beta )
225 for( int d = 0; d < 8; d++, pix += ystride )
226 deblock_edge_luma_intra_c( pix, 1, alpha, beta );
228 static void deblock_v_luma_intra_c( pixel *pix, int stride, int alpha, int beta )
230 deblock_luma_intra_c( pix, stride, 1, alpha, beta );
232 static void deblock_h_luma_intra_c( pixel *pix, int stride, int alpha, int beta )
234 deblock_luma_intra_c( pix, 1, stride, alpha, beta );
237 static ALWAYS_INLINE void deblock_edge_chroma_intra_c( pixel *pix, int xstride, int alpha, int beta )
239 int p1 = pix[-2*xstride];
240 int p0 = pix[-1*xstride];
241 int q0 = pix[ 0*xstride];
242 int q1 = pix[ 1*xstride];
244 if( abs( p0 - q0 ) < alpha && abs( p1 - p0 ) < beta && abs( q1 - q0 ) < beta )
246 pix[-1*xstride] = (2*p1 + p0 + q1 + 2) >> 2; /* p0' */
247 pix[ 0*xstride] = (2*q1 + q0 + p1 + 2) >> 2; /* q0' */
250 static inline void deblock_chroma_intra_c( pixel *pix, int xstride, int ystride, int alpha, int beta, int dir )
252 for( int d = 0; d < (dir?16:8); d++, pix += ystride-2 )
253 for( int e = 0; e < (dir?1:2); e++, pix++ )
254 deblock_edge_chroma_intra_c( pix, xstride, alpha, beta );
256 static void deblock_v_chroma_intra_mbaff_c( pixel *pix, int stride, int alpha, int beta )
258 for( int i = 0; i < 4; i++, pix += stride )
259 deblock_edge_chroma_intra_c( pix, 2, alpha, beta );
261 static void deblock_v_chroma_intra_c( pixel *pix, int stride, int alpha, int beta )
263 deblock_chroma_intra_c( pix, stride, 2, alpha, beta, 1 );
265 static void deblock_h_chroma_intra_c( pixel *pix, int stride, int alpha, int beta )
267 deblock_chroma_intra_c( pix, 2, stride, alpha, beta, 0 );
270 static void deblock_strength_c( uint8_t nnz[X264_SCAN8_SIZE], int8_t ref[2][X264_SCAN8_LUMA_SIZE],
271 int16_t mv[2][X264_SCAN8_LUMA_SIZE][2], uint8_t bs[2][8][4], int mvy_limit,
274 for( int dir = 0; dir < 2; dir++ )
276 int s1 = dir ? 1 : 8;
277 int s2 = dir ? 8 : 1;
278 for( int edge = 0; edge < 4; edge++ )
279 for( int i = 0, loc = X264_SCAN8_0+edge*s2; i < 4; i++, loc += s1 )
282 if( nnz[loc] || nnz[locn] )
283 bs[dir][edge][i] = 2;
284 else if( ref[0][loc] != ref[0][locn] ||
285 abs( mv[0][loc][0] - mv[0][locn][0] ) >= 4 ||
286 abs( mv[0][loc][1] - mv[0][locn][1] ) >= mvy_limit ||
287 (bframe && (ref[1][loc] != ref[1][locn] ||
288 abs( mv[1][loc][0] - mv[1][locn][0] ) >= 4 ||
289 abs( mv[1][loc][1] - mv[1][locn][1] ) >= mvy_limit )))
291 bs[dir][edge][i] = 1;
294 bs[dir][edge][i] = 0;
299 static ALWAYS_INLINE void deblock_edge( x264_t *h, pixel *pix, int i_stride, uint8_t bS[4], int i_qp, int a, int b, int b_chroma, x264_deblock_inter_t pf_inter )
301 int index_a = i_qp + a;
302 int index_b = i_qp + b;
303 int alpha = alpha_table(index_a) << (BIT_DEPTH-8);
304 int beta = beta_table(index_b) << (BIT_DEPTH-8);
307 if( !M32(bS) || !alpha || !beta )
310 tc[0] = (tc0_table(index_a)[bS[0]] << (BIT_DEPTH-8)) + b_chroma;
311 tc[1] = (tc0_table(index_a)[bS[1]] << (BIT_DEPTH-8)) + b_chroma;
312 tc[2] = (tc0_table(index_a)[bS[2]] << (BIT_DEPTH-8)) + b_chroma;
313 tc[3] = (tc0_table(index_a)[bS[3]] << (BIT_DEPTH-8)) + b_chroma;
315 pf_inter( pix, i_stride, alpha, beta, tc );
318 static ALWAYS_INLINE void deblock_edge_intra( x264_t *h, pixel *pix, int i_stride, uint8_t bS[4], int i_qp, int a, int b, int b_chroma, x264_deblock_intra_t pf_intra )
320 int index_a = i_qp + a;
321 int index_b = i_qp + b;
322 int alpha = alpha_table(index_a) << (BIT_DEPTH-8);
323 int beta = beta_table(index_b) << (BIT_DEPTH-8);
325 if( !alpha || !beta )
328 pf_intra( pix, i_stride, alpha, beta );
/* Deblock one row of macroblocks of the reconstructed frame.
 * Handles progressive and MBAFF coding, 4:2:0/4:4:4 chroma, and picks the
 * intra (strong) or inter (tc-clipped) edge filters per edge.
 * NOTE(review): some structural lines (braces and a few local declarations,
 * e.g. left_qp[]/luma_qp[]/chroma_qp[]) appear truncated in this view;
 * comments describe only the logic that is visible here. */
void x264_frame_deblock_row( x264_t *h, int mb_y )
    int b_interlaced = SLICE_MBAFF;
    /* Slice-header offsets shift the QP used to index the threshold tables. */
    int a = h->sh.i_alpha_c0_offset - QP_BD_OFFSET;
    int b = h->sh.i_beta_offset - QP_BD_OFFSET;
    int qp_thresh = 15 - X264_MIN( a, b ) - X264_MAX( 0, h->pps->i_chroma_qp_index_offset );
    int stridey = h->fdec->i_stride[0];
    int strideuv = h->fdec->i_stride[1];
    int chroma444 = CHROMA444;
    /* 4:4:4 stores U and V in separate planes (uvdiff = plane distance);
     * otherwise the chroma is interleaved and V is 1 sample after U. */
    intptr_t uvdiff = chroma444 ? h->fdec->plane[2] - h->fdec->plane[1] : 1;

    /* In MBAFF ((~b_interlaced|mb_y)&1): visit (mb_x, y) then (mb_x, y+1)
     * before advancing mb_x; progressive simply advances mb_x each pass. */
    for( int mb_x = 0; mb_x < h->mb.i_mb_width; mb_x += (~b_interlaced | mb_y)&1, mb_y ^= b_interlaced )
        x264_prefetch_fenc( h, h->fdec, mb_x, mb_y );
        x264_macroblock_cache_load_neighbours_deblock( h, mb_x, mb_y );

        int mb_xy = h->mb.i_mb_xy;
        int transform_8x8 = h->mb.mb_transform_size[h->mb.i_mb_xy];
        int intra_cur = IS_INTRA( h->mb.type[mb_xy] );
        uint8_t (*bs)[8][4] = h->deblock_strength[mb_y&1][mb_x];

        pixel *pixy = h->fdec->plane[0] + 16*mb_y*stridey + 16*mb_x;
        pixel *pixuv = h->fdec->plane[1] + (8<<chroma444)*mb_y*strideuv + 16*mb_x;

        /* Bottom field of an interlaced pair: rewind to the field's top row. */
        if( mb_y & MB_INTERLACED )
            pixuv -= ((8<<chroma444)-1)*strideuv;

        /* Field macroblocks filter every other line. */
        int stride2y = stridey << MB_INTERLACED;
        int stride2uv = strideuv << MB_INTERLACED;
        int qp = h->mb.qp[mb_xy];
        int qpc = h->chroma_qp_table[qp];
        /* Skip the internal edges when the MB trivially has none to filter. */
        int first_edge_only = (h->mb.partition[mb_xy] == D_16x16 && !h->mb.cbp[mb_xy] && !intra_cur) || qp <= qp_thresh;

/* FILTER: run one edge through the luma filter and the appropriate chroma
 * filter (4:4:4 reuses the luma kernel on both chroma planes; 4:2:0 filters
 * every second edge with the chroma kernel). */
#define FILTER( intra, dir, edge, qp, chroma_qp )\
        deblock_edge##intra( h, pixy + 4*edge*(dir?stride2y:1),\
                             stride2y, bs[dir][edge], qp, a, b, 0,\
                             h->loopf.deblock_luma##intra[dir] );\
            deblock_edge##intra( h, pixuv + 4*edge*(dir?stride2uv:1),\
                                 stride2uv, bs[dir][edge], chroma_qp, a, b, 0,\
                                 h->loopf.deblock_luma##intra[dir] );\
            deblock_edge##intra( h, pixuv + uvdiff + 4*edge*(dir?stride2uv:1),\
                                 stride2uv, bs[dir][edge], chroma_qp, a, b, 0,\
                                 h->loopf.deblock_luma##intra[dir] );\
        else if( !(edge & 1) )\
            deblock_edge##intra( h, pixuv + 2*edge*(dir?stride2uv:2),\
                                 stride2uv, bs[dir][edge], chroma_qp, a, b, 1,\
                                 h->loopf.deblock_chroma##intra[dir] );

        if( h->mb.i_neighbour & MB_LEFT )
            /* Mixed field/frame left neighbour (MBAFF): the shared edge must
             * be filtered in two halves with per-half QPs and the special
             * mbaff kernels. */
            if( b_interlaced && h->mb.field[h->mb.i_mb_left_xy[0]] != MB_INTERLACED )
                x264_deblock_inter_t luma_deblock = h->loopf.deblock_luma_mbaff;
                x264_deblock_inter_t chroma_deblock = chroma444 ? h->loopf.deblock_luma_mbaff : h->loopf.deblock_chroma_mbaff;
                x264_deblock_intra_t luma_intra_deblock = h->loopf.deblock_luma_intra_mbaff;
                x264_deblock_intra_t chroma_intra_deblock = chroma444 ? h->loopf.deblock_luma_intra_mbaff : h->loopf.deblock_chroma_intra_mbaff;
                int c = chroma444 ? 0 : 1;

                /* First half: averaged QP with left MB 0. */
                left_qp[0] = h->mb.qp[h->mb.i_mb_left_xy[0]];
                luma_qp[0] = (qp + left_qp[0] + 1) >> 1;
                chroma_qp[0] = (qpc + h->chroma_qp_table[left_qp[0]] + 1) >> 1;
                if( intra_cur || IS_INTRA( h->mb.type[h->mb.i_mb_left_xy[0]] ) )
                    deblock_edge_intra( h, pixy, 2*stridey, bs[0][0], luma_qp[0], a, b, 0, luma_intra_deblock );
                    deblock_edge_intra( h, pixuv, 2*strideuv, bs[0][0], chroma_qp[0], a, b, c, chroma_intra_deblock );
                    deblock_edge_intra( h, pixuv + uvdiff, 2*strideuv, bs[0][0], chroma_qp[0], a, b, c, chroma_intra_deblock );
                    deblock_edge( h, pixy, 2*stridey, bs[0][0], luma_qp[0], a, b, 0, luma_deblock );
                    deblock_edge( h, pixuv, 2*strideuv, bs[0][0], chroma_qp[0], a, b, c, chroma_deblock );
                    deblock_edge( h, pixuv + uvdiff, 2*strideuv, bs[0][0], chroma_qp[0], a, b, c, chroma_deblock );

                /* Second half: averaged QP with left MB 1; offset selects the
                 * other field's rows. */
                int offy = MB_INTERLACED ? 4 : 0;
                int offuv = MB_INTERLACED ? 3 : 0;
                if( chroma444 ) offuv = offy;
                left_qp[1] = h->mb.qp[h->mb.i_mb_left_xy[1]];
                luma_qp[1] = (qp + left_qp[1] + 1) >> 1;
                chroma_qp[1] = (qpc + h->chroma_qp_table[left_qp[1]] + 1) >> 1;
                if( intra_cur || IS_INTRA( h->mb.type[h->mb.i_mb_left_xy[1]] ) )
                    deblock_edge_intra( h, pixy + (stridey<<offy), 2*stridey, bs[0][4], luma_qp[1], a, b, 0, luma_intra_deblock );
                    deblock_edge_intra( h, pixuv + (strideuv<<offuv), 2*strideuv, bs[0][4], chroma_qp[1], a, b, c, chroma_intra_deblock );
                    deblock_edge_intra( h, pixuv + uvdiff + (strideuv<<offuv), 2*strideuv, bs[0][4], chroma_qp[1], a, b, c, chroma_intra_deblock );
                    deblock_edge( h, pixy + (stridey<<offy), 2*stridey, bs[0][4], luma_qp[1], a, b, 0, luma_deblock );
                    deblock_edge( h, pixuv + (strideuv<<offuv), 2*strideuv, bs[0][4], chroma_qp[1], a, b, c, chroma_deblock );
                    deblock_edge( h, pixuv + uvdiff + (strideuv<<offuv), 2*strideuv, bs[0][4], chroma_qp[1], a, b, c, chroma_deblock );

                /* Same-structure left neighbour: single edge with averaged QP;
                 * intra strength is implied when either MB is intra. */
                int qpl = h->mb.qp[h->mb.i_mb_xy-1];
                int qp_left = (qp + qpl + 1) >> 1;
                int qpc_left = (qpc + h->chroma_qp_table[qpl] + 1) >> 1;
                int intra_left = IS_INTRA( h->mb.type[h->mb.i_mb_xy-1] );

                if( intra_cur || intra_left )
                    FILTER( _intra, 0, 0, qp_left, qpc_left );
                    FILTER(       , 0, 0, qp_left, qpc_left );

        /* Internal vertical edges (skip edges 1/3 with 8x8 transform). */
        if( !first_edge_only )
            if( !transform_8x8 ) FILTER( , 0, 1, qp, qpc );
                                 FILTER( , 0, 2, qp, qpc );
            if( !transform_8x8 ) FILTER( , 0, 3, qp, qpc );

        if( h->mb.i_neighbour & MB_TOP )
            /* Frame MB above field MBs: the top edge is split into two
             * field edges, one against each of the two MBs above. */
            if( b_interlaced && !(mb_y&1) && !MB_INTERLACED && h->mb.field[h->mb.i_mb_top_xy] )
                int mbn_xy = mb_xy - 2 * h->mb.i_mb_stride;

                for( int j = 0; j < 2; j++, mbn_xy += h->mb.i_mb_stride )
                    int qpt = h->mb.qp[mbn_xy];
                    int qp_top = (qp + qpt + 1) >> 1;
                    int qpc_top = (qpc + h->chroma_qp_table[qpt] + 1) >> 1;
                    int intra_top = IS_INTRA( h->mb.type[mbn_xy] );
                    if( intra_cur || intra_top )
                        M32( bs[1][4*j] ) = 0x03030303;

                    // deblock the first horizontal edge of the even rows, then the first horizontal edge of the odd rows
                    deblock_edge( h, pixy      + j*stridey,  2* stridey, bs[1][4*j], qp_top, a, b, 0, deblock_v_luma_c );
                        deblock_edge( h, pixuv          + j*strideuv, 2*strideuv, bs[1][4*j], qpc_top, a, b, 0, deblock_v_luma_c );
                        deblock_edge( h, pixuv + uvdiff + j*strideuv, 2*strideuv, bs[1][4*j], qpc_top, a, b, 0, deblock_v_luma_c );
                        deblock_edge( h, pixuv + j*strideuv, 2*strideuv, bs[1][4*j], qpc_top, a, b, 1, deblock_v_chroma_c );
                /* Normal top edge with averaged QP. */
                int qpt = h->mb.qp[h->mb.i_mb_top_xy];
                int qp_top = (qp + qpt + 1) >> 1;
                int qpc_top = (qpc + h->chroma_qp_table[qpt] + 1) >> 1;
                int intra_top = IS_INTRA( h->mb.type[h->mb.i_mb_top_xy] );

                if( (!b_interlaced || (!MB_INTERLACED && !h->mb.field[h->mb.i_mb_top_xy]))
                    && (intra_cur || intra_top) )
                    FILTER( _intra, 1, 0, qp_top, qpc_top );
                    /* MBAFF mixed pair: force strength 3 but use the inter
                     * kernel (tc-clipped), as required by the spec. */
                    if( intra_cur || intra_top )
                        M32( bs[1][0] ) = 0x03030303;
                    FILTER( , 1, 0, qp_top, qpc_top );

        /* Internal horizontal edges. */
        if( !first_edge_only )
            if( !transform_8x8 ) FILTER( , 1, 1, qp, qpc );
                                 FILTER( , 1, 2, qp, qpc );
            if( !transform_8x8 ) FILTER( , 1, 3, qp, qpc );
/* For deblock-aware RD.
 * TODO (not yet handled by this reduced in-macroblock pass):
 *  deblock macroblock edges
 *  support analysis partitions smaller than 16x16
 *  deblock chroma for 4:2:0
 *  handle duplicate refs correctly
 *  handle cavlc+8x8dct correctly */
/* Deblock only the internal edges of the current macroblock (used for
 * deblock-aware RD; see the TODO comment above for its limitations).
 * NOTE(review): a few structural lines (braces and the `qp` declaration
 * that line 530 reads) appear truncated in this view. */
void x264_macroblock_deblock( x264_t *h )
    int a = h->sh.i_alpha_c0_offset - QP_BD_OFFSET;
    int b = h->sh.i_beta_offset - QP_BD_OFFSET;
    int qp_thresh = 15 - X264_MIN( a, b ) - X264_MAX( 0, h->pps->i_chroma_qp_index_offset );
    int intra_cur = IS_INTRA( h->mb.i_type );
    int qpc = h->mb.i_chroma_qp;
    /* Nothing to filter: skip-like 16x16 with no coefficients, or QP below
     * the threshold at which alpha/beta are zero. */
    if( (h->mb.i_partition == D_16x16 && !h->mb.i_cbp_luma && !intra_cur) || qp <= qp_thresh )

    uint8_t (*bs)[8][4] = h->deblock_strength[h->mb.i_mb_y&1][h->mb.i_mb_x];
    /* Intra MBs get fixed strength 3 on all internal edges; otherwise
     * derive strengths from nnz/ref/mv as usual. */
        memset( &bs[0][1], 3, 3*4*sizeof(uint8_t) );
        memset( &bs[1][1], 3, 3*4*sizeof(uint8_t) );
        h->loopf.deblock_strength( h->mb.cache.non_zero_count, h->mb.cache.ref, h->mb.cache.mv,
                                   bs, 4 >> MB_INTERLACED, h->sh.i_type == SLICE_TYPE_B );

    int transform_8x8 = h->mb.b_transform_8x8;

/* FILTER: filter one internal edge of the fdec buffer on all three planes
 * (luma kernel on all planes; chroma here is 4:4:4-style, see TODO above). */
#define FILTER( dir, edge )\
        deblock_edge( h, h->mb.pic.p_fdec[0] + 4*edge*(dir?FDEC_STRIDE:1),\
                      FDEC_STRIDE, bs[dir][edge], qp, a, b, 0,\
                      h->loopf.deblock_luma[dir] );\
        deblock_edge( h, h->mb.pic.p_fdec[1] + 4*edge*(dir?FDEC_STRIDE:1),\
                      FDEC_STRIDE, bs[dir][edge], qpc, a, b, 0,\
                      h->loopf.deblock_luma[dir] );\
        deblock_edge( h, h->mb.pic.p_fdec[2] + 4*edge*(dir?FDEC_STRIDE:1),\
                      FDEC_STRIDE, bs[dir][edge], qpc, a, b, 0,\
                      h->loopf.deblock_luma[dir] );\

    /* 8x8 transform leaves no coefficients on the odd 4-pixel edges. */
    if( !transform_8x8 ) FILTER( 0, 1 );
    if( !transform_8x8 ) FILTER( 0, 3 );
    if( !transform_8x8 ) FILTER( 1, 1 );
    if( !transform_8x8 ) FILTER( 1, 3 );
/* Prototypes for the platform-optimized x86 implementations (selected in
 * x264_deblock_init below). */
void x264_deblock_v_luma_sse2( pixel *pix, int stride, int alpha, int beta, int8_t *tc0 );
void x264_deblock_v_luma_avx ( pixel *pix, int stride, int alpha, int beta, int8_t *tc0 );
void x264_deblock_h_luma_sse2( pixel *pix, int stride, int alpha, int beta, int8_t *tc0 );
void x264_deblock_h_luma_avx ( pixel *pix, int stride, int alpha, int beta, int8_t *tc0 );
void x264_deblock_v_chroma_sse2( pixel *pix, int stride, int alpha, int beta, int8_t *tc0 );
void x264_deblock_v_chroma_avx ( pixel *pix, int stride, int alpha, int beta, int8_t *tc0 );
void x264_deblock_h_chroma_sse2( pixel *pix, int stride, int alpha, int beta, int8_t *tc0 );
void x264_deblock_h_chroma_avx ( pixel *pix, int stride, int alpha, int beta, int8_t *tc0 );
void x264_deblock_v_luma_intra_sse2( pixel *pix, int stride, int alpha, int beta );
void x264_deblock_v_luma_intra_avx ( pixel *pix, int stride, int alpha, int beta );
void x264_deblock_h_luma_intra_sse2( pixel *pix, int stride, int alpha, int beta );
void x264_deblock_h_luma_intra_avx ( pixel *pix, int stride, int alpha, int beta );
void x264_deblock_v_chroma_intra_sse2( pixel *pix, int stride, int alpha, int beta );
void x264_deblock_v_chroma_intra_avx ( pixel *pix, int stride, int alpha, int beta );
void x264_deblock_h_chroma_intra_sse2( pixel *pix, int stride, int alpha, int beta );
void x264_deblock_h_chroma_intra_avx ( pixel *pix, int stride, int alpha, int beta );
void x264_deblock_strength_mmxext( uint8_t nnz[X264_SCAN8_SIZE], int8_t ref[2][X264_SCAN8_LUMA_SIZE],
                                   int16_t mv[2][X264_SCAN8_LUMA_SIZE][2], uint8_t bs[2][8][4],
                                   int mvy_limit, int bframe );
void x264_deblock_strength_sse2  ( uint8_t nnz[X264_SCAN8_SIZE], int8_t ref[2][X264_SCAN8_LUMA_SIZE],
                                   int16_t mv[2][X264_SCAN8_LUMA_SIZE][2], uint8_t bs[2][8][4],
                                   int mvy_limit, int bframe );
void x264_deblock_strength_ssse3 ( uint8_t nnz[X264_SCAN8_SIZE], int8_t ref[2][X264_SCAN8_LUMA_SIZE],
                                   int16_t mv[2][X264_SCAN8_LUMA_SIZE][2], uint8_t bs[2][8][4],
                                   int mvy_limit, int bframe );
void x264_deblock_strength_avx   ( uint8_t nnz[X264_SCAN8_SIZE], int8_t ref[2][X264_SCAN8_LUMA_SIZE],
                                   int16_t mv[2][X264_SCAN8_LUMA_SIZE][2], uint8_t bs[2][8][4],
                                   int mvy_limit, int bframe );

/* MMXEXT variants; the v8 kernels operate on 8-pixel halves and are wrapped
 * below for full 16-pixel edges. */
void x264_deblock_h_luma_mmxext( pixel *pix, int stride, int alpha, int beta, int8_t *tc0 );
void x264_deblock_v8_luma_mmxext( uint8_t *pix, int stride, int alpha, int beta, int8_t *tc0 );
void x264_deblock_v_chroma_mmxext( pixel *pix, int stride, int alpha, int beta, int8_t *tc0 );
void x264_deblock_h_chroma_mmxext( pixel *pix, int stride, int alpha, int beta, int8_t *tc0 );
void x264_deblock_h_luma_intra_mmxext( pixel *pix, int stride, int alpha, int beta );
void x264_deblock_v8_luma_intra_mmxext( uint8_t *pix, int stride, int alpha, int beta );
void x264_deblock_v_chroma_intra_mmxext( pixel *pix, int stride, int alpha, int beta );
void x264_deblock_h_chroma_intra_mmxext( pixel *pix, int stride, int alpha, int beta );
void x264_deblock_v_luma_mmxext( pixel *pix, int stride, int alpha, int beta, int8_t *tc0 );
void x264_deblock_v_luma_intra_mmxext( pixel *pix, int stride, int alpha, int beta );
// FIXME this wrapper has a significant cpu cost
/* The MMX kernel processes 8 pixels per call: cover the 16-wide luma edge
 * in two halves, advancing tc0 by two 4-pixel groups for the second half. */
static void x264_deblock_v_luma_mmxext( uint8_t *pix, int stride, int alpha, int beta, int8_t *tc0 )
{
    x264_deblock_v8_luma_mmxext( pix,     stride, alpha, beta, tc0     );
    x264_deblock_v8_luma_mmxext( pix + 8, stride, alpha, beta, tc0 + 2 );
}
/* Intra counterpart of the wrapper above: run the 8-pixel strong-filter
 * kernel on each half of the 16-wide edge. */
static void x264_deblock_v_luma_intra_mmxext( uint8_t *pix, int stride, int alpha, int beta )
{
    x264_deblock_v8_luma_intra_mmxext( pix,     stride, alpha, beta );
    x264_deblock_v8_luma_intra_mmxext( pix + 8, stride, alpha, beta );
}
627 #endif // HIGH_BIT_DEPTH
/* AltiVec (PPC) implementations. */
void x264_deblock_v_luma_altivec( uint8_t *pix, int stride, int alpha, int beta, int8_t *tc0 );
void x264_deblock_h_luma_altivec( uint8_t *pix, int stride, int alpha, int beta, int8_t *tc0 );

/* NEON (ARM) implementations. */
void x264_deblock_v_luma_neon( uint8_t *, int, int, int, int8_t * );
void x264_deblock_h_luma_neon( uint8_t *, int, int, int, int8_t * );
void x264_deblock_v_chroma_neon( uint8_t *, int, int, int, int8_t * );
void x264_deblock_h_chroma_neon( uint8_t *, int, int, int, int8_t * );
/* Populate the deblock function-pointer table: C fallbacks first, then
 * progressively override with the best implementation the CPU supports.
 * Index convention: [1] = vertical filtering (horizontal edge),
 * [0] = horizontal filtering (vertical edge).
 * NOTE(review): the platform #if guards around the SIMD sections appear
 * truncated in this view. */
void x264_deblock_init( int cpu, x264_deblock_function_t *pf, int b_mbaff )
    /* C reference implementations (always valid). */
    pf->deblock_luma[1] = deblock_v_luma_c;
    pf->deblock_luma[0] = deblock_h_luma_c;
    pf->deblock_chroma[1] = deblock_v_chroma_c;
    pf->deblock_chroma[0] = deblock_h_chroma_c;
    pf->deblock_luma_intra[1] = deblock_v_luma_intra_c;
    pf->deblock_luma_intra[0] = deblock_h_luma_intra_c;
    pf->deblock_chroma_intra[1] = deblock_v_chroma_intra_c;
    pf->deblock_chroma_intra[0] = deblock_h_chroma_intra_c;
    pf->deblock_luma_mbaff = deblock_v_luma_mbaff_c;
    pf->deblock_chroma_mbaff = deblock_v_chroma_mbaff_c;
    pf->deblock_luma_intra_mbaff = deblock_v_luma_intra_mbaff_c;
    pf->deblock_chroma_intra_mbaff = deblock_v_chroma_intra_mbaff_c;
    pf->deblock_strength = deblock_strength_c;

    /* x86 overrides, ordered weakest to strongest. */
    if( cpu&X264_CPU_MMXEXT )
            pf->deblock_luma[1] = x264_deblock_v_luma_mmxext;
            pf->deblock_luma[0] = x264_deblock_h_luma_mmxext;
            pf->deblock_chroma[1] = x264_deblock_v_chroma_mmxext;
            pf->deblock_chroma[0] = x264_deblock_h_chroma_mmxext;
            pf->deblock_luma_intra[1] = x264_deblock_v_luma_intra_mmxext;
            pf->deblock_luma_intra[0] = x264_deblock_h_luma_intra_mmxext;
            pf->deblock_chroma_intra[1] = x264_deblock_v_chroma_intra_mmxext;
            pf->deblock_chroma_intra[0] = x264_deblock_h_chroma_intra_mmxext;

        pf->deblock_strength = x264_deblock_strength_mmxext;
        if( cpu&X264_CPU_SSE2 )
            pf->deblock_strength = x264_deblock_strength_sse2;
            /* The SSE2/AVX pixel kernels need a properly aligned stack. */
            if( !(cpu&X264_CPU_STACK_MOD4) )
                pf->deblock_luma[1] = x264_deblock_v_luma_sse2;
                pf->deblock_luma[0] = x264_deblock_h_luma_sse2;
                pf->deblock_chroma[1] = x264_deblock_v_chroma_sse2;
                pf->deblock_chroma[0] = x264_deblock_h_chroma_sse2;
                pf->deblock_luma_intra[1] = x264_deblock_v_luma_intra_sse2;
                pf->deblock_luma_intra[0] = x264_deblock_h_luma_intra_sse2;
                pf->deblock_chroma_intra[1] = x264_deblock_v_chroma_intra_sse2;
                pf->deblock_chroma_intra[0] = x264_deblock_h_chroma_intra_sse2;
        if( cpu&X264_CPU_SSSE3 )
            pf->deblock_strength = x264_deblock_strength_ssse3;
        if( cpu&X264_CPU_AVX )
            pf->deblock_strength = x264_deblock_strength_avx;
            if( !(cpu&X264_CPU_STACK_MOD4) )
                pf->deblock_luma[1] = x264_deblock_v_luma_avx;
                pf->deblock_luma[0] = x264_deblock_h_luma_avx;
                pf->deblock_chroma[1] = x264_deblock_v_chroma_avx;
                pf->deblock_chroma[0] = x264_deblock_h_chroma_avx;
                pf->deblock_luma_intra[1] = x264_deblock_v_luma_intra_avx;
                pf->deblock_luma_intra[0] = x264_deblock_h_luma_intra_avx;
                pf->deblock_chroma_intra[1] = x264_deblock_v_chroma_intra_avx;
                pf->deblock_chroma_intra[0] = x264_deblock_h_chroma_intra_avx;

    /* PPC AltiVec: luma only. */
    if( cpu&X264_CPU_ALTIVEC )
        pf->deblock_luma[1] = x264_deblock_v_luma_altivec;
        pf->deblock_luma[0] = x264_deblock_h_luma_altivec;
#endif // HAVE_ALTIVEC

    /* ARM NEON: luma only; chroma kernels are present but not enabled. */
    if( cpu&X264_CPU_NEON )
        pf->deblock_luma[1] = x264_deblock_v_luma_neon;
        pf->deblock_luma[0] = x264_deblock_h_luma_neon;
//      pf->deblock_chroma[1] = x264_deblock_v_chroma_neon;
//      pf->deblock_chroma[0] = x264_deblock_h_chroma_neon;
#endif // !HIGH_BIT_DEPTH