;*****************************************************************************
;* MMX/SSE2/AVX-optimized H.264 deblocking code
;*****************************************************************************
;* Copyright (C) 2005-2011 x264 project
;*
;* Authors: Loren Merritt <lorenm@u.washington.edu>
;*          Fiona Glaser <fiona@x264.com>
;*          Oskar Arvidsson <oskar@irock.se>
;*
;* This file is part of FFmpeg.
;*
;* FFmpeg is free software; you can redistribute it and/or
;* modify it under the terms of the GNU Lesser General Public
;* License as published by the Free Software Foundation; either
;* version 2.1 of the License, or (at your option) any later version.
;*
;* FFmpeg is distributed in the hope that it will be useful,
;* but WITHOUT ANY WARRANTY; without even the implied warranty of
;* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
;* Lesser General Public License for more details.
;*
;* You should have received a copy of the GNU Lesser General Public
;* License along with FFmpeg; if not, write to the Free Software
;* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
;******************************************************************************

%include "libavutil/x86/x86util.asm"

pb_A1:  times 16 db 0xA1
pb_3_1: times  4 db 3, 1

%define PASS8ROWS(base, base3, stride, stride3, offset) \
    PASS8ROWS(base+offset, base3+offset, stride, stride3)
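; e.g. PASS8ROWS(base, base3, stride, stride3, 8) yields the same eight row
; addresses with both base pointers advanced by 8 bytes.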
; in: 8 rows of 4 bytes in %4..%11
; out: 4 rows of 8 bytes in m0..m3
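; The first three parameters select the interleave width of each SBUTTERFLY
; stage (bytes -> words -> dwords here). Sketch of the idea: interleaving two
; 4-byte rows a0 a1 a2 a3 and b0 b1 b2 b3 at byte width gives
; a0 b0 a1 b1 a2 b2 a3 b3; repeating at word and then dword width completes
; the transpose.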
%macro TRANSPOSE4x8_LOAD 11

; in: 4 rows of 8 bytes in m0..m3
; out: 8 rows of 4 bytes in %1..%8
%macro TRANSPOSE8x4B_STORE 8

%macro TRANSPOSE4x8B_LOAD 8
    TRANSPOSE4x8_LOAD bw, wd, dq, %1, %2, %3, %4, %5, %6, %7, %8

; in: 8 rows of 8 (only the middle 6 pels are used) in %1..%8
; out: 6 rows of 8 in [%9+0*16] .. [%9+5*16]
%macro TRANSPOSE6x8_MEM 9
    SBUTTERFLY bw, 0, 1, 7
    SBUTTERFLY bw, 2, 3, 7
    SBUTTERFLY bw, 4, 5, 7
    SBUTTERFLY3 bw, m6, %8, m7
    SBUTTERFLY wd, 0, 2, 3
    SBUTTERFLY wd, 4, 6, 3
    SBUTTERFLY3 wd, m1, [%9+0x10], m3
    SBUTTERFLY wd, 5, 7, 0
    SBUTTERFLY dq, 1, 5, 0
    SBUTTERFLY dq, 2, 6, 0

; in: 8 rows of 8 in %1..%8
; out: 8 rows of 8 in %9..%16
%macro TRANSPOSE8x8_MEM 16
    SBUTTERFLY bw, 0, 1, 7
    SBUTTERFLY bw, 2, 3, 7
    SBUTTERFLY bw, 4, 5, 7
    SBUTTERFLY3 bw, m6, %8, m7
    SBUTTERFLY wd, 0, 2, 5
    SBUTTERFLY wd, 4, 6, 5
    SBUTTERFLY wd, 1, 3, 5
    SBUTTERFLY wd, 6, 7, 5
    SBUTTERFLY dq, 0, 4, 5
    SBUTTERFLY dq, 1, 6, 5
    SBUTTERFLY3 dq, m2, %11, m0
    SBUTTERFLY dq, 3, 7, 4

; out: %4 = |%1-%2|>%3

; out: %4 = |%1-%2|>%3
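; Sketch of the unsigned trick behind these comparisons: x86 has no unsigned
; byte compare, so |a-b| > t is derived from saturating arithmetic, e.g.
;     gt = clip_u8((clip_u8(a - b) | clip_u8(b - a)) - t) != 0
; psubusb in each direction yields the one-sided differences (the wrong
; direction saturates to 0), por merges them into |a-b|, and a saturating
; subtract of t leaves a nonzero byte exactly where |a-b| > t.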
; in: m0=p1 m1=p0 m2=q0 m3=q1 %1=alpha-1 %2=beta-1
; out: m5=beta-1, m7=mask, %3=alpha-1
    packuswb m4, m4   ; 16x alpha-1
    packuswb m5, m5   ; 16x beta-1

    DIFF_GT  m1, m2, m4, m7, m6  ; |p0-q0| > alpha-1
    DIFF_GT  m0, m1, m5, m4, m6  ; |p1-p0| > beta-1
    DIFF_GT  m3, m2, m5, m4, m6  ; |q1-q0| > beta-1
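; Per the spec, an edge is filtered only if all three of
;     |p0-q0| < alpha && |p1-p0| < beta && |q1-q0| < beta
; hold; the three DIFF_GT results above are the negated tests, which the
; rest of the macro merges into the single byte mask in m7.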
; in: m0=p1 m1=p0 m2=q0 m3=q1 m7=(tc&mask)
%macro DEBLOCK_P0_Q0 0
    pxor    m5, m1, m2   ; p0^q0
    pand    m5, [pb_1]   ; (p0^q0)&1
    pavgb   m3, m0       ; (p1 - q1 + 256)>>1
    pavgb   m3, [pb_3]   ; (((p1 - q1 + 256)>>1)+4)>>1 = 64+2+(p1-q1)>>2
    pavgb   m4, m2       ; (q0 - p0 + 256)>>1
    paddusb m3, m4       ; d+128+33
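; Scalar reference, as a sketch, for the normal (bS < 4) p0/q0 update the
; pavgb chain above computes in a +128-biased unsigned domain (av_clip and
; av_clip_uint8 as in FFmpeg's C code; tc already has the mask applied):
;     int delta = av_clip(((q0 - p0) * 4 + (p1 - q1) + 4) >> 3, -tc, tc);
;     p0 = av_clip_uint8(p0 + delta);
;     q0 = av_clip_uint8(q0 - delta);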
; %1=p1 %2=q2 %3=[q2] %4=[q1] %5=tc0 %6=tmp
; out: [q1] = clip( (q2+((p0+q0+1)>>1))>>1, q1-tc0, q1+tc0 )
; clobbers: q2, tmp, tc0
    pavgb   %2, %6       ; avg(p2,avg(p0,q0))
    pand    %6, [pb_1]   ; (p2^avg(p0,q0))&1
    psubusb %2, %6       ; (p2+((p0+q0+1)>>1))>>1
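; pavgb rounds up, so a plain avg(p2, avg(p0,q0)) would be off by one when
; p2 + avg(p0,q0) is odd; the pand/psubusb pair subtracts that parity bit,
; making the result exactly (p2 + ((p0+q0+1)>>1)) >> 1 before it is clipped
; to [q1-tc0, q1+tc0].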
;-----------------------------------------------------------------------------
; void ff_deblock_v_luma(uint8_t *pix, int stride, int alpha, int beta,
;                        int8_t *tc0)
;-----------------------------------------------------------------------------
%macro DEBLOCK_LUMA 0
cglobal deblock_v_luma_8, 5,5,10, pix_, stride_, alpha_, beta_, base3_
    dec     alpha_d         ; alpha-1
    add     base3_q, pix_q  ; pix-3*stride

    mova    m0, [base3_q + stride_q]    ; p1
    mova    m1, [base3_q + 2*stride_q]  ; p0
    mova    m2, [pix_q]                 ; q0
    mova    m3, [pix_q + stride_q]      ; q1

    punpcklbw m8, m8  ; tc = 4x tc0[3], 4x tc0[2], 4x tc0[1], 4x tc0[0]

    movdqa  m3, [base3_q]  ; p2
    DIFF_GT2 m1, m3, m5, m6, m7  ; |p2-p0| > beta-1
    LUMA_Q1 m0, m3, [base3_q], [base3_q + stride_q], m6, m4

    movdqa  m4, [pix_q + 2*stride_q]  ; q2
    DIFF_GT2 m2, m4, m5, m6, m3  ; |q2-q0| > beta-1
    mova    m3, [pix_q + stride_q]
    LUMA_Q1 m3, m4, [pix_q + 2*stride_q], [pix_q + stride_q], m8, m6

    mova    [base3_q + 2*stride_q], m1

;-----------------------------------------------------------------------------
; void ff_deblock_h_luma(uint8_t *pix, int stride, int alpha, int beta,
;                        int8_t *tc0)
;-----------------------------------------------------------------------------
cglobal deblock_h_luma_8, 5,9,0,0x60+16*WIN64
%define pix_tmp rsp+0x30  ; shadow space + r4

    ; transpose 6x16 -> tmp space
    TRANSPOSE6x8_MEM PASS8ROWS(r6, r5, r7, r8), pix_tmp
    TRANSPOSE6x8_MEM PASS8ROWS(r6, r5, r7, r8), pix_tmp+8

    ; alpha, beta, tc0 are still in r2d, r3d, r4
    ; don't backup r6, r5, r7, r8 because deblock_v_luma_sse2 doesn't use them
    lea     r0, [pix_tmp+0x30]
    call    deblock_v_luma_8

    ; transpose 16x4 -> original space (only the middle 4 rows were changed by the filter)
    movq    m0, [pix_tmp+0x18]
    movq    m1, [pix_tmp+0x28]
    movq    m2, [pix_tmp+0x38]
    movq    m3, [pix_tmp+0x48]
    TRANSPOSE8x4B_STORE PASS8ROWS(r6, r5, r7, r8)

    movq    m0, [pix_tmp+0x10]
    movq    m1, [pix_tmp+0x20]
    movq    m2, [pix_tmp+0x30]
    movq    m3, [pix_tmp+0x40]
    TRANSPOSE8x4B_STORE PASS8ROWS(r6, r5, r7, r8)

%macro DEBLOCK_H_LUMA_MBAFF 0
cglobal deblock_h_luma_mbaff_8, 5, 9, 10, 8*16, pix_, stride_, alpha_, beta_, tc0_, base3_, stride3_
    movsxd  stride_q, stride_d
    lea     stride3_q, [3*stride_q]
    add     base3_q, stride3_q

    movq    m1, [pix_q + stride_q - 4]
    movq    m2, [pix_q + 2*stride_q - 4]
    movq    m3, [base3_q - 4]
    movq    m4, [base3_q + stride_q - 4]
    movq    m5, [base3_q + 2*stride_q - 4]
    movq    m6, [base3_q + stride3_q - 4]
    movq    m7, [base3_q + 4*stride_q - 4]

    TRANSPOSE_8X8B 0,1,2,3,4,5,6,7

    movq    [rsp + 16*i], m %+ i

    LOAD_MASK alpha_d, beta_d

    movdqa  m3, [rsp + 16]  ; p2
    DIFF_GT2 m1, m3, m5, m6, m7  ; |p2-p0| > beta-1
    LUMA_Q1 m0, m3, [rsp + 16], [rsp + 32], m6, m4

    movdqa  m4, [rsp + 96]  ; q2
    DIFF_GT2 m2, m4, m5, m6, m3  ; |q2-q0| > beta-1
    LUMA_Q1 m3, m4, [rsp + 96], [rsp + 80], m8, m6

    TRANSPOSE_8X8B 0,1,2,3,4,5,6,7
    movq    [pix_q + stride_q - 4], m1
    movq    [pix_q + 2*stride_q - 4], m2
    movq    [base3_q - 4], m3
    movq    [base3_q + stride_q - 4], m4
    movq    [base3_q + 2*stride_q - 4], m5
    movq    [base3_q + stride3_q - 4], m6
    movq    [base3_q + 4*stride_q - 4], m7

%if HAVE_AVX_EXTERNAL

%macro DEBLOCK_LUMA 2
;-----------------------------------------------------------------------------
; void ff_deblock_v8_luma(uint8_t *pix, int stride, int alpha, int beta,
;                         int8_t *tc0)
;-----------------------------------------------------------------------------
cglobal deblock_%1_luma_8, 5,5,8,2*%2
    add     r4, r0  ; pix-3*stride

    mova    m0, [r4+r1]    ; p1
    mova    m1, [r4+2*r1]  ; p0
    mova    m3, [r0+r1]    ; q1

    punpcklbw m4, m4      ; tc = 4x tc0[3], 4x tc0[2], 4x tc0[1], 4x tc0[0]
    mova    [esp+%2], m4  ; tc
    mova    [esp], m4     ; mask

    DIFF_GT2 m1, m3, m5, m6, m7  ; |p2-p0| > beta-1
    pand    m4, [esp+%2]  ; tc
    LUMA_Q1 m0, m3, [r4], [r4+r1], m6, m4

    mova    m4, [r0+2*r1]  ; q2
    DIFF_GT2 m2, m4, m5, m6, m3  ; |q2-q0| > beta-1
    pand    m6, [esp]     ; mask
    mova    m5, [esp+%2]  ; tc
    LUMA_Q1 m3, m4, [r0+2*r1], [r0+r1], m5, m6

;-----------------------------------------------------------------------------
; void ff_deblock_h_luma(uint8_t *pix, int stride, int alpha, int beta,
;                        int8_t *tc0)
;-----------------------------------------------------------------------------
cglobal deblock_h_luma_8, 0,5,8,0x60+12
%define pix_tmp esp+12

    ; transpose 6x16 -> tmp space
    TRANSPOSE6x8_MEM PASS8ROWS(r0, r1, r3, r4), pix_tmp
    TRANSPOSE6x8_MEM PASS8ROWS(r0, r1, r3, r4), pix_tmp+8

    lea     r0, [pix_tmp+0x30]
    call    deblock_%1_luma_8
    add     dword [esp   ], 8  ; pix_tmp+0x38
    add     dword [esp+16], 2  ; tc0+2
    call    deblock_%1_luma_8

    ; transpose 16x4 -> original space (only the middle 4 rows were changed by the filter)
    movq    m0, [pix_tmp+0x10]
    movq    m1, [pix_tmp+0x20]
    movq    m2, [pix_tmp+0x30]
    movq    m3, [pix_tmp+0x40]
    TRANSPOSE8x4B_STORE PASS8ROWS(r0, r1, r3, r4)

    movq    m0, [pix_tmp+0x18]
    movq    m1, [pix_tmp+0x28]
    movq    m2, [pix_tmp+0x38]
    movq    m3, [pix_tmp+0x48]
    TRANSPOSE8x4B_STORE PASS8ROWS(r0, r1, r3, r4)
%endmacro ; DEBLOCK_LUMA

%if HAVE_AVX_EXTERNAL

%macro LUMA_INTRA_P012 4 ; p0..p3 in memory
    pavgb   t0, t1   ; ((p2+p1+1)/2 + (p0+q0+1)/2 + 1)/2
    psubb   t0, t2   ; p1' = (p2+p1+p0+q0+2)/4

    psubb   t3, t2   ; p2+2*p1+2*p0+2*q0+q1
    pavgb   t1, t5   ; (((p2+q1)/2 + p1+1)/2 + (p0+q0+1)/2 + 1)/2
    psubb   t1, t3   ; p0'a = (p2+2*p1+2*p0+2*q0+q1+4)/8
    pavgb   t2, p1   ; p0'b = (2*p1+p0+q0+2)/4
    mova    %1, t1   ; store p0

    pavgb   t1, t0   ; (p3+p2+1)/2 + (p2+p1+p0+q0+2)/4
    paddb   t2, t4   ; 2*p3+3*p2+p1+p0+q0
    psubb   t1, t2   ; p2' = (2*p3+3*p2+p1+p0+q0+4)/8

    mova    %2, t0   ; store p1
    mova    %3, t1   ; store p2
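; Scalar sketch of the strong (bS == 4) filter assembled above (the q side
; reuses this macro via LUMA_INTRA_SWAP_PQ below). When the |p2-p0| and
; alpha/4 conditions tested by the caller hold:
;     p0' = (p2 + 2*p1 + 2*p0 + 2*q0 + q1 + 4) >> 3
;     p1' = (p2 + p1 + p0 + q0 + 2) >> 2
;     p2' = (2*p3 + 3*p2 + p1 + p0 + q0 + 4) >> 3
; otherwise only p0'b = (2*p1 + p0 + q0 + 2) >> 2 is applied. Everything is
; built from pavgb/psubb so no intermediate needs more than 8 bits.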
%macro LUMA_INTRA_SWAP_PQ 0
%define mask1p mask1q

%macro DEBLOCK_LUMA_INTRA 1
%define mask1q [rsp-24]

%define spill(x) [esp+16*x]
%define mask0  spill(2)
%define mask1p spill(3)
%define mask1q spill(4)

;-----------------------------------------------------------------------------
; void ff_deblock_v_luma_intra(uint8_t *pix, int stride, int alpha, int beta)
;-----------------------------------------------------------------------------
cglobal deblock_%1_luma_intra_8, 4,6,16,0x10
cglobal deblock_%1_luma_intra_8, 4,6,16,ARCH_X86_64*0x50-0x50
    lea     r5, [r1*3]  ; 3*stride
    add     r4, r0      ; pix-4*stride

    LOAD_MASK r2d, r3d, t5  ; m5=beta-1, t5=alpha-1, m7=mask0
    SWAP    7, 12           ; m12=mask0
    pavgb   t5, mpb_1       ; alpha/4+1
    DIFF_GT2 p0, q0, t5, t0, t3  ; t0 = |p0-q0| > alpha/4+1
    DIFF_GT2 p0, p2, m5, t2, t5  ; mask1 = |p2-p0| > beta-1
    DIFF_GT2 q0, q2, m5, t4, t5  ; t4 = |q2-q0| > beta-1

    LOAD_MASK r2d, r3d, t5  ; m5=beta-1, t5=alpha-1, m7=mask0
    pavgb   m4, [pb_1]      ; alpha/4+1
    DIFF_GT2 p0, q0, m4, m6, m7  ; m6 = |p0-q0| > alpha/4+1
    DIFF_GT2 p0, p2, m5, m4, m7  ; m4 = |p2-p0| > beta-1
    DIFF_GT2 q0, q2, m5, m4, m7  ; m4 = |q2-q0| > beta-1

    LUMA_INTRA_P012 [r4+r5], [r4+2*r1], [r4+r1], [r4]
    LUMA_INTRA_P012 [r0], [r0+r1], [r0+2*r1], [r0+r5]

;-----------------------------------------------------------------------------
; void ff_deblock_h_luma_intra(uint8_t *pix, int stride, int alpha, int beta)
;-----------------------------------------------------------------------------
cglobal deblock_h_luma_intra_8, 4,9,0,0x80
%define pix_tmp rsp+0x20  ; shadow space

    ; transpose 8x16 -> tmp space
    TRANSPOSE8x8_MEM PASS8ROWS(r6, r5, r7, r8), PASS8ROWS(pix_tmp, pix_tmp+0x30, 0x10, 0x30)
    TRANSPOSE8x8_MEM PASS8ROWS(r6, r5, r7, r8), PASS8ROWS(pix_tmp+8, pix_tmp+0x38, 0x10, 0x30)

    lea     r0, [pix_tmp+0x40]
    call    deblock_v_luma_intra_8

    ; transpose 16x6 -> original space (but we can't write only 6 pixels, so really 16x8)
    TRANSPOSE8x8_MEM PASS8ROWS(pix_tmp+8, pix_tmp+0x38, 0x10, 0x30), PASS8ROWS(r6, r5, r7, r8)
    TRANSPOSE8x8_MEM PASS8ROWS(pix_tmp, pix_tmp+0x30, 0x10, 0x30), PASS8ROWS(r6, r5, r7, r8)

cglobal deblock_h_luma_intra_8, 2,4,8,0x80

    ; transpose 8x16 -> tmp space
    TRANSPOSE8x8_MEM PASS8ROWS(r0, r2, r1, r3), PASS8ROWS(pix_tmp, pix_tmp+0x30, 0x10, 0x30)
    TRANSPOSE8x8_MEM PASS8ROWS(r0, r2, r1, r3), PASS8ROWS(pix_tmp+8, pix_tmp+0x38, 0x10, 0x30)

    lea     r0, [pix_tmp+0x40]
    call    deblock_%1_luma_intra_8
    add     dword [rsp], 8  ; pix_tmp+8
    call    deblock_%1_luma_intra_8

    ; transpose 16x6 -> original space (but we can't write only 6 pixels, so really 16x8)
    TRANSPOSE8x8_MEM PASS8ROWS(pix_tmp, pix_tmp+0x30, 0x10, 0x30), PASS8ROWS(r0, r2, r1, r3)
    TRANSPOSE8x8_MEM PASS8ROWS(pix_tmp+8, pix_tmp+0x38, 0x10, 0x30), PASS8ROWS(r0, r2, r1, r3)
%endmacro ; DEBLOCK_LUMA_INTRA

%if HAVE_AVX_EXTERNAL

DEBLOCK_LUMA_INTRA v8

%macro CHROMA_V_START 0

%macro CHROMA_H_START 0

;-----------------------------------------------------------------------------
; void ff_deblock_v_chroma(uint8_t *pix, int stride, int alpha, int beta,
;                          int8_t *tc0)
;-----------------------------------------------------------------------------
cglobal deblock_v_chroma_8, 5,6
    call    ff_chroma_inter_body_mmxext

;-----------------------------------------------------------------------------
; void ff_deblock_h_chroma(uint8_t *pix, int stride, int alpha, int beta,
;                          int8_t *tc0)
;-----------------------------------------------------------------------------
cglobal deblock_h_chroma_8, 5,7
    ; This could use the red zone on 64-bit Unix to avoid the stack pointer
    ; readjustment, but valgrind assumes the red zone is clobbered on
    ; function calls and returns.
    TRANSPOSE4x8_LOAD bw, wd, dq, PASS8ROWS(t5, r0, r1, t6)
    TRANSPOSE8x4B_STORE PASS8ROWS(t5, r0, r1, t6)
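; Shared inter chroma body used by the wrappers above. Scalar sketch of the
; per-pixel update it applies (same delta as the bS < 4 luma case, but
; chroma leaves p1/q1 untouched and uses tc = tc0 + 1):
;     int delta = av_clip(((q0 - p0) * 4 + (p1 - q1) + 4) >> 3, -tc, tc);
;     p0 = av_clip_uint8(p0 + delta);
;     q0 = av_clip_uint8(q0 - delta);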
ff_chroma_inter_body_mmxext:

cglobal deblock_h_chroma422_8, 5, 6
    SUB     rsp, (1+ARCH_X86_64*2)*mmsize
%define buf0 [rsp+16]

    TRANSPOSE4x8B_LOAD PASS8ROWS(t5, r0, r1, t6)
    TRANSPOSE8x4B_STORE PASS8ROWS(t5, r0, r1, t6)

    TRANSPOSE4x8B_LOAD PASS8ROWS(t5, r0, r1, t6)
    TRANSPOSE8x4B_STORE PASS8ROWS(t5, r0, r1, t6)
    ADD     rsp, (1+ARCH_X86_64*2)*mmsize

; in: %1=p0 %2=p1 %3=q1
; out: p0 = (p0 + q1 + 2*p1 + 2) >> 2
%macro CHROMA_INTRA_P0 3
    pand    m4, [pb_1]  ; m4 = (p0^q1)&1
    pavgb   %1, %2      ; dst = avg(p1, avg(p0,q1) - ((p0^q1)&1))
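; Same parity trick as in LUMA_Q1: pavgb rounds up, so the (p0^q1)&1 bit is
; subtracted from avg(p0,q1) before averaging with p1; the double average
; then equals (p0 + q1 + 2*p1 + 2) >> 2 exactly.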
;------------------------------------------------------------------------------
; void ff_deblock_v_chroma_intra(uint8_t *pix, int stride, int alpha, int beta)
;------------------------------------------------------------------------------
cglobal deblock_v_chroma_intra_8, 4,5
    call    ff_chroma_intra_body_mmxext

;------------------------------------------------------------------------------
; void ff_deblock_h_chroma_intra(uint8_t *pix, int stride, int alpha, int beta)
;------------------------------------------------------------------------------
cglobal deblock_h_chroma_intra_8, 4,6
    TRANSPOSE4x8_LOAD bw, wd, dq, PASS8ROWS(t5, r0, r1, t6)
    call    ff_chroma_intra_body_mmxext
    TRANSPOSE8x4B_STORE PASS8ROWS(t5, r0, r1, t6)

cglobal deblock_h_chroma422_intra_8, 4, 6
    TRANSPOSE4x8_LOAD bw, wd, dq, PASS8ROWS(t5, r0, r1, t6)
    call    ff_chroma_intra_body_mmxext
    TRANSPOSE8x4B_STORE PASS8ROWS(t5, r0, r1, t6)

    TRANSPOSE4x8_LOAD bw, wd, dq, PASS8ROWS(t5, r0, r1, t6)
    call    ff_chroma_intra_body_mmxext
    TRANSPOSE8x4B_STORE PASS8ROWS(t5, r0, r1, t6)

ff_chroma_intra_body_mmxext:
    CHROMA_INTRA_P0 m1, m0, m3
    CHROMA_INTRA_P0 m2, m3, m0

%macro LOAD_8_ROWS 8

%macro STORE_8_ROWS 8

%macro TRANSPOSE_8x4B_XMM 0
    punpckhdq m2, m0, m4

%macro TRANSPOSE_4x8B_XMM 0
    punpckhwd m4, m0, m2

%macro CHROMA_INTER_BODY_XMM 1
    LOAD_MASK alpha_d, beta_d

%macro CHROMA_INTRA_BODY_XMM 0
    LOAD_MASK alpha_d, beta_d

%macro CHROMA_V_START_XMM 1
    movsxdifnidn stride_q, stride_d

%macro CHROMA_H_START_XMM 2
    movsxdifnidn stride_q, stride_d
    lea     %2, [3*stride_q]

%macro DEBLOCK_CHROMA_XMM 1

cglobal deblock_v_chroma_8, 5, 6, 8, pix_, stride_, alpha_, beta_, tc0_
    CHROMA_V_START_XMM r5
    movq    m1, [r5 + stride_q]
    movq    m3, [pix_q + stride_q]
    CHROMA_INTER_BODY_XMM 1
    movq    [r5 + stride_q], m1

cglobal deblock_h_chroma_8, 5, 7, 8, 0-16, pix_, stride_, alpha_, beta_, tc0_
    CHROMA_H_START_XMM r5, r6
    LOAD_8_ROWS PASS8ROWS(pix_q - 2, r5 - 2, stride_q, r6)
    CHROMA_INTER_BODY_XMM 1
    STORE_8_ROWS PASS8ROWS(pix_q - 2, r5 - 2, stride_q, r6)

cglobal deblock_h_chroma422_8, 5, 7, 8, 0-16, pix_, stride_, alpha_, beta_, tc0_
    CHROMA_H_START_XMM r5, r6
    LOAD_8_ROWS PASS8ROWS(pix_q - 2, r5 - 2, stride_q, r6)
    CHROMA_INTER_BODY_XMM 2
    STORE_8_ROWS PASS8ROWS(pix_q - 2, r5 - 2, stride_q, r6)
    lea     pix_q, [pix_q + 8*stride_q]
    lea     r5, [r5 + 8*stride_q]
    LOAD_8_ROWS PASS8ROWS(pix_q - 2, r5 - 2, stride_q, r6)
    CHROMA_INTER_BODY_XMM 2
    STORE_8_ROWS PASS8ROWS(pix_q - 2, r5 - 2, stride_q, r6)

cglobal deblock_v_chroma_intra_8, 4, 5, 8, pix_, stride_, alpha_, beta_
    CHROMA_V_START_XMM r4
    movq    m1, [r4 + stride_q]
    movq    m3, [pix_q + stride_q]
    CHROMA_INTRA_BODY_XMM
    movq    [r4 + stride_q], m1

cglobal deblock_h_chroma_intra_8, 4, 6, 8, pix_, stride_, alpha_, beta_
    CHROMA_H_START_XMM r4, r5
    LOAD_8_ROWS PASS8ROWS(pix_q - 2, r4 - 2, stride_q, r5)
    CHROMA_INTRA_BODY_XMM
    STORE_8_ROWS PASS8ROWS(pix_q - 2, r4 - 2, stride_q, r5)

cglobal deblock_h_chroma422_intra_8, 4, 6, 8, pix_, stride_, alpha_, beta_
    CHROMA_H_START_XMM r4, r5
    LOAD_8_ROWS PASS8ROWS(pix_q - 2, r4 - 2, stride_q, r5)
    CHROMA_INTRA_BODY_XMM
    STORE_8_ROWS PASS8ROWS(pix_q - 2, r4 - 2, stride_q, r5)
    lea     pix_q, [pix_q + 8*stride_q]
    lea     r4, [r4 + 8*stride_q]
    LOAD_8_ROWS PASS8ROWS(pix_q - 2, r4 - 2, stride_q, r5)
    CHROMA_INTRA_BODY_XMM
    STORE_8_ROWS PASS8ROWS(pix_q - 2, r4 - 2, stride_q, r5)
%endmacro ; DEBLOCK_CHROMA_XMM

DEBLOCK_CHROMA_XMM sse2
DEBLOCK_CHROMA_XMM avx

;-----------------------------------------------------------------------------
; void ff_h264_loop_filter_strength(int16_t bs[2][4][4], uint8_t nnz[40],
;                                   int8_t ref[2][40], int16_t mv[2][40][2],
;                                   int bidir, int edges, int step,
;                                   int mask_mv0, int mask_mv1, int field);
;
; mask_mv0 is 0 or 3
; mask_mv1 is 0 or 1
;-----------------------------------------------------------------------------
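; Scalar sketch of the strength assigned to each 4x4 block edge (non-intra
; edges only; intra edges get bs 3/4 from the caller):
;     bs = (nnz[b] || nnz[bn]) ? 2
;        : (refs differ || |mvx[b]-mvx[bn]| >= 4
;                       || |mvy[b]-mvy[bn]| >= mvy_limit) ? 1 : 0;
; with mvy_limit being 4 for frame and 2 for field coding. The iteration
; macro below evaluates these tests for four edges at a time, per direction.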
%macro loop_filter_strength_iteration 7 ; edges, step, mask_mv,
                                        ; dir, d_idx, mask_dir, bidir
    xor      b_idxd, b_idxd  ; for (b_idx = 0; b_idx < edges; b_idx += step)

    test     b_idxd, dword mask_mvd
    jnz %%.skip_loop_iter    ; if (!(b_idx & mask_mv))

    movd     m2, [refq+b_idxq+d_idx+12]   ; { ref0[bn] }
    punpckldq m2, [refq+b_idxq+d_idx+52]  ; { ref0[bn], ref1[bn] }
    pshufw   m0, [refq+b_idxq+12], 0x44   ; { ref0[b], ref0[b] }
    pshufw   m1, [refq+b_idxq+52], 0x44   ; { ref1[b], ref1[b] }
    pshufw   m3, m2, 0x4E                 ; { ref1[bn], ref0[bn] }
    psubb    m0, m2                       ; { ref0[b] != ref0[bn],
                                          ;   ref0[b] != ref1[bn] }
    psubb    m1, m3                       ; { ref1[b] != ref1[bn],
                                          ;   ref1[b] != ref0[bn] }

    mova     m1, [mvq+b_idxq*4+(d_idx+12)*4]
    mova     m2, [mvq+b_idxq*4+(d_idx+12)*4+mmsize]
    psubw    m1, [mvq+b_idxq*4+12*4]
    psubw    m2, [mvq+b_idxq*4+12*4+mmsize]
    psubw    m3, [mvq+b_idxq*4+52*4]
    psubw    m4, [mvq+b_idxq*4+52*4+mmsize]
    psubusb  m1, m5  ; abs(mv[b] - mv[bn]) >= limit

    mova     m1, [mvq+b_idxq*4+(d_idx+52)*4]
    mova     m2, [mvq+b_idxq*4+(d_idx+52)*4+mmsize]
    psubw    m1, [mvq+b_idxq*4+12*4]
    psubw    m2, [mvq+b_idxq*4+12*4+mmsize]
    psubw    m3, [mvq+b_idxq*4+52*4]
    psubw    m4, [mvq+b_idxq*4+52*4+mmsize]
    psubusb  m1, m5  ; abs(mv[b] - mv[bn]) >= limit

    movd     m0, [refq+b_idxq+12]
    psubb    m0, [refq+b_idxq+d_idx+12]  ; ref[b] != ref[bn]

    mova     m1, [mvq+b_idxq*4+12*4]
    mova     m2, [mvq+b_idxq*4+12*4+mmsize]
    psubw    m1, [mvq+b_idxq*4+(d_idx+12)*4]
    psubw    m2, [mvq+b_idxq*4+(d_idx+12)*4+mmsize]
    psubusb  m1, m5  ; abs(mv[b] - mv[bn]) >= limit
%endif ; bidir == 1/0

    movd     m1, [nnzq+b_idxq+12]
    por      m1, [nnzq+b_idxq+d_idx+12]  ; nnz[b] || nnz[bn]

    movq     [bsq+b_idxq+32*dir], m1
    add      b_idxd, dword stepd
    cmp      b_idxd, dword edgesd

cglobal h264_loop_filter_strength, 9, 9, 0, bs, nnz, ref, mv, bidir, edges, \
                                   step, mask_mv0, mask_mv1, field
%define b_idxq bidirq
%define b_idxd bidird

%define mask_mv0d mask_mv0m
%define mask_mv1d mask_mv1m
    shl      dword mask_mv1d, 3
    shl      dword mask_mv0d, 3

    loop_filter_strength_iteration edgesd, stepd, mask_mv1d, 1, -8,  0, 0
    loop_filter_strength_iteration     32,     8, mask_mv0d, 0, -1, -1, 0

    mova     m0, [bsq+mmsize*0]
    mova     m1, [bsq+mmsize*1]
    mova     m2, [bsq+mmsize*2]
    mova     m3, [bsq+mmsize*3]
    TRANSPOSE4x4W 0, 1, 2, 3, 4
    mova     [bsq+mmsize*0], m0
    mova     [bsq+mmsize*1], m1
    mova     [bsq+mmsize*2], m2
    mova     [bsq+mmsize*3], m3
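    ; Each dir=0 iteration above produced the strengths of four adjacent
    ; vertical edges within one block row, filling bs[0][row][edge]; the
    ; 4x4 word transpose flips this into the bs[0][edge][row] layout the C
    ; caller indexes. The bidir path below needs the same fixup.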
    loop_filter_strength_iteration edgesd, stepd, mask_mv1d, 1, -8,  0, 1
    loop_filter_strength_iteration     32,     8, mask_mv0d, 0, -1, -1, 1

    mova     m0, [bsq+mmsize*0]
    mova     m1, [bsq+mmsize*1]
    mova     m2, [bsq+mmsize*2]
    mova     m3, [bsq+mmsize*3]
    TRANSPOSE4x4W 0, 1, 2, 3, 4
    mova     [bsq+mmsize*0], m0
    mova     [bsq+mmsize*1], m1
    mova     [bsq+mmsize*2], m2
    mova     [bsq+mmsize*3], m3