;*****************************************************************************
;* MMX/SSE2/AVX-optimized 10-bit H.264 deblocking code
;*****************************************************************************
;* Copyright (C) 2005-2011 x264 project
;*
;* Authors: Oskar Arvidsson <oskar@irock.se>
;*          Loren Merritt <lorenm@u.washington.edu>
;*          Fiona Glaser <fiona@x264.com>
;*
;* This file is part of FFmpeg.
;*
;* FFmpeg is free software; you can redistribute it and/or
;* modify it under the terms of the GNU Lesser General Public
;* License as published by the Free Software Foundation; either
;* version 2.1 of the License, or (at your option) any later version.
;*
;* FFmpeg is distributed in the hope that it will be useful,
;* but WITHOUT ANY WARRANTY; without even the implied warranty of
;* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
;* Lesser General Public License for more details.
;*
;* You should have received a copy of the GNU Lesser General Public
;* License along with FFmpeg; if not, write to the Free Software
;* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
;******************************************************************************

%include "libavutil/x86/x86util.asm"
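
; 1023, the largest 10-bit sample value, replicated across one XMM register.
; It is the upper clip bound applied whenever filtered pixels are written back.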
pw_pixel_max: times 8 dw ((1 << 10)-1)
; out: %4 = |%1-%2|-%3

; out: %4 = |%1-%2|<%3
    psubw       %5, %3  ; |%1-%2|-%3
    pcmpgtw     %4, %5  ; 0 > |%1-%2|-%3
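; SSE2 has no unsigned word compare, so the threshold test is expressed as a
; signed compare of 0 against |%1-%2|-%3: words below the threshold go
; negative and pcmpgtw turns them into an all-ones mask.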
    pshuflw     %1, %1, 01010000b
    pshufd      %1, %1, 01010000b
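; The two shuffles broadcast the per-edge tc0 values so that each 4-pixel
; segment of the edge sees its own tc in every word lane (H.264 supplies
; one tc0 entry per 4 pixels of a luma edge).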
; in: %1=p1, %2=p0, %3=q0, %4=q1
;     %5=alpha, %6=beta, %7-%9=tmp
    ABS_SUB     %2, %3, %5, %8, %7  ; |p0-q0| - alpha
    ABS_SUB     %1, %2, %6, %9, %7  ; |p1-p0| - beta
    ABS_SUB     %3, %4, %6, %9, %7  ; |q1-q0| - beta
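; The three results are combined into the per-pixel filter gate: the loop
; filter may only touch pixels where |p0-q0| < alpha, |p1-p0| < beta and
; |q1-q0| < beta all hold.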

; in: %1=p0, %2=q0, %3=p1, %4=q1, %5=mask, %6=tmp, %7=tmp
%macro DEBLOCK_P0_Q0 7
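; This is the normal (bS < 4) edge filter from the spec:
;   delta = clip3(-tc, tc, ((q0 - p0) * 4 + (p1 - q1) + 4) >> 3)
;   p0' = clip(p0 + delta),  q0' = clip(q0 - delta)
; with the final clip bounded by [0, pw_pixel_max] for 10-bit output.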
    mova        %6, [pw_pixel_max]

; in: %1=x2, %2=x1, %3=p0, %4=q0, %5=mask&tc, %6=tmp
    pavgw       %6, %3, %4  ; (p0+q0+1)>>1
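; Implements the p1/q1 update (shown here for the p side):
;   p1' = p1 + clip3(-tc0, tc0, (p2 + ((p0+q0+1)>>1) - 2*p1) >> 1)
; the same macro handles q1 with x2=q2, x1=q1.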

%macro LUMA_DEBLOCK_ONE 3
    DIFF_LT     m5, %1, bm, m4, m6
    LUMA_Q1     m5, %2, m1, m2, m4, m6

%macro LUMA_H_STORE 2
    movhps      [%1+r1*2-4], m2
    movhps      [%1+r1*4-4], m3

%macro DEBLOCK_LUMA 0
;-----------------------------------------------------------------------------
; void ff_deblock_v_luma_10(uint16_t *pix, int stride, int alpha, int beta,
;                           int8_t *tc0)
;-----------------------------------------------------------------------------
cglobal deblock_v_luma_10, 5,5,8*(mmsize/16)
    %assign pad 5*mmsize+12-(stack_offset&15)
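; Subtracting (stack_offset&15) rounds the reservation so that rsp ends up
; 16-byte aligned, letting the mmsize-sized spill slots below be accessed
; with aligned moves.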
    %define ms1 [rsp+mmsize]
    %define ms2 [rsp+mmsize*2]
    %define am  [rsp+mmsize*3]
    %define bm  [rsp+mmsize*4]
    LOAD_AB     m4, m5, r2d, r3d
    LOAD_MASK   m0, m1, m2, m3, am, bm, m7, m4, m6
    LUMA_DEBLOCK_ONE m1, m0, ms1
    LUMA_DEBLOCK_ONE m2, m3, ms2
    DEBLOCK_P0_Q0 m1, m2, m0, m3, m5, m7, m6

cglobal deblock_h_luma_10, 5,6,8*(mmsize/16)
    %assign pad 7*mmsize+12-(stack_offset&15)
    %define ms1 [rsp+mmsize]
    %define ms2 [rsp+mmsize*2]
    %define p1m [rsp+mmsize*3]
    %define p2m [rsp+mmsize*4]
    %define am  [rsp+mmsize*5]
    %define bm  [rsp+mmsize*6]
    LOAD_AB     m4, m5, r2d, r3d
    movq        m2, [r0-8]      ; y q2 q1 q0
    TRANSPOSE4x4W 2, 5, 0, 1, 4
    TRANSPOSE4x4W 2, 3, 6, 7, 4
    movu        m5, [r0-8]      ; y q2 q1 q0 p0 p1 p2 x
    TRANSPOSE4x4W 5, 0, 2, 3, 6
    TRANSPOSE4x4W 4, 1, 3, 7, 6
    SBUTTERFLY  qdq, 0, 1, 7
    SBUTTERFLY  qdq, 2, 3, 7
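; The horizontal filter works on a vertical edge, so rows are gathered with
; 4x4 word transposes and the qdq butterflies then splice the transposed
; halves into full-width p1/p0/q0/q1 rows; after filtering, the inverse
; transpose below scatters the pixels back into their rows.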
    LOAD_MASK   m0, m1, m2, m3, am, bm, m7, m4, m6
    LUMA_DEBLOCK_ONE m1, m0, ms1
    LUMA_DEBLOCK_ONE m2, m3, ms2
    DEBLOCK_P0_Q0 m1, m2, m0, m3, m5, m7, m6
    TRANSPOSE4x4W 0, 1, 2, 3, 4
    lea         r0, [r0+r1*(mmsize/2)]
    lea         r2, [r2+r1*(mmsize/2)]

; in: m0=p1, m1=p0, m2=q0, m3=q1, m8=p2, m9=q2
;     m12=alpha, m13=beta
; out: m0=p1', m3=q1', m1=p0', m2=q0'
; clobbers: m4, m5, m6, m7, m10, m11, m14
%macro DEBLOCK_LUMA_INTER_SSE2 0
    LOAD_MASK   m0, m1, m2, m3, m12, m13, m7, m4, m6
    DIFF_LT     m8, m1, m13, m10, m4
    DIFF_LT     m9, m2, m13, m11, m4
    LUMA_Q1     m8, m0, m1, m2, m5, m4
    LUMA_Q1     m9, m3, m1, m2, m5, m4
    DEBLOCK_P0_Q0 m1, m2, m0, m3, m4, m5, m6

%macro DEBLOCK_LUMA_64 0
cglobal deblock_v_luma_10, 5,5,15
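; On x86-64 the whole six-row context plus the alpha/beta masks fits in xmm
; registers (note the m8-m14 usage in the comments above), so unlike the
; 32-bit path no stack spill area needs to be reserved.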
    LOAD_AB     m12, m13, r2d, r3d
    DEBLOCK_LUMA_INTER_SSE2

cglobal deblock_h_luma_10, 5,7,15
    LOAD_AB     m12, m13, r2d, r3d
    movu        m8, [r0-8]      ; y q2 q1 q0 p0 p1 p2 x
    TRANSPOSE4x4W 8, 0, 2, 9, 10
    TRANSPOSE4x4W 5, 1, 3, 7, 10
    SBUTTERFLY  qdq, 0, 1, 10
    SBUTTERFLY  qdq, 2, 3, 10
    DEBLOCK_LUMA_INTER_SSE2
    TRANSPOSE4x4W 0, 1, 2, 3, 4
%if HAVE_AVX_EXTERNAL

; in: t0-t2: tmp registers
;     %1=p0 %2=p1 %3=p2 %4=p3 %5=q0 %6=q1 %7=mask0
;     %8=mask1p %9=2 %10=p0' %11=p1' %12=p2'
%macro LUMA_INTRA_P012 12 ; p0..p3 in memory
    paddw       t0, %9  ; (p2 + p1 + p0 + q0 + 2)
    paddw       t2, t0  ; (2*p3 + 3*p2 + p1 + p0 + q0 + 4)
    psrlw       t1, 2   ; (2*p1 + p0 + q1 + 2)/4
    psrlw       t0, 3   ; (p2 + 2*p1 + 2*p0 + 2*q0 + q1 + 4)>>3
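; Together these evaluate the strong (bS=4) luma filter:
;   p0' = (p2 + 2*p1 + 2*p0 + 2*q0 + q1 + 4) >> 3
;   p1' = (p2 + p1 + p0 + q0 + 2) >> 2
;   p2' = (2*p3 + 3*p2 + p1 + p0 + q0 + 4) >> 3
; and the q side is produced by invoking the macro with p/q swapped.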

%macro LUMA_INTRA_INIT 1
    %xdefine pad %1*mmsize+((gprsize*3) % mmsize)-(stack_offset&15)
    CAT_XDEFINE t, i, [rsp+mmsize*(i-4)]
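; CAT_XDEFINE names the stack spill slots t4, t5, ... (t4 = [rsp],
; t5 = [rsp+mmsize], and so on, assuming the loop counter i starts at 4),
; so the register-starved 32-bit path can refer to them like registers.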

; in: %1-%3=tmp, %4=p2, %5=q2
%macro LUMA_INTRA_INTER 5
    LOAD_AB     t0, t1, r2d, r3d
    LOAD_MASK   m0, m1, m2, m3, %1, t1, t0, t2, t3
    paddw       t3, [pw_2]          ; alpha/4+2
    DIFF_LT     m1, m2, t3, t2, t0  ; t2 = |p0-q0| < alpha/4+2
    DIFF_LT     t3, m2, t1, t2, t0  ; t2 = |q2-q0| < beta
    DIFF_LT     t3, m1, t1, t2, t0  ; t2 = |p2-p0| < beta
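; Per the spec, the strong filter is only allowed where |p0-q0| < alpha/4+2;
; on top of that, each side needs |p2-p0| < beta (resp. |q2-q0| < beta) to
; use the full-tap filter above, otherwise it falls back to the 3-tap
; p0' = (2*p1 + p0 + q1 + 2) >> 2.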

%macro LUMA_H_INTRA_LOAD 0
    TRANSPOSE4x4W 4, 5, 0, 1, 2
    TRANSPOSE4x4W 2, 3, 4, 5, 6
    TRANSPOSE8x8W 4, 5, 0, 1, 2, 3, 6, 7, t4, t5

; in: %1=q3 %2=q2' %3=q1' %4=q0' %5=p0' %6=p1' %7=p2' %8=p3 %9=tmp
%macro LUMA_H_INTRA_STORE 9
    TRANSPOSE4x4W %1, %2, %3, %4, %9
    movq        [r0+r1*2-8], m%3
    TRANSPOSE4x4W %5, %6, %7, %1, %9
    TRANSPOSE2x4x4W %1, %2, %3, %4, %9
    movq        [r0+r1*2-8], m%3
    movhps      [r4+r1-8], m%2
    movhps      [r4+r1*2-8], m%3
    movhps      [r4+r5-8], m%4
    TRANSPOSE2x4x4W %5, %6, %7, %1, %9
    movhps      [r4+r1*2], m%7

;-----------------------------------------------------------------------------
; void ff_deblock_v_luma_intra_10(uint16_t *pix, int stride, int alpha,
;                                 int beta)
;-----------------------------------------------------------------------------
%macro DEBLOCK_LUMA_INTRA_64 0
cglobal deblock_v_luma_intra_10, 4,7,16
    lea         r5, [r1*3]  ; 3*stride
    add         r4, r0      ; pix-4*stride
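; With r4 = pix-4*stride and r5 = 3*stride, the p-side rows are addressed as
; [r4]=p3, [r4+r1]=p2, [r4+2*r1]=p1, [r4+r5]=p0, and the q side as [r0]=q0,
; [r0+r1]=q1, [r0+2*r1]=q2, [r0+r5]=q3, matching the store targets of the
; two LUMA_INTRA_P012 invocations below.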
    LOAD_AB     aa, bb, r2d, r3d
    LOAD_MASK   p1, p0, q0, q1, aa, bb, m3, t0, t1
    paddw       t2, m0              ; alpha/4+2
    DIFF_LT     p0, q0, t2, m6, t0  ; m6 = |p0-q0| < alpha/4+2
    DIFF_LT     p2, p0, bb, t1, t0  ; t1 = |p2-p0| < beta
    DIFF_LT     q2, q0, bb, m7, t0  ; m7 = |q2-q0| < beta
    LUMA_INTRA_P012 p0, p1, p2, [r4], q0, q1, m3, m6, m0, [r4+r5], [r4+2*r1], [r4+r1]
    LUMA_INTRA_P012 q0, q1, q2, [r0+r5], p0, p1, m3, m7, m0, [r0], [r0+r1], [r0+2*r1]

;-----------------------------------------------------------------------------
; void ff_deblock_h_luma_intra_10(uint16_t *pix, int stride, int alpha,
;                                 int beta)
;-----------------------------------------------------------------------------
cglobal deblock_h_luma_intra_10, 4,7,16
    %assign pad 24-(stack_offset&15)
    lea         r5, [r1*3]  ; 3*stride
    add         r4, r0      ; pix+4*stride
    TRANSPOSE8x8W 5, 8, 9, 10, 11, 12, 13, 4, 1
    LOAD_AB     m1, m2, r2d, r3d
    LOAD_MASK   q1, q0, p0, p1, m1, m2, m3, t0, t1
    paddw       m1, m0              ; alpha/4+2
    DIFF_LT     p0, q0, m1, m6, t0  ; m6 = |p0-q0| < alpha/4+2
    DIFF_LT     q2, q0, m2, t1, t0  ; t1 = |q2-q0| < beta
    DIFF_LT     p0, p2, m2, m7, t0  ; m7 = |p2-p0| < beta
    LUMA_INTRA_P012 q0, q1, q2, q3, p0, p1, m3, m6, m0, m5, m1, q2
    LUMA_INTRA_P012 p0, p1, p2, p3, q0, q1, m3, m7, m0, p0, m6, p2
    LUMA_H_INTRA_STORE 7, 8, 1, 5, 11, 6, 13, 4, 14

DEBLOCK_LUMA_INTRA_64
%if HAVE_AVX_EXTERNAL
DEBLOCK_LUMA_INTRA_64

%macro DEBLOCK_LUMA_INTRA 0
;-----------------------------------------------------------------------------
; void ff_deblock_v_luma_intra_10(uint16_t *pix, int stride, int alpha,
;                                 int beta)
;-----------------------------------------------------------------------------
cglobal deblock_v_luma_intra_10, 4,7,8*(mmsize/16)
    mova        m0, [r4+r1*2]   ; p1
    mova        m1, [r4+r5]     ; p0
    mova        m3, [r0+r1]     ; q1
    LUMA_INTRA_INTER t4, t5, t6, [r4+r1], [r0+r1*2]
    LUMA_INTRA_P012 m1, m0, t3, [r4], m2, m3, t5, t4, [pw_2], [r4+r5], [r4+2*r1], [r4+r1]
    mova        t3, [r0+r1*2]   ; q2
    LUMA_INTRA_P012 m2, m3, t3, [r0+r5], m1, m0, t5, t6, [pw_2], [r0], [r0+r1], [r0+2*r1]

;-----------------------------------------------------------------------------
; void ff_deblock_h_luma_intra_10(uint16_t *pix, int stride, int alpha,
;                                 int beta)
;-----------------------------------------------------------------------------
cglobal deblock_h_luma_intra_10, 4,7,8*(mmsize/16)
    lea         r5, [r1*3]  ; 3*stride
    add         r4, r0      ; pix+4*stride
    LUMA_INTRA_INTER t8, t9, t10, t5, t6
    LUMA_INTRA_P012 m1, m0, t3, t4, m2, m3, t9, t8, [pw_2], t8, t5, t11
    LUMA_INTRA_P012 m2, m3, t3, t7, m1, m0, t9, t10, [pw_2], m4, t6, m5
    LUMA_H_INTRA_STORE 2, 0, 1, 3, 4, 6, 5, t7, 7
    lea         r0, [r0+r1*(mmsize/2)]
    lea         r4, [r4+r1*(mmsize/2)]
%if HAVE_AVX_EXTERNAL

; in: %1=p0, %2=q0, %3=p1, %4=q1, %5=mask, %6=tmp, %7=tmp
; out: %1=p0', %2=q0'
%macro CHROMA_DEBLOCK_P0_Q0_INTRA 7
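; Intra (bS=4) chroma filtering replaces only the line of pixels adjacent
; to the edge:
;   p0' = (2*p1 + p0 + q1 + 2) >> 2
;   q0' = (2*q1 + q0 + p1 + 2) >> 2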

%macro CHROMA_V_LOAD 1
    mova        m1, [r0+r1]  ; p0
    mova        m3, [%1+r1]  ; q1

%macro CHROMA_V_STORE 0

%macro CHROMA_V_LOAD_TC 2

%macro DEBLOCK_CHROMA 0
;-----------------------------------------------------------------------------
; void ff_deblock_v_chroma_10(uint16_t *pix, int stride, int alpha, int beta,
;                             int8_t *tc0)
;-----------------------------------------------------------------------------
cglobal deblock_v_chroma_10, 5,7-(mmsize/16),8*(mmsize/16)
    LOAD_AB     m4, m5, r2d, r3d
    LOAD_MASK   m0, m1, m2, m3, m4, m5, m7, m6, m4
    CHROMA_V_LOAD_TC m6, r4
    DEBLOCK_P0_Q0 m1, m2, m0, m3, m7, m5, m6
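; Inter chroma reuses the luma p0/q0 delta filter; chroma edges are only two
; pixels deep, so p1/q1 feed the average but are never written back.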

;-----------------------------------------------------------------------------
; void ff_deblock_v_chroma_intra_10(uint16_t *pix, int stride, int alpha,
;                                   int beta)
;-----------------------------------------------------------------------------
cglobal deblock_v_chroma_intra_10, 4,6-(mmsize/16),8*(mmsize/16)
    LOAD_AB     m4, m5, r2d, r3d
    LOAD_MASK   m0, m1, m2, m3, m4, m5, m7, m6, m4
    CHROMA_DEBLOCK_P0_Q0_INTRA m1, m2, m0, m3, m7, m5, m6
%if HAVE_AVX_EXTERNAL