;*****************************************************************************
;* MMX/SSE2/AVX-optimized 10-bit H.264 deblocking code
;*****************************************************************************
;* Copyright (C) 2005-2011 x264 project
;* Authors: Oskar Arvidsson <oskar@irock.se>
;*          Loren Merritt <lorenm@u.washington.edu>
;*          Jason Garrett-Glaser <darkshikari@gmail.com>
;* This file is part of Libav.
;* Libav is free software; you can redistribute it and/or
;* modify it under the terms of the GNU Lesser General Public
;* License as published by the Free Software Foundation; either
;* version 2.1 of the License, or (at your option) any later version.
;* Libav is distributed in the hope that it will be useful,
;* but WITHOUT ANY WARRANTY; without even the implied warranty of
;* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
;* Lesser General Public License for more details.
;* You should have received a copy of the GNU Lesser General Public
;* License along with Libav; if not, write to the Free Software
;* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
;******************************************************************************
%include "x86util.asm"
pw_pixel_max: times 8 dw ((1 << 10)-1) ; 1023, the clamp ceiling for 10-bit samples
; out: %4 = |%1-%2|-%3
; out: %4 = |%1-%2|<%3
    psubw   %5, %3 ; |%1-%2|-%3
    pcmpgtw %4, %5 ; 0 > |%1-%2|-%3
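; pcmpgtw is a signed word compare, which is safe here: 10-bit samples,
; their absolute differences and the thresholds all fit in signed 16 bits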
    pshuflw %1, %1, 01010000b
    pshufd  %1, %1, 01010000b
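; together these shuffles replicate each sign-extended tc0 word four
; times, so every pixel lane carries the tc value of its own 4-pixel
; edge segment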
; in: %1=p1, %2=p0, %3=q0, %4=q1
;     %5=alpha, %6=beta, %7-%9=tmp
    ABS_SUB %2, %3, %5, %8, %7 ; |p0-q0| - alpha
    ABS_SUB %1, %2, %6, %9, %7 ; |p1-p0| - beta
    ABS_SUB %3, %4, %6, %9, %7 ; |q1-q0| - beta
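; together these form the spec's filterSamplesFlag: the edge is filtered
; only where |p0-q0| < alpha && |p1-p0| < beta && |q1-q0| < beta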
; in: %1=p0, %2=q0, %3=p1, %4=q1, %5=mask, %6=tmp, %7=tmp
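; out: %1=p0', %2=q0'
; the net effect matches the spec's weak-filter update (C pseudocode):
;   delta = Clip3(-tc, tc, (((q0 - p0) << 2) + (p1 - q1) + 4) >> 3);
;   p0'   = Clip1(p0 + delta);
;   q0'   = Clip1(q0 - delta);
; where Clip1() clamps to [0, pw_pixel_max]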
%macro DEBLOCK_P0_Q0 7
    mova    %6, [pw_pixel_max]
; in: %1=x2, %2=x1, %3=p0, %4=q0, %5=mask&tc, %6=tmp
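; computes %2' = %2 + Clip3(-tc, tc, (%1 + ((p0+q0+1)>>1) - 2*%2) >> 1),
; i.e. the spec's p1/q1 update with %1 as the outer (p2/q2) sample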
    pavgw   %6, %3, %4 ; (p0+q0+1)>>1
%macro LUMA_DEBLOCK_ONE 3
    DIFF_LT m5, %1, bm, m4, m6
    LUMA_Q1 m5, %2, m1, m2, m4, m6
%macro LUMA_H_STORE 2
    movhps  [%1+r1*2-4], m2
    movhps  [%1+r1*4-4], m3
%macro DEBLOCK_LUMA 0
;-----------------------------------------------------------------------------
; void deblock_v_luma( uint16_t *pix, int stride, int alpha, int beta, int8_t *tc0 )
;-----------------------------------------------------------------------------
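; alpha and beta are the spec's edge-activity thresholds; tc0 points to
; the four per-4x4-edge clipping values (a negative tc0 skips that segment)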
cglobal deblock_v_luma_10, 5,5,8*(mmsize/16)
%assign pad 5*mmsize+12-(stack_offset&15)
%define ms1 [rsp+mmsize]
%define ms2 [rsp+mmsize*2]
%define am  [rsp+mmsize*3]
%define bm  [rsp+mmsize*4]
    LOAD_AB m4, m5, r2, r3
    LOAD_MASK m0, m1, m2, m3, am, bm, m7, m4, m6
    LUMA_DEBLOCK_ONE m1, m0, ms1
    LUMA_DEBLOCK_ONE m2, m3, ms2
    DEBLOCK_P0_Q0 m1, m2, m0, m3, m5, m7, m6
cglobal deblock_h_luma_10, 5,6,8*(mmsize/16)
%assign pad 7*mmsize+12-(stack_offset&15)
%define ms1 [rsp+mmsize]
%define ms2 [rsp+mmsize*2]
%define p1m [rsp+mmsize*3]
%define p2m [rsp+mmsize*4]
%define am  [rsp+mmsize*5]
%define bm  [rsp+mmsize*6]
    LOAD_AB m4, m5, r2, r3
    movq    m2, [r0-8] ; y q2 q1 q0
    TRANSPOSE4x4W 2, 5, 0, 1, 4
    TRANSPOSE4x4W 2, 3, 6, 7, 4
    movu    m5, [r0-8] ; y q2 q1 q0 p0 p1 p2 x
    TRANSPOSE4x4W 5, 0, 2, 3, 6
    TRANSPOSE4x4W 4, 1, 3, 7, 6
    SBUTTERFLY qdq, 0, 1, 7
    SBUTTERFLY qdq, 2, 3, 7
    LOAD_MASK m0, m1, m2, m3, am, bm, m7, m4, m6
    LUMA_DEBLOCK_ONE m1, m0, ms1
    LUMA_DEBLOCK_ONE m2, m3, ms2
    DEBLOCK_P0_Q0 m1, m2, m0, m3, m5, m7, m6
    TRANSPOSE4x4W 0, 1, 2, 3, 4
    lea     r0, [r0+r1*(mmsize/2)]
    lea     r2, [r2+r1*(mmsize/2)]
; in: m0=p1, m1=p0, m2=q0, m3=q1, m8=p2, m9=q2
;     m12=alpha, m13=beta
; out: m0=p1', m3=q1', m1=p0', m2=q0'
; clobbers: m4, m5, m6, m7, m10, m11, m14
%macro DEBLOCK_LUMA_INTER_SSE2 0
    LOAD_MASK m0, m1, m2, m3, m12, m13, m7, m4, m6
    DIFF_LT m8, m1, m13, m10, m4 ; m10 = |p2-p0| < beta
    DIFF_LT m9, m2, m13, m11, m4 ; m11 = |q2-q0| < beta
    LUMA_Q1 m8, m0, m1, m2, m5, m4
    LUMA_Q1 m9, m3, m1, m2, m5, m4
    DEBLOCK_P0_Q0 m1, m2, m0, m3, m4, m5, m6
%macro DEBLOCK_LUMA_64 0
cglobal deblock_v_luma_10, 5,5,15
    LOAD_AB m12, m13, r2, r3
    DEBLOCK_LUMA_INTER_SSE2
cglobal deblock_h_luma_10, 5,7,15
    LOAD_AB m12, m13, r2, r3
    movu    m8, [r0-8] ; y q2 q1 q0 p0 p1 p2 x
    TRANSPOSE4x4W 8, 0, 2, 9, 10
    TRANSPOSE4x4W 5, 1, 3, 7, 10
    SBUTTERFLY qdq, 0, 1, 10
    SBUTTERFLY qdq, 2, 3, 10
    DEBLOCK_LUMA_INTER_SSE2
    TRANSPOSE4x4W 0, 1, 2, 3, 4
; in: t0-t2: tmp registers
;     %1=p0 %2=p1 %3=p2 %4=p3 %5=q0 %6=q1 %7=mask0
;     %8=mask1p %9=2 %10=p0' %11=p1' %12=p2'
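; net effect matches the spec's strong (bS=4) luma filter:
;   p0' = (p2 + 2*p1 + 2*p0 + 2*q0 + q1 + 4) >> 3
;   p1' = (p2 + p1 + p0 + q0 + 2) >> 2
;   p2' = (2*p3 + 3*p2 + p1 + p0 + q0 + 4) >> 3
; mask1p selects these; pixels outside it keep the 3-tap fallback
;   p0' = (2*p1 + p0 + q1 + 2) >> 2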
%macro LUMA_INTRA_P012 12 ; p0..p3 in memory
    paddw   t0, %9 ; (p2 + p1 + p0 + q0 + 2)
    paddw   t2, t0 ; (2*p3 + 3*p2 + p1 + p0 + q0 + 4)
    psrlw   t1, 2  ; (2*p1 + p0 + q1 + 2)>>2
    psrlw   t0, 3  ; (p2 + 2*p1 + 2*p0 + 2*q0 + q1 + 4)>>3
%macro LUMA_INTRA_INIT 1
%xdefine pad %1*mmsize+((gprsize*3) % mmsize)-(stack_offset&15)
    CAT_XDEFINE t, i, [rsp+mmsize*(i-4)]
; in: %1-%3=tmp, %4=p2, %5=q2
%macro LUMA_INTRA_INTER 5
    LOAD_AB t0, t1, r2d, r3d
    LOAD_MASK m0, m1, m2, m3, %1, t1, t0, t2, t3
    paddw   t3, [pw_2] ; alpha/4+2
    DIFF_LT m1, m2, t3, t2, t0 ; t2 = |p0-q0| < alpha/4+2
    DIFF_LT t3, m2, t1, t2, t0 ; t2 = |q2-q0| < beta
    DIFF_LT t3, m1, t1, t2, t0 ; t2 = |p2-p0| < beta
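; per the spec, the strong filter applies to a side only when
; |p0-q0| < alpha/4+2 and that side's outer gradient (|p2-p0| resp.
; |q2-q0|) is below beta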
%macro LUMA_H_INTRA_LOAD 0
    TRANSPOSE4x4W 4, 5, 0, 1, 2
    TRANSPOSE4x4W 2, 3, 4, 5, 6
    TRANSPOSE8x8W 4, 5, 0, 1, 2, 3, 6, 7, t4, t5
; in: %1=q3 %2=q2' %3=q1' %4=q0' %5=p0' %6=p1' %7=p2' %8=p3 %9=tmp
%macro LUMA_H_INTRA_STORE 9
    TRANSPOSE4x4W %1, %2, %3, %4, %9
    movq    [r0+r1*2-8], m%3
    TRANSPOSE4x4W %5, %6, %7, %1, %9
    TRANSPOSE2x4x4W %1, %2, %3, %4, %9
    movq    [r0+r1*2-8], m%3
    movhps  [r4+r1-8], m%2
    movhps  [r4+r1*2-8], m%3
    movhps  [r4+r5-8], m%4
    TRANSPOSE2x4x4W %5, %6, %7, %1, %9
    movhps  [r4+r1*2], m%7
;-----------------------------------------------------------------------------
; void deblock_v_luma_intra( uint16_t *pix, int stride, int alpha, int beta )
;-----------------------------------------------------------------------------
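; intra (bS=4) edges carry no tc0: each side switches independently to
; the strong filter when its gate conditions hold, otherwise only p0/q0
; receive the 3-tap fallback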
%macro DEBLOCK_LUMA_INTRA_64 0
cglobal deblock_v_luma_intra_10, 4,7,16
    lea     r5, [r1*3] ; 3*stride
    add     r4, r0     ; pix-4*stride
    LOAD_AB aa, bb, r2d, r3d
    LOAD_MASK p1, p0, q0, q1, aa, bb, m3, t0, t1
    paddw   t2, m0     ; alpha/4+2
    DIFF_LT p0, q0, t2, m6, t0 ; m6 = |p0-q0| < alpha/4+2
    DIFF_LT p2, p0, bb, t1, t0 ; t1 = |p2-p0| < beta
    DIFF_LT q2, q0, bb, m7, t0 ; m7 = |q2-q0| < beta
    LUMA_INTRA_P012 p0, p1, p2, [r4], q0, q1, m3, m6, m0, [r4+r5], [r4+2*r1], [r4+r1]
    LUMA_INTRA_P012 q0, q1, q2, [r0+r5], p0, p1, m3, m7, m0, [r0], [r0+r1], [r0+2*r1]
;-----------------------------------------------------------------------------
; void deblock_h_luma_intra( uint16_t *pix, int stride, int alpha, int beta )
;-----------------------------------------------------------------------------
cglobal deblock_h_luma_intra_10, 4,7,16
%assign pad 24-(stack_offset&15)
    lea     r5, [r1*3] ; 3*stride
    add     r4, r0     ; pix+4*stride
    TRANSPOSE8x8W 5, 8, 9, 10, 11, 12, 13, 4, 1
    LOAD_AB m1, m2, r2d, r3d
    LOAD_MASK q1, q0, p0, p1, m1, m2, m3, t0, t1
    paddw   m1, m0     ; alpha/4+2
    DIFF_LT p0, q0, m1, m6, t0 ; m6 = |p0-q0| < alpha/4+2
    DIFF_LT q2, q0, m2, t1, t0 ; t1 = |q2-q0| < beta
    DIFF_LT p0, p2, m2, m7, t0 ; m7 = |p2-p0| < beta
    LUMA_INTRA_P012 q0, q1, q2, q3, p0, p1, m3, m6, m0, m5, m1, q2
    LUMA_INTRA_P012 p0, p1, p2, p3, q0, q1, m3, m7, m0, p0, m6, p2
    LUMA_H_INTRA_STORE 7, 8, 1, 5, 11, 6, 13, 4, 14
DEBLOCK_LUMA_INTRA_64
DEBLOCK_LUMA_INTRA_64
%macro DEBLOCK_LUMA_INTRA 0
;-----------------------------------------------------------------------------
; void deblock_v_luma_intra( uint16_t *pix, int stride, int alpha, int beta )
;-----------------------------------------------------------------------------
cglobal deblock_v_luma_intra_10, 4,7,8*(mmsize/16)
    mova    m0, [r4+r1*2] ; p1
    mova    m1, [r4+r5]   ; p0
    mova    m3, [r0+r1]   ; q1
    LUMA_INTRA_INTER t4, t5, t6, [r4+r1], [r0+r1*2]
    LUMA_INTRA_P012 m1, m0, t3, [r4], m2, m3, t5, t4, [pw_2], [r4+r5], [r4+2*r1], [r4+r1]
    mova    t3, [r0+r1*2] ; q2
    LUMA_INTRA_P012 m2, m3, t3, [r0+r5], m1, m0, t5, t6, [pw_2], [r0], [r0+r1], [r0+2*r1]
;-----------------------------------------------------------------------------
; void deblock_h_luma_intra( uint16_t *pix, int stride, int alpha, int beta )
;-----------------------------------------------------------------------------
cglobal deblock_h_luma_intra_10, 4,7,8*(mmsize/16)
    lea     r5, [r1*3] ; 3*stride
    add     r4, r0     ; pix+4*stride
    LUMA_INTRA_INTER t8, t9, t10, t5, t6
    LUMA_INTRA_P012 m1, m0, t3, t4, m2, m3, t9, t8, [pw_2], t8, t5, t11
    LUMA_INTRA_P012 m2, m3, t3, t7, m1, m0, t9, t10, [pw_2], m4, t6, m5
    LUMA_H_INTRA_STORE 2, 0, 1, 3, 4, 6, 5, t7, 7
    lea     r0, [r0+r1*(mmsize/2)]
    lea     r4, [r4+r1*(mmsize/2)]
; in: %1=p0, %2=q0, %3=p1, %4=q1, %5=mask, %6=tmp, %7=tmp
; out: %1=p0', %2=q0'
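; the net effect matches the spec's chroma intra filter:
;   p0' = (2*p1 + p0 + q1 + 2) >> 2
;   q0' = (2*q1 + q0 + p1 + 2) >> 2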
%macro CHROMA_DEBLOCK_P0_Q0_INTRA 7
%macro CHROMA_V_LOAD 1
    mova    m1, [r0+r1] ; p0
    mova    m3, [%1+r1] ; q1
%macro CHROMA_V_STORE 0
; loads the four per-edge int8_t tc0 values from %2 and broadcasts them
; to word lanes, so each pixel carries its own edge's clipping threshold
%macro CHROMA_V_LOAD_TC 2
%macro DEBLOCK_CHROMA 0
;-----------------------------------------------------------------------------
; void deblock_v_chroma( uint16_t *pix, int stride, int alpha, int beta, int8_t *tc0 )
;-----------------------------------------------------------------------------
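; per the spec, chroma deblocking updates only the samples adjacent to
; the edge (p0 and q0); p1 and q1 are read but never written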
cglobal deblock_v_chroma_10, 5,7-(mmsize/16),8*(mmsize/16)
    LOAD_AB m4, m5, r2, r3
    LOAD_MASK m0, m1, m2, m3, m4, m5, m7, m6, m4
    CHROMA_V_LOAD_TC m6, r4
    DEBLOCK_P0_Q0 m1, m2, m0, m3, m7, m5, m6
;-----------------------------------------------------------------------------
; void deblock_v_chroma_intra( uint16_t *pix, int stride, int alpha, int beta )
;-----------------------------------------------------------------------------
cglobal deblock_v_chroma_intra_10, 4,6-(mmsize/16),8*(mmsize/16)
    LOAD_AB m4, m5, r2, r3
    LOAD_MASK m0, m1, m2, m3, m4, m5, m7, m6, m4
    CHROMA_DEBLOCK_P0_Q0_INTRA m1, m2, m0, m3, m7, m5, m6