1 ;*****************************************************************************
2 ;* deblock-a.asm: x86 deblocking
3 ;*****************************************************************************
4 ;* Copyright (C) 2005-2013 x264 project
6 ;* Authors: Loren Merritt <lorenm@u.washington.edu>
7 ;* Fiona Glaser <fiona@x264.com>
8 ;* Oskar Arvidsson <oskar@irock.se>
10 ;* This program is free software; you can redistribute it and/or modify
11 ;* it under the terms of the GNU General Public License as published by
12 ;* the Free Software Foundation; either version 2 of the License, or
13 ;* (at your option) any later version.
15 ;* This program is distributed in the hope that it will be useful,
16 ;* but WITHOUT ANY WARRANTY; without even the implied warranty of
17 ;* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
18 ;* GNU General Public License for more details.
20 ;* You should have received a copy of the GNU General Public License
21 ;* along with this program; if not, write to the Free Software
22 ;* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02111, USA.
24 ;* This program is also available under a commercial proprietary license.
25 ;* For more information, contact us at licensing@x264.com.
26 ;*****************************************************************************
29 %include "x86util.asm"
33 load_bytes_shuf: times 2 db 3,4,5,6,11,12,13,14,4,5,6,7,12,13,14,15
34 insert_top_shuf: dd 0,1,4,5,7,2,3,6
35 transpose_shuf: db 0,4,8,12,1,5,9,13,2,6,10,14,3,7,11,15
50 ; out: %4 = |%1-%2|-%3
59 ; out: %4 = |%1-%2|<%3
65 psubw %5, %3 ; |%1-%2|-%3
66 pcmpgtw %4, %5 ; 0 > |%1-%2|-%3
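; Per lane this is simply (a scalar sketch): %4 = (abs(%1-%2) < %3) ? 0xffff : 0.
; The signed compare against 0 works because a 16-bit pixel difference minus the
; threshold always fits in a signed word.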
90 ; in: %1=p1, %2=p0, %3=q0, %4=q1
91 ; %5=alpha, %6=beta, %7-%9=tmp
94 ABS_SUB %2, %3, %5, %8, %7 ; |p0-q0| - alpha
95 ABS_SUB %1, %2, %6, %9, %7 ; |p1-p0| - beta
97 ABS_SUB %3, %4, %6, %9, %7 ; |q1-q0| - beta
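; Taken together these three tests form the standard H.264 filter-enable mask
; (a sketch; per lane the mask ends up set only where all three hold):
;   |p0-q0| < alpha  &&  |p1-p0| < beta  &&  |q1-q0| < beta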
103 ; in: %1=p0, %2=q0, %3=p1, %4=q1, %5=mask, %6=tmp, %7=tmp
104 ; out: %1=p0', %2=q0'
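; Scalar reference for this update (the H.264 normal-filter step; clip3() is an
; illustrative helper that clamps its first argument to the given range):
;   delta = clip3( ((q0-p0)*4 + (p1-q1) + 4) >> 3, -tc, tc )
;   p0'   = clip3( p0 + delta, 0, pixel_max )
;   q0'   = clip3( q0 - delta, 0, pixel_max )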
105 %macro DEBLOCK_P0_Q0 7
114 mova %6, [pw_pixel_max]
123 ; in: %1=x2, %2=x1, %3=p0, %4=q0, %5=mask&tc, %6=tmp
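; Scalar reference (same formula as the 8-bit LUMA_Q1 below; x is either p or q):
;   x1' = clip3( (x2 + ((p0 + q0 + 1) >> 1)) >> 1, x1 - tc0, x1 + tc0 )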
125 pavgw %6, %3, %4 ; (p0+q0+1)>>1
135 %macro LUMA_DEBLOCK_ONE 3
136 DIFF_LT m5, %1, bm, m4, m6
143 LUMA_Q1 m5, %2, m1, m2, m4, m6
146 %macro LUMA_H_STORE 2
158 movhps [%1+r1*2-4], m2
160 movhps [%1+r1*4-4], m3
164 %macro DEBLOCK_LUMA 0
165 ;-----------------------------------------------------------------------------
166 ; void deblock_v_luma( uint16_t *pix, intptr_t stride, int alpha, int beta, int8_t *tc0 )
167 ;-----------------------------------------------------------------------------
168 cglobal deblock_v_luma, 5,5,8
169 %assign pad 5*mmsize+12-(stack_offset&15)
171 %define ms1 [rsp+mmsize]
172 %define ms2 [rsp+mmsize*2]
173 %define am [rsp+mmsize*3]
174 %define bm [rsp+mmsize*4]
177 LOAD_AB m4, m5, r2d, r3d
191 LOAD_MASK m0, m1, m2, m3, am, bm, m7, m4, m6
196 LUMA_DEBLOCK_ONE m1, m0, ms1
200 LUMA_DEBLOCK_ONE m2, m3, ms2
210 DEBLOCK_P0_Q0 m1, m2, m0, m3, m5, m7, m6
222 cglobal deblock_h_luma, 5,6,8
223 %assign pad 7*mmsize+12-(stack_offset&15)
225 %define ms1 [rsp+mmsize]
226 %define ms2 [rsp+mmsize*2]
227 %define p1m [rsp+mmsize*3]
228 %define p2m [rsp+mmsize*4]
229 %define am [rsp+mmsize*5]
230 %define bm [rsp+mmsize*6]
233 LOAD_AB m4, m5, r2d, r3d
246 movq m2, [r0-8] ; y q2 q1 q0
253 TRANSPOSE4x4W 2, 5, 0, 1, 4
256 TRANSPOSE4x4W 2, 3, 6, 7, 4
258 movu m5, [r0-8] ; y q2 q1 q0 p0 p1 p2 x
262 TRANSPOSE4x4W 5, 0, 2, 3, 6
269 TRANSPOSE4x4W 4, 1, 3, 7, 6
274 SBUTTERFLY qdq, 0, 1, 7
275 SBUTTERFLY qdq, 2, 3, 7
279 LOAD_MASK m0, m1, m2, m3, am, bm, m7, m4, m6
283 LUMA_DEBLOCK_ONE m1, m0, ms1
287 LUMA_DEBLOCK_ONE m2, m3, ms2
297 DEBLOCK_P0_Q0 m1, m2, m0, m3, m5, m7, m6
300 TRANSPOSE4x4W 0, 1, 2, 3, 4
304 lea r0, [r0+r1*(mmsize/2)]
305 lea r2, [r2+r1*(mmsize/2)]
313 ; in: m0=p1, m1=p0, m2=q0, m3=q1, m8=p2, m9=q2
314 ; m12=alpha, m13=beta
315 ; out: m0=p1', m3=q1', m1=p0', m2=q0'
316 ; clobbers: m4, m5, m6, m7, m10, m11, m14
317 %macro DEBLOCK_LUMA_INTER_SSE2 0
318 LOAD_MASK m0, m1, m2, m3, m12, m13, m7, m4, m6
320 DIFF_LT m8, m1, m13, m10, m4
321 DIFF_LT m9, m2, m13, m11, m4
331 LUMA_Q1 m8, m0, m1, m2, m5, m4
335 LUMA_Q1 m9, m3, m1, m2, m5, m4
343 DEBLOCK_P0_Q0 m1, m2, m0, m3, m4, m5, m6
349 %macro DEBLOCK_LUMA_64 0
350 cglobal deblock_v_luma, 5,5,15
361 LOAD_AB m12, m13, r2d, r3d
374 DEBLOCK_LUMA_INTER_SSE2
386 cglobal deblock_h_luma, 5,7,15
388 LOAD_AB m12, m13, r2d, r3d
396 movu m8, [r0-8] ; y q2 q1 q0 p0 p1 p2 x
405 TRANSPOSE4x4W 8, 0, 2, 9, 10
406 TRANSPOSE4x4W 5, 1, 3, 7, 10
409 SBUTTERFLY qdq, 0, 1, 10
410 SBUTTERFLY qdq, 2, 3, 10
413 DEBLOCK_LUMA_INTER_SSE2
415 TRANSPOSE4x4W 0, 1, 2, 3, 4
439 ; in: t0-t2: tmp registers
440 ; %1=p0 %2=p1 %3=p2 %4=p3 %5=q0 %6=q1 %7=mask0
441 ; %8=mask1p %9=2 %10=p0' %11=p1' %12=p2'
442 %macro LUMA_INTRA_P012 12 ; p0..p3 in memory
457 paddw t0, %9 ; (p2 + p1 + p0 + q0 + 2)
458 paddw t2, t0 ; (2*p3 + 3*p2 + p1 + p0 + q0 + 4)
477 psrlw t1, 2 ; (2*p1 + p0 + q1 + 2)/4
478 psrlw t0, 3 ; (p2 + 2*p1 + 2*p0 + 2*q0 + q1 + 4)>>3
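; Net result, p side shown (H.264 strong intra filter; the q side is mirrored):
;   p0' = (p2 + 2*p1 + 2*p0 + 2*q0 + q1 + 4) >> 3
;   p1' = (p2 + p1 + p0 + q0 + 2) >> 2
;   p2' = (2*p3 + 3*p2 + p1 + p0 + q0 + 4) >> 3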
490 %macro LUMA_INTRA_INIT 1
491 %xdefine pad %1*mmsize+((gprsize*3) % mmsize)-(stack_offset&15)
498 CAT_XDEFINE t, i, [rsp+mmsize*(i-4)]
505 ; in: %1-%3=tmp, %4=p2, %5=q2
506 %macro LUMA_INTRA_INTER 5
507 LOAD_AB t0, t1, r2d, r3d
509 LOAD_MASK m0, m1, m2, m3, %1, t1, t0, t2, t3
518 paddw t3, [pw_2] ; alpha/4+2
519 DIFF_LT m1, m2, t3, t2, t0 ; t2 = |p0-q0| < alpha/4+2
523 DIFF_LT t3, m2, t1, t2, t0 ; t2 = |q2-q0| < beta
527 DIFF_LT t3, m1, t1, t2, t0 ; t2 = |p2-p0| < beta
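; Decision being built here (a sketch of the usual intra selection, not the exact
; register flow): the strong 3-output filter above is used on a side only when
;   mask && |p0-q0| < (alpha>>2)+2 && |p2-p0| < beta   (p side)
;   mask && |p0-q0| < (alpha>>2)+2 && |q2-q0| < beta   (q side)
; where mask is the LOAD_MASK result; otherwise that side falls back to
;   p0' = (2*p1 + p0 + q1 + 2) >> 2.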
532 %macro LUMA_H_INTRA_LOAD 0
538 TRANSPOSE4x4W 4, 5, 0, 1, 2
546 TRANSPOSE4x4W 2, 3, 4, 5, 6
558 TRANSPOSE8x8W 4, 5, 0, 1, 2, 3, 6, 7, t4, t5
566 ; in: %1=q3 %2=q2' %3=q1' %4=q0' %5=p0' %6=p1' %7=p2' %8=p3 %9=tmp
567 %macro LUMA_H_INTRA_STORE 9
569 TRANSPOSE4x4W %1, %2, %3, %4, %9
572 movq [r0+r1*2-8], m%3
575 TRANSPOSE4x4W %5, %6, %7, %1, %9
581 TRANSPOSE2x4x4W %1, %2, %3, %4, %9
584 movq [r0+r1*2-8], m%3
587 movhps [r4+r1-8], m%2
588 movhps [r4+r1*2-8], m%3
589 movhps [r4+r5-8], m%4
595 TRANSPOSE2x4x4W %5, %6, %7, %1, %9
602 movhps [r4+r1*2], m%7
608 ;-----------------------------------------------------------------------------
609 ; void deblock_v_luma_intra( uint16_t *pix, intptr_t stride, int alpha, int beta )
610 ;-----------------------------------------------------------------------------
611 %macro DEBLOCK_LUMA_INTRA_64 0
612 cglobal deblock_v_luma_intra, 4,7,16
626 lea r5, [r1*3] ; 3*stride
628 add r4, r0 ; pix-4*stride
631 LOAD_AB aa, bb, r2d, r3d
640 LOAD_MASK p1, p0, q0, q1, aa, bb, m3, t0, t1
643 paddw t2, m0 ; alpha/4+2
644 DIFF_LT p0, q0, t2, m6, t0 ; m6 = |p0-q0| < alpha/4+2
645 DIFF_LT p2, p0, bb, t1, t0 ; t1 = |p2-p0| < beta
646 DIFF_LT q2, q0, bb, m7, t0 ; m7 = |q2-q0| < beta
650 LUMA_INTRA_P012 p0, p1, p2, [r4], q0, q1, m3, m6, m0, [r4+r5], [r4+2*r1], [r4+r1]
651 LUMA_INTRA_P012 q0, q1, q2, [r0+r5], p0, p1, m3, m7, m0, [r0], [r0+r1], [r0+2*r1]
658 ;-----------------------------------------------------------------------------
659 ; void deblock_h_luma_intra( uint16_t *pix, intptr_t stride, int alpha, int beta )
660 ;-----------------------------------------------------------------------------
661 cglobal deblock_h_luma_intra, 4,7,16
674 %assign pad 24-(stack_offset&15)
678 lea r5, [r1*3] ; 3*stride
679 add r4, r0 ; pix+4*stride
691 TRANSPOSE8x8W 5, 8, 9, 10, 11, 12, 13, 4, 1
693 LOAD_AB m1, m2, r2d, r3d
694 LOAD_MASK q1, q0, p0, p1, m1, m2, m3, t0, t1
696 paddw m1, m0 ; alpha/4+2
697 DIFF_LT p0, q0, m1, m6, t0 ; m6 = |p0-q0| < alpha/4+2
698 DIFF_LT q2, q0, m2, t1, t0 ; t1 = |q2-q0| < beta
699 DIFF_LT p0, p2, m2, m7, t0 ; m7 = |p2-p0| < beta
705 LUMA_INTRA_P012 q0, q1, q2, q3, p0, p1, m3, m6, m0, m5, m1, q2
706 LUMA_INTRA_P012 p0, p1, p2, p3, q0, q1, m3, m7, m0, p0, m6, p2
709 LUMA_H_INTRA_STORE 7, 8, 1, 5, 11, 6, 13, 4, 14
720 DEBLOCK_LUMA_INTRA_64
722 DEBLOCK_LUMA_INTRA_64
726 %macro DEBLOCK_LUMA_INTRA 0
727 ;-----------------------------------------------------------------------------
728 ; void deblock_v_luma_intra( uint16_t *pix, intptr_t stride, int alpha, int beta )
729 ;-----------------------------------------------------------------------------
730 cglobal deblock_v_luma_intra, 4,7,8
738 mova m0, [r4+r1*2] ; p1
739 mova m1, [r4+r5] ; p0
741 mova m3, [r0+r1] ; q1
742 LUMA_INTRA_INTER t4, t5, t6, [r4+r1], [r0+r1*2]
743 LUMA_INTRA_P012 m1, m0, t3, [r4], m2, m3, t5, t4, [pw_2], [r4+r5], [r4+2*r1], [r4+r1]
744 mova t3, [r0+r1*2] ; q2
745 LUMA_INTRA_P012 m2, m3, t3, [r0+r5], m1, m0, t5, t6, [pw_2], [r0], [r0+r1], [r0+2*r1]
753 ;-----------------------------------------------------------------------------
754 ; void deblock_h_luma_intra( uint16_t *pix, intptr_t stride, int alpha, int beta )
755 ;-----------------------------------------------------------------------------
756 cglobal deblock_h_luma_intra, 4,7,8
763 lea r5, [r1*3] ; 3*stride
764 add r4, r0 ; pix+4*stride
769 LUMA_INTRA_INTER t8, t9, t10, t5, t6
771 LUMA_INTRA_P012 m1, m0, t3, t4, m2, m3, t9, t8, [pw_2], t8, t5, t11
773 LUMA_INTRA_P012 m2, m3, t3, t7, m1, m0, t9, t10, [pw_2], m4, t6, m5
781 LUMA_H_INTRA_STORE 2, 0, 1, 3, 4, 6, 5, t7, 7
783 lea r0, [r0+r1*(mmsize/2)]
787 lea r4, [r4+r1*(mmsize/2)]
806 %endif ; HIGH_BIT_DEPTH
808 %if HIGH_BIT_DEPTH == 0
809 ; expands to [base],...,[base+7*stride]
810 %define PASS8ROWS(base, base3, stride, stride3) \
811 [base], [base+stride], [base+stride*2], [base3], \
812 [base3+stride], [base3+stride*2], [base3+stride3], [base3+stride*4]
814 %define PASS8ROWS(base, base3, stride, stride3, offset) \
815 PASS8ROWS(base+offset, base3+offset, stride, stride3)
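; e.g. PASS8ROWS(p, p+3*stride, stride, 3*stride) yields the eight row operands
; [p], [p+stride], ..., [p+7*stride]; the 5-argument form just offsets both bases.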
817 ; in: 8 rows of 4 bytes in %4..%11
818 ; out: 4 rows of 8 bytes in m0..m3
819 %macro TRANSPOSE4x8_LOAD 11
846 ; in: 4 rows of 8 bytes in m0..m3
847 ; out: 8 rows of 4 bytes in %1..%8
848 %macro TRANSPOSE8x4B_STORE 8
877 %macro TRANSPOSE4x8B_LOAD 8
878 TRANSPOSE4x8_LOAD bw, wd, dq, %1, %2, %3, %4, %5, %6, %7, %8
881 %macro TRANSPOSE4x8W_LOAD 8
883 TRANSPOSE4x8_LOAD wd, dq, qdq, %1, %2, %3, %4, %5, %6, %7, %8
890 TRANSPOSE4x4W 0, 1, 2, 3, 4
894 %macro TRANSPOSE8x2W_STORE 8
927 ; in: 8 rows of 8 (only the middle 6 pels are used) in %1..%8
928 ; out: 6 rows of 8 in [%9+0*16] .. [%9+5*16]
929 %macro TRANSPOSE6x8_MEM 9
938 SBUTTERFLY bw, 0, 1, 7
939 SBUTTERFLY bw, 2, 3, 7
940 SBUTTERFLY bw, 4, 5, 7
942 SBUTTERFLY3 bw, m6, %8, m7
943 SBUTTERFLY wd, 0, 2, 3
944 SBUTTERFLY wd, 4, 6, 3
947 SBUTTERFLY3 wd, m1, [%9+0x10], m3
948 SBUTTERFLY wd, 5, 7, 0
949 SBUTTERFLY dq, 1, 5, 0
950 SBUTTERFLY dq, 2, 6, 0
960 ; in: 8 rows of 8 in %1..%8
961 ; out: 8 rows of 8 in %9..%16
962 %macro TRANSPOSE8x8_MEM 16
971 SBUTTERFLY bw, 0, 1, 7
972 SBUTTERFLY bw, 2, 3, 7
973 SBUTTERFLY bw, 4, 5, 7
974 SBUTTERFLY3 bw, m6, %8, m7
976 SBUTTERFLY wd, 0, 2, 5
977 SBUTTERFLY wd, 4, 6, 5
978 SBUTTERFLY wd, 1, 3, 5
981 SBUTTERFLY wd, 6, 7, 5
982 SBUTTERFLY dq, 0, 4, 5
983 SBUTTERFLY dq, 1, 6, 5
988 SBUTTERFLY3 dq, m2, %11, m0
989 SBUTTERFLY dq, 3, 7, 4
997 ; out: %4 = |%1-%2|>%3
1000 %if avx_enabled == 0
1013 ; out: %4 = |%1-%2|>%3
1030 ; in: m0=p1 m1=p0 m2=q0 m3=q1 %1=alpha %2=beta
1031 ; out: m5=beta-1, m7=mask, %3=alpha-1
1033 %macro LOAD_MASK 2-3
1049 psubusb m4, m6 ; alpha - 1
1050 psubusb m5, m6 ; beta - 1
1054 DIFF_GT m1, m2, m4, m7, m6 ; |p0-q0| > alpha-1
1055 DIFF_GT m0, m1, m5, m4, m6 ; |p1-p0| > beta-1
1057 DIFF_GT m3, m2, m5, m4, m6 ; |q1-q0| > beta-1
1063 ; in: m0=p1 m1=p0 m2=q0 m3=q1 m7=(tc&mask)
1064 ; out: m1=p0' m2=q0'
1066 %macro DEBLOCK_P0_Q0 0
1067 pxor m5, m1, m2 ; p0^q0
1068 pand m5, [pb_1] ; (p0^q0)&1
1071 pavgb m3, m0 ; (p1 - q1 + 256)>>1
1072 pavgb m3, [pb_3] ; (((p1 - q1 + 256)>>1)+4)>>1 = 64+2+(p1-q1)>>2
1074 pavgb m4, m2 ; (q0 - p0 + 256)>>1
1076 paddusb m3, m4 ; d+128+33
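; Idea behind the byte-domain code above (a sketch): it realises the same clipped
; delta as the 16-bit DEBLOCK_P0_Q0, but keeps everything unsigned. The pavgb
; chain accumulates delta plus a fixed bias (the "+128+33" above); saturating
; subtractions against that bias then split delta into its positive and negative
; parts, pminub against m7 (= tc & mask) clamps each part to tc, and saturating
; adds/subs apply +delta to p0 and -delta to q0.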
1089 ; %1=p1 %2=q2 %3=[q2] %4=[q1] %5=tc0 %6=tmp
1090 ; out: [q1] = clip( (q2+((p0+q0+1)>>1))>>1, q1-tc0, q1+tc0 )
1091 ; clobbers: q2, tmp, tc0
1094 pavgb %2, %6 ; avg(p2,avg(p0,q0))
1096 pand %6, [pb_1] ; (p2^avg(p0,q0))&1
1097 psubusb %2, %6 ; (p2+((p0+q0+1)>>1))>>1
1106 ;-----------------------------------------------------------------------------
1107 ; void deblock_v_luma( uint8_t *pix, intptr_t stride, int alpha, int beta, int8_t *tc0 )
1108 ;-----------------------------------------------------------------------------
1109 %macro DEBLOCK_LUMA 0
1110 cglobal deblock_v_luma, 5,5,10
1114 add r4, r0 ; pix-3*stride
1116 mova m0, [r4+r1] ; p1
1117 mova m1, [r4+2*r1] ; p0
1119 mova m3, [r0+r1] ; q1
1123 pshufb m8, [pb_unpackbd1]
1124 pblendvb m9, m7, m6, m8
1127 punpcklbw m8, m8 ; tc = 4x tc0[3], 4x tc0[2], 4x tc0[1], 4x tc0[0]
1135 DIFF_GT2 m1, m3, m5, m6, m7 ; |p2-p0| > beta-1
1137 psubb m7, m8, m6 ; tc++
1139 LUMA_Q1 m0, m3, [r4], [r4+r1], m6, m4
1141 mova m4, [r0+2*r1] ; q2
1142 DIFF_GT2 m2, m4, m5, m6, m3 ; |q2-q0| > beta-1
1147 LUMA_Q1 m3, m4, [r0+2*r1], [r0+r1], m8, m6
1154 ;-----------------------------------------------------------------------------
1155 ; void deblock_h_luma( uint8_t *pix, intptr_t stride, int alpha, int beta, int8_t *tc0 )
1156 ;-----------------------------------------------------------------------------
1158 cglobal deblock_h_luma, 5,9,0,0x60+16*WIN64
1163 %define pix_tmp rsp+0x30 ; shadow space + r4
1168 ; transpose 6x16 -> tmp space
1169 TRANSPOSE6x8_MEM PASS8ROWS(r6, r5, r1, r8), pix_tmp
1172 TRANSPOSE6x8_MEM PASS8ROWS(r6, r5, r1, r8), pix_tmp+8
1175 ; alpha, beta, tc0 are still in r2d, r3d, r4
1176 ; don't back up r6, r5, r7 or r8 because deblock_v_luma_sse2 doesn't use them
1178 lea r0, [pix_tmp+0x30]
1185 ; transpose 16x4 -> original space (only the middle 4 rows were changed by the filter)
1188 movq m0, [pix_tmp+0x18]
1189 movq m1, [pix_tmp+0x28]
1190 movq m2, [pix_tmp+0x38]
1191 movq m3, [pix_tmp+0x48]
1192 TRANSPOSE8x4B_STORE PASS8ROWS(r6, r5, r7, r8)
1198 movq m0, [pix_tmp+0x10]
1199 movq m1, [pix_tmp+0x20]
1200 movq m2, [pix_tmp+0x30]
1201 movq m3, [pix_tmp+0x40]
1202 TRANSPOSE8x4B_STORE PASS8ROWS(r6, r5, r7, r8)
1214 %macro DEBLOCK_LUMA 2
1215 ;-----------------------------------------------------------------------------
1216 ; void deblock_v8_luma( uint8_t *pix, intptr_t stride, int alpha, int beta, int8_t *tc0 )
1217 ;-----------------------------------------------------------------------------
1218 cglobal deblock_%1_luma, 5,5,8,2*%2
1221 add r4, r0 ; pix-3*stride
1223 mova m0, [r4+r1] ; p1
1224 mova m1, [r4+2*r1] ; p0
1226 mova m3, [r0+r1] ; q1
1232 pshufb m4, [pb_unpackbd1]
1233 mova [esp+%2], m4 ; tc
1234 pblendvb m4, m7, m6, m4
1237 punpcklbw m4, m4 ; tc = 4x tc0[3], 4x tc0[2], 4x tc0[1], 4x tc0[0]
1238 mova [esp+%2], m4 ; tc
1243 mova [esp], m4 ; mask
1246 DIFF_GT2 m1, m3, m5, m6, m7 ; |p2-p0| > beta-1
1248 pand m4, [esp+%2] ; tc
1251 LUMA_Q1 m0, m3, [r4], [r4+r1], m6, m4
1253 mova m4, [r0+2*r1] ; q2
1254 DIFF_GT2 m2, m4, m5, m6, m3 ; |q2-q0| > beta-1
1255 mova m5, [esp] ; mask
1257 mova m5, [esp+%2] ; tc
1261 LUMA_Q1 m3, m4, [r0+2*r1], [r0+r1], m5, m6
1268 ;-----------------------------------------------------------------------------
1269 ; void deblock_h_luma( uint8_t *pix, intptr_t stride, int alpha, int beta, int8_t *tc0 )
1270 ;-----------------------------------------------------------------------------
1272 cglobal deblock_h_luma, 0,5,8,0x60+HAVE_ALIGNED_STACK*12
1278 %define pix_tmp esp+12*HAVE_ALIGNED_STACK
1280 ; transpose 6x16 -> tmp space
1281 TRANSPOSE6x8_MEM PASS8ROWS(r0, r1, r3, r4), pix_tmp
1284 TRANSPOSE6x8_MEM PASS8ROWS(r0, r1, r3, r4), pix_tmp+8
1287 lea r0, [pix_tmp+0x30]
1293 call deblock_%1_luma
1295 add dword [esp ], 8 ; pix_tmp+0x38
1296 add dword [esp+16], 2 ; tc0+2
1297 call deblock_%1_luma
1301 ; transpose 16x4 -> original space (only the middle 4 rows were changed by the filter)
1306 movq m0, [pix_tmp+0x10]
1307 movq m1, [pix_tmp+0x20]
1308 movq m2, [pix_tmp+0x30]
1309 movq m3, [pix_tmp+0x40]
1310 TRANSPOSE8x4B_STORE PASS8ROWS(r0, r1, r3, r4)
1314 movq m0, [pix_tmp+0x18]
1315 movq m1, [pix_tmp+0x28]
1316 movq m2, [pix_tmp+0x38]
1317 movq m3, [pix_tmp+0x48]
1318 TRANSPOSE8x4B_STORE PASS8ROWS(r0, r1, r3, r4)
1321 %endmacro ; DEBLOCK_LUMA
1334 %macro LUMA_INTRA_P012 4 ; p0..p3 in memory
1344 pavgb t0, t1 ; ((p2+p1+1)/2 + (p0+q0+1)/2 + 1)/2
1362 psubb t0, t2 ; p1' = (p2+p1+p0+q0+2)/4;
1374 psubb t3, t2 ; p2+2*p1+2*p0+2*q0+q1
1378 pavgb t1, t5 ; (((p2+q1)/2 + p1+1)/2 + (p0+q0+1)/2 + 1)/2
1383 psubb t1, t3 ; p0'a = (p2+2*p1+2*p0+2*q0+q1+4)/8
1389 pavgb t2, p1 ; p0'b = (2*p1+p0+q0+2)/4
1397 mova %1, t1 ; store p0
1402 pavgb t1, t0 ; (p3+p2+1)/2 + (p2+p1+p0+q0+2)/4
1404 paddb t2, t4 ; 2*p3+3*p2+p1+p0+q0
1409 psubb t1, t2 ; p2' = (2*p3+3*p2+p1+p0+q0+4)/8
1417 mova %2, t0 ; store p1
1418 mova %3, t1 ; store p2
1421 %macro LUMA_INTRA_SWAP_PQ 0
1427 %define mask1p mask1q
1430 %macro DEBLOCK_LUMA_INTRA 1
1447 %define mask1q [rsp]
1449 %define mask1q [rsp-24]
1454 %define spill(x) [esp+16*x]
1456 %define q2 [r0+2*r1]
1459 %define mask0 spill(2)
1460 %define mask1p spill(3)
1461 %define mask1q spill(4)
1462 %define mpb_0 [pb_0]
1463 %define mpb_1 [pb_1]
1466 ;-----------------------------------------------------------------------------
1467 ; void deblock_v_luma_intra( uint8_t *pix, intptr_t stride, int alpha, int beta )
1468 ;-----------------------------------------------------------------------------
1469 cglobal deblock_%1_luma_intra, 4,6,16,0-(1-ARCH_X86_64)*0x50-WIN64*0x10
1471 lea r5, [r1*3] ; 3*stride
1473 add r4, r0 ; pix-4*stride
1481 LOAD_MASK r2d, r3d, t5 ; m5=beta-1, t5=alpha-1, m7=mask0
1482 SWAP 7, 12 ; m12=mask0
1484 pavgb t5, mpb_1 ; alpha/4+1
1486 movdqa q2, [r0+2*r1]
1487 DIFF_GT2 p0, q0, t5, t0, t3 ; t0 = |p0-q0| > alpha/4+1
1488 DIFF_GT2 p0, p2, m5, t2, t5, 1 ; mask1 = |p2-p0| > beta-1
1489 DIFF_GT2 q0, q2, m5, t4, t5, 1 ; t4 = |q2-q0| > beta-1
1496 LOAD_MASK r2d, r3d, t5 ; m5=beta-1, t5=alpha-1, m7=mask0
1500 pavgb m4, [pb_1] ; alpha/4+1
1501 DIFF_GT2 p0, q0, m4, m6, m7 ; m6 = |p0-q0| > alpha/4+1
1503 DIFF_GT2 p0, p2, m5, m4, m7, 1 ; m4 = |p2-p0| > beta-1
1506 DIFF_GT2 q0, q2, m5, m4, m7, 1 ; m4 = |q2-q0| > beta-1
1510 LUMA_INTRA_P012 [r4+r5], [r4+2*r1], [r4+r1], [r4]
1512 LUMA_INTRA_P012 [r0], [r0+r1], [r0+2*r1], [r0+r5]
1518 ;-----------------------------------------------------------------------------
1519 ; void deblock_h_luma_intra( uint8_t *pix, intptr_t stride, int alpha, int beta )
1520 ;-----------------------------------------------------------------------------
1521 cglobal deblock_h_luma_intra, 4,9,0,0x80
1526 %define pix_tmp rsp+0x20 ; shadow space
1531 ; transpose 8x16 -> tmp space
1532 TRANSPOSE8x8_MEM PASS8ROWS(r6, r5, r1, r8), PASS8ROWS(pix_tmp, pix_tmp+0x30, 0x10, 0x30)
1535 TRANSPOSE8x8_MEM PASS8ROWS(r6, r5, r1, r8), PASS8ROWS(pix_tmp+8, pix_tmp+0x38, 0x10, 0x30)
1538 lea r0, [pix_tmp+0x40]
1540 call deblock_v_luma_intra
1542 ; transpose 16x6 -> original space (but we can't write only 6 pixels, so really 16x8)
1544 TRANSPOSE8x8_MEM PASS8ROWS(pix_tmp+8, pix_tmp+0x38, 0x10, 0x30), PASS8ROWS(r6, r5, r7, r8)
1549 TRANSPOSE8x8_MEM PASS8ROWS(pix_tmp, pix_tmp+0x30, 0x10, 0x30), PASS8ROWS(r6, r5, r7, r8)
1552 cglobal deblock_h_luma_intra, 2,4,8,0x80
1558 ; transpose 8x16 -> tmp space
1559 TRANSPOSE8x8_MEM PASS8ROWS(r0, r2, r1, r3), PASS8ROWS(pix_tmp, pix_tmp+0x30, 0x10, 0x30)
1562 TRANSPOSE8x8_MEM PASS8ROWS(r0, r2, r1, r3), PASS8ROWS(pix_tmp+8, pix_tmp+0x38, 0x10, 0x30)
1564 lea r0, [pix_tmp+0x40]
1569 call deblock_%1_luma_intra
1571 add dword [rsp], 8 ; pix_tmp+8
1572 call deblock_%1_luma_intra
1581 ; transpose 16x6 -> original space (but we can't write only 6 pixels, so really 16x8)
1582 TRANSPOSE8x8_MEM PASS8ROWS(pix_tmp, pix_tmp+0x30, 0x10, 0x30), PASS8ROWS(r0, r2, r1, r3)
1585 TRANSPOSE8x8_MEM PASS8ROWS(pix_tmp+8, pix_tmp+0x38, 0x10, 0x30), PASS8ROWS(r0, r2, r1, r3)
1587 %endif ; ARCH_X86_64
1588 %endmacro ; DEBLOCK_LUMA_INTRA
1591 DEBLOCK_LUMA_INTRA v
1593 DEBLOCK_LUMA_INTRA v
1594 %if ARCH_X86_64 == 0
1596 DEBLOCK_LUMA_INTRA v8
1598 %endif ; !HIGH_BIT_DEPTH
1601 ; in: %1=p0, %2=q0, %3=p1, %4=q1, %5=mask, %6=tmp, %7=tmp
1602 ; out: %1=p0', %2=q0'
1603 %macro CHROMA_DEBLOCK_P0_Q0_INTRA 7
1623 %macro CHROMA_H_LOAD 0-1
1624 movq m0, [r0-8] ; p1 p1 p0 p0
1625 movq m2, [r0] ; q0 q0 q1 q1
1631 punpckldq m0, m5 ; p1
1632 punpckhdq m1, m5 ; p0
1633 punpckldq m2, m7 ; q0
1634 punpckhdq m3, m7 ; q1
1636 movq m4, [r0+r1*2-8]
1640 punpckldq m0, m5 ; p1 ... p0 ...
1641 punpckldq m2, m7 ; q0 ... q1 ...
1644 punpckhqdq m1, m0, m4 ; p0
1645 punpcklqdq m0, m4 ; p1
1646 punpckhqdq m3, m2, m6 ; q1
1647 punpcklqdq m2, m6 ; q0
1651 %macro CHROMA_V_LOAD 1
1653 mova m1, [r0+r1] ; p0
1655 mova m3, [%1+r1] ; q1
1658 ; clobbers: m1, m2, m3
1659 %macro CHROMA_H_STORE 0-1
1660 SBUTTERFLY dq, 1, 2, 3
1666 movq [r0+r1*2-4], m2
1667 movhps [r0+r1-4], m1
1668 movhps [r0+%1-4], m2
1672 %macro CHROMA_V_STORE 0
1677 %macro DEBLOCK_CHROMA 0
1678 cglobal deblock_inter_body
1679 LOAD_AB m4, m5, r2d, r3d
1680 LOAD_MASK m0, m1, m2, m3, m4, m5, m7, m6, m4
1685 DEBLOCK_P0_Q0 m1, m2, m0, m3, m7, m5, m6
1688 ;-----------------------------------------------------------------------------
1689 ; void deblock_v_chroma( uint16_t *pix, intptr_t stride, int alpha, int beta, int8_t *tc0 )
1690 ;-----------------------------------------------------------------------------
1691 cglobal deblock_v_chroma, 5,7,8
1699 call deblock_inter_body
1708 ;-----------------------------------------------------------------------------
1709 ; void deblock_h_chroma( uint16_t *pix, intptr_t stride, int alpha, int beta, int8_t *tc0 )
1710 ;-----------------------------------------------------------------------------
1711 cglobal deblock_h_chroma, 5,7,8
1719 call deblock_inter_body
1721 lea r0, [r0+r1*(mmsize/4)]
1728 cglobal deblock_intra_body
1729 LOAD_AB m4, m5, r2d, r3d
1730 LOAD_MASK m0, m1, m2, m3, m4, m5, m7, m6, m4
1731 CHROMA_DEBLOCK_P0_Q0_INTRA m1, m2, m0, m3, m7, m5, m6
1734 ;-----------------------------------------------------------------------------
1735 ; void deblock_v_chroma_intra( uint16_t *pix, intptr_t stride, int alpha, int beta )
1736 ;-----------------------------------------------------------------------------
1737 cglobal deblock_v_chroma_intra, 4,6,8
1747 call deblock_intra_body
1755 ;-----------------------------------------------------------------------------
1756 ; void deblock_h_chroma_intra( uint16_t *pix, intptr_t stride, int alpha, int beta )
1757 ;-----------------------------------------------------------------------------
1758 cglobal deblock_h_chroma_intra, 4,6,8
1766 call deblock_intra_body
1768 lea r0, [r0+r1*(mmsize/4)]
1773 ;-----------------------------------------------------------------------------
1774 ; void deblock_h_chroma_intra_mbaff( uint16_t *pix, intptr_t stride, int alpha, int beta )
1775 ;-----------------------------------------------------------------------------
1776 cglobal deblock_h_chroma_intra_mbaff, 4,6,8
1785 LOAD_AB m4, m5, r2d, r3d
1786 LOAD_MASK m0, m1, m2, m3, m4, m5, m7, m6, m4
1787 CHROMA_DEBLOCK_P0_Q0_INTRA m1, m2, m0, m3, m7, m5, m6
1790 lea r0, [r0+r1*(mmsize/4)]
1796 ;-----------------------------------------------------------------------------
1797 ; void deblock_h_chroma_mbaff( uint16_t *pix, intptr_t stride, int alpha, int beta, int8_t *tc0 )
1798 ;-----------------------------------------------------------------------------
1799 cglobal deblock_h_chroma_mbaff, 5,7,8
1807 LOAD_AB m4, m5, r2d, r3d
1808 LOAD_MASK m0, m1, m2, m3, m4, m5, m7, m6, m4
1814 DEBLOCK_P0_Q0 m1, m2, m0, m3, m7, m5, m6
1817 lea r0, [r0+r1*(mmsize/4)]
1824 ;-----------------------------------------------------------------------------
1825 ; void deblock_h_chroma_422_intra( uint16_t *pix, intptr_t stride, int alpha, int beta )
1826 ;-----------------------------------------------------------------------------
1827 cglobal deblock_h_chroma_422_intra, 4,6,8
1835 call deblock_intra_body
1837 lea r0, [r0+r1*(mmsize/4)]
1842 ;-----------------------------------------------------------------------------
1843 ; void deblock_h_chroma_422( uint16_t *pix, intptr_t stride, int alpha, int beta, int8_t *tc0 )
1844 ;-----------------------------------------------------------------------------
1845 cglobal deblock_h_chroma_422, 5,7,8
1851 LOAD_AB m4, m5, r2m, r3d
1852 LOAD_MASK m0, m1, m2, m3, m4, m5, m7, m6, m4
1859 DEBLOCK_P0_Q0 m1, m2, m0, m3, m7, m5, m6
1861 lea r0, [r0+r1*(mmsize/4)]
1867 add r4, r2 ; increment once every 2 iterations
1872 %endmacro ; DEBLOCK_CHROMA
1874 %if ARCH_X86_64 == 0
1882 %endif ; HIGH_BIT_DEPTH
1884 %if HIGH_BIT_DEPTH == 0
1885 %macro CHROMA_V_START 0
1895 %macro CHROMA_H_START 0
1902 %macro CHROMA_V_LOOP 1
1914 %macro CHROMA_H_LOOP 1
1929 %macro DEBLOCK_CHROMA 0
1930 cglobal chroma_inter_body
1939 ;-----------------------------------------------------------------------------
1940 ; void deblock_v_chroma( uint8_t *pix, intptr_t stride, int alpha, int beta, int8_t *tc0 )
1941 ;-----------------------------------------------------------------------------
1942 cglobal deblock_v_chroma, 5,6,8
1948 call chroma_inter_body
1954 ;-----------------------------------------------------------------------------
1955 ; void deblock_h_chroma( uint8_t *pix, intptr_t stride, int alpha, int beta, int8_t *tc0 )
1956 ;-----------------------------------------------------------------------------
1957 cglobal deblock_h_chroma, 5,7,8
1963 TRANSPOSE4x8W_LOAD PASS8ROWS(t5, r0, r1, t6)
1964 call chroma_inter_body
1965 TRANSPOSE8x2W_STORE PASS8ROWS(t5, r0, r1, t6, 2)
1968 %endmacro ; DEBLOCK_CHROMA
1974 %if ARCH_X86_64 == 0
1979 ;-----------------------------------------------------------------------------
1980 ; void deblock_h_chroma_mbaff( uint8_t *pix, intptr_t stride, int alpha, int beta, int8_t *tc0 )
1981 ;-----------------------------------------------------------------------------
1982 %macro DEBLOCK_H_CHROMA_420_MBAFF 0
1983 cglobal deblock_h_chroma_mbaff, 5,7,8
1988 TRANSPOSE4x8W_LOAD PASS8ROWS(t5, r0, r1, t6)
1994 TRANSPOSE8x2W_STORE PASS8ROWS(t5, r0, r1, t6, 2)
1999 DEBLOCK_H_CHROMA_420_MBAFF
2000 %if ARCH_X86_64 == 0
2002 DEBLOCK_H_CHROMA_420_MBAFF
2005 %macro DEBLOCK_H_CHROMA_422 0
2006 cglobal deblock_h_chroma_422, 5,8,8
2010 %define cntr dword r0m
2015 TRANSPOSE4x8W_LOAD PASS8ROWS(t5, r0, r1, t6)
2023 pshufw m6, m6, q0000
2027 TRANSPOSE8x2W_STORE PASS8ROWS(t5, r0, r1, t6, 2)
2028 lea r0, [r0+r1*(mmsize/2)]
2029 lea t5, [t5+r1*(mmsize/2)]
2037 DEBLOCK_H_CHROMA_422
2039 DEBLOCK_H_CHROMA_422
2041 DEBLOCK_H_CHROMA_422
2043 ; in: %1=p0 %2=p1 %3=q1
2044 ; out: p0 = (p0 + q1 + 2*p1 + 2) >> 2
2045 %macro CHROMA_INTRA_P0 3
2047 pand m4, [pb_1] ; m4 = (p0^q1)&1
2050 pavgb %1, %2 ; dst = avg(p1, avg(p0,q1) - ((p0^q1)&1))
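; Why this is exact: pavgb rounds up, so subtracting the parity bit ((p0^q1)&1)
; from avg(p0,q1) gives floor((p0+q1)/2); averaging that with p1 then reproduces
; (2*p1 + p0 + q1 + 2) >> 2 with no rounding error for any 8-bit inputs.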
2056 %macro DEBLOCK_CHROMA_INTRA_BODY 0
2057 cglobal chroma_intra_body
2061 CHROMA_INTRA_P0 m1, m0, m3
2062 CHROMA_INTRA_P0 m2, m3, m0
2072 %macro DEBLOCK_CHROMA_INTRA 0
2073 ;-----------------------------------------------------------------------------
2074 ; void deblock_v_chroma_intra( uint8_t *pix, intptr_t stride, int alpha, int beta )
2075 ;-----------------------------------------------------------------------------
2076 cglobal deblock_v_chroma_intra, 4,5,8
2082 call chroma_intra_body
2088 ;-----------------------------------------------------------------------------
2089 ; void deblock_h_chroma_intra( uint8_t *pix, intptr_t stride, int alpha, int beta )
2090 ;-----------------------------------------------------------------------------
2091 cglobal deblock_h_chroma_intra, 4,6,8
2097 TRANSPOSE4x8W_LOAD PASS8ROWS(t5, r0, r1, t6)
2098 call chroma_intra_body
2099 TRANSPOSE8x2W_STORE PASS8ROWS(t5, r0, r1, t6, 2)
2103 cglobal deblock_h_chroma_422_intra, 4,7,8
2107 TRANSPOSE4x8W_LOAD PASS8ROWS(t5, r0, r1, t6)
2108 call chroma_intra_body
2109 TRANSPOSE8x2W_STORE PASS8ROWS(t5, r0, r1, t6, 2)
2110 lea r0, [r0+r1*(mmsize/2)]
2111 lea t5, [t5+r1*(mmsize/2)]
2115 %endmacro ; DEBLOCK_CHROMA_INTRA
2118 DEBLOCK_CHROMA_INTRA_BODY
2119 DEBLOCK_CHROMA_INTRA
2121 DEBLOCK_CHROMA_INTRA_BODY
2122 DEBLOCK_CHROMA_INTRA
2124 DEBLOCK_CHROMA_INTRA_BODY
2125 %if ARCH_X86_64 == 0
2126 DEBLOCK_CHROMA_INTRA
2129 ;-----------------------------------------------------------------------------
2130 ; void deblock_h_chroma_intra_mbaff( uint8_t *pix, intptr_t stride, int alpha, int beta )
2131 ;-----------------------------------------------------------------------------
2133 cglobal deblock_h_chroma_intra_mbaff, 4,6,8
2135 TRANSPOSE4x8W_LOAD PASS8ROWS(t5, r0, r1, t6)
2136 call chroma_intra_body
2137 TRANSPOSE8x2W_STORE PASS8ROWS(t5, r0, r1, t6, 2)
2139 %endif ; !HIGH_BIT_DEPTH
2143 ;-----------------------------------------------------------------------------
2144 ; static void deblock_strength( uint8_t nnz[48], int8_t ref[2][40], int16_t mv[2][40][2],
2145 ; uint8_t bs[2][4][4], int mvy_limit, int bframe )
2146 ;-----------------------------------------------------------------------------
2148 %define scan8start (4+1*8)
2149 %define nnz r0+scan8start
2150 %define ref r1+scan8start
2151 %define mv r2+scan8start*4
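; Per-edge strength reference (a C-style sketch of the usual rule; the bframe path
; compares both reference lists, and mvy_limit is typically 4, or 2 for field MBs):
;   if( nnz[cur] || nnz[neighbour] )
;       bs = 2;
;   else if( ref[cur] != ref[neighbour] ||
;            abs( mv[cur][0] - mv[neighbour][0] ) >= 4 ||
;            abs( mv[cur][1] - mv[neighbour][1] ) >= mvy_limit )
;       bs = 1;
;   else
;       bs = 0;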
2155 %macro LOAD_BYTES_MMX 1
2160 punpckldq m2, [%1+8*1-1]
2161 punpckldq m0, [%1+8*1]
2162 punpckldq m3, [%1+8*3-1]
2163 punpckldq m1, [%1+8*3]
2166 %macro DEBLOCK_STRENGTH_REFS_MMX 0
2177 punpckldq m2, m0 ; row -1, row 0
2178 punpckldq m3, m1 ; row 1, row 2
2187 %macro DEBLOCK_STRENGTH_MVS_MMX 2
2200 %macro DEBLOCK_STRENGTH_NNZ_MMX 1
2207 pminub m4, m6 ; mv ? 1 : 0
2209 paddb m2, m2 ; nnz ? 2 : 0
2215 %macro LOAD_BYTES_XMM 1
2216 movu m2, [%1-4] ; FIXME could be aligned if we changed nnz's allocation
2219 shufps m2, m1, q3131 ; cur nnz, all rows
2221 shufps m0, m1, q3131 ; left neighbors
2223 movd m3, [%1-8] ; could be palignr if nnz was aligned
2224 por m1, m3 ; top neighbors
2228 cglobal deblock_strength, 6,6
2229 ; Prepare mv comparison register
2242 DEBLOCK_STRENGTH_REFS_MMX
2245 DEBLOCK_STRENGTH_MVS_MMX bs0, 4
2246 DEBLOCK_STRENGTH_MVS_MMX bs1, 4*8
2259 DEBLOCK_STRENGTH_NNZ_MMX bs0
2260 ; Transpose column output
2261 SBUTTERFLY bw, 2, 3, 4
2262 SBUTTERFLY bw, 2, 3, 4
2267 punpckldq m2, m0 ; row -1, row 0
2268 punpckldq m3, m1 ; row 1, row 2
2269 DEBLOCK_STRENGTH_NNZ_MMX bs1
2274 %macro DEBLOCK_STRENGTH_XMM 0
2275 cglobal deblock_strength, 6,6,7
2276 ; Prepare mv comparison register
2296 palignr m3, m0, [mv+4*8*0-16], 12
2297 palignr m2, m1, [mv+4*8*1-16], 12
2304 palignr m3, m2, [mv+4*8*2-16], 12
2306 palignr m3, m1, [mv+4*8*3-16], 12
2310 movu m0, [mv-4+4*8*0]
2311 movu m1, [mv-4+4*8*1]
2312 movu m2, [mv-4+4*8*2]
2313 movu m3, [mv-4+4*8*3]
2314 psubw m0, [mv+4*8*0]
2315 psubw m1, [mv+4*8*1]
2316 psubw m2, [mv+4*8*2]
2317 psubw m3, [mv+4*8*3]
2328 mova m0, [mv+4*8*-1]
2329 mova m1, [mv+4*8* 0]
2330 mova m2, [mv+4*8* 1]
2331 mova m3, [mv+4*8* 2]
2335 psubw m3, [mv+4*8* 3]
2356 pminub m4, m6 ; mv ? 1 : 0
2358 paddb m0, m0 ; nnz ? 2 : 0
2363 pshufb m4, [transpose_shuf]
2376 DEBLOCK_STRENGTH_XMM
2378 DEBLOCK_STRENGTH_XMM
2380 DEBLOCK_STRENGTH_XMM
2382 %macro LOAD_BYTES_YMM 1
2383 movu m0, [%1-4] ; ___E FGHI ___J KLMN ___O PQRS ___T UVWX
2384 pshufb m0, [load_bytes_shuf] ; EFGH JKLM FGHI KLMN OPQR TUVW PQRS UVWX
2385 mova m2, [insert_top_shuf]
2386 vpermq m1, m0, q3131 ; FGHI KLMN PQRS UVWX x2
2387 vpermd m0, m2, m0 ; EFGH JKLM OPQR TUVW ____ FGHI KLMN PQRS
2388 vpbroadcastd m2, [%1-8] ; ABCD ....
2389 vpblendd m0, m0, m2, 00010000b ; EFGH JKLM OPQR TUVW ABCD FGHI KLMN PQRS
2393 cglobal deblock_strength, 6,6,7
2394 ; Prepare mv comparison register
2398 vpbroadcastw m6, xm6
2399 pxor m5, m5 ; bs0,bs1
2408 movu xm0, [mv-4+4*8*0]
2409 vinserti128 m0, m0, [mv+4*8*-1], 1
2410 vbroadcasti128 m2, [mv+4*8* 0]
2411 vinserti128 m1, m2, [mv-4+4*8*1], 0
2412 vbroadcasti128 m3, [mv+4*8* 1]
2416 vinserti128 m2, m3, [mv-4+4*8*2], 0
2417 vbroadcasti128 m4, [mv+4*8* 2]
2418 vinserti128 m3, m4, [mv-4+4*8*3], 0
2420 vbroadcasti128 m4, [mv+4*8* 3]
2441 pminub m5, m6 ; mv ? 1 : 0
2442 paddb m0, m0 ; nnz ? 2 : 0
2444 vextracti128 [bs1], m5, 1
2445 pshufb xm5, [transpose_shuf]