X-Git-Url: https://git.sesse.net/?a=blobdiff_plain;f=libavcodec%2Fx86%2Fhevc_deblock.asm;h=3c69b5d92e8eddd58245cb5e49ae3551e8f17469;hb=dc69247de421503efd289dceb737cfb2a3cf7d6d;hp=c035668ed3887308b655072c58aeae744ec3e5ea;hpb=776647360db37628ccfcaf9a624544b2be45eb7a;p=ffmpeg

diff --git a/libavcodec/x86/hevc_deblock.asm b/libavcodec/x86/hevc_deblock.asm
index c035668ed38..3c69b5d92e8 100644
--- a/libavcodec/x86/hevc_deblock.asm
+++ b/libavcodec/x86/hevc_deblock.asm
@@ -329,8 +329,10 @@ ALIGN 16
     paddw m5, [pw_4]; +4
     psraw m5, 3; >> 3
+%if %1 > 8
     psllw m4, %1-8; << (BIT_DEPTH - 8)
     psllw m6, %1-8; << (BIT_DEPTH - 8)
+%endif
     pmaxsw m5, m4
     pminsw m5, m6
     paddw m1, m5; p0 + delta0
@@ -351,12 +353,16 @@ ALIGN 16
 ;beta calculations
     mov r11, [betaq];
+%if %1 > 8
     shl r11, %1 - 8
+%endif
     movd m13, r11d; beta0
     add betaq, 4;
     punpcklwd m13, m13
     mov r12, [betaq];
+%if %1 > 8
     shl r12, %1 - 8
+%endif
     movd m14, r12d; beta1
     punpcklwd m14, m14
     pshufd m13, m14, 0; beta0, beta1

@@ -376,7 +382,7 @@ ALIGN 16
     pcmpgtw m15, m13, m14; beta0, beta1
     movmskps r13, m15 ;filtering mask 0d0 + 0d3 < beta0 (bit 2 or 3) , 1d0 + 1d3 < beta1 (bit 0 or 1)
     cmp r13, 0
-    je .bypassluma
+    je .bypassluma

 ;weak / strong decision compare to beta_2
     psraw m15, m13, 2; beta >> 2
@@ -420,14 +426,18 @@ ALIGN 16
 ;decide between strong and weak filtering
 ;tc25 calculations
     mov r2d, [tcq];
+%if %1 > 8
     shl r2, %1 - 8
+%endif
     movd m8, r2d; tc0
     add tcq, 4;
     mov r3d, [tcq];
+%if %1 > 8
     shl r3, %1 - 8
+%endif
     movd m9, r3d; tc1
     add r2d, r3d; tc0 + tc1
-    jz .bypassluma
+    jz .bypassluma
     punpcklwd m8, m8
     punpcklwd m9, m9
     shufps m8, m9, 0; tc0, tc1
@@ -469,7 +479,7 @@ ALIGN 16
     and r14, r2; strong mask, bits 2 and 0

     pmullw m14, m9, [pw_m2]; -tc * 2
-    psllw m9, 1; tc * 2
+    paddw m9, m9
     and r14, 5; 0b101
     mov r2, r14; strong mask

@@ -489,7 +499,7 @@ ALIGN 16
     paddw m12, m2, m3; p1 + p0
     paddw m12, m4; p1 + p0 + q0
     mova m10, m12; copy
-    psllw m12, 1; 2*p1 + 2*p0 + 2*q0
+    paddw m12, m12; 2*p1 + 2*p0 + 2*q0
     paddw m12, m1; p2 + 2*p1 + 2*p0 + 2*q0
     paddw m12, m5; p2 + 2*p1 + 2*p0 + 2*q0 + q1
     paddw m12, m13; p2 + 2*p1 + 2*p0 + 2*q0 + q1 + 4
@@ -509,10 +519,10 @@ ALIGN 16
     paddw m15, m2; p1'

     paddw m8, m1, m0; p3 + p2
-    psllw m8, 1; 2*p3 + 2*p2
+    paddw m8, m8; 2*p3 + 2*p2
     paddw m8, m1; 2*p3 + 3*p2
     paddw m8, m10; 2*p3 + 3*p2 + p1 + p0 + q0
-    psllw m13, 1; 4 in every cell
+    paddw m13, m13
     paddw m8, m13; 2*p3 + 3*p2 + p1 + p0 + q0 + 4
     psraw m8, 3; (2*p3 + 3*p2 + p1 + p0 + q0 + 4) >> 3
     psubw m8, m1; ((2*p3 + 3*p2 + p1 + p0 + q0 + 4) >> 3) - p2
@@ -523,7 +533,7 @@ ALIGN 16

     paddw m8, m3, m4; p0 + q0
     paddw m8, m5; p0 + q0 + q1
-    psllw m8, 1; 2*p0 + 2*q0 + 2*q1
+    paddw m8, m8; 2*p0 + 2*q0 + 2*q1
     paddw m8, m2; p1 + 2*p0 + 2*q0 + 2*q1
     paddw m8, m6; p1 + 2*p0 + 2*q0 + 2*q1 + q2
     paddw m8, m13; p1 + 2*p0 + 2*q0 + 2*q1 + q2 + 4
@@ -548,8 +558,8 @@ ALIGN 16

     paddw m13, m7; q3 + 2
     paddw m13, m6; q3 + q2 + 2
-    psllw m13, 1; 2*q3 + 2*q2 + 4
-    paddw m13, m6; 2*q3 + 3*q2 + 4
+    paddw m13, m13; 2*q3 + 2*q2 + 4
+    paddw m13, m6; 2*q3 + 3*q2 + 4
     paddw m13, m10; 2*q3 + 3*q2 + q1 + q0 + p0 + 4
     psraw m13, 3; (2*q3 + 3*q2 + q1 + q0 + p0 + 4) >> 3
     psubw m13, m6; ((2*q3 + 3*q2 + q1 + q0 + p0 + 4) >> 3) - q2
@@ -565,7 +575,7 @@ ALIGN 16
 .weakfilter:
     not r14; strong mask -> weak mask
     and r14, r13; final weak filtering mask, bits 0 and 1
-    jz .store
+    jz .store

 ; weak filtering mask
     mov r2, r14
@@ -734,11 +744,11 @@ cglobal hevc_h_loop_filter_chroma_10, 3, 4, 7, pix, stride, tc, pix0
 ;-----------------------------------------------------------------------------
 cglobal hevc_v_loop_filter_luma_8, 4, 15, 16, pix, stride, beta, tc
     sub r0, 4
-    lea r5, [3*r1]
+    lea r5, [3 * r1]
     mov r6, r0
     add r0, r5
     TRANSPOSE8x8B_LOAD PASS8ROWS(r6, r0, r1, r5)
-    LUMA_DEBLOCK_BODY 8, v
+    LUMA_DEBLOCK_BODY 8, v
 .store:
     TRANSPOSE8x8B_STORE PASS8ROWS(r6, r0, r1, r5)
 .bypassluma:
     RET
@@ -746,11 +756,11 @@ cglobal hevc_v_loop_filter_luma_8, 4, 15, 16, pix, stride, beta, tc
 cglobal hevc_v_loop_filter_luma_10, 4, 15, 16, pix, stride, beta, tc
     sub pixq, 8
-    lea r5, [3*strideq]
+    lea r5, [3 * strideq]
     mov r6, pixq
     add pixq, r5
     TRANSPOSE8x8W_LOAD PASS8ROWS(r6, pixq, strideq, r5)
-    LUMA_DEBLOCK_BODY 10, v
+    LUMA_DEBLOCK_BODY 10, v
 .store:
     TRANSPOSE8x8W_STORE PASS8ROWS(r6, r0, r1, r5)
 .bypassluma:
     RET
@@ -760,18 +770,18 @@
 ; void ff_hevc_h_loop_filter_luma(uint8_t *_pix, ptrdiff_t _stride, int *_beta, int *_tc, uint8_t *_no_p, uint8_t *_no_q);
 ;-----------------------------------------------------------------------------
 cglobal hevc_h_loop_filter_luma_8, 4, 15, 16, pix, stride, beta, tc, count, pix0, src3stride
-    lea src3strideq, [3*strideq]
+    lea src3strideq, [3 * strideq]
     mov pix0q, pixq
     sub pix0q, src3strideq
     sub pix0q, strideq
-    movdqu m0, [pix0q]; p3
-    movdqu m1, [pix0q+strideq]; p2
-    movdqu m2, [pix0q+2*strideq]; p1
-    movdqu m3, [pix0q+src3strideq]; p0
-    movdqu m4, [pixq]; q0
-    movdqu m5, [pixq+strideq]; q1
-    movdqu m6, [pixq+2*strideq]; q2
-    movdqu m7, [pixq+src3strideq]; q3
+    movdqu m0, [pix0q]; p3
+    movdqu m1, [pix0q + strideq]; p2
+    movdqu m2, [pix0q + 2 * strideq]; p1
+    movdqu m3, [pix0q + src3strideq]; p0
+    movdqu m4, [pixq]; q0
+    movdqu m5, [pixq + strideq]; q1
+    movdqu m6, [pixq + 2 * strideq]; q2
+    movdqu m7, [pixq + src3strideq]; q3
     pxor m8, m8
     punpcklbw m0, m8
     punpcklbw m1, m8
@@ -781,7 +791,7 @@ cglobal hevc_h_loop_filter_luma_8, 4, 15, 16, pix, stride, beta, tc, count, pix0
     punpcklbw m5, m8
     punpcklbw m6, m8
     punpcklbw m7, m8
-    LUMA_DEBLOCK_BODY 8, h
+    LUMA_DEBLOCK_BODY 8, h
 .store:
     packuswb m1, m1; p2
     packuswb m2, m2; p1
@@ -799,33 +809,33 @@ cglobal hevc_h_loop_filter_luma_8, 4, 15, 16, pix, stride, beta, tc, count, pix0
     RET

 cglobal hevc_h_loop_filter_luma_10, 4, 15, 16, pix, stride, beta, tc, count, pix0, src3stride
-    lea src3strideq, [3*strideq]
-    mov pix0q, pixq
-    sub pix0q, src3strideq
-    sub pix0q, strideq
-    movdqu m0, [pix0q]; p3
-    movdqu m1, [pix0q+strideq]; p2
-    movdqu m2, [pix0q+2*strideq]; p1
-    movdqu m3, [pix0q+src3strideq]; p0
-    movdqu m4, [pixq]; q0
-    movdqu m5, [pixq+strideq]; q1
-    movdqu m6, [pixq+2*strideq]; q2
-    movdqu m7, [pixq+src3strideq]; q3
-    LUMA_DEBLOCK_BODY 10, h
+    lea src3strideq, [3 * strideq]
+    mov pix0q, pixq
+    sub pix0q, src3strideq
+    sub pix0q, strideq
+    movdqu m0, [pix0q]; p3
+    movdqu m1, [pix0q + strideq]; p2
+    movdqu m2, [pix0q + 2 * strideq]; p1
+    movdqu m3, [pix0q + src3strideq]; p0
+    movdqu m4, [pixq]; q0
+    movdqu m5, [pixq + strideq]; q1
+    movdqu m6, [pixq + 2 * strideq]; q2
+    movdqu m7, [pixq + src3strideq]; q3
+    LUMA_DEBLOCK_BODY 10, h
 .store:
-    pxor m8, m8; zeros reg
-    CLIPW m1, m8, [pw_pixel_max]
-    CLIPW m2, m8, [pw_pixel_max]
-    CLIPW m3, m8, [pw_pixel_max]
-    CLIPW m4, m8, [pw_pixel_max]
-    CLIPW m5, m8, [pw_pixel_max]
-    CLIPW m6, m8, [pw_pixel_max]
-    movdqu [pix0q+strideq], m1; p2
-    movdqu [pix0q+2*strideq], m2; p1
-    movdqu [pix0q+src3strideq], m3; p0
-    movdqu [pixq], m4; q0
-    movdqu [pixq+strideq], m5; q1
-    movdqu [pixq+2*strideq], m6; q2
+    pxor m8, m8; zeros reg
+    CLIPW m1, m8, [pw_pixel_max]
+    CLIPW m2, m8, [pw_pixel_max]
+    CLIPW m3, m8, [pw_pixel_max]
+    CLIPW m4, m8, [pw_pixel_max]
+    CLIPW m5, m8, [pw_pixel_max]
+    CLIPW m6, m8, [pw_pixel_max]
+    movdqu [pix0q + strideq], m1; p2
+    movdqu [pix0q + 2 * strideq], m2; p1
+    movdqu [pix0q + src3strideq], m3; p0
+    movdqu [pixq], m4; q0
+    movdqu [pixq + strideq], m5; q1
+    movdqu [pixq + 2 * strideq], m6; q2
 .bypassluma:
     RET
 %endmacro
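
Note (reader commentary, not part of the patch): the new "%if %1 > 8" guards exist because HEVC scales beta, tc and the clamping bounds by BIT_DEPTH - 8. At 8-bit depth the shift amount is zero, so the psllw/shl instructions can simply be dropped at assembly time instead of executing as no-ops. As a reference for the arithmetic the first guarded hunk performs, here is a minimal scalar sketch in C; the function names and the bit_depth parameter are illustrative only, not FFmpeg's actual C template code:

    #include <stdint.h>

    static int clip3(int x, int lo, int hi)
    {
        return x < lo ? lo : x > hi ? hi : x;
    }

    /* Weak chroma deblocking of one sample pair, matching the first hunk:
     * delta0 = clip3((((q0 - p0) << 2) + p1 - q1 + 4) >> 3, -tc, tc),
     * where tc is pre-scaled only for bit depths above 8, which is the
     * decision the new "%if %1 > 8" block makes at assembly time. */
    static void chroma_deblock_sample(int16_t *p0, int16_t *q0,
                                      int p1, int q1, int tc, int bit_depth)
    {
        int pixel_max = (1 << bit_depth) - 1;
        if (bit_depth > 8)                /* compile-time in the asm */
            tc <<= bit_depth - 8;
        int delta = clip3(((*q0 - *p0) * 4 + p1 - q1 + 4) >> 3, -tc, tc);
        *p0 = (int16_t)clip3(*p0 + delta, 0, pixel_max); /* p0 + delta0 */
        *q0 = (int16_t)clip3(*q0 - delta, 0, pixel_max); /* q0 - delta0 */
    }

The luma hunks apply the same rule to the scalar loads: beta0/beta1 and tc0/tc1 are shifted left by BIT_DEPTH - 8 right after being read, which is why each shl gained the same guard.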
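The psllw-to-paddw substitutions are exact rewrites rather than behaviour changes: "psllw x, 1" and "paddw x, x" both compute 2*x modulo 2^16 in every 16-bit lane, including on overflow. The add form is attractive because on many x86 cores vector adds can issue on more execution ports than vector shifts, so the replacement is generally not slower and often schedules better. A small SSE2 intrinsics sketch of the equivalence (helper names are mine, not from the source):

    #include <emmintrin.h> /* SSE2 */

    /* Corresponds to psllw x, 1: shift every 16-bit lane left by one. */
    static __m128i double_by_shift(__m128i x)
    {
        return _mm_slli_epi16(x, 1);
    }

    /* Corresponds to paddw x, x: identical result in every lane. */
    static __m128i double_by_add(__m128i x)
    {
        return _mm_add_epi16(x, x);
    }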