1 ;*****************************************************************************
2 ;* SSE2-optimized HEVC deblocking code
3 ;*****************************************************************************
4 ;* Copyright (C) 2013 VTT
6 ;* Authors: Seppo Tomperi <seppo.tomperi@vtt.fi>
8 ;* This file is part of FFmpeg.
10 ;* FFmpeg is free software; you can redistribute it and/or
11 ;* modify it under the terms of the GNU Lesser General Public
12 ;* License as published by the Free Software Foundation; either
13 ;* version 2.1 of the License, or (at your option) any later version.
15 ;* FFmpeg is distributed in the hope that it will be useful,
16 ;* but WITHOUT ANY WARRANTY; without even the implied warranty of
17 ;* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
18 ;* Lesser General Public License for more details.
20 ;* You should have received a copy of the GNU Lesser General Public
21 ;* License along with FFmpeg; if not, write to the Free Software
22 ;* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
23 ;******************************************************************************
; NOTE(review): this file is a fragmentary excerpt of an SSE2 HEVC deblock
; implementation — the embedded original line numbers jump, so many lines
; (constants, %if guards, %endmacro lines) are missing from this view.
25 %include "libavutil/x86/x86util.asm"
; Clipping limits for high-bit-depth samples: pw_pixel_max_10 aliases the
; shared pw_1023 constant (max 10-bit value); pw_pixel_max_12 is 8 words of
; 4095 (max 12-bit value). The 8-bit paths pack/saturate instead of CLIPW.
30 %define pw_pixel_max_10 pw_1023
31 pw_pixel_max_12: times 8 dw ((1 << 12)-1)
42 ; in: 8 rows of 4 bytes in %4..%11
43 ; out: 4 rows of 8 words in m0..m3
; Transpose-on-load for the vertical chroma filter: gathers a 4-wide column
; of 8 byte rows and leaves it as 4 rows of 8 words (p1 p0 q0 q1).
; NOTE(review): the macro takes 8 args (%1..%8); the "%4..%11" above looks
; stale — confirm against the full file.
; NOTE(review): macro body (original lines 45-72, incl. %endmacro) is not
; visible in this excerpt.
44 %macro TRANSPOSE4x8B_LOAD 8
73 ; in: 4 rows of 8 words in m0..m3
74 ; out: 8 rows of 4 bytes in %1..%8
; Inverse of TRANSPOSE4x8B_LOAD: transposes the filtered rows back and
; stores them as 8 rows of 4 bytes at the addresses given in %1..%8.
; NOTE(review): only the first SBUTTERFLY steps are visible here; the pack/
; store tail and %endmacro are missing from this excerpt.
75 %macro TRANSPOSE8x4B_STORE 8
78 SBUTTERFLY bw, 0, 1, 2
79 SBUTTERFLY wd, 0, 1, 2
98 ; in: 8 rows of 4 words in %4..%11
99 ; out: 4 rows of 8 words in m0..m3
; Word (high-bit-depth) variant of TRANSPOSE4x8B_LOAD for the vertical
; chroma filter.
; NOTE(review): the macro takes 8 args (%1..%8); "%4..%11" above looks
; stale — confirm. Body lines before/after the punpckhqdq pair are missing.
100 %macro TRANSPOSE4x8W_LOAD 8
121 punpckhqdq m1, m0, m4
123 punpckhqdq m3, m2, m6
128 ; in: 4 rows of 8 words in m0..m3
129 ; out: 8 rows of 4 words in %1..%8
; Word variant of TRANSPOSE8x4B_STORE. Takes 9 args: %1..%8 are the row
; addresses; %9 is the pixel-max clip constant (call sites pass
; [pw_pixel_max_10] / [pw_pixel_max_12]) used with the zero reg for CLIPW.
; NOTE(review): store tail and %endmacro are missing from this excerpt.
130 %macro TRANSPOSE8x4W_STORE 9
131 TRANSPOSE4x4W 0, 1, 2, 3, 4
133 pxor m5, m5; zeros reg
149 ; in: 8 rows of 8 bytes in %1..%8
150 ; out: 8 rows of 8 words in m0..m7
; Transpose-on-load for the vertical luma filter: loads an 8x8 byte block,
; transposes it, and widens each row to words (m13 is presumably the zero
; register for the unpacks — its setup line is not visible; confirm).
; NOTE(review): several body lines are missing from this excerpt.
151 %macro TRANSPOSE8x8B_LOAD 8
172 punpckldq m1, m3, m9; 0, 1
173 punpckhdq m3, m9; 2, 3
175 punpckldq m5, m7, m4; 4, 5
176 punpckhdq m7, m4; 6, 7
180 punpcklbw m0, m1, m13; 0 in 16 bit
181 punpckhbw m1, m13; 1 in 16 bit
183 punpcklbw m2, m3, m13; 2
186 punpcklbw m4, m5, m13; 4
189 punpcklbw m6, m7, m13; 6
194 ; in: 8 rows of 8 words in m0..m8
195 ; out: 8 rows of 8 bytes in %1..%8
; Inverse of TRANSPOSE8x8B_LOAD: packs the filtered word rows back to bytes
; and stores 8 rows of 8 bytes at %1..%8.
; NOTE(review): header says "m0..m8" where "m0..m7" is likely meant —
; confirm. Pack/store tail and %endmacro are missing from this excerpt.
196 %macro TRANSPOSE8x8B_STORE 8
201 TRANSPOSE2x4x4B 0, 1, 2, 3, 4
213 ; in: 8 rows of 8 words in %1..%8
214 ; out: 8 rows of 8 words in m0..m7
; Word (high-bit-depth) variant: loads 8 rows from the addresses in %1..%8
; (load lines not visible here) and transposes them in place via the shared
; TRANSPOSE8x8W helper (m8/arg 8 as scratch).
215 %macro TRANSPOSE8x8W_LOAD 8
224 TRANSPOSE8x8W 0, 1, 2, 3, 4, 5, 6, 7, 8
227 ; in: 8 rows of 8 words in m0..m8
228 ; out: 8 rows of 8 words in %1..%8
; Word variant of TRANSPOSE8x8B_STORE. Takes 9 args: %1..%8 row addresses,
; %9 the pixel-max clip constant (call sites pass [pw_pixel_max_10] /
; [pw_pixel_max_12]). Clip/store tail and %endmacro missing from excerpt.
229 %macro TRANSPOSE8x8W_STORE 9
230 TRANSPOSE8x8W 0, 1, 2, 3, 4, 5, 6, 7, 8
; NOTE(review): the two lines below are the tail of a MASKED_COPY macro
; whose header is not visible in this excerpt; m10 serves as scratch for
; the inverted-mask term. The final por/merge line is also missing.
258 pand %2, m11 ; and mask
259 pandn m10, m11, %1; and -mask
266 ; mask in %3, will be clobbered
; MASKED_COPY2: merge %1 into %2 under mask %3, i.e. presumably
; %2 = (%2 & %3) | (%1 & ~%3) — the concluding por line is not visible
; here; confirm against the full file. %3 is destroyed.
267 %macro MASKED_COPY2 3
268 pand %2, %3 ; and mask
269 pandn %3, %1; and -mask
275 ; input in m0 ... m3 and tcs in r2. Output in m1 and m2
; Chroma deblock core. %1 = bit depth (call sites use 8/10/12).
; Rows: m0=p1 m1=p0 m2=q0 m3=q1; writes filtered p0' -> m1, q0' -> m2.
; Computes delta0 = clip(((q0-p0)*4 + (p1-q1) + 4) >> 3, -tc, tc) per row,
; then p0' = p0 + delta0, q0' = q0 - delta0 (matches the visible tail).
276 %macro CHROMA_DEBLOCK_BODY 1
277 psubw m4, m2, m1; q0 - p0
278 psubw m5, m0, m3; p1 - q1
; Broadcast tc0/tc1 across the two 4-pixel halves of the register.
285 pshufd m6, m6, 0xA0; tc0, tc1
; NOTE(review): the next two lines compute the same -tc value two ways
; (SSSE3 psignw vs pmullw); they are presumably the two arms of a missing
; %if cpuflag(ssse3) / %else — the guard lines are absent from this excerpt.
287 psignw m4, m6, [pw_m1]; -tc0, -tc1
289 pmullw m4, m6, [pw_m1]; -tc0, -tc1
; Scale tc bounds from 8-bit units up to the working bit depth.
297 psllw m4, %1-8; << (BIT_DEPTH - 8)
298 psllw m6, %1-8; << (BIT_DEPTH - 8)
302 paddw m1, m5; p0 + delta0
303 psubw m2, m5; q0 - delta0
306 ; input in m0 ... m7, beta in r2 tcs in r3. Output in m1...m6
; Luma deblock core for two 4-pixel segments at once.
; %1 = bit depth (8/10/12), %2 = direction tag (v/h) used by call sites.
; Rows: m0=p3 m1=p2 m2=p1 m3=p0 m4=q0 m5=q1 m6=q2 m7=q3 (from the h-path
; load comments); outputs the filtered p2'..q2' in m1..m6.
; Uses GPRs up to r13, so callers request 14 registers (x86-64 only).
; NOTE(review): this excerpt omits many lines of the macro (the original
; numbering jumps, e.g. 311->316, 349->363, 526->537), including bypass
; jumps, %if cpuflag branches and the %endmacro — do not assume the visible
; lines are consecutive.
307 %macro LUMA_DEBLOCK_BODY 2
; --- d0/d3 activity measurement against beta (filter on/off decision) ---
311 ABS1 m10, m11 ; 0dp0, 0dp3 , 1dp0, 1dp3
316 ABS1 m11, m13 ; 0dq0, 0dq3 , 1dq0, 1dq3
324 ;end beta calculations
326 paddw m9, m10, m11; 0d0, 0d3 , 1d0, 1d3
328 pshufhw m14, m9, 0x0f ;0b00001111; 0d3 0d3 0d0 0d0 in high
329 pshuflw m14, m14, 0x0f ;0b00001111; 1d3 1d3 1d0 1d0 in low
331 pshufhw m9, m9, 0xf0 ;0b11110000; 0d0 0d0 0d3 0d3
332 pshuflw m9, m9, 0xf0 ;0b11110000; 1d0 1d0 1d3 1d3
334 paddw m14, m9; 0d0+0d3, 1d0+1d3
337 pcmpgtw m15, m13, m14
338 movmskps r13, m15 ;filtering mask 0d0 + 0d3 < beta0 (bit 2 or 3) , 1d0 + 1d3 < beta1 (bit 0 or 1)
342 ;weak / strong decision compare to beta_2
343 psraw m15, m13, 2; beta >> 2
345 pcmpgtw m15, m8; (d0 << 1) < beta_2, (d3 << 1) < beta_2
347 ;end weak / strong decision
349 ; weak filter nd_p/q calculation
363 ; end calc for weak filter
375 pcmpeqd m11, [pd_1]; filtering mask
377 ;decide between strong and weak filtering
; --- tc25 = (tc*5 + 1) >> 1, used by the strong-filter gate ---
388 add r11d, r3d; tc0 + tc1
393 shufps m8, m9, 0; tc0, tc1
396 pavgw m8, m9; tc25 = ((tc * 5 + 1) >> 1)
397 ;end tc25 calculations
399 ;----beta_3 comparison-----
400 psubw m12, m0, m3; p3 - p0
401 ABS1 m12, m14; abs(p3 - p0)
403 psubw m15, m7, m4; q3 - q0
404 ABS1 m15, m14; abs(q3 - q0)
406 paddw m12, m15; abs(p3 - p0) + abs(q3 - q0)
408 pshufhw m12, m12, 0xf0 ;0b11110000;
409 pshuflw m12, m12, 0xf0 ;0b11110000;
411 psraw m13, 3; beta >> 3
414 and r6, r11; strong mask , beta_2 and beta_3 comparisons
415 ;----beta_3 comparison end-----
416 ;----tc25 comparison---
417 psubw m12, m3, m4; p0 - q0
418 ABS1 m12, m14; abs(p0 - q0)
420 pshufhw m12, m12, 0xf0 ;0b11110000;
421 pshuflw m12, m12, 0xf0 ;0b11110000;
423 pcmpgtw m8, m12; tc25 comparisons
425 and r6, r11; strong mask, beta_2, beta_3 and tc25 comparisons
426 ;----tc25 comparison end---
429 and r6, r11; strong mask, bits 2 and 0
431 pmullw m14, m9, [pw_m2]; -tc * 2
435 mov r11, r6; strong mask
437 movd m12, r6d; store to xmm for mask generation
440 movd m10, r11d; store to xmm for mask generation
441 or r6, r11; final strong mask, bits 1 and 0
445 pcmpeqd m10, [pd_1]; strong mask
; --- strong filter: p0' (clipped to [-2tc, 2tc] around the original) ---
447 mova m13, [pw_4]; 4 in every cell
448 pand m11, m10; combine filtering mask and strong mask
449 paddw m12, m2, m3; p1 + p0
450 paddw m12, m4; p1 + p0 + q0
452 paddw m12, m12; 2*p1 + 2*p0 + 2*q0
453 paddw m12, m1; p2 + 2*p1 + 2*p0 + 2*q0
454 paddw m12, m5; p2 + 2*p1 + 2*p0 + 2*q0 + q1
455 paddw m12, m13; p2 + 2*p1 + 2*p0 + 2*q0 + q1 + 4
456 psraw m12, 3; ((p2 + 2*p1 + 2*p0 + 2*q0 + q1 + 4) >> 3)
457 psubw m12, m3; ((p2 + 2*p1 + 2*p0 + 2*q0 + q1 + 4) >> 3) - p0
459 pminsw m12, m9; av_clip( , -2 * tc, 2 * tc)
; --- strong filter: p1' (m10 holds the p1+p0+q0 partial per comments) ---
462 paddw m15, m1, m10; p2 + p1 + p0 + q0
463 psrlw m13, 1; 2 in every cell
464 paddw m15, m13; p2 + p1 + p0 + q0 + 2
465 psraw m15, 2; (p2 + p1 + p0 + q0 + 2) >> 2
466 psubw m15, m2;((p2 + p1 + p0 + q0 + 2) >> 2) - p1
468 pminsw m15, m9; av_clip( , -2 * tc, 2 * tc)
; --- strong filter: p2' ---
471 paddw m8, m1, m0; p3 + p2
472 paddw m8, m8; 2*p3 + 2*p2
473 paddw m8, m1; 2*p3 + 3*p2
474 paddw m8, m10; 2*p3 + 3*p2 + p1 + p0 + q0
476 paddw m8, m13; 2*p3 + 3*p2 + p1 + p0 + q0 + 4
477 psraw m8, 3; (2*p3 + 3*p2 + p1 + p0 + q0 + 4) >> 3
478 psubw m8, m1; ((2*p3 + 3*p2 + p1 + p0 + q0 + 4) >> 3) - p2
480 pminsw m8, m9; av_clip( , -2 * tc, 2 * tc)
; --- strong filter: q0' ---
484 paddw m8, m3, m4; p0 + q0
485 paddw m8, m5; p0 + q0 + q1
486 paddw m8, m8; 2*p0 + 2*q0 + 2*q1
487 paddw m8, m2; p1 + 2*p0 + 2*q0 + 2*q1
488 paddw m8, m6; p1 + 2*p0 + 2*q0 + 2*q1 + q2
489 paddw m8, m13; p1 + 2*p0 + 2*q0 + 2*q1 + q2 + 4
490 psraw m8, 3; (p1 + 2*p0 + 2*q0 + 2*q1 + q2 + 4) >>3
493 pminsw m8, m9; av_clip( , -2 * tc, 2 * tc)
; --- strong filter: q1' ---
497 paddw m15, m3, m4; p0 + q0
498 paddw m15, m5; p0 + q0 + q1
500 paddw m15, m6; p0 + q0 + q1 + q2
501 psrlw m13, 1; 2 in every cell
502 paddw m15, m13; p0 + q0 + q1 + q2 + 2
503 psraw m15, 2; (p0 + q0 + q1 + q2 + 2) >> 2
504 psubw m15, m5; ((p0 + q0 + q1 + q2 + 2) >> 2) - q1
506 pminsw m15, m9; av_clip( , -2 * tc, 2 * tc)
; --- strong filter: q2' (m13 presumably holds 2 per the q3+2 comment) ---
509 paddw m13, m7; q3 + 2
510 paddw m13, m6; q3 + q2 + 2
511 paddw m13, m13; 2*q3 + 2*q2 + 4
512 paddw m13, m6; 2*q3 + 3*q2 + 4
513 paddw m13, m10; 2*q3 + 3*q2 + q1 + q0 + p0 + 4
514 psraw m13, 3; (2*q3 + 3*q2 + q1 + q0 + p0 + 4) >> 3
515 psubw m13, m6; ((2*q3 + 3*q2 + q1 + q0 + p0 + 4) >> 3) - q2
517 pminsw m13, m9; av_clip( , -2 * tc, 2 * tc)
; --- weak filter path ---
526 not r6; strong mask -> weak mask
527 and r6, r13; final weak filtering mask, bits 0 and 1
530 ; weak filtering mask
537 pcmpeqd m11, [pd_1]; filtering mask
542 shr betaq, 3; ((beta + (beta >> 1)) >> 3))
; delta0 = (9*(q0-p0) - 3*(q1-p1) + 8) >> 4  (rounding add not visible
; in this excerpt — presumably on one of the missing lines; confirm)
545 psubw m12, m4, m3 ; q0 - p0
546 psllw m10, m12, 3; 8 * (q0 - p0)
547 paddw m12, m10 ; 9 * (q0 - p0)
549 psubw m10, m5, m2 ; q1 - p1
550 psllw m8, m10, 1; 2 * ( q1 - p1 )
551 paddw m10, m8; 3 * ( q1 - p1 )
552 psubw m12, m10; 9 * (q0 - p0) - 3 * ( q1 - p1 )
554 psraw m12, 4; >> 4 , delta0
555 PABSW m13, m12; abs(delta0)
; 10*tc threshold: m9 holds 2*tc here, so (2*tc)<<2 + 2*tc == 10*tc.
558 psllw m10, m9, 2; 8 * tc
559 paddw m10, m9; 10 * tc
563 psraw m9, 1; tc * 2 -> tc
564 psraw m14, 1; -tc * 2 -> -tc
567 pminsw m12, m9; av_clip(delta0, -tc, tc)
569 psraw m9, 1; tc -> tc / 2
; NOTE(review): the next two lines compute -tc/2 two ways (SSSE3 psignw vs
; pmullw); presumably the two arms of a missing %if cpuflag(ssse3)/%else.
571 psignw m14, m9, [pw_m1]; -tc / 2
573 pmullw m14, m9, [pw_m1]; -tc / 2
; deltap1 = clip((((p2 + p0 + 1) >> 1) - p1 + delta0) >> 1, -tc/2, tc/2)
576 pavgw m15, m1, m3; (p2 + p0 + 1) >> 1
577 psubw m15, m2; ((p2 + p0 + 1) >> 1) - p1
578 paddw m15, m12; ((p2 + p0 + 1) >> 1) - p1 + delta0
579 psraw m15, 1; (((p2 + p0 + 1) >> 1) - p1 + delta0) >> 1
581 pminsw m15, m9; av_clip(deltap1, -tc/2, tc/2)
; nd_p gate: dp0+dp3 per segment vs (beta + (beta>>1)) >> 3
588 movd m13, r7d; 1dp0 + 1dp3
589 movd m8, r8d; 0dp0 + 0dp3
595 ;end beta calculations
596 MASKED_COPY2 m2, m15, m8; write p1'
; deltaq1 = clip((((q2 + q0 + 1) >> 1) - q1 - delta0) >> 1, -tc/2, tc/2)
598 pavgw m8, m6, m4; (q2 + q0 + 1) >> 1
599 psubw m8, m5; ((q2 + q0 + 1) >> 1) - q1
600 psubw m8, m12; ((q2 + q0 + 1) >> 1) - q1 - delta0)
601 psraw m8, 1; ((q2 + q0 + 1) >> 1) - q1 - delta0) >> 1
603 pminsw m8, m9; av_clip(deltaq1, -tc/2, tc/2)
610 shufps m13, m15, 0; dq0 + dq3
612 pcmpgtw m10, m13; compare to ((beta+(beta>>1))>>3)
614 MASKED_COPY2 m5, m8, m10; write q1'
; weak p0'/q0': p0 + delta0 and q0 - delta0 (final merge lines missing)
616 paddw m15, m3, m12 ; p0 + delta0
619 psubw m8, m4, m12 ; q0 - delta0
623 ;-----------------------------------------------------------------------------
624 ; void ff_hevc_v_loop_filter_chroma(uint8_t *_pix, ptrdiff_t _stride, int32_t *tc,
625 ;                                   uint8_t *_no_p, uint8_t *_no_q);
626 ;-----------------------------------------------------------------------------
; LOOP_FILTER_CHROMA: instantiates the chroma deblock entry points for one
; SIMD flavor (the INIT_* + invocation lines are outside this excerpt, as is
; the %endmacro). Vertical variants transpose a 4x8 block, filter, and
; transpose back; the filter core is CHROMA_DEBLOCK_BODY with the bit depth
; as its argument.
627 %macro LOOP_FILTER_CHROMA 0
; cglobal: 3 named args used, 5 GPRs, 7 XMM regs. pix points at the q0 row;
; pix0 is derived (setup line missing here) to address the p-side rows.
628 cglobal hevc_v_loop_filter_chroma_8, 3, 5, 7, pix, stride, tc, pix0, r3stride
630 lea r3strideq, [3*strideq]
633 TRANSPOSE4x8B_LOAD PASS8ROWS(pix0q, pixq, strideq, r3strideq)
634 CHROMA_DEBLOCK_BODY 8
; 8-bit store path clips via byte packing inside the store macro.
635 TRANSPOSE8x4B_STORE PASS8ROWS(pix0q, pixq, strideq, r3strideq)
638 cglobal hevc_v_loop_filter_chroma_10, 3, 5, 7, pix, stride, tc, pix0, r3stride
640 lea r3strideq, [3*strideq]
643 TRANSPOSE4x8W_LOAD PASS8ROWS(pix0q, pixq, strideq, r3strideq)
644 CHROMA_DEBLOCK_BODY 10
; 9th macro arg = clip ceiling for 10-bit samples.
645 TRANSPOSE8x4W_STORE PASS8ROWS(pix0q, pixq, strideq, r3strideq), [pw_pixel_max_10]
648 cglobal hevc_v_loop_filter_chroma_12, 3, 5, 7, pix, stride, tc, pix0, r3stride
650 lea r3strideq, [3*strideq]
653 TRANSPOSE4x8W_LOAD PASS8ROWS(pix0q, pixq, strideq, r3strideq)
654 CHROMA_DEBLOCK_BODY 12
655 TRANSPOSE8x4W_STORE PASS8ROWS(pix0q, pixq, strideq, r3strideq), [pw_pixel_max_12]
658 ;-----------------------------------------------------------------------------
659 ; void ff_hevc_h_loop_filter_chroma(uint8_t *_pix, ptrdiff_t _stride, int32_t *tc,
660 ;                                   uint8_t *_no_p, uint8_t *_no_q);
661 ;-----------------------------------------------------------------------------
; Horizontal chroma: rows are contiguous, so no transpose is needed; load
; p1/p0 above the edge and q0/q1 below, filter, store p0'/q0'.
; NOTE(review): the p1/q0 loads and the q0' store are on lines missing from
; this excerpt.
662 cglobal hevc_h_loop_filter_chroma_8, 3, 4, 7, pix, stride, tc, pix0
667 movq m1, [pix0q+strideq]; p0
669 movq m3, [pixq+strideq]; q1
670 pxor m5, m5; zeros reg
675 CHROMA_DEBLOCK_BODY 8
; NOTE(review): "movh[" lacks the conventional space before '[' — NASM
; still tokenizes it, but confirm against the original formatting.
677 movh[pix0q+strideq], m1
681 cglobal hevc_h_loop_filter_chroma_10, 3, 4, 7, pix, stride, tc, pix0
686 movu m1, [pix0q+strideq]; p0
688 movu m3, [pixq+strideq]; q1
689 CHROMA_DEBLOCK_BODY 10
690 pxor m5, m5; zeros reg
; High-bit-depth paths clip explicitly to [0, pixel_max] before storing.
691 CLIPW m1, m5, [pw_pixel_max_10]
692 CLIPW m2, m5, [pw_pixel_max_10]
693 movu [pix0q+strideq], m1
697 cglobal hevc_h_loop_filter_chroma_12, 3, 4, 7, pix, stride, tc, pix0
702 movu m1, [pix0q+strideq]; p0
704 movu m3, [pixq+strideq]; q1
705 CHROMA_DEBLOCK_BODY 12
706 pxor m5, m5; zeros reg
707 CLIPW m1, m5, [pw_pixel_max_12]
708 CLIPW m2, m5, [pw_pixel_max_12]
709 movu [pix0q+strideq], m1
; LOOP_FILTER_LUMA: instantiates the luma deblock entry points for one SIMD
; flavor. All variants request 14 GPRs and 16 XMM regs (LUMA_DEBLOCK_BODY
; uses up to r13 and m15), so these are x86-64-only.
; NOTE(review): this excerpt omits interior lines (bypass checks, some
; loads/stores) and the closing %endmacro.
720 %macro LOOP_FILTER_LUMA 0
721 ;-----------------------------------------------------------------------------
722 ; void ff_hevc_v_loop_filter_luma(uint8_t *_pix, ptrdiff_t _stride, int beta,
723 ;                                 int32_t *tc, uint8_t *_no_p, uint8_t *_no_q);
724 ;-----------------------------------------------------------------------------
; Vertical luma: transpose an 8x8 block in, run the shared body, transpose
; the filtered rows back out.
725 cglobal hevc_v_loop_filter_luma_8, 4, 14, 16, pix, stride, beta, tc, pix0, src3stride
728 mov src3strideq, pixq
730 TRANSPOSE8x8B_LOAD PASS8ROWS(src3strideq, pixq, r1, pix0q)
731 LUMA_DEBLOCK_BODY 8, v
733 TRANSPOSE8x8B_STORE PASS8ROWS(src3strideq, pixq, r1, pix0q)
737 cglobal hevc_v_loop_filter_luma_10, 4, 14, 16, pix, stride, beta, tc, pix0, src3stride
739 lea pix0q, [3 * strideq]
740 mov src3strideq, pixq
742 TRANSPOSE8x8W_LOAD PASS8ROWS(src3strideq, pixq, strideq, pix0q)
743 LUMA_DEBLOCK_BODY 10, v
; NOTE(review): the store passes r1 where the load used strideq — r1 is
; the same register as strideq under cglobal naming, but strideq may have
; been clobbered by the body; mixing the two spellings is worth confirming.
745 TRANSPOSE8x8W_STORE PASS8ROWS(src3strideq, pixq, r1, pix0q), [pw_pixel_max_10]
749 cglobal hevc_v_loop_filter_luma_12, 4, 14, 16, pix, stride, beta, tc, pix0, src3stride
751 lea pix0q, [3 * strideq]
752 mov src3strideq, pixq
754 TRANSPOSE8x8W_LOAD PASS8ROWS(src3strideq, pixq, strideq, pix0q)
755 LUMA_DEBLOCK_BODY 12, v
757 TRANSPOSE8x8W_STORE PASS8ROWS(src3strideq, pixq, r1, pix0q), [pw_pixel_max_12]
761 ;-----------------------------------------------------------------------------
762 ; void ff_hevc_h_loop_filter_luma(uint8_t *_pix, ptrdiff_t _stride, int beta,
763 ;                                 int32_t *tc, uint8_t *_no_p, uint8_t *_no_q);
764 ;-----------------------------------------------------------------------------
; Horizontal luma: load p3..q3 rows directly (pix0 = pix - 3*stride region;
; the p3/q0 loads are on lines missing from this excerpt), filter, and
; store the six potentially-modified rows p2'..q2'.
765 cglobal hevc_h_loop_filter_luma_8, 4, 14, 16, pix, stride, beta, tc, pix0, src3stride
766 lea src3strideq, [3 * strideq]
768 sub pix0q, src3strideq
771 movq m1, [pix0q + strideq]; p2
772 movq m2, [pix0q + 2 * strideq]; p1
773 movq m3, [pix0q + src3strideq]; p0
775 movq m5, [pixq + strideq]; q1
776 movq m6, [pixq + 2 * strideq]; q2
777 movq m7, [pixq + src3strideq]; q3
787 LUMA_DEBLOCK_BODY 8, h
; 8-bit store: rows were packed pairwise (pack lines missing here), so
; movh/movhps emit the low/high halves of each register.
792 movh [pix0q + strideq], m1
793 movhps [pix0q + 2 * strideq], m1
794 movh [pix0q + src3strideq], m3
796 movh [pixq + strideq], m5
797 movhps [pixq + 2 * strideq], m5
801 cglobal hevc_h_loop_filter_luma_10, 4, 14, 16, pix, stride, beta, tc, pix0, src3stride
802 lea src3strideq, [3 * strideq]
804 sub pix0q, src3strideq
806 movdqu m0, [pix0q]; p3
807 movdqu m1, [pix0q + strideq]; p2
808 movdqu m2, [pix0q + 2 * strideq]; p1
809 movdqu m3, [pix0q + src3strideq]; p0
810 movdqu m4, [pixq]; q0
811 movdqu m5, [pixq + strideq]; q1
812 movdqu m6, [pixq + 2 * strideq]; q2
813 movdqu m7, [pixq + src3strideq]; q3
814 LUMA_DEBLOCK_BODY 10, h
; Clip the six filtered rows to the 10-bit range before storing.
816 pxor m8, m8; zeros reg
817 CLIPW m1, m8, [pw_pixel_max_10]
818 CLIPW m2, m8, [pw_pixel_max_10]
819 CLIPW m3, m8, [pw_pixel_max_10]
820 CLIPW m4, m8, [pw_pixel_max_10]
821 CLIPW m5, m8, [pw_pixel_max_10]
822 CLIPW m6, m8, [pw_pixel_max_10]
823 movdqu [pix0q + strideq], m1; p2
824 movdqu [pix0q + 2 * strideq], m2; p1
825 movdqu [pix0q + src3strideq], m3; p0
826 movdqu [pixq ], m4; q0
827 movdqu [pixq + strideq], m5; q1
828 movdqu [pixq + 2 * strideq], m6; q2
832 cglobal hevc_h_loop_filter_luma_12, 4, 14, 16, pix, stride, beta, tc, pix0, src3stride
833 lea src3strideq, [3 * strideq]
835 sub pix0q, src3strideq
837 movdqu m0, [pix0q]; p3
838 movdqu m1, [pix0q + strideq]; p2
839 movdqu m2, [pix0q + 2 * strideq]; p1
840 movdqu m3, [pix0q + src3strideq]; p0
841 movdqu m4, [pixq]; q0
842 movdqu m5, [pixq + strideq]; q1
843 movdqu m6, [pixq + 2 * strideq]; q2
844 movdqu m7, [pixq + src3strideq]; q3
845 LUMA_DEBLOCK_BODY 12, h
; Same as the 10-bit path but clipped to the 12-bit range.
847 pxor m8, m8; zeros reg
848 CLIPW m1, m8, [pw_pixel_max_12]
849 CLIPW m2, m8, [pw_pixel_max_12]
850 CLIPW m3, m8, [pw_pixel_max_12]
851 CLIPW m4, m8, [pw_pixel_max_12]
852 CLIPW m5, m8, [pw_pixel_max_12]
853 CLIPW m6, m8, [pw_pixel_max_12]
854 movdqu [pix0q + strideq], m1; p2
855 movdqu [pix0q + 2 * strideq], m2; p1
856 movdqu [pix0q + src3strideq], m3; p0
857 movdqu [pixq ], m4; q0
858 movdqu [pixq + strideq], m5; q1
859 movdqu [pixq + 2 * strideq], m6; q2