1 ;*****************************************************************************
2 ;* SSE2-optimized HEVC deblocking code
3 ;*****************************************************************************
4 ;* Copyright (C) 2013 VTT
6 ;* Authors: Seppo Tomperi <seppo.tomperi@vtt.fi>
8 ;* This file is part of Libav.
10 ;* Libav is free software; you can redistribute it and/or
11 ;* modify it under the terms of the GNU Lesser General Public
12 ;* License as published by the Free Software Foundation; either
13 ;* version 2.1 of the License, or (at your option) any later version.
15 ;* Libav is distributed in the hope that it will be useful,
16 ;* but WITHOUT ANY WARRANTY; without even the implied warranty of
17 ;* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
18 ;* Lesser General Public License for more details.
20 ;* You should have received a copy of the GNU Lesser General Public
21 ;* License along with Libav; if not, write to the Free Software
22 ;* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
23 ;******************************************************************************
25 %include "libavutil/x86/x86util.asm"
; Eight words of 1023 = (1 << 10) - 1: the maximum 10-bit sample value.
; Used as the upper clamp bound for CLIPW in the 10-bit store paths below.
29 pw_pixel_max: times 8 dw ((1 << 10)-1)
40 ; expands to [base],...,[base+7*stride]
; NOTE(review): per the comment above, callers are expected to pass
; base3 = base + 3*stride and stride3 = 3*stride, so the final operand
; [base3+stride*4] resolves to [base+7*stride] — confirm at call sites.
41 %define PASS8ROWS(base, base3, stride, stride3) \
42 [base], [base+stride], [base+stride*2], [base3], \
43 [base3+stride], [base3+stride*2], [base3+stride3], [base3+stride*4]
45 ; in: 8 rows of 4 bytes in %4..%11
46 ; out: 4 rows of 8 words in m0..m3
; Loads a 4x8 block of 8-bit pixels and transposes it into word lanes.
; NOTE(review): the macro body is elided in this listing (numbering jumps
; from 47 to 76) — only the header is visible here.
47 %macro TRANSPOSE4x8B_LOAD 8
76 ; in: 4 rows of 8 words in m0..m3
77 ; out: 8 rows of 4 bytes in %1..%8
; Inverse of TRANSPOSE4x8B_LOAD: packs the word rows back to bytes and
; scatters them to the 8 destinations. Body elided in this listing.
78 %macro TRANSPOSE8x4B_STORE 8
107 ; in: 8 rows of 4 words in %4..%11
108 ; out: 4 rows of 8 words in m0..m3
; 10-bit variant of TRANSPOSE4x8B_LOAD: sources are already words.
; NOTE(review): most of the body is elided here; only the final qword
; interleaves that form output rows m1 and m3 are visible.
109 %macro TRANSPOSE4x8W_LOAD 8
130 punpckhqdq m1, m0, m4
132 punpckhqdq m3, m2, m6
137 ; in: 4 rows of 8 words in m0..m3
138 ; out: 8 rows of 4 words in %1..%8
; Clamps the filtered rows to the valid 10-bit range [0, pw_pixel_max]
; before transposing back to memory (transpose/store lines elided here).
139 %macro TRANSPOSE8x4W_STORE 8
140 pxor m5, m5; zeros reg
141 CLIPW m0, m5, [pw_pixel_max]
142 CLIPW m1, m5, [pw_pixel_max]
143 CLIPW m2, m5, [pw_pixel_max]
144 CLIPW m3, m5, [pw_pixel_max]
167 ; in: 8 rows of 8 bytes in %1..%8
168 ; out: 8 rows of 8 words in m0..m7
; Transposes an 8x8 byte block, then widens every row to 16-bit words.
; NOTE(review): lines are elided throughout this body; m13 is presumably
; a zero register for the unsigned byte->word unpacks — confirm upstream.
169 %macro TRANSPOSE8x8B_LOAD 8
190 punpckldq m1, m3, m9; 0, 1
191 punpckhdq m3, m9; 2, 3
193 punpckldq m5, m7, m4; 4, 5
194 punpckhdq m7, m4; 6, 7
198 punpcklbw m0, m1, m13; 0 in 16 bit
199 punpckhbw m1, m13; 1 in 16 bit
201 punpcklbw m2, m3, m13; 2
204 punpcklbw m4, m5, m13; 4
207 punpcklbw m6, m7, m13; 6
212 ; in: 8 rows of 8 words in m0..m8
213 ; out: 8 rows of 8 bytes in %1..%8
; Inverse of TRANSPOSE8x8B_LOAD: repack words to bytes and transpose back.
; Only the dword-interleave stage is visible in this listing.
214 %macro TRANSPOSE8x8B_STORE 8
236 punpckhdq m10, m0, m4; 2, 3
237 punpckldq m0, m4; 0, 1
239 punpckldq m11, m8, m9; 4, 5
240 punpckhdq m8, m9; 6, 7
251 ; in: 8 rows of 8 words in %1..%8
252 ; out: 8 rows of 8 words in m0..m7
; 10-bit load: the heavy lifting is delegated to the shared x86util
; TRANSPOSE8x8W macro (m8 is the scratch register). Loads elided here.
253 %macro TRANSPOSE8x8W_LOAD 8
262 TRANSPOSE8x8W 0, 1, 2, 3, 4, 5, 6, 7, 8
265 ; in: 8 rows of 8 words in m0..m8
266 ; out: 8 rows of 8 words in %1..%8
; Transpose back, then clamp each row to [0, 1023] before storing.
; NOTE(review): m8 is used as the zero lower bound for CLIPW — a pxor m8,m8
; presumably sits in the elided lines between 268 and 271; confirm.
267 %macro TRANSPOSE8x8W_STORE 8
268 TRANSPOSE8x8W 0, 1, 2, 3, 4, 5, 6, 7, 8
271 CLIPW m0, m8, [pw_pixel_max]
272 CLIPW m1, m8, [pw_pixel_max]
273 CLIPW m2, m8, [pw_pixel_max]
274 CLIPW m3, m8, [pw_pixel_max]
275 CLIPW m4, m8, [pw_pixel_max]
276 CLIPW m5, m8, [pw_pixel_max]
277 CLIPW m6, m8, [pw_pixel_max]
278 CLIPW m7, m8, [pw_pixel_max]
; NOTE(review): interior of a masked-copy macro (header elided in this
; listing). Selects lanes: %2 keeps values where mask m11 is set, m10
; collects %1 where it is clear; a por merging them presumably follows.
296 pand %2, m11 ; and mask
297 pandn m10, m11, %1; and -mask
304 ; mask in %3, will be clobbered
; Per-lane select with caller-supplied mask: %2 contributes lanes where
; the mask is set, %1 where it is clear (the merging por is elided here).
305 %macro MASKED_COPY2 3
306 pand %2, %3 ; and mask
307 pandn %3, %1; and -mask
313 ; input in m0 ... m3 and tcs in r2. Output in m1 and m2
; HEVC chroma deblocking core. %1 = bit depth: the tc table values are
; scaled by << (%1 - 8). m0..m3 hold the p1,p0,q0,q1 rows; the delta
; computation between lines 316 and 339 is partially elided in this
; listing — only the tc broadcast/negation and scaling steps are visible.
314 %macro CHROMA_DEBLOCK_BODY 1
315 psubw m4, m2, m1; q0 - p0
316 psubw m5, m0, m3; p1 - q1
326 shufps m6, m7, 0; tc0, tc1
327 pmullw m4, m6, [pw_m1]; -tc0, -tc1
334 psllw m4, %1-8; << (BIT_DEPTH - 8)
335 psllw m6, %1-8; << (BIT_DEPTH - 8)
339 paddw m1, m5; p0 + delta0
340 psubw m2, m5; q0 - delta0
343 ; input in m0 ... m7, beta in r2 tcs in r3. Output in m1...m6
; HEVC luma deblocking core. %1 = bit depth, %2 = direction tag (v/h).
; m0..m7 = rows p3,p2,p1,p0,q0,q1,q2,q3 (two 4-sample segments per xmm).
; Flow: d0/d3 activity vs beta -> filtering mask; strong/weak decision
; (beta_2, beta_3, tc25 tests); strong 6-tap filtering of p2..q2; weak
; delta0 filtering with the p1/q1 side conditions. Many lines are elided
; in this listing, so only the visible steps are annotated below.
344 %macro LUMA_DEBLOCK_BODY 2
348 ABS1 m10, m11 ; 0dp0, 0dp3 , 1dp0, 1dp3
353 ABS1 m11, m13 ; 0dq0, 0dq3 , 1dq0, 1dq3
361 ;end beta calculations
363 paddw m9, m10, m11; 0d0, 0d3 , 1d0, 1d3
; Pair up d0 with d3 per segment so one add yields d0+d3 in every lane.
365 pshufhw m14, m9, q0033 ;0b00001111; 0d3 0d3 0d0 0d0 in high
366 pshuflw m14, m14, q0033 ;0b00001111; 1d3 1d3 1d0 1d0 in low
368 pshufhw m9, m9, q3300 ;0b11110000; 0d0 0d0 0d3 0d3
369 pshuflw m9, m9, q3300 ;0b11110000; 1d0 1d0 1d3 1d3
371 paddw m14, m9; 0d0+0d3, 1d0+1d3
374 pcmpgtw m15, m13, m14
375 movmskps r13, m15 ;filtering mask 0d0 + 0d3 < beta0 (bit 2 or 3) , 1d0 + 1d3 < beta1 (bit 0 or 1)
379 ;weak / strong decision compare to beta_2
380 psraw m15, m13, 2; beta >> 2
382 pcmpgtw m15, m8; (d0 << 1) < beta_2, (d3 << 1) < beta_2
384 ;end weak / strong decision
386 ; weak filter nd_p/q calculation
; Per-segment dp/dq sums are extracted to GPRs for the side conditions.
391 and r7, 0xffff; 1dp0 + 1dp3
394 and r8, 0xffff; 0dp0 + 0dp3
400 and r9, 0xffff; 1dq0 + 1dq3
403 and r10, 0xffff; 0dq0 + 0dq3
404 ; end calc for weak filter
416 pcmpeqd m11, [pd_1]; filtering mask
418 ;decide between strong and weak filtering
431 add r11d, r3d; tc0 + tc1
435 shufps m8, m9, 0; tc0, tc1
438 pavgw m8, m9; tc25 = ((tc * 5 + 1) >> 1)
439 ;end tc25 calculations
441 ;----beta_3 comparison-----
442 psubw m12, m0, m3; p3 - p0
443 ABS1 m12, m14; abs(p3 - p0)
445 psubw m15, m7, m4; q3 - q0
446 ABS1 m15, m14; abs(q3 - q0)
448 paddw m12, m15; abs(p3 - p0) + abs(q3 - q0)
450 pshufhw m12, m12, 0xf0 ;0b11110000;
451 pshuflw m12, m12, 0xf0 ;0b11110000;
453 psraw m13, 3; beta >> 3
456 and r14, r11; strong mask , beta_2 and beta_3 comparisons
457 ;----beta_3 comparison end-----
458 ;----tc25 comparison---
459 psubw m12, m3, m4; p0 - q0
460 ABS1 m12, m14; abs(p0 - q0)
462 pshufhw m12, m12, 0xf0 ;0b11110000;
463 pshuflw m12, m12, 0xf0 ;0b11110000;
465 pcmpgtw m8, m12; tc25 comparisons
467 and r14, r11; strong mask, beta_2, beta_3 and tc25 comparisons
468 ;----tc25 comparison end---
471 and r14, r11; strong mask, bits 2 and 0
473 pmullw m14, m9, [pw_m2]; -tc * 2
477 mov r11, r14; strong mask
479 movd m12, r14d; store to xmm for mask generation
482 movd m10, r11d; store to xmm for mask generation
483 or r14, r11; final strong mask, bits 1 and 0
487 pcmpeqd m10, [pd_1]; strong mask
; ---- strong filtering: p0', p1', p2', q0', q1', q2' ----
; m9 = 2*tc upper clamp, m14 = -2*tc lower clamp (pmaxsw lines elided).
489 mova m13, [pw_4]; 4 in every cell
490 pand m11, m10; combine filtering mask and strong mask
491 paddw m12, m2, m3; p1 + p0
492 paddw m12, m4; p1 + p0 + q0
494 paddw m12, m12; 2*p1 + 2*p0 + 2*q0
495 paddw m12, m1; p2 + 2*p1 + 2*p0 + 2*q0
496 paddw m12, m5; p2 + 2*p1 + 2*p0 + 2*q0 + q1
497 paddw m12, m13; p2 + 2*p1 + 2*p0 + 2*q0 + q1 + 4
498 psraw m12, 3; ((p2 + 2*p1 + 2*p0 + 2*q0 + q1 + 4) >> 3)
499 psubw m12, m3; ((p2 + 2*p1 + 2*p0 + 2*q0 + q1 + 4) >> 3) - p0
501 pminsw m12, m9; av_clip( , -2 * tc, 2 * tc)
504 paddw m15, m1, m10; p2 + p1 + p0 + q0
505 psrlw m13, 1; 2 in every cell
506 paddw m15, m13; p2 + p1 + p0 + q0 + 2
507 psraw m15, 2; (p2 + p1 + p0 + q0 + 2) >> 2
508 psubw m15, m2;((p2 + p1 + p0 + q0 + 2) >> 2) - p1
510 pminsw m15, m9; av_clip( , -2 * tc, 2 * tc)
513 paddw m8, m1, m0; p3 + p2
514 paddw m8, m8; 2*p3 + 2*p2
515 paddw m8, m1; 2*p3 + 3*p2
516 paddw m8, m10; 2*p3 + 3*p2 + p1 + p0 + q0
518 paddw m8, m13; 2*p3 + 3*p2 + p1 + p0 + q0 + 4
519 psraw m8, 3; (2*p3 + 3*p2 + p1 + p0 + q0 + 4) >> 3
520 psubw m8, m1; ((2*p3 + 3*p2 + p1 + p0 + q0 + 4) >> 3) - p2
522 pminsw m8, m9; av_clip( , -2 * tc, 2 * tc)
526 paddw m8, m3, m4; p0 + q0
527 paddw m8, m5; p0 + q0 + q1
528 paddw m8, m8; 2*p0 + 2*q0 + 2*q1
529 paddw m8, m2; p1 + 2*p0 + 2*q0 + 2*q1
530 paddw m8, m6; p1 + 2*p0 + 2*q0 + 2*q1 + q2
531 paddw m8, m13; p1 + 2*p0 + 2*q0 + 2*q1 + q2 + 4
532 psraw m8, 3; (p1 + 2*p0 + 2*q0 + 2*q1 + q2 + 4) >>3
535 pminsw m8, m9; av_clip( , -2 * tc, 2 * tc)
539 paddw m15, m3, m4; p0 + q0
540 paddw m15, m5; p0 + q0 + q1
542 paddw m15, m6; p0 + q0 + q1 + q2
543 psrlw m13, 1; 2 in every cell
544 paddw m15, m13; p0 + q0 + q1 + q2 + 2
545 psraw m15, 2; (p0 + q0 + q1 + q2 + 2) >> 2
546 psubw m15, m5; ((p0 + q0 + q1 + q2 + 2) >> 2) - q1
548 pminsw m15, m9; av_clip( , -2 * tc, 2 * tc)
551 paddw m13, m7; q3 + 2
552 paddw m13, m6; q3 + q2 + 2
553 paddw m13, m13; 2*q3 + 2*q2 + 4
554 paddw m13, m6; 2*q3 + 3*q2 + 4
555 paddw m13, m10; 2*q3 + 3*q2 + q1 + q0 + p0 + 4
556 psraw m13, 3; (2*q3 + 3*q2 + q1 + q0 + p0 + 4) >> 3
557 psubw m13, m6; ((2*q3 + 3*q2 + q1 + q0 + p0 + 4) >> 3) - q2
559 pminsw m13, m9; av_clip( , -2 * tc, 2 * tc)
; ---- weak filtering path ----
568 not r14; strong mask -> weak mask
569 and r14, r13; final weak filtering mask, bits 0 and 1
572 ; weak filtering mask
579 pcmpeqd m11, [pd_1]; filtering mask
584 shr betaq, 3; (beta + (beta >> 1)) >> 3
587 psubw m12, m4, m3 ; q0 - p0
588 psllw m10, m12, 3; 8 * (q0 - p0)
589 paddw m12, m10 ; 9 * (q0 - p0)
591 psubw m10, m5, m2 ; q1 - p1
592 psllw m8, m10, 1; 2 * ( q1 - p1 )
593 paddw m10, m8; 3 * ( q1 - p1 )
594 psubw m12, m10; 9 * (q0 - p0) - 3 * ( q1 - p1 )
596 psraw m12, 4; >> 4 , delta0
597 PABSW m13, m12; abs(delta0)
; m9 holds 2*tc here, so (m9 << 2) + m9 = 10*tc for the |delta0| gate.
600 psllw m10, m9, 2; 8 * tc
601 paddw m10, m9; 10 * tc
605 psraw m9, 1; tc * 2 -> tc
606 psraw m14, 1; -tc * 2 -> -tc
609 pminsw m12, m9; av_clip(delta0, -tc, tc)
611 psraw m9, 1; tc -> tc / 2
612 pmullw m14, m9, [pw_m1]; -tc / 2
614 pavgw m15, m1, m3; (p2 + p0 + 1) >> 1
615 psubw m15, m2; ((p2 + p0 + 1) >> 1) - p1
616 paddw m15, m12; ((p2 + p0 + 1) >> 1) - p1 + delta0
617 psraw m15, 1; (((p2 + p0 + 1) >> 1) - p1 + delta0) >> 1
619 pminsw m15, m9; av_clip(deltap1, -tc/2, tc/2)
626 movd m13, r7d; 1dp0 + 1dp3
627 movd m8, r8d; 0dp0 + 0dp3
633 ;end beta calculations
634 MASKED_COPY2 m2, m15, m8; write p1'
636 pavgw m8, m6, m4; (q2 + q0 + 1) >> 1
637 psubw m8, m5; ((q2 + q0 + 1) >> 1) - q1
638 psubw m8, m12; ((q2 + q0 + 1) >> 1) - q1 - delta0
639 psraw m8, 1; (((q2 + q0 + 1) >> 1) - q1 - delta0) >> 1
641 pminsw m8, m9; av_clip(deltaq1, -tc/2, tc/2)
648 shufps m13, m15, 0; dq0 + dq3
650 pcmpgtw m10, m13; compare to ((beta+(beta>>1))>>3)
652 MASKED_COPY2 m5, m8, m10; write q1'
654 paddw m15, m3, m12 ; p0 + delta0
657 psubw m8, m4, m12 ; q0 - delta0
662 ;-----------------------------------------------------------------------------
663 ; void ff_hevc_v_loop_filter_chroma(uint8_t *_pix, ptrdiff_t _stride, int *_tc,
664 ;                                   uint8_t *_no_p, uint8_t *_no_q);
665 ;-----------------------------------------------------------------------------
; Vertical chroma edge, 8-bit: transpose-load 4x8 pixels around the edge,
; run the shared chroma filter, transpose-store the result.
; NOTE(review): setup lines computing r4/r5 (presumably pix-2 and 3*stride)
; and the RET are elided in this listing.
666 cglobal hevc_v_loop_filter_chroma_8, 3, 6, 8
671 TRANSPOSE4x8B_LOAD PASS8ROWS(r4, r0, r1, r5)
672 CHROMA_DEBLOCK_BODY 8
673 TRANSPOSE8x4B_STORE PASS8ROWS(r4, r0, r1, r5)
; Vertical chroma edge, 10-bit: word-sized transpose load/store around the
; shared filter body; stores clamp to pw_pixel_max inside the store macro.
676 cglobal hevc_v_loop_filter_chroma_10, 3, 6, 8
681 TRANSPOSE4x8W_LOAD PASS8ROWS(r4, r0, r1, r5)
682 CHROMA_DEBLOCK_BODY 10
683 TRANSPOSE8x4W_STORE PASS8ROWS(r4, r0, r1, r5)
686 ;-----------------------------------------------------------------------------
687 ; void ff_hevc_h_loop_filter_chroma(uint8_t *_pix, ptrdiff_t _stride, int *_tc,
688 ;                                   uint8_t *_no_p, uint8_t *_no_q);
689 ;-----------------------------------------------------------------------------
; Horizontal chroma edge, 8-bit: rows load directly (no transpose needed).
; NOTE(review): loads for p1/q0, byte->word widening, and the packed store
; are elided in this listing; r5 presumably points two rows above r0.
690 cglobal hevc_h_loop_filter_chroma_8, 3, 6, 8
695 movh m1, [r5 + r1]; p0
697 movh m3, [r0 + r1]; q1
698 pxor m5, m5; zeros reg
703 CHROMA_DEBLOCK_BODY 8
; Horizontal chroma edge, 10-bit: full-width word loads, then clamp the
; filtered p0/q0 rows to [0, 1023] before the (elided) stores.
709 cglobal hevc_h_loop_filter_chroma_10, 3, 6, 8
714 movdqu m1, [r5+r1]; p0
716 movdqu m3, [r0 + r1]; q1
717 CHROMA_DEBLOCK_BODY 10
718 pxor m5, m5; zeros reg
719 CLIPW m1, m5, [pw_pixel_max]
720 CLIPW m2, m5, [pw_pixel_max]
727 ;-----------------------------------------------------------------------------
728 ; void ff_hevc_v_loop_filter_luma(uint8_t *_pix, ptrdiff_t _stride, int beta,
729 ;                                 int *_tc, uint8_t *_no_p, uint8_t *_no_q);
730 ;-----------------------------------------------------------------------------
; Vertical luma edge, 8-bit: 8x8 transpose load (p3..q3 into m0..m7),
; shared luma filter, 8x8 transpose store. Uses 15 GPRs / 16 xmm regs for
; the mask bookkeeping inside LUMA_DEBLOCK_BODY. Setup/RET elided here.
731 cglobal hevc_v_loop_filter_luma_8, 4, 15, 16, pix, stride, beta, tc
736 TRANSPOSE8x8B_LOAD PASS8ROWS(r6, r0, r1, r5)
737 LUMA_DEBLOCK_BODY 8, v
739 TRANSPOSE8x8B_STORE PASS8ROWS(r6, r0, r1, r5)
; Vertical luma edge, 10-bit: word-sized 8x8 transpose around the filter.
; NOTE(review): loads mix named (pixq/strideq) and numbered (r0/r1)
; register aliases — these refer to the same registers under cglobal.
743 cglobal hevc_v_loop_filter_luma_10, 4, 15, 16, pix, stride, beta, tc
745 lea r5, [3 * strideq]
748 TRANSPOSE8x8W_LOAD PASS8ROWS(r6, pixq, strideq, r5)
749 LUMA_DEBLOCK_BODY 10, v
751 TRANSPOSE8x8W_STORE PASS8ROWS(r6, r0, r1, r5)
755 ;-----------------------------------------------------------------------------
756 ; void ff_hevc_h_loop_filter_luma(uint8_t *_pix, ptrdiff_t _stride, int beta,
757 ;                                 int *_tc, uint8_t *_no_p, uint8_t *_no_q);
758 ;-----------------------------------------------------------------------------
; Horizontal luma edge, 8-bit: pix0 = pix - 3*stride, so m0..m3 hold the
; four rows above the edge (p3..p0) and m4..m7 the rows below (q0..q3).
; NOTE(review): byte->word widening after the loads and the packing stores
; (only two movhps visible) are elided in this listing.
759 cglobal hevc_h_loop_filter_luma_8, 4, 15, 16, pix, stride, beta, tc, count, pix0, src3stride
760 lea src3strideq, [3 * strideq]
762 sub pix0q, src3strideq
764 movdqu m0, [pix0q]; p3
765 movdqu m1, [pix0q + strideq]; p2
766 movdqu m2, [pix0q + 2 * strideq]; p1
767 movdqu m3, [pix0q + src3strideq]; p0
768 movdqu m4, [pixq]; q0
769 movdqu m5, [pixq + strideq]; q1
770 movdqu m6, [pixq + 2 * strideq]; q2
771 movdqu m7, [pixq + src3strideq]; q3
781 LUMA_DEBLOCK_BODY 8, h
787 movhps [r5 + 2 * r1], m1
791 movhps [r0 + 2 * r1], m5
; Horizontal luma edge, 10-bit: same layout as the 8-bit version but with
; word-sized rows; the six filtered rows (p2..q2) are clamped to [0, 1023]
; and stored back unaligned. p3/q3 (m0/m7) are read-only and not rewritten.
795 cglobal hevc_h_loop_filter_luma_10, 4, 15, 16, pix, stride, beta, tc, count, pix0, src3stride
796 lea src3strideq, [3 * strideq]
798 sub pix0q, src3strideq
800 movdqu m0, [pix0q]; p3
801 movdqu m1, [pix0q + strideq]; p2
802 movdqu m2, [pix0q + 2 * strideq]; p1
803 movdqu m3, [pix0q + src3strideq]; p0
804 movdqu m4, [pixq]; q0
805 movdqu m5, [pixq + strideq]; q1
806 movdqu m6, [pixq + 2 * strideq]; q2
807 movdqu m7, [pixq + src3strideq]; q3
808 LUMA_DEBLOCK_BODY 10, h
810 pxor m8, m8; zeros reg
811 CLIPW m1, m8, [pw_pixel_max]
812 CLIPW m2, m8, [pw_pixel_max]
813 CLIPW m3, m8, [pw_pixel_max]
814 CLIPW m4, m8, [pw_pixel_max]
815 CLIPW m5, m8, [pw_pixel_max]
816 CLIPW m6, m8, [pw_pixel_max]
817 movdqu [pix0q + strideq], m1; p2
818 movdqu [pix0q + 2 * strideq], m2; p1
819 movdqu [pix0q + src3strideq], m3; p0
820 movdqu [pixq ], m4; q0
821 movdqu [pixq + strideq], m5; q1
822 movdqu [pixq + 2 * strideq], m6; q2