;*****************************************************************************
;* SSE2-optimized HEVC deblocking code
;*****************************************************************************
;* Copyright (C) 2013 VTT
;*
;* Authors: Seppo Tomperi <seppo.tomperi@vtt.fi>
;*
;* This file is part of FFmpeg.
;*
;* FFmpeg is free software; you can redistribute it and/or
;* modify it under the terms of the GNU Lesser General Public
;* License as published by the Free Software Foundation; either
;* version 2.1 of the License, or (at your option) any later version.
;*
;* FFmpeg is distributed in the hope that it will be useful,
;* but WITHOUT ANY WARRANTY; without even the implied warranty of
;* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
;* Lesser General Public License for more details.
;*
;* You should have received a copy of the GNU Lesser General Public
;* License along with FFmpeg; if not, write to the Free Software
;* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
;******************************************************************************
%include "libavutil/x86/x86util.asm"

pw_pixel_max: times 8 dw ((1 << 10)-1)
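; pw_pixel_max is the clamp value used by CLIPW in the 10 bit paths:
; (1 << 10) - 1 = 1023, the largest representable 10 bit sample value.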
; expands to [base],...,[base+7*stride]
%define PASS8ROWS(base, base3, stride, stride3) \
    [base], [base+stride], [base+stride*2], [base3], \
    [base3+stride], [base3+stride*2], [base3+stride3], [base3+stride*4]
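; callers pass base3 == base + 3*stride and stride3 == 3*stride, so e.g.
; PASS8ROWS(r4, r0, r1, r5) with r0 = r4 + 3*r1 and r5 = 3*r1 expands to
; [r4], [r4+r1], [r4+r1*2], ..., [r4+7*r1] -- one memory operand per row.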
; in: 8 rows of 4 bytes in %1..%8
; out: 4 rows of 8 words in m0..m3
%macro TRANSPOSE4x8B_LOAD 8
; in: 4 rows of 8 words in m0..m3
; out: 8 rows of 4 bytes in %1..%8
%macro TRANSPOSE8x4B_STORE 8
; in: 8 rows of 4 words in %1..%8
; out: 4 rows of 8 words in m0..m3
%macro TRANSPOSE4x8W_LOAD 8
    punpckhqdq m1, m0, m4
    punpckhqdq m3, m2, m6

; in: 4 rows of 8 words in m0..m3
; out: 8 rows of 4 words in %1..%8
%macro TRANSPOSE8x4W_STORE 8
    pxor m5, m5; zeros reg
    CLIPW m0, m5, [pw_pixel_max]
    CLIPW m1, m5, [pw_pixel_max]
    CLIPW m2, m5, [pw_pixel_max]
    CLIPW m3, m5, [pw_pixel_max]
; in: 8 rows of 8 bytes in %1..%8
; out: 8 rows of 8 words in m0..m7
%macro TRANSPOSE8x8B_LOAD 8
    punpckldq m1, m3, m9; 0, 1
    punpckhdq m3, m9; 2, 3
    punpckldq m5, m7, m4; 4, 5
    punpckhdq m7, m4; 6, 7
    punpcklbw m0, m1, m13; 0 in 16 bit
    punpckhbw m1, m13; 1 in 16 bit
    punpcklbw m2, m3, m13; 2
    punpcklbw m4, m5, m13; 4
    punpcklbw m6, m7, m13; 6
; in: 8 rows of 8 words in m0..m7
; out: 8 rows of 8 bytes in %1..%8
%macro TRANSPOSE8x8B_STORE 8
    punpckhdq m10, m0, m4; 2, 3
    punpckldq m0, m4; 0, 1
    punpckldq m11, m8, m9; 4, 5
    punpckhdq m8, m9; 6, 7
; in: 8 rows of 8 words in %1..%8
; out: 8 rows of 8 words in m0..m7
%macro TRANSPOSE8x8W_LOAD 8
    TRANSPOSE8x8W 0, 1, 2, 3, 4, 5, 6, 7, 8
; in: 8 rows of 8 words in m0..m7
; out: 8 rows of 8 words in %1..%8
%macro TRANSPOSE8x8W_STORE 8
    TRANSPOSE8x8W 0, 1, 2, 3, 4, 5, 6, 7, 8
    CLIPW m0, m8, [pw_pixel_max]
    CLIPW m1, m8, [pw_pixel_max]
    CLIPW m2, m8, [pw_pixel_max]
    CLIPW m3, m8, [pw_pixel_max]
    CLIPW m4, m8, [pw_pixel_max]
    CLIPW m5, m8, [pw_pixel_max]
    CLIPW m6, m8, [pw_pixel_max]
    CLIPW m7, m8, [pw_pixel_max]
    pand %2, m11 ; and mask
    pandn m10, m11, %1; and -mask

; mask in %3, will be clobbered
%macro MASKED_COPY2 3
    pand %2, %3 ; and mask
    pandn %3, %1; and -mask
; input: p1, p0, q0, q1 in m0..m3 and the two tc values at r2. Output in m1 and m2
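; Scalar reference of the chroma deblocking step this macro vectorizes
; (the HEVC chroma weak filter, as in the C deblocking code):
;   delta0 = av_clip((((q0 - p0) * 4) + p1 - q1 + 4) >> 3, -tc, tc)
;   p0'    = av_clip_pixel(p0 + delta0)
;   q0'    = av_clip_pixel(q0 - delta0)
; Each xmm register holds 8 adjacent pixel positions, i.e. two 4-pixel
; segments, one per tc value (tc0 in the low half, tc1 in the high half).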
%macro CHROMA_DEBLOCK_BODY 1
    psubw m4, m2, m1; q0 - p0
    psubw m5, m0, m3; p1 - q1
    shufps m6, m7, 0; tc0, tc1
    pcmpeqw m7, m7; set all bits to 1
    pxor m4, m6, m7; flip all bits of first reg
    psrlw m7, 15; 1 in every cell
    paddw m4, m7; -tc0, -tc1
    psllw m7, 2; 4 in every cell
    psllw m4, %1-8; << (BIT_DEPTH - 8)
    psllw m6, %1-8; << (BIT_DEPTH - 8)
    paddw m1, m5; p0 + delta0
    psubw m2, m5; q0 - delta0

; input: p3, p2, p1, p0, q0, q1, q2, q3 in m0..m7, betas at r2 and tcs at r3. Output in m1...m6
%macro LUMA_DEBLOCK_BODY 2
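; Scalar reference for the local activity measures computed below (HEVC
; notation; the same values the C deblocking code derives per block edge):
;   dp0 = |p2,0 - 2*p1,0 + p0,0|    dq0 = |q2,0 - 2*q1,0 + q0,0|
;   dp3 = |p2,3 - 2*p1,3 + p0,3|    dq3 = |q2,3 - 2*q1,3 + q0,3|
;   d0  = dp0 + dq0                 d3  = dp3 + dq3
; The edge is filtered at all only when d0 + d3 < beta. The SIMD code
; evaluates two 4-pixel segments at once, hence the 0*/1* pairs in the
; comments.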
    ABS1 m10, m11 ; 0dp0, 0dp3, 1dp0, 1dp3
    ABS1 m11, m13 ; 0dq0, 0dq3, 1dq0, 1dq3
    movd m13, r11d; beta0
    movd m14, r12d; beta1
    shufps m13, m14, 0; beta0, beta1
;end beta calculations
    paddw m9, m10, m11; 0d0, 0d3, 1d0, 1d3
    pshufhw m14, m9, 0x0f ;0b00001111; 0d3 0d3 0d0 0d0 in high
    pshuflw m14, m14, 0x0f ;0b00001111; 1d3 1d3 1d0 1d0 in low
    pshufhw m9, m9, 0xf0 ;0b11110000; 0d0 0d0 0d3 0d3
    pshuflw m9, m9, 0xf0 ;0b11110000; 1d0 1d0 1d3 1d3
    paddw m14, m9; 0d0+0d3, 1d0+1d3
    pcmpgtw m15, m13, m14; beta0, beta1
    movmskps r13, m15; filtering mask 0d0 + 0d3 < beta0 (bit 2 or 3), 1d0 + 1d3 < beta1 (bit 0 or 1)

;weak / strong decision compare to beta_2
    psraw m15, m13, 2; beta >> 2
    pcmpgtw m15, m8; (d0 << 1) < beta_2, (d3 << 1) < beta_2
;end weak / strong decision

; weak filter nd_p/q calculation
    and r7, 0xffff; 1dp0 + 1dp3
    and r8, 0xffff; 0dp0 + 0dp3
    and r9, 0xffff; 1dq0 + 1dq3
    and r10, 0xffff; 0dq0 + 0dq3
; end calc for weak filter

    pcmpeqd m15, m15; set all bits to 1
    psrld m15, 31; set to 32bit 1
    pcmpeqd m11, m15; filtering mask

;decide between strong and weak filtering
    add r2d, r3d; tc0 + tc1
    shufps m8, m9, 0; tc0, tc1
    pavgw m8, m9; tc25 = ((tc * 5 + 1) >> 1)
;end tc25 calculations
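; Strong filtering is chosen (per 4-pixel segment) only when, for i in {0,3}:
;   2*d_i < (beta >> 2)
;   |p3,i - p0,i| + |q0,i - q3,i| < (beta >> 3)
;   |p0,i - q0,i| < tc25, with tc25 = (tc * 5 + 1) >> 1
; The beta_2, beta_3 and tc25 comparisons below implement these three tests
; and are AND-ed together into the strong mask.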
;----beta_3 comparison-----
    psubw m12, m0, m3; p3 - p0
    ABS1 m12, m14; abs(p3 - p0)
    psubw m15, m7, m4; q3 - q0
    ABS1 m15, m14; abs(q3 - q0)
    paddw m12, m15; abs(p3 - p0) + abs(q3 - q0)
    pshufhw m12, m12, 0xf0 ;0b11110000;
    pshuflw m12, m12, 0xf0 ;0b11110000;
    psraw m13, 3; beta >> 3
    and r14, r2; strong mask, beta_2 and beta_3 comparisons
;----beta_3 comparison end-----

;----tc25 comparison---
    psubw m12, m3, m4; p0 - q0
    ABS1 m12, m14; abs(p0 - q0)
    pshufhw m12, m12, 0xf0 ;0b11110000;
    pshuflw m12, m12, 0xf0 ;0b11110000;
    pcmpgtw m8, m12; tc25 comparisons
    and r14, r2; strong mask, beta_2, beta_3 and tc25 comparisons
;----tc25 comparison end---

    and r14, r2; strong mask, bits 2 and 0
    pcmpeqw m13, m13; set all bits to 1
    pxor m14, m9, m13; invert bits
    psrlw m13, 15; 1 in every cell
    psllw m14, 1; -tc * 2

    mov r2, r14; strong mask
    movd m12, r14d; store to xmm for mask generation
    movd m10, r2d; store to xmm for mask generation
    or r14, r2; final strong mask, bits 1 and 0

    pcmpeqd m12, m12; set all bits to 1
    psrld m12, 31; set to 32bit 1
    pcmpeqd m10, m12; strong mask
    psllw m13, 2; 4 in every cell
    pand m11, m10; combine filtering mask and strong mask
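; Strong filter outputs computed below (scalar reference; each result is
; clipped so that it stays within +/- 2*tc of the original sample):
;   p0' = (p2 + 2*p1 + 2*p0 + 2*q0 + q1 + 4) >> 3
;   p1' = (p2 + p1 + p0 + q0 + 2) >> 2
;   p2' = (2*p3 + 3*p2 + p1 + p0 + q0 + 4) >> 3
;   q0' = (p1 + 2*p0 + 2*q0 + 2*q1 + q2 + 4) >> 3
;   q1' = (p0 + q0 + q1 + q2 + 2) >> 2
;   q2' = (2*q3 + 3*q2 + q1 + q0 + p0 + 4) >> 3
; The code accumulates each sum in a register, clips the delta to the
; +/- 2*tc range and applies it with the MASKED_COPY macros.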
    paddw m12, m2, m3; p1 + p0
    paddw m12, m4; p1 + p0 + q0
    psllw m12, 1; 2*p1 + 2*p0 + 2*q0
    paddw m12, m1; p2 + 2*p1 + 2*p0 + 2*q0
    paddw m12, m5; p2 + 2*p1 + 2*p0 + 2*q0 + q1
    paddw m12, m13; p2 + 2*p1 + 2*p0 + 2*q0 + q1 + 4
    psraw m12, 3; ((p2 + 2*p1 + 2*p0 + 2*q0 + q1 + 4) >> 3)
    psubw m12, m3; ((p2 + 2*p1 + 2*p0 + 2*q0 + q1 + 4) >> 3) - p0
    pminsw m12, m9; av_clip( , -2 * tc, 2 * tc)

    paddw m15, m1, m10; p2 + p1 + p0 + q0
    psrlw m13, 1; 2 in every cell
    paddw m15, m13; p2 + p1 + p0 + q0 + 2
    psraw m15, 2; (p2 + p1 + p0 + q0 + 2) >> 2
    psubw m15, m2; ((p2 + p1 + p0 + q0 + 2) >> 2) - p1
    pminsw m15, m9; av_clip( , -2 * tc, 2 * tc)

    paddw m8, m1, m0; p3 + p2
    psllw m8, 1; 2*p3 + 2*p2
    paddw m8, m1; 2*p3 + 3*p2
    paddw m8, m10; 2*p3 + 3*p2 + p1 + p0 + q0
    psllw m13, 1; 4 in every cell
    paddw m8, m13; 2*p3 + 3*p2 + p1 + p0 + q0 + 4
    psraw m8, 3; (2*p3 + 3*p2 + p1 + p0 + q0 + 4) >> 3
    psubw m8, m1; ((2*p3 + 3*p2 + p1 + p0 + q0 + 4) >> 3) - p2
    pminsw m8, m9; av_clip( , -2 * tc, 2 * tc)

    paddw m8, m3, m4; p0 + q0
    paddw m8, m5; p0 + q0 + q1
    psllw m8, 1; 2*p0 + 2*q0 + 2*q1
    paddw m8, m2; p1 + 2*p0 + 2*q0 + 2*q1
    paddw m8, m6; p1 + 2*p0 + 2*q0 + 2*q1 + q2
    paddw m8, m13; p1 + 2*p0 + 2*q0 + 2*q1 + q2 + 4
    psraw m8, 3; (p1 + 2*p0 + 2*q0 + 2*q1 + q2 + 4) >> 3
    pminsw m8, m9; av_clip( , -2 * tc, 2 * tc)

    paddw m15, m3, m4; p0 + q0
    paddw m15, m5; p0 + q0 + q1
    paddw m15, m6; p0 + q0 + q1 + q2
    psrlw m13, 1; 2 in every cell
    paddw m15, m13; p0 + q0 + q1 + q2 + 2
    psraw m15, 2; (p0 + q0 + q1 + q2 + 2) >> 2
    psubw m15, m5; ((p0 + q0 + q1 + q2 + 2) >> 2) - q1
    pminsw m15, m9; av_clip( , -2 * tc, 2 * tc)

    paddw m13, m7; q3 + 2
    paddw m13, m6; q3 + q2 + 2
    psllw m13, 1; 2*q3 + 2*q2 + 4
    paddw m13, m6; 2*q3 + 3*q2 + 4
    paddw m13, m10; 2*q3 + 3*q2 + q1 + q0 + p0 + 4
    psraw m13, 3; (2*q3 + 3*q2 + q1 + q0 + p0 + 4) >> 3
    psubw m13, m6; ((2*q3 + 3*q2 + q1 + q0 + p0 + 4) >> 3) - q2
    pminsw m13, m9; av_clip( , -2 * tc, 2 * tc)

    not r14; strong mask -> weak mask
    and r14, r13; final weak filtering mask, bits 0 and 1
; weak filtering mask
    pcmpeqd m12, m12; set all bits to 1
    psrld m12, 31; set to 32bit 1
    pcmpeqd m11, m12; filtering mask
    shr r11, 3; (beta0+(beta0>>1))>>3
    shr r12, 3; (beta1+(beta1>>1))>>3

    pcmpeqw m13, m13; set all bits to 1
    psrlw m13, 15; 1 in every cell
    psllw m13, 3; 8 in every cell
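; Weak filter, scalar reference for the delta computed below:
;   delta0 = (9 * (q0 - p0) - 3 * (q1 - p1) + 8) >> 4
; delta0 is only used when |delta0| < 10 * tc, and is then clipped to
; [-tc, tc] before being added to p0 and subtracted from q0.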
    psubw m12, m4, m3 ; q0 - p0
    psllw m10, m12, 3; 8 * (q0 - p0)
    paddw m12, m10 ; 9 * (q0 - p0)

    psubw m10, m5, m2 ; q1 - p1
    psllw m8, m10, 1; 2 * (q1 - p1)
    paddw m10, m8; 3 * (q1 - p1)
    psubw m12, m10; 9 * (q0 - p0) - 3 * (q1 - p1)
    psraw m12, 4; >> 4, delta0
    PABSW m13, m12; abs(delta0)

    psllw m10, m9, 2; 8 * tc
    paddw m10, m9; 10 * tc

    psraw m9, 1; tc * 2 -> tc
    psraw m14, 1; -tc * 2 -> -tc
    pminsw m12, m9; av_clip(delta0, -tc, tc)

    pcmpeqw m13, m13; set all bits to 1
    psraw m9, 1; tc -> tc / 2
    pxor m14, m9, m13; ~(tc / 2)
    psrlw m13, 15; 1 in every cell
    paddw m14, m13; add 1 -> -tc / 2
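; Scalar reference for the weak p1/q1 updates computed below:
;   deltap1 = av_clip((((p2 + p0 + 1) >> 1) - p1 + delta0) >> 1, -tc/2, tc/2)
;   deltaq1 = av_clip((((q2 + q0 + 1) >> 1) - q1 - delta0) >> 1, -tc/2, tc/2)
; p1' = p1 + deltap1 and q1' = q1 + deltaq1, each applied only when the
; corresponding nd_p / nd_q side condition holds.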
    pavgw m15, m1, m3; (p2 + p0 + 1) >> 1
    psubw m15, m2; ((p2 + p0 + 1) >> 1) - p1
    paddw m15, m12; ((p2 + p0 + 1) >> 1) - p1 + delta0
    psraw m15, 1; (((p2 + p0 + 1) >> 1) - p1 + delta0) >> 1
    pminsw m15, m9; av_clip(deltap1, -tc/2, tc/2)

    movd m10, r11d; beta0 threshold, (beta0+(beta0>>1))>>3
    movd m13, r12d; beta1 threshold, (beta1+(beta1>>1))>>3
    shufps m10, m13, 0; beta0, beta1 thresholds
    movd m13, r7d; 1dp0 + 1dp3
    movd m8, r8d; 0dp0 + 0dp3
;end beta calculations
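; Side conditions: p1 is only modified when dp0 + dp3 is below
; (beta + (beta >> 1)) >> 3 (computed into r11d/r12d above), and q1 likewise
; with dq0 + dq3; the comparisons below turn this into the write masks used
; by MASKED_COPY2.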
    MASKED_COPY2 m2, m15, m8; write p1'

    pavgw m8, m6, m4; (q2 + q0 + 1) >> 1
    psubw m8, m5; ((q2 + q0 + 1) >> 1) - q1
    psubw m8, m12; ((q2 + q0 + 1) >> 1) - q1 - delta0
    psraw m8, 1; (((q2 + q0 + 1) >> 1) - q1 - delta0) >> 1
    pminsw m8, m9; av_clip(deltaq1, -tc/2, tc/2)
    shufps m13, m15, 0; dq0 + dq3
    pcmpgtw m10, m13; compare to ((beta+(beta>>1))>>3)
    MASKED_COPY2 m5, m8, m10; write q1'

    paddw m15, m3, m12 ; p0 + delta0
    psubw m8, m4, m12 ; q0 - delta0

;-----------------------------------------------------------------------------
; void ff_hevc_v_loop_filter_chroma(uint8_t *_pix, ptrdiff_t _stride, int *_tc, uint8_t *_no_p, uint8_t *_no_q)
;-----------------------------------------------------------------------------
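; The vertical chroma filter works on a transposed block: 8 rows of the 4
; pixels around the edge are loaded and transposed so that p1/p0/q0/q1 end up
; in m0..m3, the shared CHROMA_DEBLOCK_BODY is run, and the result is
; transposed back before storing.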
cglobal hevc_v_loop_filter_chroma_8, 3, 6, 8
    TRANSPOSE4x8B_LOAD PASS8ROWS(r4, r0, r1, r5)
    CHROMA_DEBLOCK_BODY 8
    TRANSPOSE8x4B_STORE PASS8ROWS(r4, r0, r1, r5)

cglobal hevc_v_loop_filter_chroma_10, 3, 6, 8
    TRANSPOSE4x8W_LOAD PASS8ROWS(r4, r0, r1, r5)
    CHROMA_DEBLOCK_BODY 10
    TRANSPOSE8x4W_STORE PASS8ROWS(r4, r0, r1, r5)

;-----------------------------------------------------------------------------
; void ff_hevc_h_loop_filter_chroma(uint8_t *_pix, ptrdiff_t _stride, int *_tc, uint8_t *_no_p, uint8_t *_no_q)
;-----------------------------------------------------------------------------
cglobal hevc_h_loop_filter_chroma_8, 3, 6, 8
    pxor m5, m5; zeros reg
    CHROMA_DEBLOCK_BODY 8
    packuswb m1, m1 ; p0' packed in bytes on low quadword
    packuswb m2, m2 ; q0' packed in bytes on low quadword

cglobal hevc_h_loop_filter_chroma_10, 3, 6, 8
    movdqu m1, [r5+r1]; p0
    movdqu m3, [r0+r1]; q1
    CHROMA_DEBLOCK_BODY 10
    pxor m5, m5; zeros reg
    CLIPW m1, m5, [pw_pixel_max]
    CLIPW m2, m5, [pw_pixel_max]

;-----------------------------------------------------------------------------
; void ff_hevc_v_loop_filter_luma(uint8_t *_pix, ptrdiff_t _stride, int *_beta, int *_tc, uint8_t *_no_p, uint8_t *_no_q);
;-----------------------------------------------------------------------------
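; As with chroma, the vertical luma filter transposes an 8x8 block so that
; p3..q3 land in m0..m7, runs the shared LUMA_DEBLOCK_BODY, and transposes
; the filtered rows back before storing.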
cglobal hevc_v_loop_filter_luma_8, 4, 15, 16, pix, stride, beta, tc
    TRANSPOSE8x8B_LOAD PASS8ROWS(r6, r0, r1, r5)
    LUMA_DEBLOCK_BODY 8, v
    TRANSPOSE8x8B_STORE PASS8ROWS(r6, r0, r1, r5)

cglobal hevc_v_loop_filter_luma_10, 4, 15, 16, pix, stride, beta, tc
    TRANSPOSE8x8W_LOAD PASS8ROWS(r6, pixq, strideq, r5)
    LUMA_DEBLOCK_BODY 10, v
    TRANSPOSE8x8W_STORE PASS8ROWS(r6, r0, r1, r5)

;-----------------------------------------------------------------------------
; void ff_hevc_h_loop_filter_luma(uint8_t *_pix, ptrdiff_t _stride, int *_beta, int *_tc, uint8_t *_no_p, uint8_t *_no_q);
;-----------------------------------------------------------------------------
cglobal hevc_h_loop_filter_luma_8, 4, 15, 16, pix, stride, beta, tc, count, pix0, src3stride
    lea src3strideq, [3*strideq]
    sub pix0q, src3strideq
    movdqu m0, [pix0q]; p3
    movdqu m1, [pix0q+strideq]; p2
    movdqu m2, [pix0q+2*strideq]; p1
    movdqu m3, [pix0q+src3strideq]; p0
    movdqu m4, [pixq]; q0
    movdqu m5, [pixq+strideq]; q1
    movdqu m6, [pixq+2*strideq]; q2
    movdqu m7, [pixq+src3strideq]; q3
    LUMA_DEBLOCK_BODY 8, h
    movq [r5+2*r1], m2; p1
    movq [r0+2*r1], m6; q2

cglobal hevc_h_loop_filter_luma_10, 4, 15, 16, pix, stride, beta, tc, count, pix0, src3stride
    lea src3strideq, [3*strideq]
    sub pix0q, src3strideq
    movdqu m0, [pix0q]; p3
    movdqu m1, [pix0q+strideq]; p2
    movdqu m2, [pix0q+2*strideq]; p1
    movdqu m3, [pix0q+src3strideq]; p0
    movdqu m4, [pixq]; q0
    movdqu m5, [pixq+strideq]; q1
    movdqu m6, [pixq+2*strideq]; q2
    movdqu m7, [pixq+src3strideq]; q3
    LUMA_DEBLOCK_BODY 10, h
    pxor m8, m8; zeros reg
    CLIPW m1, m8, [pw_pixel_max]
    CLIPW m2, m8, [pw_pixel_max]
    CLIPW m3, m8, [pw_pixel_max]
    CLIPW m4, m8, [pw_pixel_max]
    CLIPW m5, m8, [pw_pixel_max]
    CLIPW m6, m8, [pw_pixel_max]
    movdqu [pix0q+strideq], m1; p2
    movdqu [pix0q+2*strideq], m2; p1
    movdqu [pix0q+src3strideq], m3; p0
    movdqu [pixq], m4; q0
    movdqu [pixq+strideq], m5; q1
    movdqu [pixq+2*strideq], m6; q2