;******************************************************************************
;* MMX/SSSE3-optimized functions for H.264 chroma MC
;* Copyright (c) 2005 Zoltan Hidvegi <hzoli -a- hzoli -d- com>,
;*               2005-2008 Loren Merritt
;*
;* This file is part of FFmpeg.
;*
;* FFmpeg is free software; you can redistribute it and/or
;* modify it under the terms of the GNU Lesser General Public
;* License as published by the Free Software Foundation; either
;* version 2.1 of the License, or (at your option) any later version.
;*
;* FFmpeg is distributed in the hope that it will be useful,
;* but WITHOUT ANY WARRANTY; without even the implied warranty of
;* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
;* Lesser General Public License for more details.
;*
;* You should have received a copy of the GNU Lesser General Public
;* License along with FFmpeg; if not, write to the Free Software
;* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
;******************************************************************************
%include "libavutil/x86/x86util.asm"

rnd_rv40_2d_tbl: times 4 dw 0
rnd_rv40_1d_tbl: times 4 dw 0
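; Each 8-byte row of the rnd_rv40_*_tbl tables above holds one rounding
; bias splatted across four words; a row is selected via rnd_bias (see
; the rnd_1d_rv40/rnd_2d_rv40 defines below).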
%macro mv0_pixels_mc8 0
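    ; mx == my == 0 fast path: no interpolation needed, so copy (and,
    ; for the avg variants, average with dst) two rows of 8 pixels per
    ; iteration.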
    CHROMAMC_AVG mm0, [r0   ]
    CHROMAMC_AVG mm1, [r0+r2]
    CHROMAMC_AVG mm0, [r0   ]
    CHROMAMC_AVG mm1, [r0+r2]
%macro chroma_mc8_mmx_func 2-3
%define rnd_1d_rv40 r8
%define rnd_2d_rv40 r8
%define rnd_1d_rv40 rnd_rv40_1d_tbl
%define rnd_2d_rv40 rnd_rv40_2d_tbl
; void ff_put/avg_h264_chroma_mc8_*(uint8_t *dst /* align 8 */,
;                                   uint8_t *src /* align 1 */,
;                                   ptrdiff_t stride, int h, int mx, int my)
cglobal %1_%2_chroma_mc8%3, 6, 7 + extra_regs, 0
    jne .at_least_one_non_zero
    ; mx == 0 AND my == 0 - no filter needed
.at_least_one_non_zero:
    and           r7, 6           ; &~1 for mx/my=[0,7]
    and           r0, 6           ; &~1 for mx/my=[0,7]
    mov           r6, r2          ; dxy = x ? 1 : stride
    ; mx == 0 XOR my == 0 - 1 dimensional filter only
    lea           r8, [rnd_rv40_1d_tbl]
    movq          m6, [rnd_1d_%2+rnd_bias*8] ; mm6 = rnd >> 3
    punpckldq     m5, m5          ; mm5 = B = x
    psubw         m4, m5          ; mm4 = A = 8-x
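    ; 1-D filter: each output is a two-tap weighted sum,
    ;   dst[i] = (A*src[i] + B*src[i+dxy] + rnd) >> 3
    ; with A = 8-x and B = x (or y for the vertical case), so that
    ; A+B = 8 and the >> 3 renormalizes.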
    movq          m0, [r1   ]     ; mm0 = src[0..7]
    movq          m2, [r1+r6]     ; mm2 = src[1..8]
    pmullw        m0, m4          ; [mm0,mm1] = A * src[0..7]
    pmullw        m2, m5          ; [mm2,mm3] = B * src[1..8]
    CHROMAMC_AVG  m0, [dest_reg]
    movq [dest_reg], m0           ; dst[0..7] = (A * src[0..7] + B * src[1..8] + (rnd >> 3)) >> 3
.both_non_zero: ; general case, bilinear
    lea           r8, [rnd_rv40_2d_tbl]
    mov           r6, rsp         ; backup stack pointer
    and          rsp, ~(mmsize-1) ; align stack
    sub          rsp, 16          ; AA and DD
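    ; the 16 bytes of aligned scratch hold the A and D weight vectors
    ; (DD is stored at [rsp+8] below) for reuse across the row loop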
    punpckldq     m4, m4          ; mm4 = x words
    punpckldq     m6, m6          ; mm6 = y words
    pmullw        m4, m6          ; mm4 = x * y
    movq     [rsp+8], m4          ; DD = x * y
    psubw         m5, m4          ; mm5 = B = 8x - xy
    psubw         m6, m4          ; mm6 = C = 8y - xy
    psubw         m4, m7          ; mm4 = A = xy - (8x+8y) + 64
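    ; Bilinear weights, all in terms of x and y:
    ;   A = (8-x)*(8-y) = 64 - 8x - 8y + xy
    ;   B =    x *(8-y) =  8x - xy
    ;   C = (8-x)* y    =  8y - xy
    ;   D =    x * y
    ; A+B+C+D = 64, hence the final >> 6.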
    movq          m0, [r1  ]      ; mm0 = src[0..7]
    movq          m1, [r1+1]      ; mm1 = src[1..8]
    paddw         m2, m1          ; mm2 = A * src[0..3] + B * src[1..4]
    paddw         m3, m0          ; mm3 = A * src[4..7] + B * src[5..8]
    paddw         m3, m1          ; [mm2,mm3] += C * src[0..7]
    paddw         m3, m4          ; [mm2,mm3] += D * src[1..8]
    paddw         m2, [rnd_2d_%2+rnd_bias*8]
    paddw         m3, [rnd_2d_%2+rnd_bias*8]
    CHROMAMC_AVG  m2, [dest_reg]
    movq [dest_reg], m2           ; dst[0..7] = ([mm2,mm3] + rnd) >> 6
    mov          rsp, r6          ; restore stack pointer
%macro chroma_mc4_mmx_func 2
cglobal %1_%2_chroma_mc4, 6, 6 + extra_regs, 0
    lea           r6, [rnd_rv40_2d_tbl]
%define rnd_2d_rv40 r6
%define rnd_2d_rv40 rnd_rv40_2d_tbl
    and           r5, 6           ; &~1 for mx/my=[0,7]
    paddw         m6, [rnd_2d_%2+rnd_bias*8]
    CHROMAMC_AVG4 m1, m6, [r0]
    paddw         m0, [rnd_2d_%2+rnd_bias*8]
    CHROMAMC_AVG4 m1, m0, [r0]
%macro chroma_mc2_mmx_func 2
cglobal %1_%2_chroma_mc2, 6, 7, 0
    imul         r5d, r4d         ; x*y<<16 | y*(8-x)
    sub          r4d, r5d         ; x*(8-y)<<16 | (8-x)*(8-y)
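    ; Each register now packs two 16-bit weights per dword, so a single
    ; pmaddwd below applies both taps of a pair and sums the products
    ; per dword.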
    punpckldq     m5, m5          ; mm5 = {A,B,A,B}
    punpckldq     m6, m6          ; mm6 = {C,D,C,D}
    pshufw        m2, m2, 0x94    ; mm2 = src[0,1,1,2]
    pmaddwd       m1, m5          ; mm1 = A * src[0,1] + B * src[1,2]
    pshufw        m0, m0, 0x94    ; mm0 = src[0,1,1,2]
    paddw         m1, [rnd_2d_%2]
    paddw         m1, m0          ; mm1 += C * src[0,1] + D * src[1,2]
    CHROMAMC_AVG4 m1, m3, [r0]
%define rnd_1d_h264 pw_4
%define rnd_2d_h264 pw_32
%define rnd_1d_vc1  pw_3
%define rnd_2d_vc1  pw_28
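; H.264 rounds with (a + 4) >> 3 in the 1-D filter and (a + 32) >> 6 in
; the 2-D filter; the VC-1 "_nornd" variants use the smaller biases
; 3 and 28 instead.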
%define CHROMAMC_AVG  NOTHING
%define CHROMAMC_AVG4 NOTHING
chroma_mc8_mmx_func put, h264, _rnd
chroma_mc8_mmx_func put, vc1,  _nornd
chroma_mc8_mmx_func put, rv40
chroma_mc4_mmx_func put, h264
chroma_mc4_mmx_func put, rv40
chroma_mc2_mmx_func put, h264

%define CHROMAMC_AVG  DIRECT_AVG
%define CHROMAMC_AVG4 COPY_AVG
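; For the "avg" variants, DIRECT_AVG/COPY_AVG average the freshly
; computed pixels with the bytes already at dst; for "put" they are
; defined to NOTHING above and emit no code.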
chroma_mc8_mmx_func avg, h264, _rnd
chroma_mc8_mmx_func avg, vc1,  _nornd
chroma_mc8_mmx_func avg, rv40
chroma_mc4_mmx_func avg, h264
chroma_mc4_mmx_func avg, rv40
chroma_mc2_mmx_func avg, h264

chroma_mc8_mmx_func avg, h264, _rnd
chroma_mc8_mmx_func avg, vc1,  _nornd
chroma_mc8_mmx_func avg, rv40
chroma_mc4_mmx_func avg, h264
chroma_mc4_mmx_func avg, rv40
%macro chroma_mc8_ssse3_func 2-3
cglobal %1_%2_chroma_mc8%3, 6, 7, 8
    jne .at_least_one_non_zero
    ; mx == 0 AND my == 0 - no filter needed
.at_least_one_non_zero:
    ; general case, bilinear
    add           r4, 8           ; x*255+8 = x<<8 | (8-x)
    imul          r6, r4          ; (8-y)*(x*255+8) = (8-y)*x<<8 | (8-y)*(8-x)
    imul         r4d, r5d         ;    y *(x*255+8) =    y *x<<8 |    y *(8-x)
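    ; Packing trick: x*255+8 = x*256 - x + 8 = x<<8 | (8-x), so one
    ; register holds both horizontal taps at once; multiplying it by y
    ; and by 8-y then yields all four bilinear weights as packed bytes.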
    movdqa        m5, [rnd_2d_%2]
    sub           r4, r5          ; 255*x+8 = x<<8 | (8-x)
    movdqa        m6, [rnd_1d_%2]
    sub           r5, r4          ; 255*y+8 = y<<8 | (8-y)
    movdqa        m6, [rnd_1d_%2]
%macro chroma_mc4_ssse3_func 2
cglobal %1_%2_chroma_mc4, 6, 7, 0
    imul         r6d, r4d         ; (8-y)*(x*255+8) = (8-y)*x<<8 | (8-y)*(8-x)
    imul         r4d, r5d         ;    y *(x*255+8) =    y *x<<8 |    y *(8-x)
    punpcklbw     m1, [r1+r2*1+1]
    punpcklbw     m3, [r1+r2*2+1]
    CHROMAMC_AVG  m1, [r0   ]
    CHROMAMC_AVG  m3, [r0+r2]

%define CHROMAMC_AVG NOTHING
chroma_mc8_ssse3_func put, h264, _rnd
chroma_mc8_ssse3_func put, vc1,  _nornd
chroma_mc4_ssse3_func put, h264

%define CHROMAMC_AVG DIRECT_AVG
chroma_mc8_ssse3_func avg, h264, _rnd
chroma_mc8_ssse3_func avg, vc1,  _nornd
chroma_mc4_ssse3_func avg, h264