X-Git-Url: https://git.sesse.net/?a=blobdiff_plain;f=libavcodec%2Fx86%2Fdsputil_mmx.c;h=d23279b389965f17971c5f938833badfff5f8866;hb=d8eda3708023db388d80027a79d5df7ee25a5a3f;hp=b2f389bb6146a033c7ca63c20c4d4ce196c66ff7;hpb=14bc1f24858a8e83a59dd61a88bdd2bc65993e2b;p=ffmpeg diff --git a/libavcodec/x86/dsputil_mmx.c b/libavcodec/x86/dsputil_mmx.c index b2f389bb614..d23279b3899 100644 --- a/libavcodec/x86/dsputil_mmx.c +++ b/libavcodec/x86/dsputil_mmx.c @@ -3,26 +3,27 @@ * Copyright (c) 2000, 2001 Fabrice Bellard * Copyright (c) 2002-2004 Michael Niedermayer * - * This file is part of FFmpeg. + * MMX optimization by Nick Kurshev + * + * This file is part of Libav. * - * FFmpeg is free software; you can redistribute it and/or + * Libav is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * - * FFmpeg is distributed in the hope that it will be useful, + * Libav is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public - * License along with FFmpeg; if not, write to the Free Software + * License along with Libav; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA - * - * MMX optimization by Nick Kurshev */ -#include "libavutil/x86_cpu.h" +#include "libavutil/cpu.h" +#include "libavutil/x86/asm.h" #include "libavcodec/dsputil.h" #include "libavcodec/h264dsp.h" #include "libavcodec/mpegvideo.h" @@ -38,129 +39,137 @@ DECLARE_ALIGNED(8, const uint64_t, ff_bone) = 0x0101010101010101ULL; DECLARE_ALIGNED(8, const uint64_t, ff_wtwo) = 0x0002000200020002ULL; DECLARE_ALIGNED(16, const uint64_t, ff_pdw_80000000)[2] = -{0x8000000080000000ULL, 0x8000000080000000ULL}; - -DECLARE_ALIGNED(8, const uint64_t, ff_pw_3 ) = 0x0003000300030003ULL; -DECLARE_ALIGNED(16, const xmm_reg, ff_pw_4 ) = {0x0004000400040004ULL, 0x0004000400040004ULL}; -DECLARE_ALIGNED(16, const xmm_reg, ff_pw_5 ) = {0x0005000500050005ULL, 0x0005000500050005ULL}; -DECLARE_ALIGNED(16, const xmm_reg, ff_pw_8 ) = {0x0008000800080008ULL, 0x0008000800080008ULL}; -DECLARE_ALIGNED(16, const xmm_reg, ff_pw_9 ) = {0x0009000900090009ULL, 0x0009000900090009ULL}; -DECLARE_ALIGNED(8, const uint64_t, ff_pw_15 ) = 0x000F000F000F000FULL; -DECLARE_ALIGNED(16, const xmm_reg, ff_pw_16 ) = {0x0010001000100010ULL, 0x0010001000100010ULL}; -DECLARE_ALIGNED(16, const xmm_reg, ff_pw_18 ) = {0x0012001200120012ULL, 0x0012001200120012ULL}; -DECLARE_ALIGNED(8, const uint64_t, ff_pw_20 ) = 0x0014001400140014ULL; -DECLARE_ALIGNED(16, const xmm_reg, ff_pw_27 ) = {0x001B001B001B001BULL, 0x001B001B001B001BULL}; -DECLARE_ALIGNED(16, const xmm_reg, ff_pw_28 ) = {0x001C001C001C001CULL, 0x001C001C001C001CULL}; -DECLARE_ALIGNED(16, const xmm_reg, ff_pw_32 ) = {0x0020002000200020ULL, 0x0020002000200020ULL}; -DECLARE_ALIGNED(8, const uint64_t, ff_pw_42 ) = 0x002A002A002A002AULL; -DECLARE_ALIGNED(8, const uint64_t, ff_pw_53 ) = 0x0035003500350035ULL; -DECLARE_ALIGNED(16, const xmm_reg, ff_pw_63 ) = {0x003F003F003F003FULL, 0x003F003F003F003FULL}; -DECLARE_ALIGNED(16, const xmm_reg, ff_pw_64 ) = {0x0040004000400040ULL, 0x0040004000400040ULL}; -DECLARE_ALIGNED(8, const uint64_t, ff_pw_96 ) = 
0x0060006000600060ULL; -DECLARE_ALIGNED(8, const uint64_t, ff_pw_128) = 0x0080008000800080ULL; -DECLARE_ALIGNED(8, const uint64_t, ff_pw_255) = 0x00ff00ff00ff00ffULL; - -DECLARE_ALIGNED(16, const xmm_reg, ff_pb_1 ) = {0x0101010101010101ULL, 0x0101010101010101ULL}; -DECLARE_ALIGNED(16, const xmm_reg, ff_pb_3 ) = {0x0303030303030303ULL, 0x0303030303030303ULL}; -DECLARE_ALIGNED(16, const xmm_reg, ff_pb_4 ) = {0x0404040404040404ULL, 0x0404040404040404ULL}; -DECLARE_ALIGNED(8, const uint64_t, ff_pb_7 ) = 0x0707070707070707ULL; -DECLARE_ALIGNED(8, const uint64_t, ff_pb_1F ) = 0x1F1F1F1F1F1F1F1FULL; -DECLARE_ALIGNED(8, const uint64_t, ff_pb_3F ) = 0x3F3F3F3F3F3F3F3FULL; -DECLARE_ALIGNED(16, const xmm_reg, ff_pb_80 ) = {0x8080808080808080ULL, 0x8080808080808080ULL}; -DECLARE_ALIGNED(8, const uint64_t, ff_pb_81 ) = 0x8181818181818181ULL; -DECLARE_ALIGNED(8, const uint64_t, ff_pb_A1 ) = 0xA1A1A1A1A1A1A1A1ULL; -DECLARE_ALIGNED(16, const xmm_reg, ff_pb_F8 ) = {0xF8F8F8F8F8F8F8F8ULL, 0xF8F8F8F8F8F8F8F8ULL}; -DECLARE_ALIGNED(8, const uint64_t, ff_pb_FC ) = 0xFCFCFCFCFCFCFCFCULL; -DECLARE_ALIGNED(16, const xmm_reg, ff_pb_FE ) = {0xFEFEFEFEFEFEFEFEULL, 0xFEFEFEFEFEFEFEFEULL}; + { 0x8000000080000000ULL, 0x8000000080000000ULL }; + +DECLARE_ALIGNED(16, const xmm_reg, ff_pw_1) = { 0x0001000100010001ULL, 0x0001000100010001ULL }; +DECLARE_ALIGNED(16, const xmm_reg, ff_pw_2) = { 0x0002000200020002ULL, 0x0002000200020002ULL }; +DECLARE_ALIGNED(16, const xmm_reg, ff_pw_3) = { 0x0003000300030003ULL, 0x0003000300030003ULL }; +DECLARE_ALIGNED(16, const xmm_reg, ff_pw_4) = { 0x0004000400040004ULL, 0x0004000400040004ULL }; +DECLARE_ALIGNED(16, const xmm_reg, ff_pw_5) = { 0x0005000500050005ULL, 0x0005000500050005ULL }; +DECLARE_ALIGNED(16, const xmm_reg, ff_pw_8) = { 0x0008000800080008ULL, 0x0008000800080008ULL }; +DECLARE_ALIGNED(16, const xmm_reg, ff_pw_9) = { 0x0009000900090009ULL, 0x0009000900090009ULL }; +DECLARE_ALIGNED(8, const uint64_t, ff_pw_15) = 0x000F000F000F000FULL; +DECLARE_ALIGNED(16, const xmm_reg, ff_pw_16) = { 0x0010001000100010ULL, 0x0010001000100010ULL }; +DECLARE_ALIGNED(16, const xmm_reg, ff_pw_17) = { 0x0011001100110011ULL, 0x0011001100110011ULL }; +DECLARE_ALIGNED(16, const xmm_reg, ff_pw_18) = { 0x0012001200120012ULL, 0x0012001200120012ULL }; +DECLARE_ALIGNED(8, const uint64_t, ff_pw_20) = 0x0014001400140014ULL; +DECLARE_ALIGNED(16, const xmm_reg, ff_pw_27) = { 0x001B001B001B001BULL, 0x001B001B001B001BULL }; +DECLARE_ALIGNED(16, const xmm_reg, ff_pw_28) = { 0x001C001C001C001CULL, 0x001C001C001C001CULL }; +DECLARE_ALIGNED(16, const xmm_reg, ff_pw_32) = { 0x0020002000200020ULL, 0x0020002000200020ULL }; +DECLARE_ALIGNED(8, const uint64_t, ff_pw_42) = 0x002A002A002A002AULL; +DECLARE_ALIGNED(8, const uint64_t, ff_pw_53) = 0x0035003500350035ULL; +DECLARE_ALIGNED(16, const xmm_reg, ff_pw_63) = { 0x003F003F003F003FULL, 0x003F003F003F003FULL }; +DECLARE_ALIGNED(16, const xmm_reg, ff_pw_64) = { 0x0040004000400040ULL, 0x0040004000400040ULL }; +DECLARE_ALIGNED(8, const uint64_t, ff_pw_96) = 0x0060006000600060ULL; +DECLARE_ALIGNED(8, const uint64_t, ff_pw_128) = 0x0080008000800080ULL; +DECLARE_ALIGNED(8, const uint64_t, ff_pw_255) = 0x00ff00ff00ff00ffULL; +DECLARE_ALIGNED(16, const xmm_reg, ff_pw_512) = { 0x0200020002000200ULL, 0x0200020002000200ULL }; +DECLARE_ALIGNED(16, const xmm_reg, ff_pw_1019) = { 0x03FB03FB03FB03FBULL, 0x03FB03FB03FB03FBULL }; + +DECLARE_ALIGNED(16, const xmm_reg, ff_pb_0) = { 0x0000000000000000ULL, 0x0000000000000000ULL }; +DECLARE_ALIGNED(16, const xmm_reg, ff_pb_1) = { 
0x0101010101010101ULL, 0x0101010101010101ULL }; +DECLARE_ALIGNED(16, const xmm_reg, ff_pb_3) = { 0x0303030303030303ULL, 0x0303030303030303ULL }; +DECLARE_ALIGNED(16, const xmm_reg, ff_pb_4) = { 0x0404040404040404ULL, 0x0404040404040404ULL }; +DECLARE_ALIGNED(8, const uint64_t, ff_pb_7) = 0x0707070707070707ULL; +DECLARE_ALIGNED(8, const uint64_t, ff_pb_1F) = 0x1F1F1F1F1F1F1F1FULL; +DECLARE_ALIGNED(8, const uint64_t, ff_pb_3F) = 0x3F3F3F3F3F3F3F3FULL; +DECLARE_ALIGNED(16, const xmm_reg, ff_pb_80) = { 0x8080808080808080ULL, 0x8080808080808080ULL }; +DECLARE_ALIGNED(8, const uint64_t, ff_pb_81) = 0x8181818181818181ULL; +DECLARE_ALIGNED(16, const xmm_reg, ff_pb_A1) = { 0xA1A1A1A1A1A1A1A1ULL, 0xA1A1A1A1A1A1A1A1ULL }; +DECLARE_ALIGNED(16, const xmm_reg, ff_pb_F8) = { 0xF8F8F8F8F8F8F8F8ULL, 0xF8F8F8F8F8F8F8F8ULL }; +DECLARE_ALIGNED(8, const uint64_t, ff_pb_FC) = 0xFCFCFCFCFCFCFCFCULL; +DECLARE_ALIGNED(16, const xmm_reg, ff_pb_FE) = { 0xFEFEFEFEFEFEFEFEULL, 0xFEFEFEFEFEFEFEFEULL }; DECLARE_ALIGNED(16, const double, ff_pd_1)[2] = { 1.0, 1.0 }; DECLARE_ALIGNED(16, const double, ff_pd_2)[2] = { 2.0, 2.0 }; -#define JUMPALIGN() __asm__ volatile (ASMALIGN(3)::) -#define MOVQ_ZERO(regd) __asm__ volatile ("pxor %%" #regd ", %%" #regd ::) +#if HAVE_INLINE_ASM + +#define JUMPALIGN() __asm__ volatile (".p2align 3"::) +#define MOVQ_ZERO(regd) __asm__ volatile ("pxor %%"#regd", %%"#regd ::) -#define MOVQ_BFE(regd) \ - __asm__ volatile ( \ - "pcmpeqd %%" #regd ", %%" #regd " \n\t"\ - "paddb %%" #regd ", %%" #regd " \n\t" ::) +#define MOVQ_BFE(regd) \ + __asm__ volatile ( \ + "pcmpeqd %%"#regd", %%"#regd" \n\t" \ + "paddb %%"#regd", %%"#regd" \n\t" ::) #ifndef PIC -#define MOVQ_BONE(regd) __asm__ volatile ("movq %0, %%" #regd " \n\t" ::"m"(ff_bone)) -#define MOVQ_WTWO(regd) __asm__ volatile ("movq %0, %%" #regd " \n\t" ::"m"(ff_wtwo)) +#define MOVQ_BONE(regd) __asm__ volatile ("movq %0, %%"#regd" \n\t" :: "m"(ff_bone)) +#define MOVQ_WTWO(regd) __asm__ volatile ("movq %0, %%"#regd" \n\t" :: "m"(ff_wtwo)) #else // for shared library it's better to use this way for accessing constants // pcmpeqd -> -1 -#define MOVQ_BONE(regd) \ - __asm__ volatile ( \ - "pcmpeqd %%" #regd ", %%" #regd " \n\t" \ - "psrlw $15, %%" #regd " \n\t" \ - "packuswb %%" #regd ", %%" #regd " \n\t" ::) - -#define MOVQ_WTWO(regd) \ - __asm__ volatile ( \ - "pcmpeqd %%" #regd ", %%" #regd " \n\t" \ - "psrlw $15, %%" #regd " \n\t" \ - "psllw $1, %%" #regd " \n\t"::) +#define MOVQ_BONE(regd) \ + __asm__ volatile ( \ + "pcmpeqd %%"#regd", %%"#regd" \n\t" \ + "psrlw $15, %%"#regd" \n\t" \ + "packuswb %%"#regd", %%"#regd" \n\t" ::) + +#define MOVQ_WTWO(regd) \ + __asm__ volatile ( \ + "pcmpeqd %%"#regd", %%"#regd" \n\t" \ + "psrlw $15, %%"#regd" \n\t" \ + "psllw $1, %%"#regd" \n\t"::) #endif // using regr as temporary and for the output result // first argument is unmodifed and second is trashed // regfe is supposed to contain 0xfefefefefefefefe -#define PAVGB_MMX_NO_RND(rega, regb, regr, regfe) \ - "movq " #rega ", " #regr " \n\t"\ - "pand " #regb ", " #regr " \n\t"\ - "pxor " #rega ", " #regb " \n\t"\ - "pand " #regfe "," #regb " \n\t"\ - "psrlq $1, " #regb " \n\t"\ - "paddb " #regb ", " #regr " \n\t" - -#define PAVGB_MMX(rega, regb, regr, regfe) \ - "movq " #rega ", " #regr " \n\t"\ - "por " #regb ", " #regr " \n\t"\ - "pxor " #rega ", " #regb " \n\t"\ - "pand " #regfe "," #regb " \n\t"\ - "psrlq $1, " #regb " \n\t"\ - "psubb " #regb ", " #regr " \n\t" +#define PAVGB_MMX_NO_RND(rega, regb, regr, regfe) \ + "movq "#rega", "#regr" \n\t" \ + "pand 
"#regb", "#regr" \n\t" \ + "pxor "#rega", "#regb" \n\t" \ + "pand "#regfe", "#regb" \n\t" \ + "psrlq $1, "#regb" \n\t" \ + "paddb "#regb", "#regr" \n\t" + +#define PAVGB_MMX(rega, regb, regr, regfe) \ + "movq "#rega", "#regr" \n\t" \ + "por "#regb", "#regr" \n\t" \ + "pxor "#rega", "#regb" \n\t" \ + "pand "#regfe", "#regb" \n\t" \ + "psrlq $1, "#regb" \n\t" \ + "psubb "#regb", "#regr" \n\t" // mm6 is supposed to contain 0xfefefefefefefefe -#define PAVGBP_MMX_NO_RND(rega, regb, regr, regc, regd, regp) \ - "movq " #rega ", " #regr " \n\t"\ - "movq " #regc ", " #regp " \n\t"\ - "pand " #regb ", " #regr " \n\t"\ - "pand " #regd ", " #regp " \n\t"\ - "pxor " #rega ", " #regb " \n\t"\ - "pxor " #regc ", " #regd " \n\t"\ - "pand %%mm6, " #regb " \n\t"\ - "pand %%mm6, " #regd " \n\t"\ - "psrlq $1, " #regb " \n\t"\ - "psrlq $1, " #regd " \n\t"\ - "paddb " #regb ", " #regr " \n\t"\ - "paddb " #regd ", " #regp " \n\t" - -#define PAVGBP_MMX(rega, regb, regr, regc, regd, regp) \ - "movq " #rega ", " #regr " \n\t"\ - "movq " #regc ", " #regp " \n\t"\ - "por " #regb ", " #regr " \n\t"\ - "por " #regd ", " #regp " \n\t"\ - "pxor " #rega ", " #regb " \n\t"\ - "pxor " #regc ", " #regd " \n\t"\ - "pand %%mm6, " #regb " \n\t"\ - "pand %%mm6, " #regd " \n\t"\ - "psrlq $1, " #regd " \n\t"\ - "psrlq $1, " #regb " \n\t"\ - "psubb " #regb ", " #regr " \n\t"\ - "psubb " #regd ", " #regp " \n\t" +#define PAVGBP_MMX_NO_RND(rega, regb, regr, regc, regd, regp) \ + "movq "#rega", "#regr" \n\t" \ + "movq "#regc", "#regp" \n\t" \ + "pand "#regb", "#regr" \n\t" \ + "pand "#regd", "#regp" \n\t" \ + "pxor "#rega", "#regb" \n\t" \ + "pxor "#regc", "#regd" \n\t" \ + "pand %%mm6, "#regb" \n\t" \ + "pand %%mm6, "#regd" \n\t" \ + "psrlq $1, "#regb" \n\t" \ + "psrlq $1, "#regd" \n\t" \ + "paddb "#regb", "#regr" \n\t" \ + "paddb "#regd", "#regp" \n\t" + +#define PAVGBP_MMX(rega, regb, regr, regc, regd, regp) \ + "movq "#rega", "#regr" \n\t" \ + "movq "#regc", "#regp" \n\t" \ + "por "#regb", "#regr" \n\t" \ + "por "#regd", "#regp" \n\t" \ + "pxor "#rega", "#regb" \n\t" \ + "pxor "#regc", "#regd" \n\t" \ + "pand %%mm6, "#regb" \n\t" \ + "pand %%mm6, "#regd" \n\t" \ + "psrlq $1, "#regd" \n\t" \ + "psrlq $1, "#regb" \n\t" \ + "psubb "#regb", "#regr" \n\t" \ + "psubb "#regd", "#regp" \n\t" /***********************************/ /* MMX no rounding */ -#define DEF(x, y) x ## _no_rnd_ ## y ##_mmx +#define DEF(x, y) x ## _no_rnd_ ## y ## _mmx #define SET_RND MOVQ_WONE #define PAVGBP(a, b, c, d, e, f) PAVGBP_MMX_NO_RND(a, b, c, d, e, f) #define PAVGB(a, b, c, e) PAVGB_MMX_NO_RND(a, b, c, e) #define OP_AVG(a, b, c, e) PAVGB_MMX(a, b, c, e) -#include "dsputil_mmx_rnd_template.c" +#include "dsputil_rnd_template.c" #undef DEF #undef SET_RND @@ -169,12 +178,12 @@ DECLARE_ALIGNED(16, const double, ff_pd_2)[2] = { 2.0, 2.0 }; /***********************************/ /* MMX rounding */ -#define DEF(x, y) x ## _ ## y ##_mmx +#define DEF(x, y) x ## _ ## y ## _mmx #define SET_RND MOVQ_WTWO #define PAVGBP(a, b, c, d, e, f) PAVGBP_MMX(a, b, c, d, e, f) #define PAVGB(a, b, c, e) PAVGB_MMX(a, b, c, e) -#include "dsputil_mmx_rnd_template.c" +#include "dsputil_rnd_template.c" #undef DEF #undef SET_RND @@ -189,22 +198,22 @@ DECLARE_ALIGNED(16, const double, ff_pd_2)[2] = { 2.0, 2.0 }; #define PAVGB "pavgusb" #define OP_AVG PAVGB -#include "dsputil_mmx_avg_template.c" +#include "dsputil_avg_template.c" #undef DEF #undef PAVGB #undef OP_AVG /***********************************/ -/* MMX2 specific */ +/* MMXEXT specific */ -#define DEF(x) x ## _mmx2 +#define DEF(x) x 
## _mmxext -/* Introduced only in MMX2 set */ +/* Introduced only in MMXEXT set */ #define PAVGB "pavgb" #define OP_AVG PAVGB -#include "dsputil_mmx_avg_template.c" +#include "dsputil_avg_template.c" #undef DEF #undef PAVGB @@ -212,11 +221,11 @@ DECLARE_ALIGNED(16, const double, ff_pd_2)[2] = { 2.0, 2.0 }; #define put_no_rnd_pixels16_mmx put_pixels16_mmx #define put_no_rnd_pixels8_mmx put_pixels8_mmx -#define put_pixels16_mmx2 put_pixels16_mmx -#define put_pixels8_mmx2 put_pixels8_mmx -#define put_pixels4_mmx2 put_pixels4_mmx -#define put_no_rnd_pixels16_mmx2 put_no_rnd_pixels16_mmx -#define put_no_rnd_pixels8_mmx2 put_no_rnd_pixels8_mmx +#define put_pixels16_mmxext put_pixels16_mmx +#define put_pixels8_mmxext put_pixels8_mmx +#define put_pixels4_mmxext put_pixels4_mmx +#define put_no_rnd_pixels16_mmxext put_no_rnd_pixels16_mmx +#define put_no_rnd_pixels8_mmxext put_no_rnd_pixels8_mmx #define put_pixels16_3dnow put_pixels16_mmx #define put_pixels8_3dnow put_pixels8_mmx #define put_pixels4_3dnow put_pixels4_mmx @@ -226,562 +235,580 @@ DECLARE_ALIGNED(16, const double, ff_pd_2)[2] = { 2.0, 2.0 }; /***********************************/ /* standard MMX */ -void ff_put_pixels_clamped_mmx(const DCTELEM *block, uint8_t *pixels, int line_size) +void ff_put_pixels_clamped_mmx(const DCTELEM *block, uint8_t *pixels, + int line_size) { const DCTELEM *p; uint8_t *pix; /* read the pixels */ - p = block; + p = block; pix = pixels; /* unrolled loop */ - __asm__ volatile( - "movq %3, %%mm0 \n\t" - "movq 8%3, %%mm1 \n\t" - "movq 16%3, %%mm2 \n\t" - "movq 24%3, %%mm3 \n\t" - "movq 32%3, %%mm4 \n\t" - "movq 40%3, %%mm5 \n\t" - "movq 48%3, %%mm6 \n\t" - "movq 56%3, %%mm7 \n\t" - "packuswb %%mm1, %%mm0 \n\t" - "packuswb %%mm3, %%mm2 \n\t" - "packuswb %%mm5, %%mm4 \n\t" - "packuswb %%mm7, %%mm6 \n\t" - "movq %%mm0, (%0) \n\t" - "movq %%mm2, (%0, %1) \n\t" - "movq %%mm4, (%0, %1, 2) \n\t" - "movq %%mm6, (%0, %2) \n\t" - ::"r" (pix), "r" ((x86_reg)line_size), "r" ((x86_reg)line_size*3), "m"(*p) - :"memory"); - pix += line_size*4; - p += 32; + __asm__ volatile ( + "movq (%3), %%mm0 \n\t" + "movq 8(%3), %%mm1 \n\t" + "movq 16(%3), %%mm2 \n\t" + "movq 24(%3), %%mm3 \n\t" + "movq 32(%3), %%mm4 \n\t" + "movq 40(%3), %%mm5 \n\t" + "movq 48(%3), %%mm6 \n\t" + "movq 56(%3), %%mm7 \n\t" + "packuswb %%mm1, %%mm0 \n\t" + "packuswb %%mm3, %%mm2 \n\t" + "packuswb %%mm5, %%mm4 \n\t" + "packuswb %%mm7, %%mm6 \n\t" + "movq %%mm0, (%0) \n\t" + "movq %%mm2, (%0, %1) \n\t" + "movq %%mm4, (%0, %1, 2) \n\t" + "movq %%mm6, (%0, %2) \n\t" + :: "r"(pix), "r"((x86_reg)line_size), "r"((x86_reg)line_size * 3), + "r"(p) + : "memory"); + pix += line_size * 4; + p += 32; // if here would be an exact copy of the code above // compiler would generate some very strange code // thus using "r" - __asm__ volatile( - "movq (%3), %%mm0 \n\t" - "movq 8(%3), %%mm1 \n\t" - "movq 16(%3), %%mm2 \n\t" - "movq 24(%3), %%mm3 \n\t" - "movq 32(%3), %%mm4 \n\t" - "movq 40(%3), %%mm5 \n\t" - "movq 48(%3), %%mm6 \n\t" - "movq 56(%3), %%mm7 \n\t" - "packuswb %%mm1, %%mm0 \n\t" - "packuswb %%mm3, %%mm2 \n\t" - "packuswb %%mm5, %%mm4 \n\t" - "packuswb %%mm7, %%mm6 \n\t" - "movq %%mm0, (%0) \n\t" - "movq %%mm2, (%0, %1) \n\t" - "movq %%mm4, (%0, %1, 2) \n\t" - "movq %%mm6, (%0, %2) \n\t" - ::"r" (pix), "r" ((x86_reg)line_size), "r" ((x86_reg)line_size*3), "r"(p) - :"memory"); + __asm__ volatile ( + "movq (%3), %%mm0 \n\t" + "movq 8(%3), %%mm1 \n\t" + "movq 16(%3), %%mm2 \n\t" + "movq 24(%3), %%mm3 \n\t" + "movq 32(%3), %%mm4 \n\t" + "movq 40(%3), %%mm5 \n\t" + "movq 
48(%3), %%mm6 \n\t" + "movq 56(%3), %%mm7 \n\t" + "packuswb %%mm1, %%mm0 \n\t" + "packuswb %%mm3, %%mm2 \n\t" + "packuswb %%mm5, %%mm4 \n\t" + "packuswb %%mm7, %%mm6 \n\t" + "movq %%mm0, (%0) \n\t" + "movq %%mm2, (%0, %1) \n\t" + "movq %%mm4, (%0, %1, 2) \n\t" + "movq %%mm6, (%0, %2) \n\t" + :: "r"(pix), "r"((x86_reg)line_size), "r"((x86_reg)line_size * 3), "r"(p) + : "memory"); } -DECLARE_ASM_CONST(8, uint8_t, ff_vector128)[8] = - { 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80 }; - -#define put_signed_pixels_clamped_mmx_half(off) \ - "movq "#off"(%2), %%mm1 \n\t"\ - "movq 16+"#off"(%2), %%mm2 \n\t"\ - "movq 32+"#off"(%2), %%mm3 \n\t"\ - "movq 48+"#off"(%2), %%mm4 \n\t"\ - "packsswb 8+"#off"(%2), %%mm1 \n\t"\ - "packsswb 24+"#off"(%2), %%mm2 \n\t"\ - "packsswb 40+"#off"(%2), %%mm3 \n\t"\ - "packsswb 56+"#off"(%2), %%mm4 \n\t"\ - "paddb %%mm0, %%mm1 \n\t"\ - "paddb %%mm0, %%mm2 \n\t"\ - "paddb %%mm0, %%mm3 \n\t"\ - "paddb %%mm0, %%mm4 \n\t"\ - "movq %%mm1, (%0) \n\t"\ - "movq %%mm2, (%0, %3) \n\t"\ - "movq %%mm3, (%0, %3, 2) \n\t"\ - "movq %%mm4, (%0, %1) \n\t" - -void ff_put_signed_pixels_clamped_mmx(const DCTELEM *block, uint8_t *pixels, int line_size) +#define put_signed_pixels_clamped_mmx_half(off) \ + "movq "#off"(%2), %%mm1 \n\t" \ + "movq 16 + "#off"(%2), %%mm2 \n\t" \ + "movq 32 + "#off"(%2), %%mm3 \n\t" \ + "movq 48 + "#off"(%2), %%mm4 \n\t" \ + "packsswb 8 + "#off"(%2), %%mm1 \n\t" \ + "packsswb 24 + "#off"(%2), %%mm2 \n\t" \ + "packsswb 40 + "#off"(%2), %%mm3 \n\t" \ + "packsswb 56 + "#off"(%2), %%mm4 \n\t" \ + "paddb %%mm0, %%mm1 \n\t" \ + "paddb %%mm0, %%mm2 \n\t" \ + "paddb %%mm0, %%mm3 \n\t" \ + "paddb %%mm0, %%mm4 \n\t" \ + "movq %%mm1, (%0) \n\t" \ + "movq %%mm2, (%0, %3) \n\t" \ + "movq %%mm3, (%0, %3, 2) \n\t" \ + "movq %%mm4, (%0, %1) \n\t" + +void ff_put_signed_pixels_clamped_mmx(const DCTELEM *block, uint8_t *pixels, + int line_size) { x86_reg line_skip = line_size; x86_reg line_skip3; __asm__ volatile ( - "movq "MANGLE(ff_vector128)", %%mm0 \n\t" - "lea (%3, %3, 2), %1 \n\t" - put_signed_pixels_clamped_mmx_half(0) - "lea (%0, %3, 4), %0 \n\t" - put_signed_pixels_clamped_mmx_half(64) - :"+&r" (pixels), "=&r" (line_skip3) - :"r" (block), "r"(line_skip) - :"memory"); + "movq "MANGLE(ff_pb_80)", %%mm0 \n\t" + "lea (%3, %3, 2), %1 \n\t" + put_signed_pixels_clamped_mmx_half(0) + "lea (%0, %3, 4), %0 \n\t" + put_signed_pixels_clamped_mmx_half(64) + : "+&r"(pixels), "=&r"(line_skip3) + : "r"(block), "r"(line_skip) + : "memory"); } -void ff_add_pixels_clamped_mmx(const DCTELEM *block, uint8_t *pixels, int line_size) +void ff_add_pixels_clamped_mmx(const DCTELEM *block, uint8_t *pixels, + int line_size) { const DCTELEM *p; uint8_t *pix; int i; /* read the pixels */ - p = block; + p = block; pix = pixels; MOVQ_ZERO(mm7); i = 4; do { - __asm__ volatile( - "movq (%2), %%mm0 \n\t" - "movq 8(%2), %%mm1 \n\t" - "movq 16(%2), %%mm2 \n\t" - "movq 24(%2), %%mm3 \n\t" - "movq %0, %%mm4 \n\t" - "movq %1, %%mm6 \n\t" - "movq %%mm4, %%mm5 \n\t" - "punpcklbw %%mm7, %%mm4 \n\t" - "punpckhbw %%mm7, %%mm5 \n\t" - "paddsw %%mm4, %%mm0 \n\t" - "paddsw %%mm5, %%mm1 \n\t" - "movq %%mm6, %%mm5 \n\t" - "punpcklbw %%mm7, %%mm6 \n\t" - "punpckhbw %%mm7, %%mm5 \n\t" - "paddsw %%mm6, %%mm2 \n\t" - "paddsw %%mm5, %%mm3 \n\t" - "packuswb %%mm1, %%mm0 \n\t" - "packuswb %%mm3, %%mm2 \n\t" - "movq %%mm0, %0 \n\t" - "movq %%mm2, %1 \n\t" - :"+m"(*pix), "+m"(*(pix+line_size)) - :"r"(p) - :"memory"); - pix += line_size*2; - p += 16; + __asm__ volatile ( + "movq (%2), %%mm0 \n\t" + "movq 8(%2), %%mm1 \n\t" + 
"movq 16(%2), %%mm2 \n\t" + "movq 24(%2), %%mm3 \n\t" + "movq %0, %%mm4 \n\t" + "movq %1, %%mm6 \n\t" + "movq %%mm4, %%mm5 \n\t" + "punpcklbw %%mm7, %%mm4 \n\t" + "punpckhbw %%mm7, %%mm5 \n\t" + "paddsw %%mm4, %%mm0 \n\t" + "paddsw %%mm5, %%mm1 \n\t" + "movq %%mm6, %%mm5 \n\t" + "punpcklbw %%mm7, %%mm6 \n\t" + "punpckhbw %%mm7, %%mm5 \n\t" + "paddsw %%mm6, %%mm2 \n\t" + "paddsw %%mm5, %%mm3 \n\t" + "packuswb %%mm1, %%mm0 \n\t" + "packuswb %%mm3, %%mm2 \n\t" + "movq %%mm0, %0 \n\t" + "movq %%mm2, %1 \n\t" + : "+m"(*pix), "+m"(*(pix + line_size)) + : "r"(p) + : "memory"); + pix += line_size * 2; + p += 16; } while (--i); } -static void put_pixels4_mmx(uint8_t *block, const uint8_t *pixels, int line_size, int h) +static void put_pixels4_mmx(uint8_t *block, const uint8_t *pixels, + int line_size, int h) { - __asm__ volatile( - "lea (%3, %3), %%"REG_a" \n\t" - ASMALIGN(3) - "1: \n\t" - "movd (%1), %%mm0 \n\t" - "movd (%1, %3), %%mm1 \n\t" - "movd %%mm0, (%2) \n\t" - "movd %%mm1, (%2, %3) \n\t" - "add %%"REG_a", %1 \n\t" - "add %%"REG_a", %2 \n\t" - "movd (%1), %%mm0 \n\t" - "movd (%1, %3), %%mm1 \n\t" - "movd %%mm0, (%2) \n\t" - "movd %%mm1, (%2, %3) \n\t" - "add %%"REG_a", %1 \n\t" - "add %%"REG_a", %2 \n\t" - "subl $4, %0 \n\t" - "jnz 1b \n\t" - : "+g"(h), "+r" (pixels), "+r" (block) - : "r"((x86_reg)line_size) - : "%"REG_a, "memory" + __asm__ volatile ( + "lea (%3, %3), %%"REG_a" \n\t" + ".p2align 3 \n\t" + "1: \n\t" + "movd (%1 ), %%mm0 \n\t" + "movd (%1, %3), %%mm1 \n\t" + "movd %%mm0, (%2) \n\t" + "movd %%mm1, (%2, %3) \n\t" + "add %%"REG_a", %1 \n\t" + "add %%"REG_a", %2 \n\t" + "movd (%1 ), %%mm0 \n\t" + "movd (%1, %3), %%mm1 \n\t" + "movd %%mm0, (%2) \n\t" + "movd %%mm1, (%2, %3) \n\t" + "add %%"REG_a", %1 \n\t" + "add %%"REG_a", %2 \n\t" + "subl $4, %0 \n\t" + "jnz 1b \n\t" + : "+g"(h), "+r"(pixels), "+r"(block) + : "r"((x86_reg)line_size) + : "%"REG_a, "memory" ); } -static void put_pixels8_mmx(uint8_t *block, const uint8_t *pixels, int line_size, int h) +static void put_pixels8_mmx(uint8_t *block, const uint8_t *pixels, + int line_size, int h) { - __asm__ volatile( - "lea (%3, %3), %%"REG_a" \n\t" - ASMALIGN(3) - "1: \n\t" - "movq (%1), %%mm0 \n\t" - "movq (%1, %3), %%mm1 \n\t" - "movq %%mm0, (%2) \n\t" - "movq %%mm1, (%2, %3) \n\t" - "add %%"REG_a", %1 \n\t" - "add %%"REG_a", %2 \n\t" - "movq (%1), %%mm0 \n\t" - "movq (%1, %3), %%mm1 \n\t" - "movq %%mm0, (%2) \n\t" - "movq %%mm1, (%2, %3) \n\t" - "add %%"REG_a", %1 \n\t" - "add %%"REG_a", %2 \n\t" - "subl $4, %0 \n\t" - "jnz 1b \n\t" - : "+g"(h), "+r" (pixels), "+r" (block) - : "r"((x86_reg)line_size) - : "%"REG_a, "memory" + __asm__ volatile ( + "lea (%3, %3), %%"REG_a" \n\t" + ".p2align 3 \n\t" + "1: \n\t" + "movq (%1 ), %%mm0 \n\t" + "movq (%1, %3), %%mm1 \n\t" + "movq %%mm0, (%2) \n\t" + "movq %%mm1, (%2, %3) \n\t" + "add %%"REG_a", %1 \n\t" + "add %%"REG_a", %2 \n\t" + "movq (%1 ), %%mm0 \n\t" + "movq (%1, %3), %%mm1 \n\t" + "movq %%mm0, (%2) \n\t" + "movq %%mm1, (%2, %3) \n\t" + "add %%"REG_a", %1 \n\t" + "add %%"REG_a", %2 \n\t" + "subl $4, %0 \n\t" + "jnz 1b \n\t" + : "+g"(h), "+r"(pixels), "+r"(block) + : "r"((x86_reg)line_size) + : "%"REG_a, "memory" ); } -static void put_pixels16_mmx(uint8_t *block, const uint8_t *pixels, int line_size, int h) +static void put_pixels16_mmx(uint8_t *block, const uint8_t *pixels, + int line_size, int h) { - __asm__ volatile( - "lea (%3, %3), %%"REG_a" \n\t" - ASMALIGN(3) - "1: \n\t" - "movq (%1), %%mm0 \n\t" - "movq 8(%1), %%mm4 \n\t" - "movq (%1, %3), %%mm1 \n\t" - "movq 8(%1, %3), %%mm5 
\n\t" - "movq %%mm0, (%2) \n\t" - "movq %%mm4, 8(%2) \n\t" - "movq %%mm1, (%2, %3) \n\t" - "movq %%mm5, 8(%2, %3) \n\t" - "add %%"REG_a", %1 \n\t" - "add %%"REG_a", %2 \n\t" - "movq (%1), %%mm0 \n\t" - "movq 8(%1), %%mm4 \n\t" - "movq (%1, %3), %%mm1 \n\t" - "movq 8(%1, %3), %%mm5 \n\t" - "movq %%mm0, (%2) \n\t" - "movq %%mm4, 8(%2) \n\t" - "movq %%mm1, (%2, %3) \n\t" - "movq %%mm5, 8(%2, %3) \n\t" - "add %%"REG_a", %1 \n\t" - "add %%"REG_a", %2 \n\t" - "subl $4, %0 \n\t" - "jnz 1b \n\t" - : "+g"(h), "+r" (pixels), "+r" (block) - : "r"((x86_reg)line_size) - : "%"REG_a, "memory" + __asm__ volatile ( + "lea (%3, %3), %%"REG_a" \n\t" + ".p2align 3 \n\t" + "1: \n\t" + "movq (%1 ), %%mm0 \n\t" + "movq 8(%1 ), %%mm4 \n\t" + "movq (%1, %3), %%mm1 \n\t" + "movq 8(%1, %3), %%mm5 \n\t" + "movq %%mm0, (%2) \n\t" + "movq %%mm4, 8(%2) \n\t" + "movq %%mm1, (%2, %3) \n\t" + "movq %%mm5, 8(%2, %3) \n\t" + "add %%"REG_a", %1 \n\t" + "add %%"REG_a", %2 \n\t" + "movq (%1 ), %%mm0 \n\t" + "movq 8(%1 ), %%mm4 \n\t" + "movq (%1, %3), %%mm1 \n\t" + "movq 8(%1, %3), %%mm5 \n\t" + "movq %%mm0, (%2) \n\t" + "movq %%mm4, 8(%2) \n\t" + "movq %%mm1, (%2, %3) \n\t" + "movq %%mm5, 8(%2, %3) \n\t" + "add %%"REG_a", %1 \n\t" + "add %%"REG_a", %2 \n\t" + "subl $4, %0 \n\t" + "jnz 1b \n\t" + : "+g"(h), "+r"(pixels), "+r"(block) + : "r"((x86_reg)line_size) + : "%"REG_a, "memory" ); } -static void put_pixels16_sse2(uint8_t *block, const uint8_t *pixels, int line_size, int h) +static void put_pixels16_sse2(uint8_t *block, const uint8_t *pixels, + int line_size, int h) { - __asm__ volatile( - "1: \n\t" - "movdqu (%1), %%xmm0 \n\t" - "movdqu (%1,%3), %%xmm1 \n\t" - "movdqu (%1,%3,2), %%xmm2 \n\t" - "movdqu (%1,%4), %%xmm3 \n\t" - "movdqa %%xmm0, (%2) \n\t" - "movdqa %%xmm1, (%2,%3) \n\t" - "movdqa %%xmm2, (%2,%3,2) \n\t" - "movdqa %%xmm3, (%2,%4) \n\t" - "subl $4, %0 \n\t" - "lea (%1,%3,4), %1 \n\t" - "lea (%2,%3,4), %2 \n\t" - "jnz 1b \n\t" - : "+g"(h), "+r" (pixels), "+r" (block) - : "r"((x86_reg)line_size), "r"((x86_reg)3L*line_size) - : "memory" + __asm__ volatile ( + "1: \n\t" + "movdqu (%1 ), %%xmm0 \n\t" + "movdqu (%1, %3 ), %%xmm1 \n\t" + "movdqu (%1, %3, 2), %%xmm2 \n\t" + "movdqu (%1, %4 ), %%xmm3 \n\t" + "lea (%1, %3, 4), %1 \n\t" + "movdqa %%xmm0, (%2) \n\t" + "movdqa %%xmm1, (%2, %3) \n\t" + "movdqa %%xmm2, (%2, %3, 2) \n\t" + "movdqa %%xmm3, (%2, %4) \n\t" + "subl $4, %0 \n\t" + "lea (%2, %3, 4), %2 \n\t" + "jnz 1b \n\t" + : "+g"(h), "+r"(pixels), "+r"(block) + : "r"((x86_reg)line_size), "r"((x86_reg)3L * line_size) + : "memory" ); } -static void avg_pixels16_sse2(uint8_t *block, const uint8_t *pixels, int line_size, int h) +static void avg_pixels16_sse2(uint8_t *block, const uint8_t *pixels, + int line_size, int h) { - __asm__ volatile( - "1: \n\t" - "movdqu (%1), %%xmm0 \n\t" - "movdqu (%1,%3), %%xmm1 \n\t" - "movdqu (%1,%3,2), %%xmm2 \n\t" - "movdqu (%1,%4), %%xmm3 \n\t" - "pavgb (%2), %%xmm0 \n\t" - "pavgb (%2,%3), %%xmm1 \n\t" - "pavgb (%2,%3,2), %%xmm2 \n\t" - "pavgb (%2,%4), %%xmm3 \n\t" - "movdqa %%xmm0, (%2) \n\t" - "movdqa %%xmm1, (%2,%3) \n\t" - "movdqa %%xmm2, (%2,%3,2) \n\t" - "movdqa %%xmm3, (%2,%4) \n\t" - "subl $4, %0 \n\t" - "lea (%1,%3,4), %1 \n\t" - "lea (%2,%3,4), %2 \n\t" - "jnz 1b \n\t" - : "+g"(h), "+r" (pixels), "+r" (block) - : "r"((x86_reg)line_size), "r"((x86_reg)3L*line_size) - : "memory" + __asm__ volatile ( + "1: \n\t" + "movdqu (%1 ), %%xmm0 \n\t" + "movdqu (%1, %3 ), %%xmm1 \n\t" + "movdqu (%1, %3, 2), %%xmm2 \n\t" + "movdqu (%1, %4 ), %%xmm3 \n\t" + "lea (%1, %3, 4), %1 \n\t" + 
"pavgb (%2 ), %%xmm0 \n\t" + "pavgb (%2, %3 ), %%xmm1 \n\t" + "pavgb (%2, %3, 2), %%xmm2 \n\t" + "pavgb (%2, %4), %%xmm3 \n\t" + "movdqa %%xmm0, (%2) \n\t" + "movdqa %%xmm1, (%2, %3) \n\t" + "movdqa %%xmm2, (%2, %3, 2) \n\t" + "movdqa %%xmm3, (%2, %4) \n\t" + "subl $4, %0 \n\t" + "lea (%2, %3, 4), %2 \n\t" + "jnz 1b \n\t" + : "+g"(h), "+r"(pixels), "+r"(block) + : "r"((x86_reg)line_size), "r"((x86_reg)3L * line_size) + : "memory" ); } -#define CLEAR_BLOCKS(name,n) \ -static void name(DCTELEM *blocks)\ -{\ - __asm__ volatile(\ - "pxor %%mm7, %%mm7 \n\t"\ - "mov %1, %%"REG_a" \n\t"\ - "1: \n\t"\ - "movq %%mm7, (%0, %%"REG_a") \n\t"\ - "movq %%mm7, 8(%0, %%"REG_a") \n\t"\ - "movq %%mm7, 16(%0, %%"REG_a") \n\t"\ - "movq %%mm7, 24(%0, %%"REG_a") \n\t"\ - "add $32, %%"REG_a" \n\t"\ - " js 1b \n\t"\ - : : "r" (((uint8_t *)blocks)+128*n),\ - "i" (-128*n)\ - : "%"REG_a\ - );\ +#define CLEAR_BLOCKS(name, n) \ +static void name(DCTELEM *blocks) \ +{ \ + __asm__ volatile ( \ + "pxor %%mm7, %%mm7 \n\t" \ + "mov %1, %%"REG_a" \n\t" \ + "1: \n\t" \ + "movq %%mm7, (%0, %%"REG_a") \n\t" \ + "movq %%mm7, 8(%0, %%"REG_a") \n\t" \ + "movq %%mm7, 16(%0, %%"REG_a") \n\t" \ + "movq %%mm7, 24(%0, %%"REG_a") \n\t" \ + "add $32, %%"REG_a" \n\t" \ + "js 1b \n\t" \ + :: "r"(((uint8_t *)blocks) + 128 * n), \ + "i"(-128 * n) \ + : "%"REG_a \ + ); \ } CLEAR_BLOCKS(clear_blocks_mmx, 6) CLEAR_BLOCKS(clear_block_mmx, 1) static void clear_block_sse(DCTELEM *block) { - __asm__ volatile( - "xorps %%xmm0, %%xmm0 \n" - "movaps %%xmm0, (%0) \n" - "movaps %%xmm0, 16(%0) \n" - "movaps %%xmm0, 32(%0) \n" - "movaps %%xmm0, 48(%0) \n" - "movaps %%xmm0, 64(%0) \n" - "movaps %%xmm0, 80(%0) \n" - "movaps %%xmm0, 96(%0) \n" - "movaps %%xmm0, 112(%0) \n" + __asm__ volatile ( + "xorps %%xmm0, %%xmm0 \n" + "movaps %%xmm0, (%0) \n" + "movaps %%xmm0, 16(%0) \n" + "movaps %%xmm0, 32(%0) \n" + "movaps %%xmm0, 48(%0) \n" + "movaps %%xmm0, 64(%0) \n" + "movaps %%xmm0, 80(%0) \n" + "movaps %%xmm0, 96(%0) \n" + "movaps %%xmm0, 112(%0) \n" :: "r"(block) : "memory" ); } static void clear_blocks_sse(DCTELEM *blocks) -{\ - __asm__ volatile( - "xorps %%xmm0, %%xmm0 \n" - "mov %1, %%"REG_a" \n" - "1: \n" - "movaps %%xmm0, (%0, %%"REG_a") \n" - "movaps %%xmm0, 16(%0, %%"REG_a") \n" - "movaps %%xmm0, 32(%0, %%"REG_a") \n" - "movaps %%xmm0, 48(%0, %%"REG_a") \n" - "movaps %%xmm0, 64(%0, %%"REG_a") \n" - "movaps %%xmm0, 80(%0, %%"REG_a") \n" - "movaps %%xmm0, 96(%0, %%"REG_a") \n" - "movaps %%xmm0, 112(%0, %%"REG_a") \n" - "add $128, %%"REG_a" \n" - " js 1b \n" - : : "r" (((uint8_t *)blocks)+128*6), - "i" (-128*6) +{ + __asm__ volatile ( + "xorps %%xmm0, %%xmm0 \n" + "mov %1, %%"REG_a" \n" + "1: \n" + "movaps %%xmm0, (%0, %%"REG_a") \n" + "movaps %%xmm0, 16(%0, %%"REG_a") \n" + "movaps %%xmm0, 32(%0, %%"REG_a") \n" + "movaps %%xmm0, 48(%0, %%"REG_a") \n" + "movaps %%xmm0, 64(%0, %%"REG_a") \n" + "movaps %%xmm0, 80(%0, %%"REG_a") \n" + "movaps %%xmm0, 96(%0, %%"REG_a") \n" + "movaps %%xmm0, 112(%0, %%"REG_a") \n" + "add $128, %%"REG_a" \n" + "js 1b \n" + :: "r"(((uint8_t *)blocks) + 128 * 6), + "i"(-128 * 6) : "%"REG_a ); } -static void add_bytes_mmx(uint8_t *dst, uint8_t *src, int w){ - x86_reg i=0; - __asm__ volatile( - "jmp 2f \n\t" - "1: \n\t" - "movq (%1, %0), %%mm0 \n\t" - "movq (%2, %0), %%mm1 \n\t" - "paddb %%mm0, %%mm1 \n\t" - "movq %%mm1, (%2, %0) \n\t" - "movq 8(%1, %0), %%mm0 \n\t" - "movq 8(%2, %0), %%mm1 \n\t" - "paddb %%mm0, %%mm1 \n\t" - "movq %%mm1, 8(%2, %0) \n\t" - "add $16, %0 \n\t" - "2: \n\t" - "cmp %3, %0 \n\t" - " js 1b \n\t" - : "+r" 
(i) - : "r"(src), "r"(dst), "r"((x86_reg)w-15) - ); - for(; i= h) { + src_y_add = h - 1 - src_y; + src_y = h - 1; + } else if (src_y <= -block_h) { + src_y_add = 1 - block_h - src_y; + src_y = 1 - block_h; + } + if (src_x >= w) { + src += w - 1 - src_x; + src_x = w - 1; + } else if (src_x <= -block_w) { + src += 1 - block_w - src_x; + src_x = 1 - block_w; + } + + start_y = FFMAX(0, -src_y); + start_x = FFMAX(0, -src_x); + end_y = FFMIN(block_h, h-src_y); + end_x = FFMIN(block_w, w-src_x); + assert(start_x < end_x && block_w > 0); + assert(start_y < end_y && block_h > 0); + + // fill in the to-be-copied part plus all above/below + src += (src_y_add + start_y) * linesize + start_x; + buf += start_x; + core_fn(buf, src, linesize, start_y, end_y, + block_h, start_x, end_x, block_w); +} -#if 0 -static void just_return(void) { return; } +#if ARCH_X86_32 +static av_noinline void emulated_edge_mc_mmx(uint8_t *buf, const uint8_t *src, + int linesize, + int block_w, int block_h, + int src_x, int src_y, int w, int h) +{ + emulated_edge_mc(buf, src, linesize, block_w, block_h, src_x, src_y, + w, h, &ff_emu_edge_core_mmx); +} #endif -static void gmc_mmx(uint8_t *dst, uint8_t *src, int stride, int h, int ox, int oy, - int dxx, int dxy, int dyx, int dyy, int shift, int r, int width, int height){ - const int w = 8; - const int ix = ox>>(16+shift); - const int iy = oy>>(16+shift); - const int oxs = ox>>4; - const int oys = oy>>4; - const int dxxs = dxx>>4; - const int dxys = dxy>>4; - const int dyxs = dyx>>4; - const int dyys = dyy>>4; - const uint16_t r4[4] = {r,r,r,r}; - const uint16_t dxy4[4] = {dxys,dxys,dxys,dxys}; - const uint16_t dyy4[4] = {dyys,dyys,dyys,dyys}; - const uint64_t shift2 = 2*shift; - uint8_t edge_buf[(h+1)*stride]; +static av_noinline void emulated_edge_mc_sse(uint8_t *buf, const uint8_t *src, + int linesize, + int block_w, int block_h, + int src_x, int src_y, int w, int h) +{ + emulated_edge_mc(buf, src, linesize, block_w, block_h, src_x, src_y, + w, h, &ff_emu_edge_core_sse); +} +#endif /* HAVE_YASM */ + +#if HAVE_INLINE_ASM + +static void gmc_mmx(uint8_t *dst, uint8_t *src, + int stride, int h, int ox, int oy, + int dxx, int dxy, int dyx, int dyy, + int shift, int r, int width, int height) +{ + const int w = 8; + const int ix = ox >> (16 + shift); + const int iy = oy >> (16 + shift); + const int oxs = ox >> 4; + const int oys = oy >> 4; + const int dxxs = dxx >> 4; + const int dxys = dxy >> 4; + const int dyxs = dyx >> 4; + const int dyys = dyy >> 4; + const uint16_t r4[4] = { r, r, r, r }; + const uint16_t dxy4[4] = { dxys, dxys, dxys, dxys }; + const uint16_t dyy4[4] = { dyys, dyys, dyys, dyys }; + const uint64_t shift2 = 2 * shift; int x, y; - const int dxw = (dxx-(1<<(16+shift)))*(w-1); - const int dyh = (dyy-(1<<(16+shift)))*(h-1); - const int dxh = dxy*(h-1); - const int dyw = dyx*(w-1); - if( // non-constant fullpel offset (3% of blocks) - ((ox^(ox+dxw)) | (ox^(ox+dxh)) | (ox^(ox+dxw+dxh)) | - (oy^(oy+dyw)) | (oy^(oy+dyh)) | (oy^(oy+dyw+dyh))) >> (16+shift) + const int dxw = (dxx - (1 << (16 + shift))) * (w - 1); + const int dyh = (dyy - (1 << (16 + shift))) * (h - 1); + const int dxh = dxy * (h - 1); + const int dyw = dyx * (w - 1); + if ( // non-constant fullpel offset (3% of blocks) + ((ox ^ (ox + dxw)) | (ox ^ (ox + dxh)) | (ox ^ (ox + dxw + dxh)) | + (oy ^ (oy + dyw)) | (oy ^ (oy + dyh)) | (oy ^ (oy + dyw + dyh))) >> (16 + shift) // uses more than 16 bits of subpel mv (only at huge resolution) - || (dxx|dxy|dyx|dyy)&15 ) - { - //FIXME could still use mmx for some of the 
rows - ff_gmc_c(dst, src, stride, h, ox, oy, dxx, dxy, dyx, dyy, shift, r, width, height); + || (dxx | dxy | dyx | dyy) & 15 || + (unsigned)ix >= width - w || + (unsigned)iy >= height - h) { + // FIXME could still use mmx for some of the rows + ff_gmc_c(dst, src, stride, h, ox, oy, dxx, dxy, dyx, dyy, + shift, r, width, height); return; } - src += ix + iy*stride; - if( (unsigned)ix >= width-w || - (unsigned)iy >= height-h ) - { - ff_emulated_edge_mc(edge_buf, src, stride, w+1, h+1, ix, iy, width, height); - src = edge_buf; - } + src += ix + iy * stride; - __asm__ volatile( - "movd %0, %%mm6 \n\t" - "pxor %%mm7, %%mm7 \n\t" - "punpcklwd %%mm6, %%mm6 \n\t" - "punpcklwd %%mm6, %%mm6 \n\t" + __asm__ volatile ( + "movd %0, %%mm6 \n\t" + "pxor %%mm7, %%mm7 \n\t" + "punpcklwd %%mm6, %%mm6 \n\t" + "punpcklwd %%mm6, %%mm6 \n\t" :: "r"(1<0) & (a ^ sign(m))) - "movq %%mm3, %1 \n\t" - "movq %%mm0, %0 \n\t" - :"+m"(mag[i]), "+m"(ang[i]) - ::"memory" + __asm__ volatile ("pxor %%mm7, %%mm7":); + for (i = 0; i < blocksize; i += 2) { + __asm__ volatile ( + "movq %0, %%mm0 \n\t" + "movq %1, %%mm1 \n\t" + "movq %%mm0, %%mm2 \n\t" + "movq %%mm1, %%mm3 \n\t" + "pfcmpge %%mm7, %%mm2 \n\t" // m <= 0.0 + "pfcmpge %%mm7, %%mm3 \n\t" // a <= 0.0 + "pslld $31, %%mm2 \n\t" // keep only the sign bit + "pxor %%mm2, %%mm1 \n\t" + "movq %%mm3, %%mm4 \n\t" + "pand %%mm1, %%mm3 \n\t" + "pandn %%mm1, %%mm4 \n\t" + "pfadd %%mm0, %%mm3 \n\t" // a = m + ((a < 0) & (a ^ sign(m))) + "pfsub %%mm4, %%mm0 \n\t" // m = m + ((a > 0) & (a ^ sign(m))) + "movq %%mm3, %1 \n\t" + "movq %%mm0, %0 \n\t" + : "+m"(mag[i]), "+m"(ang[i]) + :: "memory" ); } - __asm__ volatile("femms"); + __asm__ volatile ("femms"); } + static void vorbis_inverse_coupling_sse(float *mag, float *ang, int blocksize) { int i; - __asm__ volatile( - "movaps %0, %%xmm5 \n\t" - ::"m"(ff_pdw_80000000[0]) + __asm__ volatile ( + "movaps %0, %%xmm5 \n\t" + :: "m"(ff_pdw_80000000[0]) ); - for(i=0; i0) & (a ^ sign(m))) + "addps %%xmm0, %%xmm3 \n\t" // a = m + ((a < 0) & (a ^ sign(m))) + "subps %%xmm4, %%xmm0 \n\t" // m = m + ((a > 0) & (a ^ sign(m))) "movaps %%xmm3, %1 \n\t" "movaps %%xmm0, %0 \n\t" - :"+m"(mag[i]), "+m"(ang[i]) - ::"memory" + : "+m"(mag[i]), "+m"(ang[i]) + :: "memory" ); } } -#define IF1(x) x -#define IF0(x) - -#define MIX5(mono,stereo)\ - __asm__ volatile(\ - "movss 0(%2), %%xmm5 \n"\ - "movss 8(%2), %%xmm6 \n"\ - "movss 24(%2), %%xmm7 \n"\ - "shufps $0, %%xmm5, %%xmm5 \n"\ - "shufps $0, %%xmm6, %%xmm6 \n"\ - "shufps $0, %%xmm7, %%xmm7 \n"\ - "1: \n"\ - "movaps (%0,%1), %%xmm0 \n"\ - "movaps 0x400(%0,%1), %%xmm1 \n"\ - "movaps 0x800(%0,%1), %%xmm2 \n"\ - "movaps 0xc00(%0,%1), %%xmm3 \n"\ - "movaps 0x1000(%0,%1), %%xmm4 \n"\ - "mulps %%xmm5, %%xmm0 \n"\ - "mulps %%xmm6, %%xmm1 \n"\ - "mulps %%xmm5, %%xmm2 \n"\ - "mulps %%xmm7, %%xmm3 \n"\ - "mulps %%xmm7, %%xmm4 \n"\ - stereo("addps %%xmm1, %%xmm0 \n")\ - "addps %%xmm1, %%xmm2 \n"\ - "addps %%xmm3, %%xmm0 \n"\ - "addps %%xmm4, %%xmm2 \n"\ - mono("addps %%xmm2, %%xmm0 \n")\ - "movaps %%xmm0, (%0,%1) \n"\ - stereo("movaps %%xmm2, 0x400(%0,%1) \n")\ - "add $16, %0 \n"\ - "jl 1b \n"\ - :"+&r"(i)\ - :"r"(samples[0]+len), "r"(matrix)\ - :"memory"\ - ); - -#define MIX_MISC(stereo)\ - __asm__ volatile(\ - "1: \n"\ - "movaps (%3,%0), %%xmm0 \n"\ - stereo("movaps %%xmm0, %%xmm1 \n")\ - "mulps %%xmm6, %%xmm0 \n"\ - stereo("mulps %%xmm7, %%xmm1 \n")\ - "lea 1024(%3,%0), %1 \n"\ - "mov %5, %2 \n"\ - "2: \n"\ - "movaps (%1), %%xmm2 \n"\ - stereo("movaps %%xmm2, %%xmm3 \n")\ - "mulps (%4,%2), %%xmm2 \n"\ - stereo("mulps 
16(%4,%2), %%xmm3 \n")\ - "addps %%xmm2, %%xmm0 \n"\ - stereo("addps %%xmm3, %%xmm1 \n")\ - "add $1024, %1 \n"\ - "add $32, %2 \n"\ - "jl 2b \n"\ - "movaps %%xmm0, (%3,%0) \n"\ - stereo("movaps %%xmm1, 1024(%3,%0) \n")\ - "add $16, %0 \n"\ - "jl 1b \n"\ - :"+&r"(i), "=&r"(j), "=&r"(k)\ - :"r"(samples[0]+len), "r"(matrix_simd+in_ch), "g"((intptr_t)-32*(in_ch-1))\ - :"memory"\ - ); - -static void ac3_downmix_sse(float (*samples)[256], float (*matrix)[2], int out_ch, int in_ch, int len) +#if HAVE_6REGS +static void vector_fmul_window_3dnowext(float *dst, const float *src0, + const float *src1, const float *win, + int len) { - int (*matrix_cmp)[2] = (int(*)[2])matrix; - intptr_t i,j,k; - - i = -len*sizeof(float); - if(in_ch == 5 && out_ch == 2 && !(matrix_cmp[0][1]|matrix_cmp[2][0]|matrix_cmp[3][1]|matrix_cmp[4][0]|(matrix_cmp[1][0]^matrix_cmp[1][1])|(matrix_cmp[0][0]^matrix_cmp[2][1]))) { - MIX5(IF0,IF1); - } else if(in_ch == 5 && out_ch == 1 && matrix_cmp[0][0]==matrix_cmp[2][0] && matrix_cmp[3][0]==matrix_cmp[4][0]) { - MIX5(IF1,IF0); - } else { - DECLARE_ALIGNED(16, float, matrix_simd)[in_ch][2][4]; - j = 2*in_ch*sizeof(float); - __asm__ volatile( - "1: \n" - "sub $8, %0 \n" - "movss (%2,%0), %%xmm6 \n" - "movss 4(%2,%0), %%xmm7 \n" - "shufps $0, %%xmm6, %%xmm6 \n" - "shufps $0, %%xmm7, %%xmm7 \n" - "movaps %%xmm6, (%1,%0,4) \n" - "movaps %%xmm7, 16(%1,%0,4) \n" - "jg 1b \n" - :"+&r"(j) - :"r"(matrix_simd), "r"(matrix) - :"memory" - ); - if(out_ch == 2) { - MIX_MISC(IF1); - } else { - MIX_MISC(IF0); - } - } -} - -static void vector_fmul_3dnow(float *dst, const float *src, int len){ - x86_reg i = (len-4)*4; - __asm__ volatile( - "1: \n\t" - "movq (%1,%0), %%mm0 \n\t" - "movq 8(%1,%0), %%mm1 \n\t" - "pfmul (%2,%0), %%mm0 \n\t" - "pfmul 8(%2,%0), %%mm1 \n\t" - "movq %%mm0, (%1,%0) \n\t" - "movq %%mm1, 8(%1,%0) \n\t" - "sub $16, %0 \n\t" - "jge 1b \n\t" - "femms \n\t" - :"+r"(i) - :"r"(dst), "r"(src) - :"memory" - ); -} -static void vector_fmul_sse(float *dst, const float *src, int len){ - x86_reg i = (len-8)*4; - __asm__ volatile( - "1: \n\t" - "movaps (%1,%0), %%xmm0 \n\t" - "movaps 16(%1,%0), %%xmm1 \n\t" - "mulps (%2,%0), %%xmm0 \n\t" - "mulps 16(%2,%0), %%xmm1 \n\t" - "movaps %%xmm0, (%1,%0) \n\t" - "movaps %%xmm1, 16(%1,%0) \n\t" - "sub $32, %0 \n\t" - "jge 1b \n\t" - :"+r"(i) - :"r"(dst), "r"(src) - :"memory" + x86_reg i = -len * 4; + x86_reg j = len * 4 - 8; + __asm__ volatile ( + "1: \n" + "pswapd (%5, %1), %%mm1 \n" + "movq (%5, %0), %%mm0 \n" + "pswapd (%4, %1), %%mm5 \n" + "movq (%3, %0), %%mm4 \n" + "movq %%mm0, %%mm2 \n" + "movq %%mm1, %%mm3 \n" + "pfmul %%mm4, %%mm2 \n" // src0[len + i] * win[len + i] + "pfmul %%mm5, %%mm3 \n" // src1[j] * win[len + j] + "pfmul %%mm4, %%mm1 \n" // src0[len + i] * win[len + j] + "pfmul %%mm5, %%mm0 \n" // src1[j] * win[len + i] + "pfadd %%mm3, %%mm2 \n" + "pfsub %%mm0, %%mm1 \n" + "pswapd %%mm2, %%mm2 \n" + "movq %%mm1, (%2, %0) \n" + "movq %%mm2, (%2, %1) \n" + "sub $8, %1 \n" + "add $8, %0 \n" + "jl 1b \n" + "femms \n" + : "+r"(i), "+r"(j) + : "r"(dst + len), "r"(src0 + len), "r"(src1), "r"(win + len) ); } -static void vector_fmul_reverse_3dnow2(float *dst, const float *src0, const float *src1, int len){ - x86_reg i = len*4-16; - __asm__ volatile( - "1: \n\t" - "pswapd 8(%1), %%mm0 \n\t" - "pswapd (%1), %%mm1 \n\t" - "pfmul (%3,%0), %%mm0 \n\t" - "pfmul 8(%3,%0), %%mm1 \n\t" - "movq %%mm0, (%2,%0) \n\t" - "movq %%mm1, 8(%2,%0) \n\t" - "add $16, %1 \n\t" - "sub $16, %0 \n\t" - "jge 1b \n\t" - :"+r"(i), "+r"(src1) - :"r"(dst), "r"(src0) - ); - 
__asm__ volatile("femms"); -} -static void vector_fmul_reverse_sse(float *dst, const float *src0, const float *src1, int len){ - x86_reg i = len*4-32; - __asm__ volatile( - "1: \n\t" - "movaps 16(%1), %%xmm0 \n\t" - "movaps (%1), %%xmm1 \n\t" - "shufps $0x1b, %%xmm0, %%xmm0 \n\t" - "shufps $0x1b, %%xmm1, %%xmm1 \n\t" - "mulps (%3,%0), %%xmm0 \n\t" - "mulps 16(%3,%0), %%xmm1 \n\t" - "movaps %%xmm0, (%2,%0) \n\t" - "movaps %%xmm1, 16(%2,%0) \n\t" - "add $32, %1 \n\t" - "sub $32, %0 \n\t" - "jge 1b \n\t" - :"+r"(i), "+r"(src1) - :"r"(dst), "r"(src0) +static void vector_fmul_window_sse(float *dst, const float *src0, + const float *src1, const float *win, int len) +{ + x86_reg i = -len * 4; + x86_reg j = len * 4 - 16; + __asm__ volatile ( + "1: \n" + "movaps (%5, %1), %%xmm1 \n" + "movaps (%5, %0), %%xmm0 \n" + "movaps (%4, %1), %%xmm5 \n" + "movaps (%3, %0), %%xmm4 \n" + "shufps $0x1b, %%xmm1, %%xmm1 \n" + "shufps $0x1b, %%xmm5, %%xmm5 \n" + "movaps %%xmm0, %%xmm2 \n" + "movaps %%xmm1, %%xmm3 \n" + "mulps %%xmm4, %%xmm2 \n" // src0[len + i] * win[len + i] + "mulps %%xmm5, %%xmm3 \n" // src1[j] * win[len + j] + "mulps %%xmm4, %%xmm1 \n" // src0[len + i] * win[len + j] + "mulps %%xmm5, %%xmm0 \n" // src1[j] * win[len + i] + "addps %%xmm3, %%xmm2 \n" + "subps %%xmm0, %%xmm1 \n" + "shufps $0x1b, %%xmm2, %%xmm2 \n" + "movaps %%xmm1, (%2, %0) \n" + "movaps %%xmm2, (%2, %1) \n" + "sub $16, %1 \n" + "add $16, %0 \n" + "jl 1b \n" + : "+r"(i), "+r"(j) + : "r"(dst + len), "r"(src0 + len), "r"(src1), "r"(win + len) ); } +#endif /* HAVE_6REGS */ -static void vector_fmul_add_3dnow(float *dst, const float *src0, const float *src1, - const float *src2, int len){ - x86_reg i = (len-4)*4; - __asm__ volatile( - "1: \n\t" - "movq (%2,%0), %%mm0 \n\t" - "movq 8(%2,%0), %%mm1 \n\t" - "pfmul (%3,%0), %%mm0 \n\t" - "pfmul 8(%3,%0), %%mm1 \n\t" - "pfadd (%4,%0), %%mm0 \n\t" - "pfadd 8(%4,%0), %%mm1 \n\t" - "movq %%mm0, (%1,%0) \n\t" - "movq %%mm1, 8(%1,%0) \n\t" - "sub $16, %0 \n\t" - "jge 1b \n\t" - :"+r"(i) - :"r"(dst), "r"(src0), "r"(src1), "r"(src2) - :"memory" - ); - __asm__ volatile("femms"); -} -static void vector_fmul_add_sse(float *dst, const float *src0, const float *src1, - const float *src2, int len){ - x86_reg i = (len-8)*4; - __asm__ volatile( - "1: \n\t" - "movaps (%2,%0), %%xmm0 \n\t" - "movaps 16(%2,%0), %%xmm1 \n\t" - "mulps (%3,%0), %%xmm0 \n\t" - "mulps 16(%3,%0), %%xmm1 \n\t" - "addps (%4,%0), %%xmm0 \n\t" - "addps 16(%4,%0), %%xmm1 \n\t" - "movaps %%xmm0, (%1,%0) \n\t" - "movaps %%xmm1, 16(%1,%0) \n\t" - "sub $32, %0 \n\t" - "jge 1b \n\t" - :"+r"(i) - :"r"(dst), "r"(src0), "r"(src1), "r"(src2) - :"memory" +static void vector_clipf_sse(float *dst, const float *src, + float min, float max, int len) +{ + x86_reg i = (len - 16) * 4; + __asm__ volatile ( + "movss %3, %%xmm4 \n\t" + "movss %4, %%xmm5 \n\t" + "shufps $0, %%xmm4, %%xmm4 \n\t" + "shufps $0, %%xmm5, %%xmm5 \n\t" + "1: \n\t" + "movaps (%2, %0), %%xmm0 \n\t" // 3/1 on intel + "movaps 16(%2, %0), %%xmm1 \n\t" + "movaps 32(%2, %0), %%xmm2 \n\t" + "movaps 48(%2, %0), %%xmm3 \n\t" + "maxps %%xmm4, %%xmm0 \n\t" + "maxps %%xmm4, %%xmm1 \n\t" + "maxps %%xmm4, %%xmm2 \n\t" + "maxps %%xmm4, %%xmm3 \n\t" + "minps %%xmm5, %%xmm0 \n\t" + "minps %%xmm5, %%xmm1 \n\t" + "minps %%xmm5, %%xmm2 \n\t" + "minps %%xmm5, %%xmm3 \n\t" + "movaps %%xmm0, (%1, %0) \n\t" + "movaps %%xmm1, 16(%1, %0) \n\t" + "movaps %%xmm2, 32(%1, %0) \n\t" + "movaps %%xmm3, 48(%1, %0) \n\t" + "sub $64, %0 \n\t" + "jge 1b \n\t" + : "+&r"(i) + : "r"(dst), "r"(src), "m"(min), "m"(max) + 
: "memory" ); } -static void vector_fmul_window_3dnow2(float *dst, const float *src0, const float *src1, - const float *win, float add_bias, int len){ -#if HAVE_6REGS - if(add_bias == 0){ - x86_reg i = -len*4; - x86_reg j = len*4-8; - __asm__ volatile( - "1: \n" - "pswapd (%5,%1), %%mm1 \n" - "movq (%5,%0), %%mm0 \n" - "pswapd (%4,%1), %%mm5 \n" - "movq (%3,%0), %%mm4 \n" - "movq %%mm0, %%mm2 \n" - "movq %%mm1, %%mm3 \n" - "pfmul %%mm4, %%mm2 \n" // src0[len+i]*win[len+i] - "pfmul %%mm5, %%mm3 \n" // src1[ j]*win[len+j] - "pfmul %%mm4, %%mm1 \n" // src0[len+i]*win[len+j] - "pfmul %%mm5, %%mm0 \n" // src1[ j]*win[len+i] - "pfadd %%mm3, %%mm2 \n" - "pfsub %%mm0, %%mm1 \n" - "pswapd %%mm2, %%mm2 \n" - "movq %%mm1, (%2,%0) \n" - "movq %%mm2, (%2,%1) \n" - "sub $8, %1 \n" - "add $8, %0 \n" - "jl 1b \n" - "femms \n" - :"+r"(i), "+r"(j) - :"r"(dst+len), "r"(src0+len), "r"(src1), "r"(win+len) - ); - }else -#endif - ff_vector_fmul_window_c(dst, src0, src1, win, add_bias, len); -} +#endif /* HAVE_INLINE_ASM */ + +int32_t ff_scalarproduct_int16_mmx2(const int16_t *v1, const int16_t *v2, + int order); +int32_t ff_scalarproduct_int16_sse2(const int16_t *v1, const int16_t *v2, + int order); +int32_t ff_scalarproduct_and_madd_int16_mmx2(int16_t *v1, const int16_t *v2, + const int16_t *v3, + int order, int mul); +int32_t ff_scalarproduct_and_madd_int16_sse2(int16_t *v1, const int16_t *v2, + const int16_t *v3, + int order, int mul); +int32_t ff_scalarproduct_and_madd_int16_ssse3(int16_t *v1, const int16_t *v2, + const int16_t *v3, + int order, int mul); + +void ff_apply_window_int16_mmxext (int16_t *output, const int16_t *input, + const int16_t *window, unsigned int len); +void ff_apply_window_int16_mmxext_ba (int16_t *output, const int16_t *input, + const int16_t *window, unsigned int len); +void ff_apply_window_int16_sse2 (int16_t *output, const int16_t *input, + const int16_t *window, unsigned int len); +void ff_apply_window_int16_sse2_ba (int16_t *output, const int16_t *input, + const int16_t *window, unsigned int len); +void ff_apply_window_int16_ssse3 (int16_t *output, const int16_t *input, + const int16_t *window, unsigned int len); +void ff_apply_window_int16_ssse3_atom(int16_t *output, const int16_t *input, + const int16_t *window, unsigned int len); + +void ff_bswap32_buf_ssse3(uint32_t *dst, const uint32_t *src, int w); +void ff_bswap32_buf_sse2(uint32_t *dst, const uint32_t *src, int w); + +void ff_add_hfyu_median_prediction_mmx2(uint8_t *dst, const uint8_t *top, + const uint8_t *diff, int w, + int *left, int *left_top); +int ff_add_hfyu_left_prediction_ssse3(uint8_t *dst, const uint8_t *src, + int w, int left); +int ff_add_hfyu_left_prediction_sse4(uint8_t *dst, const uint8_t *src, + int w, int left); -static void vector_fmul_window_sse(float *dst, const float *src0, const float *src1, - const float *win, float add_bias, int len){ -#if HAVE_6REGS - if(add_bias == 0){ - x86_reg i = -len*4; - x86_reg j = len*4-16; - __asm__ volatile( - "1: \n" - "movaps (%5,%1), %%xmm1 \n" - "movaps (%5,%0), %%xmm0 \n" - "movaps (%4,%1), %%xmm5 \n" - "movaps (%3,%0), %%xmm4 \n" - "shufps $0x1b, %%xmm1, %%xmm1 \n" - "shufps $0x1b, %%xmm5, %%xmm5 \n" - "movaps %%xmm0, %%xmm2 \n" - "movaps %%xmm1, %%xmm3 \n" - "mulps %%xmm4, %%xmm2 \n" // src0[len+i]*win[len+i] - "mulps %%xmm5, %%xmm3 \n" // src1[ j]*win[len+j] - "mulps %%xmm4, %%xmm1 \n" // src0[len+i]*win[len+j] - "mulps %%xmm5, %%xmm0 \n" // src1[ j]*win[len+i] - "addps %%xmm3, %%xmm2 \n" - "subps %%xmm0, %%xmm1 \n" - "shufps $0x1b, %%xmm2, %%xmm2 \n" - "movaps 
%%xmm1, (%2,%0) \n" - "movaps %%xmm2, (%2,%1) \n" - "sub $16, %1 \n" - "add $16, %0 \n" - "jl 1b \n" - :"+r"(i), "+r"(j) - :"r"(dst+len), "r"(src0+len), "r"(src1), "r"(win+len) - ); - }else -#endif - ff_vector_fmul_window_c(dst, src0, src1, win, add_bias, len); -} +float ff_scalarproduct_float_sse(const float *v1, const float *v2, int order); -static void int32_to_float_fmul_scalar_sse(float *dst, const int *src, float mul, int len) +void ff_vector_fmul_reverse_sse(float *dst, const float *src0, + const float *src1, int len); +void ff_vector_fmul_reverse_avx(float *dst, const float *src0, + const float *src1, int len); + +void ff_vector_fmul_add_sse(float *dst, const float *src0, const float *src1, + const float *src2, int len); +void ff_vector_fmul_add_avx(float *dst, const float *src0, const float *src1, + const float *src2, int len); + +void ff_vector_clip_int32_mmx (int32_t *dst, const int32_t *src, + int32_t min, int32_t max, unsigned int len); +void ff_vector_clip_int32_sse2 (int32_t *dst, const int32_t *src, + int32_t min, int32_t max, unsigned int len); +void ff_vector_clip_int32_int_sse2(int32_t *dst, const int32_t *src, + int32_t min, int32_t max, unsigned int len); +void ff_vector_clip_int32_sse4 (int32_t *dst, const int32_t *src, + int32_t min, int32_t max, unsigned int len); + +extern void ff_butterflies_float_interleave_sse(float *dst, const float *src0, + const float *src1, int len); +extern void ff_butterflies_float_interleave_avx(float *dst, const float *src0, + const float *src1, int len); + +#define SET_QPEL_FUNCS(PFX, IDX, SIZE, CPU, PREFIX) \ + do { \ + c->PFX ## _pixels_tab[IDX][ 0] = PREFIX ## PFX ## SIZE ## _mc00_ ## CPU; \ + c->PFX ## _pixels_tab[IDX][ 1] = PREFIX ## PFX ## SIZE ## _mc10_ ## CPU; \ + c->PFX ## _pixels_tab[IDX][ 2] = PREFIX ## PFX ## SIZE ## _mc20_ ## CPU; \ + c->PFX ## _pixels_tab[IDX][ 3] = PREFIX ## PFX ## SIZE ## _mc30_ ## CPU; \ + c->PFX ## _pixels_tab[IDX][ 4] = PREFIX ## PFX ## SIZE ## _mc01_ ## CPU; \ + c->PFX ## _pixels_tab[IDX][ 5] = PREFIX ## PFX ## SIZE ## _mc11_ ## CPU; \ + c->PFX ## _pixels_tab[IDX][ 6] = PREFIX ## PFX ## SIZE ## _mc21_ ## CPU; \ + c->PFX ## _pixels_tab[IDX][ 7] = PREFIX ## PFX ## SIZE ## _mc31_ ## CPU; \ + c->PFX ## _pixels_tab[IDX][ 8] = PREFIX ## PFX ## SIZE ## _mc02_ ## CPU; \ + c->PFX ## _pixels_tab[IDX][ 9] = PREFIX ## PFX ## SIZE ## _mc12_ ## CPU; \ + c->PFX ## _pixels_tab[IDX][10] = PREFIX ## PFX ## SIZE ## _mc22_ ## CPU; \ + c->PFX ## _pixels_tab[IDX][11] = PREFIX ## PFX ## SIZE ## _mc32_ ## CPU; \ + c->PFX ## _pixels_tab[IDX][12] = PREFIX ## PFX ## SIZE ## _mc03_ ## CPU; \ + c->PFX ## _pixels_tab[IDX][13] = PREFIX ## PFX ## SIZE ## _mc13_ ## CPU; \ + c->PFX ## _pixels_tab[IDX][14] = PREFIX ## PFX ## SIZE ## _mc23_ ## CPU; \ + c->PFX ## _pixels_tab[IDX][15] = PREFIX ## PFX ## SIZE ## _mc33_ ## CPU; \ + } while (0) + +#define SET_HPEL_FUNCS(PFX, IDX, SIZE, CPU) \ + do { \ + c->PFX ## _pixels_tab[IDX][0] = PFX ## _pixels ## SIZE ## _ ## CPU; \ + c->PFX ## _pixels_tab[IDX][1] = PFX ## _pixels ## SIZE ## _x2_ ## CPU; \ + c->PFX ## _pixels_tab[IDX][2] = PFX ## _pixels ## SIZE ## _y2_ ## CPU; \ + c->PFX ## _pixels_tab[IDX][3] = PFX ## _pixels ## SIZE ## _xy2_ ## CPU; \ + } while (0) + +#define H264_QPEL_FUNCS(x, y, CPU) \ + do { \ + c->put_h264_qpel_pixels_tab[0][x + y * 4] = put_h264_qpel16_mc ## x ## y ## _ ## CPU; \ + c->put_h264_qpel_pixels_tab[1][x + y * 4] = put_h264_qpel8_mc ## x ## y ## _ ## CPU; \ + c->avg_h264_qpel_pixels_tab[0][x + y * 4] = avg_h264_qpel16_mc ## x ## y ## _ ## CPU; \ + 
c->avg_h264_qpel_pixels_tab[1][x + y * 4] = avg_h264_qpel8_mc ## x ## y ## _ ## CPU; \ + } while (0) + +#define H264_QPEL_FUNCS_10(x, y, CPU) \ + do { \ + c->put_h264_qpel_pixels_tab[0][x + y * 4] = ff_put_h264_qpel16_mc ## x ## y ## _10_ ## CPU; \ + c->put_h264_qpel_pixels_tab[1][x + y * 4] = ff_put_h264_qpel8_mc ## x ## y ## _10_ ## CPU; \ + c->avg_h264_qpel_pixels_tab[0][x + y * 4] = ff_avg_h264_qpel16_mc ## x ## y ## _10_ ## CPU; \ + c->avg_h264_qpel_pixels_tab[1][x + y * 4] = ff_avg_h264_qpel8_mc ## x ## y ## _10_ ## CPU; \ + } while (0) + +static void dsputil_init_mmx(DSPContext *c, AVCodecContext *avctx, int mm_flags) { - x86_reg i = -4*len; - __asm__ volatile( - "movss %3, %%xmm4 \n" - "shufps $0, %%xmm4, %%xmm4 \n" - "1: \n" - "cvtpi2ps (%2,%0), %%xmm0 \n" - "cvtpi2ps 8(%2,%0), %%xmm1 \n" - "cvtpi2ps 16(%2,%0), %%xmm2 \n" - "cvtpi2ps 24(%2,%0), %%xmm3 \n" - "movlhps %%xmm1, %%xmm0 \n" - "movlhps %%xmm3, %%xmm2 \n" - "mulps %%xmm4, %%xmm0 \n" - "mulps %%xmm4, %%xmm2 \n" - "movaps %%xmm0, (%1,%0) \n" - "movaps %%xmm2, 16(%1,%0) \n" - "add $32, %0 \n" - "jl 1b \n" - :"+r"(i) - :"r"(dst+len), "r"(src+len), "m"(mul) - ); -} + const int high_bit_depth = avctx->bits_per_raw_sample > 8; -static void int32_to_float_fmul_scalar_sse2(float *dst, const int *src, float mul, int len) -{ - x86_reg i = -4*len; - __asm__ volatile( - "movss %3, %%xmm4 \n" - "shufps $0, %%xmm4, %%xmm4 \n" - "1: \n" - "cvtdq2ps (%2,%0), %%xmm0 \n" - "cvtdq2ps 16(%2,%0), %%xmm1 \n" - "mulps %%xmm4, %%xmm0 \n" - "mulps %%xmm4, %%xmm1 \n" - "movaps %%xmm0, (%1,%0) \n" - "movaps %%xmm1, 16(%1,%0) \n" - "add $32, %0 \n" - "jl 1b \n" - :"+r"(i) - :"r"(dst+len), "r"(src+len), "m"(mul) - ); -} +#if HAVE_INLINE_ASM + c->put_pixels_clamped = ff_put_pixels_clamped_mmx; + c->put_signed_pixels_clamped = ff_put_signed_pixels_clamped_mmx; + c->add_pixels_clamped = ff_add_pixels_clamped_mmx; -static void vector_clipf_sse(float *dst, const float *src, float min, float max, - int len) -{ - x86_reg i = (len-16)*4; - __asm__ volatile( - "movss %3, %%xmm4 \n" - "movss %4, %%xmm5 \n" - "shufps $0, %%xmm4, %%xmm4 \n" - "shufps $0, %%xmm5, %%xmm5 \n" - "1: \n\t" - "movaps (%2,%0), %%xmm0 \n\t" // 3/1 on intel - "movaps 16(%2,%0), %%xmm1 \n\t" - "movaps 32(%2,%0), %%xmm2 \n\t" - "movaps 48(%2,%0), %%xmm3 \n\t" - "maxps %%xmm4, %%xmm0 \n\t" - "maxps %%xmm4, %%xmm1 \n\t" - "maxps %%xmm4, %%xmm2 \n\t" - "maxps %%xmm4, %%xmm3 \n\t" - "minps %%xmm5, %%xmm0 \n\t" - "minps %%xmm5, %%xmm1 \n\t" - "minps %%xmm5, %%xmm2 \n\t" - "minps %%xmm5, %%xmm3 \n\t" - "movaps %%xmm0, (%1,%0) \n\t" - "movaps %%xmm1, 16(%1,%0) \n\t" - "movaps %%xmm2, 32(%1,%0) \n\t" - "movaps %%xmm3, 48(%1,%0) \n\t" - "sub $64, %0 \n\t" - "jge 1b \n\t" - :"+&r"(i) - :"r"(dst), "r"(src), "m"(min), "m"(max) - :"memory" - ); -} + if (!high_bit_depth) { + c->clear_block = clear_block_mmx; + c->clear_blocks = clear_blocks_mmx; + c->draw_edges = draw_edges_mmx; -static void float_to_int16_3dnow(int16_t *dst, const float *src, long len){ - x86_reg reglen = len; - // not bit-exact: pf2id uses different rounding than C and SSE - __asm__ volatile( - "add %0 , %0 \n\t" - "lea (%2,%0,2) , %2 \n\t" - "add %0 , %1 \n\t" - "neg %0 \n\t" - "1: \n\t" - "pf2id (%2,%0,2) , %%mm0 \n\t" - "pf2id 8(%2,%0,2) , %%mm1 \n\t" - "pf2id 16(%2,%0,2) , %%mm2 \n\t" - "pf2id 24(%2,%0,2) , %%mm3 \n\t" - "packssdw %%mm1 , %%mm0 \n\t" - "packssdw %%mm3 , %%mm2 \n\t" - "movq %%mm0 , (%1,%0) \n\t" - "movq %%mm2 , 8(%1,%0) \n\t" - "add $16 , %0 \n\t" - " js 1b \n\t" - "femms \n\t" - :"+r"(reglen), "+r"(dst), "+r"(src) 
- ); -} -static void float_to_int16_sse(int16_t *dst, const float *src, long len){ - x86_reg reglen = len; - __asm__ volatile( - "add %0 , %0 \n\t" - "lea (%2,%0,2) , %2 \n\t" - "add %0 , %1 \n\t" - "neg %0 \n\t" - "1: \n\t" - "cvtps2pi (%2,%0,2) , %%mm0 \n\t" - "cvtps2pi 8(%2,%0,2) , %%mm1 \n\t" - "cvtps2pi 16(%2,%0,2) , %%mm2 \n\t" - "cvtps2pi 24(%2,%0,2) , %%mm3 \n\t" - "packssdw %%mm1 , %%mm0 \n\t" - "packssdw %%mm3 , %%mm2 \n\t" - "movq %%mm0 , (%1,%0) \n\t" - "movq %%mm2 , 8(%1,%0) \n\t" - "add $16 , %0 \n\t" - " js 1b \n\t" - "emms \n\t" - :"+r"(reglen), "+r"(dst), "+r"(src) - ); -} + SET_HPEL_FUNCS(put, 0, 16, mmx); + SET_HPEL_FUNCS(put_no_rnd, 0, 16, mmx); + SET_HPEL_FUNCS(avg, 0, 16, mmx); + SET_HPEL_FUNCS(avg_no_rnd, 0, 16, mmx); + SET_HPEL_FUNCS(put, 1, 8, mmx); + SET_HPEL_FUNCS(put_no_rnd, 1, 8, mmx); + SET_HPEL_FUNCS(avg, 1, 8, mmx); + SET_HPEL_FUNCS(avg_no_rnd, 1, 8, mmx); + + switch (avctx->idct_algo) { + case FF_IDCT_AUTO: + case FF_IDCT_SIMPLEMMX: + c->idct_put = ff_simple_idct_put_mmx; + c->idct_add = ff_simple_idct_add_mmx; + c->idct = ff_simple_idct_mmx; + c->idct_permutation_type = FF_SIMPLE_IDCT_PERM; + break; + case FF_IDCT_XVIDMMX: + c->idct_put = ff_idct_xvid_mmx_put; + c->idct_add = ff_idct_xvid_mmx_add; + c->idct = ff_idct_xvid_mmx; + break; + } + } -static void float_to_int16_sse2(int16_t *dst, const float *src, long len){ - x86_reg reglen = len; - __asm__ volatile( - "add %0 , %0 \n\t" - "lea (%2,%0,2) , %2 \n\t" - "add %0 , %1 \n\t" - "neg %0 \n\t" - "1: \n\t" - "cvtps2dq (%2,%0,2) , %%xmm0 \n\t" - "cvtps2dq 16(%2,%0,2) , %%xmm1 \n\t" - "packssdw %%xmm1 , %%xmm0 \n\t" - "movdqa %%xmm0 , (%1,%0) \n\t" - "add $16 , %0 \n\t" - " js 1b \n\t" - :"+r"(reglen), "+r"(dst), "+r"(src) - ); -} + c->gmc = gmc_mmx; -void ff_vp3_idct_mmx(int16_t *input_data); -void ff_vp3_idct_put_mmx(uint8_t *dest, int line_size, DCTELEM *block); -void ff_vp3_idct_add_mmx(uint8_t *dest, int line_size, DCTELEM *block); - -void ff_vp3_idct_dc_add_mmx2(uint8_t *dest, int line_size, const DCTELEM *block); - -void ff_vp3_v_loop_filter_mmx2(uint8_t *src, int stride, int *bounding_values); -void ff_vp3_h_loop_filter_mmx2(uint8_t *src, int stride, int *bounding_values); - -void ff_vp3_idct_sse2(int16_t *input_data); -void ff_vp3_idct_put_sse2(uint8_t *dest, int line_size, DCTELEM *block); -void ff_vp3_idct_add_sse2(uint8_t *dest, int line_size, DCTELEM *block); - -void ff_float_to_int16_interleave6_sse(int16_t *dst, const float **src, int len); -void ff_float_to_int16_interleave6_3dnow(int16_t *dst, const float **src, int len); -void ff_float_to_int16_interleave6_3dn2(int16_t *dst, const float **src, int len); -int32_t ff_scalarproduct_int16_mmx2(const int16_t *v1, const int16_t *v2, int order, int shift); -int32_t ff_scalarproduct_int16_sse2(const int16_t *v1, const int16_t *v2, int order, int shift); -int32_t ff_scalarproduct_and_madd_int16_mmx2(int16_t *v1, const int16_t *v2, const int16_t *v3, int order, int mul); -int32_t ff_scalarproduct_and_madd_int16_sse2(int16_t *v1, const int16_t *v2, const int16_t *v3, int order, int mul); -int32_t ff_scalarproduct_and_madd_int16_ssse3(int16_t *v1, const int16_t *v2, const int16_t *v3, int order, int mul); -void ff_add_hfyu_median_prediction_mmx2(uint8_t *dst, const uint8_t *top, const uint8_t *diff, int w, int *left, int *left_top); -int ff_add_hfyu_left_prediction_ssse3(uint8_t *dst, const uint8_t *src, int w, int left); -int ff_add_hfyu_left_prediction_sse4(uint8_t *dst, const uint8_t *src, int w, int left); - -#if !HAVE_YASM -#define 
ff_float_to_int16_interleave6_sse(a,b,c) float_to_int16_interleave_misc_sse(a,b,c,6) -#define ff_float_to_int16_interleave6_3dnow(a,b,c) float_to_int16_interleave_misc_3dnow(a,b,c,6) -#define ff_float_to_int16_interleave6_3dn2(a,b,c) float_to_int16_interleave_misc_3dnow(a,b,c,6) -#endif -#define ff_float_to_int16_interleave6_sse2 ff_float_to_int16_interleave6_sse - -#define FLOAT_TO_INT16_INTERLEAVE(cpu, body) \ -/* gcc pessimizes register allocation if this is in the same function as float_to_int16_interleave_sse2*/\ -static av_noinline void float_to_int16_interleave_misc_##cpu(int16_t *dst, const float **src, long len, int channels){\ - DECLARE_ALIGNED(16, int16_t, tmp)[len];\ - int i,j,c;\ - for(c=0; cadd_bytes = add_bytes_mmx; -FLOAT_TO_INT16_INTERLEAVE(3dnow, - "1: \n" - "pf2id (%2,%0), %%mm0 \n" - "pf2id 8(%2,%0), %%mm1 \n" - "pf2id (%3,%0), %%mm2 \n" - "pf2id 8(%3,%0), %%mm3 \n" - "packssdw %%mm1, %%mm0 \n" - "packssdw %%mm3, %%mm2 \n" - "movq %%mm0, %%mm1 \n" - "punpcklwd %%mm2, %%mm0 \n" - "punpckhwd %%mm2, %%mm1 \n" - "movq %%mm0, (%1,%0)\n" - "movq %%mm1, 8(%1,%0)\n" - "add $16, %0 \n" - "js 1b \n" - "femms \n" -) - -FLOAT_TO_INT16_INTERLEAVE(sse, - "1: \n" - "cvtps2pi (%2,%0), %%mm0 \n" - "cvtps2pi 8(%2,%0), %%mm1 \n" - "cvtps2pi (%3,%0), %%mm2 \n" - "cvtps2pi 8(%3,%0), %%mm3 \n" - "packssdw %%mm1, %%mm0 \n" - "packssdw %%mm3, %%mm2 \n" - "movq %%mm0, %%mm1 \n" - "punpcklwd %%mm2, %%mm0 \n" - "punpckhwd %%mm2, %%mm1 \n" - "movq %%mm0, (%1,%0)\n" - "movq %%mm1, 8(%1,%0)\n" - "add $16, %0 \n" - "js 1b \n" - "emms \n" -) - -FLOAT_TO_INT16_INTERLEAVE(sse2, - "1: \n" - "cvtps2dq (%2,%0), %%xmm0 \n" - "cvtps2dq (%3,%0), %%xmm1 \n" - "packssdw %%xmm1, %%xmm0 \n" - "movhlps %%xmm0, %%xmm1 \n" - "punpcklwd %%xmm1, %%xmm0 \n" - "movdqa %%xmm0, (%1,%0) \n" - "add $16, %0 \n" - "js 1b \n" -) - -static void float_to_int16_interleave_3dn2(int16_t *dst, const float **src, long len, int channels){ - if(channels==6) - ff_float_to_int16_interleave6_3dn2(dst, src, len); - else - float_to_int16_interleave_3dnow(dst, src, len, channels); -} - -float ff_scalarproduct_float_sse(const float *v1, const float *v2, int order); + if (CONFIG_H263_DECODER || CONFIG_H263_ENCODER) { + c->h263_v_loop_filter = h263_v_loop_filter_mmx; + c->h263_h_loop_filter = h263_h_loop_filter_mmx; + } +#endif /* HAVE_INLINE_ASM */ -void dsputil_init_mmx(DSPContext* c, AVCodecContext *avctx) -{ - int mm_flags = mm_support(); +#if HAVE_YASM +#if ARCH_X86_32 + if (!high_bit_depth) + c->emulated_edge_mc = emulated_edge_mc_mmx; +#endif - if (avctx->dsp_mask) { - if (avctx->dsp_mask & FF_MM_FORCE) - mm_flags |= (avctx->dsp_mask & 0xffff); - else - mm_flags &= ~(avctx->dsp_mask & 0xffff); + if (!high_bit_depth && CONFIG_H264CHROMA) { + c->put_h264_chroma_pixels_tab[0] = ff_put_h264_chroma_mc8_mmx_rnd; + c->put_h264_chroma_pixels_tab[1] = ff_put_h264_chroma_mc4_mmx; } -#if 0 - av_log(avctx, AV_LOG_INFO, "libavcodec: CPU flags:"); - if (mm_flags & FF_MM_MMX) - av_log(avctx, AV_LOG_INFO, " mmx"); - if (mm_flags & FF_MM_MMX2) - av_log(avctx, AV_LOG_INFO, " mmx2"); - if (mm_flags & FF_MM_3DNOW) - av_log(avctx, AV_LOG_INFO, " 3dnow"); - if (mm_flags & FF_MM_SSE) - av_log(avctx, AV_LOG_INFO, " sse"); - if (mm_flags & FF_MM_SSE2) - av_log(avctx, AV_LOG_INFO, " sse2"); - av_log(avctx, AV_LOG_INFO, "\n"); + c->vector_clip_int32 = ff_vector_clip_int32_mmx; #endif - if (mm_flags & FF_MM_MMX) { - const int idct_algo= avctx->idct_algo; - - if(avctx->lowres==0){ - if(idct_algo==FF_IDCT_AUTO || idct_algo==FF_IDCT_SIMPLEMMX){ - c->idct_put= 
ff_simple_idct_put_mmx; - c->idct_add= ff_simple_idct_add_mmx; - c->idct = ff_simple_idct_mmx; - c->idct_permutation_type= FF_SIMPLE_IDCT_PERM; -#if CONFIG_GPL - }else if(idct_algo==FF_IDCT_LIBMPEG2MMX){ - if(mm_flags & FF_MM_MMX2){ - c->idct_put= ff_libmpeg2mmx2_idct_put; - c->idct_add= ff_libmpeg2mmx2_idct_add; - c->idct = ff_mmxext_idct; - }else{ - c->idct_put= ff_libmpeg2mmx_idct_put; - c->idct_add= ff_libmpeg2mmx_idct_add; - c->idct = ff_mmx_idct; - } - c->idct_permutation_type= FF_LIBMPEG2_IDCT_PERM; -#endif - }else if((CONFIG_VP3_DECODER || CONFIG_VP5_DECODER || CONFIG_VP6_DECODER) && - idct_algo==FF_IDCT_VP3 && HAVE_YASM){ - if(mm_flags & FF_MM_SSE2){ - c->idct_put= ff_vp3_idct_put_sse2; - c->idct_add= ff_vp3_idct_add_sse2; - c->idct = ff_vp3_idct_sse2; - c->idct_permutation_type= FF_TRANSPOSE_IDCT_PERM; - }else{ - c->idct_put= ff_vp3_idct_put_mmx; - c->idct_add= ff_vp3_idct_add_mmx; - c->idct = ff_vp3_idct_mmx; - c->idct_permutation_type= FF_PARTTRANS_IDCT_PERM; - } - }else if(idct_algo==FF_IDCT_CAVS){ - c->idct_permutation_type= FF_TRANSPOSE_IDCT_PERM; - }else if(idct_algo==FF_IDCT_XVIDMMX){ - if(mm_flags & FF_MM_SSE2){ - c->idct_put= ff_idct_xvid_sse2_put; - c->idct_add= ff_idct_xvid_sse2_add; - c->idct = ff_idct_xvid_sse2; - c->idct_permutation_type= FF_SSE2_IDCT_PERM; - }else if(mm_flags & FF_MM_MMX2){ - c->idct_put= ff_idct_xvid_mmx2_put; - c->idct_add= ff_idct_xvid_mmx2_add; - c->idct = ff_idct_xvid_mmx2; - }else{ - c->idct_put= ff_idct_xvid_mmx_put; - c->idct_add= ff_idct_xvid_mmx_add; - c->idct = ff_idct_xvid_mmx; - } - } - } +} - c->put_pixels_clamped = ff_put_pixels_clamped_mmx; - c->put_signed_pixels_clamped = ff_put_signed_pixels_clamped_mmx; - c->add_pixels_clamped = ff_add_pixels_clamped_mmx; - c->clear_block = clear_block_mmx; - c->clear_blocks = clear_blocks_mmx; - if ((mm_flags & FF_MM_SSE) && - !(CONFIG_MPEG_XVMC_DECODER && avctx->xvmc_acceleration > 1)){ - /* XvMCCreateBlocks() may not allocate 16-byte aligned blocks */ - c->clear_block = clear_block_sse; - c->clear_blocks = clear_blocks_sse; - } +static void dsputil_init_mmxext(DSPContext *c, AVCodecContext *avctx, + int mm_flags) +{ + const int bit_depth = avctx->bits_per_raw_sample; + const int high_bit_depth = bit_depth > 8; -#define SET_HPEL_FUNCS(PFX, IDX, SIZE, CPU) \ - c->PFX ## _pixels_tab[IDX][0] = PFX ## _pixels ## SIZE ## _ ## CPU; \ - c->PFX ## _pixels_tab[IDX][1] = PFX ## _pixels ## SIZE ## _x2_ ## CPU; \ - c->PFX ## _pixels_tab[IDX][2] = PFX ## _pixels ## SIZE ## _y2_ ## CPU; \ - c->PFX ## _pixels_tab[IDX][3] = PFX ## _pixels ## SIZE ## _xy2_ ## CPU +#if HAVE_INLINE_ASM + c->prefetch = prefetch_mmxext; - SET_HPEL_FUNCS(put, 0, 16, mmx); - SET_HPEL_FUNCS(put_no_rnd, 0, 16, mmx); - SET_HPEL_FUNCS(avg, 0, 16, mmx); - SET_HPEL_FUNCS(avg_no_rnd, 0, 16, mmx); - SET_HPEL_FUNCS(put, 1, 8, mmx); - SET_HPEL_FUNCS(put_no_rnd, 1, 8, mmx); - SET_HPEL_FUNCS(avg, 1, 8, mmx); - SET_HPEL_FUNCS(avg_no_rnd, 1, 8, mmx); + if (!high_bit_depth) { + c->put_pixels_tab[0][1] = put_pixels16_x2_mmxext; + c->put_pixels_tab[0][2] = put_pixels16_y2_mmxext; + + c->avg_pixels_tab[0][0] = avg_pixels16_mmxext; + c->avg_pixels_tab[0][1] = avg_pixels16_x2_mmxext; + c->avg_pixels_tab[0][2] = avg_pixels16_y2_mmxext; - c->gmc= gmc_mmx; + c->put_pixels_tab[1][1] = put_pixels8_x2_mmxext; + c->put_pixels_tab[1][2] = put_pixels8_y2_mmxext; - c->add_bytes= add_bytes_mmx; - c->add_bytes_l2= add_bytes_l2_mmx; + c->avg_pixels_tab[1][0] = avg_pixels8_mmxext; + c->avg_pixels_tab[1][1] = avg_pixels8_x2_mmxext; + c->avg_pixels_tab[1][2] = 
avg_pixels8_y2_mmxext; + } - c->draw_edges = draw_edges_mmx; + if (!(avctx->flags & CODEC_FLAG_BITEXACT)) { + if (!high_bit_depth) { + c->put_no_rnd_pixels_tab[0][1] = put_no_rnd_pixels16_x2_mmxext; + c->put_no_rnd_pixels_tab[0][2] = put_no_rnd_pixels16_y2_mmxext; + c->put_no_rnd_pixels_tab[1][1] = put_no_rnd_pixels8_x2_mmxext; + c->put_no_rnd_pixels_tab[1][2] = put_no_rnd_pixels8_y2_mmxext; - if (CONFIG_H263_DECODER || CONFIG_H263_ENCODER) { - c->h263_v_loop_filter= h263_v_loop_filter_mmx; - c->h263_h_loop_filter= h263_h_loop_filter_mmx; + c->avg_pixels_tab[0][3] = avg_pixels16_xy2_mmxext; + c->avg_pixels_tab[1][3] = avg_pixels8_xy2_mmxext; } + } -#if HAVE_YASM - c->put_h264_chroma_pixels_tab[0]= ff_put_h264_chroma_mc8_mmx_rnd; - c->put_h264_chroma_pixels_tab[1]= ff_put_h264_chroma_mc4_mmx; - c->put_no_rnd_vc1_chroma_pixels_tab[0]= ff_put_vc1_chroma_mc8_mmx_nornd; + if (!high_bit_depth && avctx->idct_algo == FF_IDCT_XVIDMMX) { + c->idct_put = ff_idct_xvid_mmxext_put; + c->idct_add = ff_idct_xvid_mmxext_add; + c->idct = ff_idct_xvid_mmxext; + } - c->put_rv40_chroma_pixels_tab[0]= ff_put_rv40_chroma_mc8_mmx; - c->put_rv40_chroma_pixels_tab[1]= ff_put_rv40_chroma_mc4_mmx; + if (CONFIG_VP3_DECODER && (avctx->codec_id == AV_CODEC_ID_VP3 || + avctx->codec_id == AV_CODEC_ID_THEORA)) { + c->put_no_rnd_pixels_tab[1][1] = put_no_rnd_pixels8_x2_exact_mmxext; + c->put_no_rnd_pixels_tab[1][2] = put_no_rnd_pixels8_y2_exact_mmxext; + } +#endif /* HAVE_INLINE_ASM */ + + if (CONFIG_H264QPEL) { +#if HAVE_INLINE_ASM + SET_QPEL_FUNCS(put_qpel, 0, 16, mmxext, ); + SET_QPEL_FUNCS(put_qpel, 1, 8, mmxext, ); + SET_QPEL_FUNCS(put_no_rnd_qpel, 0, 16, mmxext, ); + SET_QPEL_FUNCS(put_no_rnd_qpel, 1, 8, mmxext, ); + SET_QPEL_FUNCS(avg_qpel, 0, 16, mmxext, ); + SET_QPEL_FUNCS(avg_qpel, 1, 8, mmxext, ); +#endif /* HAVE_INLINE_ASM */ + + if (!high_bit_depth) { +#if HAVE_INLINE_ASM + SET_QPEL_FUNCS(put_h264_qpel, 0, 16, mmxext, ); + SET_QPEL_FUNCS(put_h264_qpel, 1, 8, mmxext, ); + SET_QPEL_FUNCS(put_h264_qpel, 2, 4, mmxext, ); + SET_QPEL_FUNCS(avg_h264_qpel, 0, 16, mmxext, ); + SET_QPEL_FUNCS(avg_h264_qpel, 1, 8, mmxext, ); + SET_QPEL_FUNCS(avg_h264_qpel, 2, 4, mmxext, ); +#endif /* HAVE_INLINE_ASM */ + } else if (bit_depth == 10) { +#if HAVE_YASM +#if !ARCH_X86_64 + SET_QPEL_FUNCS(avg_h264_qpel, 0, 16, 10_mmxext, ff_); + SET_QPEL_FUNCS(put_h264_qpel, 0, 16, 10_mmxext, ff_); + SET_QPEL_FUNCS(put_h264_qpel, 1, 8, 10_mmxext, ff_); + SET_QPEL_FUNCS(avg_h264_qpel, 1, 8, 10_mmxext, ff_); #endif + SET_QPEL_FUNCS(put_h264_qpel, 2, 4, 10_mmxext, ff_); + SET_QPEL_FUNCS(avg_h264_qpel, 2, 4, 10_mmxext, ff_); +#endif /* HAVE_YASM */ + } - if (mm_flags & FF_MM_MMX2) { - c->prefetch = prefetch_mmx2; - - c->put_pixels_tab[0][1] = put_pixels16_x2_mmx2; - c->put_pixels_tab[0][2] = put_pixels16_y2_mmx2; - - c->avg_pixels_tab[0][0] = avg_pixels16_mmx2; - c->avg_pixels_tab[0][1] = avg_pixels16_x2_mmx2; - c->avg_pixels_tab[0][2] = avg_pixels16_y2_mmx2; - - c->put_pixels_tab[1][1] = put_pixels8_x2_mmx2; - c->put_pixels_tab[1][2] = put_pixels8_y2_mmx2; - - c->avg_pixels_tab[1][0] = avg_pixels8_mmx2; - c->avg_pixels_tab[1][1] = avg_pixels8_x2_mmx2; - c->avg_pixels_tab[1][2] = avg_pixels8_y2_mmx2; - - if(!(avctx->flags & CODEC_FLAG_BITEXACT)){ - c->put_no_rnd_pixels_tab[0][1] = put_no_rnd_pixels16_x2_mmx2; - c->put_no_rnd_pixels_tab[0][2] = put_no_rnd_pixels16_y2_mmx2; - c->put_no_rnd_pixels_tab[1][1] = put_no_rnd_pixels8_x2_mmx2; - c->put_no_rnd_pixels_tab[1][2] = put_no_rnd_pixels8_y2_mmx2; - c->avg_pixels_tab[0][3] = avg_pixels16_xy2_mmx2; - 
c->avg_pixels_tab[1][3] = avg_pixels8_xy2_mmx2; - - if (CONFIG_VP3_DECODER && HAVE_YASM) { - c->vp3_v_loop_filter= ff_vp3_v_loop_filter_mmx2; - c->vp3_h_loop_filter= ff_vp3_h_loop_filter_mmx2; - } - } - if (CONFIG_VP3_DECODER && HAVE_YASM) { - c->vp3_idct_dc_add = ff_vp3_idct_dc_add_mmx2; - } - - if (CONFIG_VP3_DECODER - && (avctx->codec_id == CODEC_ID_VP3 || avctx->codec_id == CODEC_ID_THEORA)) { - c->put_no_rnd_pixels_tab[1][1] = put_no_rnd_pixels8_x2_exact_mmx2; - c->put_no_rnd_pixels_tab[1][2] = put_no_rnd_pixels8_y2_exact_mmx2; - } - -#define SET_QPEL_FUNCS(PFX, IDX, SIZE, CPU) \ - c->PFX ## _pixels_tab[IDX][ 0] = PFX ## SIZE ## _mc00_ ## CPU; \ - c->PFX ## _pixels_tab[IDX][ 1] = PFX ## SIZE ## _mc10_ ## CPU; \ - c->PFX ## _pixels_tab[IDX][ 2] = PFX ## SIZE ## _mc20_ ## CPU; \ - c->PFX ## _pixels_tab[IDX][ 3] = PFX ## SIZE ## _mc30_ ## CPU; \ - c->PFX ## _pixels_tab[IDX][ 4] = PFX ## SIZE ## _mc01_ ## CPU; \ - c->PFX ## _pixels_tab[IDX][ 5] = PFX ## SIZE ## _mc11_ ## CPU; \ - c->PFX ## _pixels_tab[IDX][ 6] = PFX ## SIZE ## _mc21_ ## CPU; \ - c->PFX ## _pixels_tab[IDX][ 7] = PFX ## SIZE ## _mc31_ ## CPU; \ - c->PFX ## _pixels_tab[IDX][ 8] = PFX ## SIZE ## _mc02_ ## CPU; \ - c->PFX ## _pixels_tab[IDX][ 9] = PFX ## SIZE ## _mc12_ ## CPU; \ - c->PFX ## _pixels_tab[IDX][10] = PFX ## SIZE ## _mc22_ ## CPU; \ - c->PFX ## _pixels_tab[IDX][11] = PFX ## SIZE ## _mc32_ ## CPU; \ - c->PFX ## _pixels_tab[IDX][12] = PFX ## SIZE ## _mc03_ ## CPU; \ - c->PFX ## _pixels_tab[IDX][13] = PFX ## SIZE ## _mc13_ ## CPU; \ - c->PFX ## _pixels_tab[IDX][14] = PFX ## SIZE ## _mc23_ ## CPU; \ - c->PFX ## _pixels_tab[IDX][15] = PFX ## SIZE ## _mc33_ ## CPU - - SET_QPEL_FUNCS(put_qpel, 0, 16, mmx2); - SET_QPEL_FUNCS(put_qpel, 1, 8, mmx2); - SET_QPEL_FUNCS(put_no_rnd_qpel, 0, 16, mmx2); - SET_QPEL_FUNCS(put_no_rnd_qpel, 1, 8, mmx2); - SET_QPEL_FUNCS(avg_qpel, 0, 16, mmx2); - SET_QPEL_FUNCS(avg_qpel, 1, 8, mmx2); - - SET_QPEL_FUNCS(put_h264_qpel, 0, 16, mmx2); - SET_QPEL_FUNCS(put_h264_qpel, 1, 8, mmx2); - SET_QPEL_FUNCS(put_h264_qpel, 2, 4, mmx2); - SET_QPEL_FUNCS(avg_h264_qpel, 0, 16, mmx2); - SET_QPEL_FUNCS(avg_h264_qpel, 1, 8, mmx2); - SET_QPEL_FUNCS(avg_h264_qpel, 2, 4, mmx2); - - SET_QPEL_FUNCS(put_2tap_qpel, 0, 16, mmx2); - SET_QPEL_FUNCS(put_2tap_qpel, 1, 8, mmx2); - SET_QPEL_FUNCS(avg_2tap_qpel, 0, 16, mmx2); - SET_QPEL_FUNCS(avg_2tap_qpel, 1, 8, mmx2); +#if HAVE_INLINE_ASM + SET_QPEL_FUNCS(put_2tap_qpel, 0, 16, mmxext, ); + SET_QPEL_FUNCS(put_2tap_qpel, 1, 8, mmxext, ); + SET_QPEL_FUNCS(avg_2tap_qpel, 0, 16, mmxext, ); + SET_QPEL_FUNCS(avg_2tap_qpel, 1, 8, mmxext, ); +#endif /* HAVE_INLINE_ASM */ + } #if HAVE_YASM - c->avg_rv40_chroma_pixels_tab[0]= ff_avg_rv40_chroma_mc8_mmx2; - c->avg_rv40_chroma_pixels_tab[1]= ff_avg_rv40_chroma_mc4_mmx2; + if (!high_bit_depth && CONFIG_H264CHROMA) { + c->avg_h264_chroma_pixels_tab[0] = ff_avg_h264_chroma_mc8_mmx2_rnd; + c->avg_h264_chroma_pixels_tab[1] = ff_avg_h264_chroma_mc4_mmx2; + c->avg_h264_chroma_pixels_tab[2] = ff_avg_h264_chroma_mc2_mmx2; + c->put_h264_chroma_pixels_tab[2] = ff_put_h264_chroma_mc2_mmx2; + } + if (bit_depth == 10 && CONFIG_H264CHROMA) { + c->put_h264_chroma_pixels_tab[2] = ff_put_h264_chroma_mc2_10_mmx2; + c->avg_h264_chroma_pixels_tab[2] = ff_avg_h264_chroma_mc2_10_mmx2; + c->put_h264_chroma_pixels_tab[1] = ff_put_h264_chroma_mc4_10_mmx2; + c->avg_h264_chroma_pixels_tab[1] = ff_avg_h264_chroma_mc4_10_mmx2; + } - c->avg_no_rnd_vc1_chroma_pixels_tab[0]= ff_avg_vc1_chroma_mc8_mmx2_nornd; + /* slower than cmov version on AMD */ + if (!(mm_flags & 
AV_CPU_FLAG_3DNOW)) + c->add_hfyu_median_prediction = ff_add_hfyu_median_prediction_mmx2; - c->avg_h264_chroma_pixels_tab[0]= ff_avg_h264_chroma_mc8_mmx2_rnd; - c->avg_h264_chroma_pixels_tab[1]= ff_avg_h264_chroma_mc4_mmx2; - c->avg_h264_chroma_pixels_tab[2]= ff_avg_h264_chroma_mc2_mmx2; - c->put_h264_chroma_pixels_tab[2]= ff_put_h264_chroma_mc2_mmx2; + c->scalarproduct_int16 = ff_scalarproduct_int16_mmx2; + c->scalarproduct_and_madd_int16 = ff_scalarproduct_and_madd_int16_mmx2; - c->add_hfyu_median_prediction = ff_add_hfyu_median_prediction_mmx2; -#endif -#if HAVE_7REGS && HAVE_TEN_OPERANDS - if( mm_flags&FF_MM_3DNOW ) - c->add_hfyu_median_prediction = add_hfyu_median_prediction_cmov; -#endif + if (avctx->flags & CODEC_FLAG_BITEXACT) { + c->apply_window_int16 = ff_apply_window_int16_mmxext_ba; + } else { + c->apply_window_int16 = ff_apply_window_int16_mmxext; + } +#endif /* HAVE_YASM */ +} - if (CONFIG_VC1_DECODER) - ff_vc1dsp_init_mmx(c, avctx); - - c->add_png_paeth_prediction= add_png_paeth_prediction_mmx2; - } else if (mm_flags & FF_MM_3DNOW) { - c->prefetch = prefetch_3dnow; - - c->put_pixels_tab[0][1] = put_pixels16_x2_3dnow; - c->put_pixels_tab[0][2] = put_pixels16_y2_3dnow; - - c->avg_pixels_tab[0][0] = avg_pixels16_3dnow; - c->avg_pixels_tab[0][1] = avg_pixels16_x2_3dnow; - c->avg_pixels_tab[0][2] = avg_pixels16_y2_3dnow; - - c->put_pixels_tab[1][1] = put_pixels8_x2_3dnow; - c->put_pixels_tab[1][2] = put_pixels8_y2_3dnow; - - c->avg_pixels_tab[1][0] = avg_pixels8_3dnow; - c->avg_pixels_tab[1][1] = avg_pixels8_x2_3dnow; - c->avg_pixels_tab[1][2] = avg_pixels8_y2_3dnow; - - if(!(avctx->flags & CODEC_FLAG_BITEXACT)){ - c->put_no_rnd_pixels_tab[0][1] = put_no_rnd_pixels16_x2_3dnow; - c->put_no_rnd_pixels_tab[0][2] = put_no_rnd_pixels16_y2_3dnow; - c->put_no_rnd_pixels_tab[1][1] = put_no_rnd_pixels8_x2_3dnow; - c->put_no_rnd_pixels_tab[1][2] = put_no_rnd_pixels8_y2_3dnow; - c->avg_pixels_tab[0][3] = avg_pixels16_xy2_3dnow; - c->avg_pixels_tab[1][3] = avg_pixels8_xy2_3dnow; - } - - if (CONFIG_VP3_DECODER - && (avctx->codec_id == CODEC_ID_VP3 || avctx->codec_id == CODEC_ID_THEORA)) { - c->put_no_rnd_pixels_tab[1][1] = put_no_rnd_pixels8_x2_exact_3dnow; - c->put_no_rnd_pixels_tab[1][2] = put_no_rnd_pixels8_y2_exact_3dnow; - } - - SET_QPEL_FUNCS(put_qpel, 0, 16, 3dnow); - SET_QPEL_FUNCS(put_qpel, 1, 8, 3dnow); - SET_QPEL_FUNCS(put_no_rnd_qpel, 0, 16, 3dnow); - SET_QPEL_FUNCS(put_no_rnd_qpel, 1, 8, 3dnow); - SET_QPEL_FUNCS(avg_qpel, 0, 16, 3dnow); - SET_QPEL_FUNCS(avg_qpel, 1, 8, 3dnow); - - SET_QPEL_FUNCS(put_h264_qpel, 0, 16, 3dnow); - SET_QPEL_FUNCS(put_h264_qpel, 1, 8, 3dnow); - SET_QPEL_FUNCS(put_h264_qpel, 2, 4, 3dnow); - SET_QPEL_FUNCS(avg_h264_qpel, 0, 16, 3dnow); - SET_QPEL_FUNCS(avg_h264_qpel, 1, 8, 3dnow); - SET_QPEL_FUNCS(avg_h264_qpel, 2, 4, 3dnow); - - SET_QPEL_FUNCS(put_2tap_qpel, 0, 16, 3dnow); - SET_QPEL_FUNCS(put_2tap_qpel, 1, 8, 3dnow); - SET_QPEL_FUNCS(avg_2tap_qpel, 0, 16, 3dnow); - SET_QPEL_FUNCS(avg_2tap_qpel, 1, 8, 3dnow); +static void dsputil_init_3dnow(DSPContext *c, AVCodecContext *avctx, + int mm_flags) +{ + const int high_bit_depth = avctx->bits_per_raw_sample > 8; -#if HAVE_YASM - c->avg_h264_chroma_pixels_tab[0]= ff_avg_h264_chroma_mc8_3dnow_rnd; - c->avg_h264_chroma_pixels_tab[1]= ff_avg_h264_chroma_mc4_3dnow; +#if HAVE_INLINE_ASM + c->prefetch = prefetch_3dnow; - c->avg_no_rnd_vc1_chroma_pixels_tab[0]= ff_avg_vc1_chroma_mc8_3dnow_nornd; + if (!high_bit_depth) { + c->put_pixels_tab[0][1] = put_pixels16_x2_3dnow; + c->put_pixels_tab[0][2] = 
put_pixels16_y2_3dnow; - c->avg_rv40_chroma_pixels_tab[0]= ff_avg_rv40_chroma_mc8_3dnow; - c->avg_rv40_chroma_pixels_tab[1]= ff_avg_rv40_chroma_mc4_3dnow; -#endif - } + c->avg_pixels_tab[0][0] = avg_pixels16_3dnow; + c->avg_pixels_tab[0][1] = avg_pixels16_x2_3dnow; + c->avg_pixels_tab[0][2] = avg_pixels16_y2_3dnow; + + c->put_pixels_tab[1][1] = put_pixels8_x2_3dnow; + c->put_pixels_tab[1][2] = put_pixels8_y2_3dnow; + c->avg_pixels_tab[1][0] = avg_pixels8_3dnow; + c->avg_pixels_tab[1][1] = avg_pixels8_x2_3dnow; + c->avg_pixels_tab[1][2] = avg_pixels8_y2_3dnow; -#define H264_QPEL_FUNCS(x, y, CPU)\ - c->put_h264_qpel_pixels_tab[0][x+y*4] = put_h264_qpel16_mc##x##y##_##CPU;\ - c->put_h264_qpel_pixels_tab[1][x+y*4] = put_h264_qpel8_mc##x##y##_##CPU;\ - c->avg_h264_qpel_pixels_tab[0][x+y*4] = avg_h264_qpel16_mc##x##y##_##CPU;\ - c->avg_h264_qpel_pixels_tab[1][x+y*4] = avg_h264_qpel8_mc##x##y##_##CPU; - if((mm_flags & FF_MM_SSE2) && !(mm_flags & FF_MM_3DNOW)){ - // these functions are slower than mmx on AMD, but faster on Intel - c->put_pixels_tab[0][0] = put_pixels16_sse2; - c->avg_pixels_tab[0][0] = avg_pixels16_sse2; - H264_QPEL_FUNCS(0, 0, sse2); + if (!(avctx->flags & CODEC_FLAG_BITEXACT)){ + c->put_no_rnd_pixels_tab[0][1] = put_no_rnd_pixels16_x2_3dnow; + c->put_no_rnd_pixels_tab[0][2] = put_no_rnd_pixels16_y2_3dnow; + c->put_no_rnd_pixels_tab[1][1] = put_no_rnd_pixels8_x2_3dnow; + c->put_no_rnd_pixels_tab[1][2] = put_no_rnd_pixels8_y2_3dnow; + + c->avg_pixels_tab[0][3] = avg_pixels16_xy2_3dnow; + c->avg_pixels_tab[1][3] = avg_pixels8_xy2_3dnow; } - if(mm_flags & FF_MM_SSE2){ - H264_QPEL_FUNCS(0, 1, sse2); - H264_QPEL_FUNCS(0, 2, sse2); - H264_QPEL_FUNCS(0, 3, sse2); - H264_QPEL_FUNCS(1, 1, sse2); - H264_QPEL_FUNCS(1, 2, sse2); - H264_QPEL_FUNCS(1, 3, sse2); - H264_QPEL_FUNCS(2, 1, sse2); - H264_QPEL_FUNCS(2, 2, sse2); - H264_QPEL_FUNCS(2, 3, sse2); - H264_QPEL_FUNCS(3, 1, sse2); - H264_QPEL_FUNCS(3, 2, sse2); - H264_QPEL_FUNCS(3, 3, sse2); + } + + if (CONFIG_VP3_DECODER && (avctx->codec_id == AV_CODEC_ID_VP3 || + avctx->codec_id == AV_CODEC_ID_THEORA)) { + c->put_no_rnd_pixels_tab[1][1] = put_no_rnd_pixels8_x2_exact_3dnow; + c->put_no_rnd_pixels_tab[1][2] = put_no_rnd_pixels8_y2_exact_3dnow; + } + + if (CONFIG_H264QPEL) { + SET_QPEL_FUNCS(put_qpel, 0, 16, 3dnow, ); + SET_QPEL_FUNCS(put_qpel, 1, 8, 3dnow, ); + SET_QPEL_FUNCS(put_no_rnd_qpel, 0, 16, 3dnow, ); + SET_QPEL_FUNCS(put_no_rnd_qpel, 1, 8, 3dnow, ); + SET_QPEL_FUNCS(avg_qpel, 0, 16, 3dnow, ); + SET_QPEL_FUNCS(avg_qpel, 1, 8, 3dnow, ); + + if (!high_bit_depth) { + SET_QPEL_FUNCS(put_h264_qpel, 0, 16, 3dnow, ); + SET_QPEL_FUNCS(put_h264_qpel, 1, 8, 3dnow, ); + SET_QPEL_FUNCS(put_h264_qpel, 2, 4, 3dnow, ); + SET_QPEL_FUNCS(avg_h264_qpel, 0, 16, 3dnow, ); + SET_QPEL_FUNCS(avg_h264_qpel, 1, 8, 3dnow, ); + SET_QPEL_FUNCS(avg_h264_qpel, 2, 4, 3dnow, ); } -#if HAVE_SSSE3 - if(mm_flags & FF_MM_SSSE3){ - H264_QPEL_FUNCS(1, 0, ssse3); - H264_QPEL_FUNCS(1, 1, ssse3); - H264_QPEL_FUNCS(1, 2, ssse3); - H264_QPEL_FUNCS(1, 3, ssse3); - H264_QPEL_FUNCS(2, 0, ssse3); - H264_QPEL_FUNCS(2, 1, ssse3); - H264_QPEL_FUNCS(2, 2, ssse3); - H264_QPEL_FUNCS(2, 3, ssse3); - H264_QPEL_FUNCS(3, 0, ssse3); - H264_QPEL_FUNCS(3, 1, ssse3); - H264_QPEL_FUNCS(3, 2, ssse3); - H264_QPEL_FUNCS(3, 3, ssse3); - c->add_png_paeth_prediction= add_png_paeth_prediction_ssse3; + + SET_QPEL_FUNCS(put_2tap_qpel, 0, 16, 3dnow, ); + SET_QPEL_FUNCS(put_2tap_qpel, 1, 8, 3dnow, ); + SET_QPEL_FUNCS(avg_2tap_qpel, 0, 16, 3dnow, ); + SET_QPEL_FUNCS(avg_2tap_qpel, 1, 8, 3dnow, ); + } + + 
c->vorbis_inverse_coupling = vorbis_inverse_coupling_3dnow; +#endif /* HAVE_INLINE_ASM */ + #if HAVE_YASM - c->put_no_rnd_vc1_chroma_pixels_tab[0]= ff_put_vc1_chroma_mc8_ssse3_nornd; - c->avg_no_rnd_vc1_chroma_pixels_tab[0]= ff_avg_vc1_chroma_mc8_ssse3_nornd; - c->put_h264_chroma_pixels_tab[0]= ff_put_h264_chroma_mc8_ssse3_rnd; - c->avg_h264_chroma_pixels_tab[0]= ff_avg_h264_chroma_mc8_ssse3_rnd; - c->put_h264_chroma_pixels_tab[1]= ff_put_h264_chroma_mc4_ssse3; - c->avg_h264_chroma_pixels_tab[1]= ff_avg_h264_chroma_mc4_ssse3; - c->add_hfyu_left_prediction = ff_add_hfyu_left_prediction_ssse3; - if (mm_flags & FF_MM_SSE4) // not really sse4, just slow on Conroe - c->add_hfyu_left_prediction = ff_add_hfyu_left_prediction_sse4; + if (!high_bit_depth && CONFIG_H264CHROMA) { + c->avg_h264_chroma_pixels_tab[0] = ff_avg_h264_chroma_mc8_3dnow_rnd; + c->avg_h264_chroma_pixels_tab[1] = ff_avg_h264_chroma_mc4_3dnow; + } +#endif /* HAVE_YASM */ +} + +static void dsputil_init_3dnowext(DSPContext *c, AVCodecContext *avctx, + int mm_flags) +{ +#if HAVE_AMD3DNOWEXT_INLINE && HAVE_6REGS + c->vector_fmul_window = vector_fmul_window_3dnowext; #endif +} + +static void dsputil_init_sse(DSPContext *c, AVCodecContext *avctx, int mm_flags) +{ + const int high_bit_depth = avctx->bits_per_raw_sample > 8; + +#if HAVE_INLINE_ASM + if (!high_bit_depth) { + if (!(CONFIG_MPEG_XVMC_DECODER && avctx->xvmc_acceleration > 1)) { + /* XvMCCreateBlocks() may not allocate 16-byte aligned blocks */ + c->clear_block = clear_block_sse; + c->clear_blocks = clear_blocks_sse; } + } + + c->vorbis_inverse_coupling = vorbis_inverse_coupling_sse; + +#if HAVE_6REGS + c->vector_fmul_window = vector_fmul_window_sse; #endif - if(mm_flags & FF_MM_3DNOW){ - c->vorbis_inverse_coupling = vorbis_inverse_coupling_3dnow; - c->vector_fmul = vector_fmul_3dnow; - if(!(avctx->flags & CODEC_FLAG_BITEXACT)){ - c->float_to_int16 = float_to_int16_3dnow; - c->float_to_int16_interleave = float_to_int16_interleave_3dnow; - } - } - if(mm_flags & FF_MM_3DNOWEXT){ - c->vector_fmul_reverse = vector_fmul_reverse_3dnow2; - c->vector_fmul_window = vector_fmul_window_3dnow2; - if(!(avctx->flags & CODEC_FLAG_BITEXACT)){ - c->float_to_int16_interleave = float_to_int16_interleave_3dn2; - } - } - if(mm_flags & FF_MM_MMX2){ + c->vector_clipf = vector_clipf_sse; +#endif /* HAVE_INLINE_ASM */ + #if HAVE_YASM - c->scalarproduct_int16 = ff_scalarproduct_int16_mmx2; - c->scalarproduct_and_madd_int16 = ff_scalarproduct_and_madd_int16_mmx2; -#endif + c->vector_fmul_reverse = ff_vector_fmul_reverse_sse; + c->vector_fmul_add = ff_vector_fmul_add_sse; + + c->scalarproduct_float = ff_scalarproduct_float_sse; + c->butterflies_float_interleave = ff_butterflies_float_interleave_sse; + + if (!high_bit_depth) + c->emulated_edge_mc = emulated_edge_mc_sse; +#endif /* HAVE_YASM */ +} + +static void dsputil_init_sse2(DSPContext *c, AVCodecContext *avctx, + int mm_flags) +{ + const int bit_depth = avctx->bits_per_raw_sample; + +#if HAVE_INLINE_ASM + const int high_bit_depth = bit_depth > 8; + + if (!(mm_flags & AV_CPU_FLAG_SSE2SLOW)) { + // these functions are slower than mmx on AMD, but faster on Intel + if (!high_bit_depth) { + c->put_pixels_tab[0][0] = put_pixels16_sse2; + c->put_no_rnd_pixels_tab[0][0] = put_pixels16_sse2; + c->avg_pixels_tab[0][0] = avg_pixels16_sse2; + if (CONFIG_H264QPEL) + H264_QPEL_FUNCS(0, 0, sse2); } - if(mm_flags & FF_MM_SSE){ - c->vorbis_inverse_coupling = vorbis_inverse_coupling_sse; - c->ac3_downmix = ac3_downmix_sse; - c->vector_fmul = vector_fmul_sse; - 
c->vector_fmul_reverse = vector_fmul_reverse_sse; - c->vector_fmul_add = vector_fmul_add_sse; - c->vector_fmul_window = vector_fmul_window_sse; - c->int32_to_float_fmul_scalar = int32_to_float_fmul_scalar_sse; - c->vector_clipf = vector_clipf_sse; - c->float_to_int16 = float_to_int16_sse; - c->float_to_int16_interleave = float_to_int16_interleave_sse; + } + + if (!high_bit_depth && CONFIG_H264QPEL) { + H264_QPEL_FUNCS(0, 1, sse2); + H264_QPEL_FUNCS(0, 2, sse2); + H264_QPEL_FUNCS(0, 3, sse2); + H264_QPEL_FUNCS(1, 1, sse2); + H264_QPEL_FUNCS(1, 2, sse2); + H264_QPEL_FUNCS(1, 3, sse2); + H264_QPEL_FUNCS(2, 1, sse2); + H264_QPEL_FUNCS(2, 2, sse2); + H264_QPEL_FUNCS(2, 3, sse2); + H264_QPEL_FUNCS(3, 1, sse2); + H264_QPEL_FUNCS(3, 2, sse2); + H264_QPEL_FUNCS(3, 3, sse2); + } + + if (!high_bit_depth && avctx->idct_algo == FF_IDCT_XVIDMMX) { + c->idct_put = ff_idct_xvid_sse2_put; + c->idct_add = ff_idct_xvid_sse2_add; + c->idct = ff_idct_xvid_sse2; + c->idct_permutation_type = FF_SSE2_IDCT_PERM; + } +#endif /* HAVE_INLINE_ASM */ + #if HAVE_YASM - c->scalarproduct_float = ff_scalarproduct_float_sse; -#endif + if (bit_depth == 10) { + if (CONFIG_H264QPEL) { + SET_QPEL_FUNCS(put_h264_qpel, 0, 16, 10_sse2, ff_); + SET_QPEL_FUNCS(put_h264_qpel, 1, 8, 10_sse2, ff_); + SET_QPEL_FUNCS(avg_h264_qpel, 0, 16, 10_sse2, ff_); + SET_QPEL_FUNCS(avg_h264_qpel, 1, 8, 10_sse2, ff_); + H264_QPEL_FUNCS_10(1, 0, sse2_cache64); + H264_QPEL_FUNCS_10(2, 0, sse2_cache64); + H264_QPEL_FUNCS_10(3, 0, sse2_cache64); } - if(mm_flags & FF_MM_3DNOW) - c->vector_fmul_add = vector_fmul_add_3dnow; // faster than sse - if(mm_flags & FF_MM_SSE2){ - c->int32_to_float_fmul_scalar = int32_to_float_fmul_scalar_sse2; - c->float_to_int16 = float_to_int16_sse2; - c->float_to_int16_interleave = float_to_int16_interleave_sse2; -#if HAVE_YASM - c->scalarproduct_int16 = ff_scalarproduct_int16_sse2; - c->scalarproduct_and_madd_int16 = ff_scalarproduct_and_madd_int16_sse2; -#endif + if (CONFIG_H264CHROMA) { + c->put_h264_chroma_pixels_tab[0] = ff_put_h264_chroma_mc8_10_sse2; + c->avg_h264_chroma_pixels_tab[0] = ff_avg_h264_chroma_mc8_10_sse2; } - if((mm_flags & FF_MM_SSSE3) && !(mm_flags & (FF_MM_SSE42|FF_MM_3DNOW)) && HAVE_YASM) // cachesplit - c->scalarproduct_and_madd_int16 = ff_scalarproduct_and_madd_int16_ssse3; } - if (CONFIG_ENCODERS) - dsputilenc_init_mmx(c, avctx); - -#if 0 - // for speed testing - get_pixels = just_return; - put_pixels_clamped = just_return; - add_pixels_clamped = just_return; - - pix_abs16x16 = just_return; - pix_abs16x16_x2 = just_return; - pix_abs16x16_y2 = just_return; - pix_abs16x16_xy2 = just_return; - - put_pixels_tab[0] = just_return; - put_pixels_tab[1] = just_return; - put_pixels_tab[2] = just_return; - put_pixels_tab[3] = just_return; - - put_no_rnd_pixels_tab[0] = just_return; - put_no_rnd_pixels_tab[1] = just_return; - put_no_rnd_pixels_tab[2] = just_return; - put_no_rnd_pixels_tab[3] = just_return; - - avg_pixels_tab[0] = just_return; - avg_pixels_tab[1] = just_return; - avg_pixels_tab[2] = just_return; - avg_pixels_tab[3] = just_return; - - avg_no_rnd_pixels_tab[0] = just_return; - avg_no_rnd_pixels_tab[1] = just_return; - avg_no_rnd_pixels_tab[2] = just_return; - avg_no_rnd_pixels_tab[3] = just_return; - - //av_fdct = just_return; - //ff_idct = just_return; + c->scalarproduct_int16 = ff_scalarproduct_int16_sse2; + c->scalarproduct_and_madd_int16 = ff_scalarproduct_and_madd_int16_sse2; + if (mm_flags & AV_CPU_FLAG_ATOM) { + c->vector_clip_int32 = ff_vector_clip_int32_int_sse2; + } else { + 
c->vector_clip_int32 = ff_vector_clip_int32_sse2; + } + if (avctx->flags & CODEC_FLAG_BITEXACT) { + c->apply_window_int16 = ff_apply_window_int16_sse2_ba; + } else if (!(mm_flags & AV_CPU_FLAG_SSE2SLOW)) { + c->apply_window_int16 = ff_apply_window_int16_sse2; + } + c->bswap_buf = ff_bswap32_buf_sse2; +#endif /* HAVE_YASM */ +} + +static void dsputil_init_ssse3(DSPContext *c, AVCodecContext *avctx, + int mm_flags) +{ + const int high_bit_depth = avctx->bits_per_raw_sample > 8; + const int bit_depth = avctx->bits_per_raw_sample; + +#if HAVE_SSSE3_INLINE + if (!high_bit_depth && CONFIG_H264QPEL) { + H264_QPEL_FUNCS(1, 0, ssse3); + H264_QPEL_FUNCS(1, 1, ssse3); + H264_QPEL_FUNCS(1, 2, ssse3); + H264_QPEL_FUNCS(1, 3, ssse3); + H264_QPEL_FUNCS(2, 0, ssse3); + H264_QPEL_FUNCS(2, 1, ssse3); + H264_QPEL_FUNCS(2, 2, ssse3); + H264_QPEL_FUNCS(2, 3, ssse3); + H264_QPEL_FUNCS(3, 0, ssse3); + H264_QPEL_FUNCS(3, 1, ssse3); + H264_QPEL_FUNCS(3, 2, ssse3); + H264_QPEL_FUNCS(3, 3, ssse3); + } +#endif /* HAVE_SSSE3_INLINE */ + +#if HAVE_SSSE3_EXTERNAL + if (bit_depth == 10 && CONFIG_H264QPEL) { + H264_QPEL_FUNCS_10(1, 0, ssse3_cache64); + H264_QPEL_FUNCS_10(2, 0, ssse3_cache64); + H264_QPEL_FUNCS_10(3, 0, ssse3_cache64); + } + if (!high_bit_depth && CONFIG_H264CHROMA) { + c->put_h264_chroma_pixels_tab[0] = ff_put_h264_chroma_mc8_ssse3_rnd; + c->avg_h264_chroma_pixels_tab[0] = ff_avg_h264_chroma_mc8_ssse3_rnd; + c->put_h264_chroma_pixels_tab[1] = ff_put_h264_chroma_mc4_ssse3; + c->avg_h264_chroma_pixels_tab[1] = ff_avg_h264_chroma_mc4_ssse3; + } + c->add_hfyu_left_prediction = ff_add_hfyu_left_prediction_ssse3; + if (mm_flags & AV_CPU_FLAG_SSE4) // not really sse4, just slow on Conroe + c->add_hfyu_left_prediction = ff_add_hfyu_left_prediction_sse4; + + if (mm_flags & AV_CPU_FLAG_ATOM) + c->apply_window_int16 = ff_apply_window_int16_ssse3_atom; + else + c->apply_window_int16 = ff_apply_window_int16_ssse3; + if (!(mm_flags & (AV_CPU_FLAG_SSE42|AV_CPU_FLAG_3DNOW))) // cachesplit + c->scalarproduct_and_madd_int16 = ff_scalarproduct_and_madd_int16_ssse3; + c->bswap_buf = ff_bswap32_buf_ssse3; +#endif /* HAVE_SSSE3_EXTERNAL */ +} + +static void dsputil_init_sse4(DSPContext *c, AVCodecContext *avctx, + int mm_flags) +{ +#if HAVE_SSE4_EXTERNAL + c->vector_clip_int32 = ff_vector_clip_int32_sse4; +#endif /* HAVE_SSE4_EXTERNAL */ +} + +static void dsputil_init_avx(DSPContext *c, AVCodecContext *avctx, int mm_flags) +{ +#if HAVE_AVX_EXTERNAL + const int bit_depth = avctx->bits_per_raw_sample; + + if (bit_depth == 10) { + // AVX implies !cache64. + // TODO: Port cache(32|64) detection from x264. 
+ if (CONFIG_H264QPEL) { + H264_QPEL_FUNCS_10(1, 0, sse2); + H264_QPEL_FUNCS_10(2, 0, sse2); + H264_QPEL_FUNCS_10(3, 0, sse2); + } + + if (CONFIG_H264CHROMA) { + c->put_h264_chroma_pixels_tab[0] = ff_put_h264_chroma_mc8_10_avx; + c->avg_h264_chroma_pixels_tab[0] = ff_avg_h264_chroma_mc8_10_avx; + } + } + c->butterflies_float_interleave = ff_butterflies_float_interleave_avx; + c->vector_fmul_reverse = ff_vector_fmul_reverse_avx; + c->vector_fmul_add = ff_vector_fmul_add_avx; +#endif /* HAVE_AVX_EXTERNAL */ +} + +void ff_dsputil_init_mmx(DSPContext *c, AVCodecContext *avctx) +{ + int mm_flags = av_get_cpu_flags(); + +#if HAVE_7REGS && HAVE_INLINE_ASM + if (mm_flags & AV_CPU_FLAG_CMOV) + c->add_hfyu_median_prediction = add_hfyu_median_prediction_cmov; #endif + + if (mm_flags & AV_CPU_FLAG_MMX) + dsputil_init_mmx(c, avctx, mm_flags); + + if (mm_flags & AV_CPU_FLAG_MMXEXT) + dsputil_init_mmxext(c, avctx, mm_flags); + + if (mm_flags & AV_CPU_FLAG_3DNOW) + dsputil_init_3dnow(c, avctx, mm_flags); + + if (mm_flags & AV_CPU_FLAG_3DNOWEXT) + dsputil_init_3dnowext(c, avctx, mm_flags); + + if (mm_flags & AV_CPU_FLAG_SSE) + dsputil_init_sse(c, avctx, mm_flags); + + if (mm_flags & AV_CPU_FLAG_SSE2) + dsputil_init_sse2(c, avctx, mm_flags); + + if (mm_flags & AV_CPU_FLAG_SSSE3) + dsputil_init_ssse3(c, avctx, mm_flags); + + if (mm_flags & AV_CPU_FLAG_SSE4) + dsputil_init_sse4(c, avctx, mm_flags); + + if (mm_flags & AV_CPU_FLAG_AVX) + dsputil_init_avx(c, avctx, mm_flags); + + if (CONFIG_ENCODERS) + ff_dsputilenc_init_mmx(c, avctx); }
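
The new ff_dsputil_init_mmx() above queries the CPU feature flags once and then calls the per-ISA init helpers in order of increasing capability, so a later helper can overwrite function pointers installed by an earlier one. The stand-alone sketch below illustrates that dispatch idiom with hypothetical names (MyDSPContext, MY_FLAG_MMX/MY_FLAG_SSE, the *_stub functions); it is not the Libav API, only the shape of the technique.

/* Sketch of the per-ISA init dispatch used by ff_dsputil_init_mmx();
 * every name in this example is hypothetical. */
#include <stdint.h>

typedef struct MyDSPContext {
    void (*clear_block)(int16_t *block);
} MyDSPContext;

enum { MY_FLAG_MMX = 1 << 0, MY_FLAG_SSE = 1 << 1 };

static void clear_block_c(int16_t *block)
{
    for (int i = 0; i < 64; i++)
        block[i] = 0;
}

/* Real code would install assembly versions; these stubs only mark the override order. */
static void clear_block_mmx_stub(int16_t *block) { clear_block_c(block); }
static void clear_block_sse_stub(int16_t *block) { clear_block_c(block); }

static void my_init_mmx(MyDSPContext *c) { c->clear_block = clear_block_mmx_stub; }
static void my_init_sse(MyDSPContext *c) { c->clear_block = clear_block_sse_stub; }

static void my_dsp_init(MyDSPContext *c, int cpu_flags)
{
    c->clear_block = clear_block_c;   /* portable baseline first */
    if (cpu_flags & MY_FLAG_MMX)
        my_init_mmx(c);               /* MMX overrides the C version */
    if (cpu_flags & MY_FLAG_SSE)
        my_init_sse(c);               /* checked last, so it wins when available */
}

int main(void)
{
    MyDSPContext c;
    my_dsp_init(&c, MY_FLAG_MMX | MY_FLAG_SSE);  /* ends up with the SSE stub installed */
    return 0;
}

Keeping the checks in ascending order of capability is what lets dsputil_init_sse2() and the later helpers silently replace pointers that dsputil_init_mmx() set a few calls earlier.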
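
The SET_QPEL_FUNCS/SET_HPEL_FUNCS and H264_QPEL_FUNCS_10 macros used throughout this hunk rely on preprocessor token pasting to build one symbol name per table slot. A minimal sketch of that idiom follows, again with hypothetical names (MyCtx, put_blk8_mc*_mmxext, SET_BLK8_FUNC) and a 4x4 table indexed as x + y * 4; it mirrors the real macros only in shape.

/* Sketch of the token-pasting table-fill idiom (hypothetical names throughout). */
#include <stdio.h>
#include <stdint.h>

typedef void (*mc_fn)(uint8_t *dst, const uint8_t *src, int stride);

typedef struct MyCtx {
    mc_fn put_blk8_tab[16];   /* indexed by x + y * 4, like the qpel tables */
} MyCtx;

static void put_blk8_mc00_mmxext(uint8_t *dst, const uint8_t *src, int stride) { (void)dst; (void)src; (void)stride; }
static void put_blk8_mc21_mmxext(uint8_t *dst, const uint8_t *src, int stride) { (void)dst; (void)src; (void)stride; }

/* ## pastes the x/y fractional position and the CPU suffix into one symbol,
 * the same way H264_QPEL_FUNCS_10(x, y, CPU) names the real functions. */
#define SET_BLK8_FUNC(ctx, x, y, CPU) \
    ((ctx)->put_blk8_tab[(x) + (y) * 4] = put_blk8_mc ## x ## y ## _ ## CPU)

int main(void)
{
    MyCtx c = { { 0 } };
    SET_BLK8_FUNC(&c, 0, 0, mmxext);  /* expands to put_blk8_mc00_mmxext */
    SET_BLK8_FUNC(&c, 2, 1, mmxext);  /* expands to put_blk8_mc21_mmxext */
    printf("slot 6 filled: %d\n", c.put_blk8_tab[2 + 1 * 4] != 0);
    return 0;
}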