/*
 * Chinese AVS video (AVS1-P2, JiZhun profile) decoder.
 * Copyright (c) 2006 Stefan Gehrer <stefan.gehrer@gmx.de>
 *
 * MMX-optimized DSP functions, based on H.264 optimizations by
 * Michael Niedermayer and Loren Merritt
 *
 * This file is part of Libav.
 *
 * Libav is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * Libav is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with Libav; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */
#include "libavutil/attributes.h"
#include "libavutil/common.h"
#include "libavutil/cpu.h"
#include "libavutil/x86/asm.h"
#include "libavutil/x86/cpu.h"
#include "libavcodec/cavsdsp.h"
#include "constants.h"
#include "dsputil_mmx.h"
#include "config.h"

#if HAVE_MMX_INLINE
/* in/out: mma=mma+mmb, mmb=mmb-mma */
#define SUMSUB_BA( a, b ) \
    "paddw "#b", "#a" \n\t"\
    "paddw "#b", "#b" \n\t"\
    "psubw "#a", "#b" \n\t"
/*****************************************************************************
 *
 * inverse transform
 *
 ****************************************************************************/
static inline void cavs_idct8_1d(int16_t *block, uint64_t bias)
{
    __asm__ volatile(
52 "movq 112(%0), %%mm4 \n\t" /* mm4 = src7 */
53 "movq 16(%0), %%mm5 \n\t" /* mm5 = src1 */
54 "movq 80(%0), %%mm2 \n\t" /* mm2 = src5 */
55 "movq 48(%0), %%mm7 \n\t" /* mm7 = src3 */
56 "movq %%mm4, %%mm0 \n\t"
57 "movq %%mm5, %%mm3 \n\t"
58 "movq %%mm2, %%mm6 \n\t"
59 "movq %%mm7, %%mm1 \n\t"
61 "paddw %%mm4, %%mm4 \n\t" /* mm4 = 2*src7 */
62 "paddw %%mm3, %%mm3 \n\t" /* mm3 = 2*src1 */
63 "paddw %%mm6, %%mm6 \n\t" /* mm6 = 2*src5 */
64 "paddw %%mm1, %%mm1 \n\t" /* mm1 = 2*src3 */
65 "paddw %%mm4, %%mm0 \n\t" /* mm0 = 3*src7 */
66 "paddw %%mm3, %%mm5 \n\t" /* mm5 = 3*src1 */
67 "paddw %%mm6, %%mm2 \n\t" /* mm2 = 3*src5 */
68 "paddw %%mm1, %%mm7 \n\t" /* mm7 = 3*src3 */
69 "psubw %%mm4, %%mm5 \n\t" /* mm5 = 3*src1 - 2*src7 = a0 */
70 "paddw %%mm6, %%mm7 \n\t" /* mm7 = 3*src3 + 2*src5 = a1 */
71 "psubw %%mm2, %%mm1 \n\t" /* mm1 = 2*src3 - 3*src5 = a2 */
72 "paddw %%mm0, %%mm3 \n\t" /* mm3 = 2*src1 + 3*src7 = a3 */
74 "movq %%mm5, %%mm4 \n\t"
75 "movq %%mm7, %%mm6 \n\t"
76 "movq %%mm3, %%mm0 \n\t"
77 "movq %%mm1, %%mm2 \n\t"
78 SUMSUB_BA( %%mm7, %%mm5 ) /* mm7 = a0 + a1 mm5 = a0 - a1 */
79 "paddw %%mm3, %%mm7 \n\t" /* mm7 = a0 + a1 + a3 */
80 "paddw %%mm1, %%mm5 \n\t" /* mm5 = a0 - a1 + a2 */
81 "paddw %%mm7, %%mm7 \n\t"
82 "paddw %%mm5, %%mm5 \n\t"
83 "paddw %%mm6, %%mm7 \n\t" /* mm7 = b4 */
84 "paddw %%mm4, %%mm5 \n\t" /* mm5 = b5 */
86 SUMSUB_BA( %%mm1, %%mm3 ) /* mm1 = a3 + a2 mm3 = a3 - a2 */
87 "psubw %%mm1, %%mm4 \n\t" /* mm4 = a0 - a2 - a3 */
88 "movq %%mm4, %%mm1 \n\t" /* mm1 = a0 - a2 - a3 */
89 "psubw %%mm6, %%mm3 \n\t" /* mm3 = a3 - a2 - a1 */
90 "paddw %%mm1, %%mm1 \n\t"
91 "paddw %%mm3, %%mm3 \n\t"
92 "psubw %%mm2, %%mm1 \n\t" /* mm1 = b7 */
93 "paddw %%mm0, %%mm3 \n\t" /* mm3 = b6 */
95 "movq 32(%0), %%mm2 \n\t" /* mm2 = src2 */
96 "movq 96(%0), %%mm6 \n\t" /* mm6 = src6 */
97 "movq %%mm2, %%mm4 \n\t"
98 "movq %%mm6, %%mm0 \n\t"
99 "psllw $2, %%mm4 \n\t" /* mm4 = 4*src2 */
100 "psllw $2, %%mm6 \n\t" /* mm6 = 4*src6 */
101 "paddw %%mm4, %%mm2 \n\t" /* mm2 = 5*src2 */
102 "paddw %%mm6, %%mm0 \n\t" /* mm0 = 5*src6 */
103 "paddw %%mm2, %%mm2 \n\t"
104 "paddw %%mm0, %%mm0 \n\t"
105 "psubw %%mm0, %%mm4 \n\t" /* mm4 = 4*src2 - 10*src6 = a7 */
106 "paddw %%mm2, %%mm6 \n\t" /* mm6 = 4*src6 + 10*src2 = a6 */
108 "movq (%0), %%mm2 \n\t" /* mm2 = src0 */
109 "movq 64(%0), %%mm0 \n\t" /* mm0 = src4 */
110 SUMSUB_BA( %%mm0, %%mm2 ) /* mm0 = src0+src4 mm2 = src0-src4 */
111 "psllw $3, %%mm0 \n\t"
112 "psllw $3, %%mm2 \n\t"
113 "paddw %1, %%mm0 \n\t" /* add rounding bias */
114 "paddw %1, %%mm2 \n\t" /* add rounding bias */
116 SUMSUB_BA( %%mm6, %%mm0 ) /* mm6 = a4 + a6 mm0 = a4 - a6 */
117 SUMSUB_BA( %%mm4, %%mm2 ) /* mm4 = a5 + a7 mm2 = a5 - a7 */
118 SUMSUB_BA( %%mm7, %%mm6 ) /* mm7 = dst0 mm6 = dst7 */
119 SUMSUB_BA( %%mm5, %%mm4 ) /* mm5 = dst1 mm4 = dst6 */
120 SUMSUB_BA( %%mm3, %%mm2 ) /* mm3 = dst2 mm2 = dst5 */
121 SUMSUB_BA( %%mm1, %%mm0 ) /* mm1 = dst3 mm0 = dst4 */
122 :: "r"(block), "m"(bias)
static void cavs_idct8_add_mmx(uint8_t *dst, int16_t *block, int stride)
{
    int i;
    DECLARE_ALIGNED(8, int16_t, b2)[64];

    for(i=0; i<2; i++){
        DECLARE_ALIGNED(8, uint64_t, tmp);

        cavs_idct8_1d(block+4*i, ff_pw_4.a);

        __asm__ volatile(
137 "psraw $3, %%mm7 \n\t"
138 "psraw $3, %%mm6 \n\t"
139 "psraw $3, %%mm5 \n\t"
140 "psraw $3, %%mm4 \n\t"
141 "psraw $3, %%mm3 \n\t"
142 "psraw $3, %%mm2 \n\t"
143 "psraw $3, %%mm1 \n\t"
144 "psraw $3, %%mm0 \n\t"
145 "movq %%mm7, %0 \n\t"
146 TRANSPOSE4( %%mm0, %%mm2, %%mm4, %%mm6, %%mm7 )
147 "movq %%mm0, 8(%1) \n\t"
148 "movq %%mm6, 24(%1) \n\t"
149 "movq %%mm7, 40(%1) \n\t"
150 "movq %%mm4, 56(%1) \n\t"
151 "movq %0, %%mm7 \n\t"
152 TRANSPOSE4( %%mm7, %%mm5, %%mm3, %%mm1, %%mm0 )
153 "movq %%mm7, (%1) \n\t"
154 "movq %%mm1, 16(%1) \n\t"
155 "movq %%mm0, 32(%1) \n\t"
156 "movq %%mm3, 48(%1) \n\t"
164 cavs_idct8_1d(b2+4*i, ff_pw_64.a);
167 "psraw $7, %%mm7 \n\t"
168 "psraw $7, %%mm6 \n\t"
169 "psraw $7, %%mm5 \n\t"
170 "psraw $7, %%mm4 \n\t"
171 "psraw $7, %%mm3 \n\t"
172 "psraw $7, %%mm2 \n\t"
173 "psraw $7, %%mm1 \n\t"
174 "psraw $7, %%mm0 \n\t"
175 "movq %%mm7, (%0) \n\t"
176 "movq %%mm5, 16(%0) \n\t"
177 "movq %%mm3, 32(%0) \n\t"
178 "movq %%mm1, 48(%0) \n\t"
179 "movq %%mm0, 64(%0) \n\t"
180 "movq %%mm2, 80(%0) \n\t"
181 "movq %%mm4, 96(%0) \n\t"
182 "movq %%mm6, 112(%0) \n\t"
188 ff_add_pixels_clamped_mmx(b2, dst, stride);
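
/* A note on the rounding constants (inferred from the code above): each 1-D
 * pass scales the even part by 8 (psllw $3) and adds a bias before the final
 * arithmetic shift, so the row pass computes roughly (8*x + 4) >> 3 and the
 * column pass (8*x + 64) >> 7; ff_pw_4 and ff_pw_64 are the usual half-ulp
 * rounding offsets for shifts of 3 and 7. */
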
#endif /* HAVE_MMX_INLINE */

#if (HAVE_MMXEXT_INLINE || HAVE_AMD3DNOW_INLINE)
/*****************************************************************************
 *
 * motion compensation
 *
 ****************************************************************************/

/* vertical filter [-1 -2 96 42 -7 0] */
#define QPEL_CAVSV1(A,B,C,D,E,F,OP,MUL2) \
    "movd (%0), "#F"            \n\t"\
    "movq "#C", %%mm6           \n\t"\
    "pmullw %5, %%mm6           \n\t"\
    "movq "#D", %%mm7           \n\t"\
    "pmullw "MANGLE(MUL2)", %%mm7\n\t"\
    "psllw $3, "#E"             \n\t"\
    "psubw "#E", %%mm6          \n\t"\
    "psraw $3, "#E"             \n\t"\
    "paddw %%mm7, %%mm6         \n\t"\
    "paddw "#E", %%mm6          \n\t"\
    "paddw "#B", "#B"           \n\t"\
    "pxor %%mm7, %%mm7          \n\t"\
    "add %2, %0                 \n\t"\
    "punpcklbw %%mm7, "#F"      \n\t"\
    "psubw "#B", %%mm6          \n\t"\
    "psraw $1, "#B"             \n\t"\
    "psubw "#A", %%mm6          \n\t"\
    "paddw %4, %%mm6            \n\t"\
    "psraw $7, %%mm6            \n\t"\
    "packuswb %%mm6, %%mm6      \n\t"\
    OP(%%mm6, (%1), A, d)            \
    "add %3, %1                 \n\t"
/* vertical filter [ 0 -1 5 5 -1 0] */
#define QPEL_CAVSV2(A,B,C,D,E,F,OP,MUL2) \
    "movd (%0), "#F"            \n\t"\
    "movq "#C", %%mm6           \n\t"\
    "paddw "#D", %%mm6          \n\t"\
    "pmullw %5, %%mm6           \n\t"\
    "add %2, %0                 \n\t"\
    "punpcklbw %%mm7, "#F"      \n\t"\
    "psubw "#B", %%mm6          \n\t"\
    "psubw "#E", %%mm6          \n\t"\
    "paddw %4, %%mm6            \n\t"\
    "psraw $3, %%mm6            \n\t"\
    "packuswb %%mm6, %%mm6      \n\t"\
    OP(%%mm6, (%1), A, d)            \
    "add %3, %1                 \n\t"
/* vertical filter [ 0 -7 42 96 -2 -1] */
#define QPEL_CAVSV3(A,B,C,D,E,F,OP,MUL2) \
    "movd (%0), "#F"            \n\t"\
    "movq "#C", %%mm6           \n\t"\
    "pmullw "MANGLE(MUL2)", %%mm6\n\t"\
    "movq "#D", %%mm7           \n\t"\
    "pmullw %5, %%mm7           \n\t"\
    "psllw $3, "#B"             \n\t"\
    "psubw "#B", %%mm6          \n\t"\
    "psraw $3, "#B"             \n\t"\
    "paddw %%mm7, %%mm6         \n\t"\
    "paddw "#B", %%mm6          \n\t"\
    "paddw "#E", "#E"           \n\t"\
    "pxor %%mm7, %%mm7          \n\t"\
    "add %2, %0                 \n\t"\
    "punpcklbw %%mm7, "#F"      \n\t"\
    "psubw "#E", %%mm6          \n\t"\
    "psraw $1, "#E"             \n\t"\
    "psubw "#F", %%mm6          \n\t"\
    "paddw %4, %%mm6            \n\t"\
    "psraw $7, %%mm6            \n\t"\
    "packuswb %%mm6, %%mm6      \n\t"\
    OP(%%mm6, (%1), A, d)            \
    "add %3, %1                 \n\t"
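
/* Scalar view of the three vertical variants (sketch only; src here points
 * at the row that lands in tap C, and packuswb supplies the 0..255 clamp):
 *
 *     quarter-pel:        (-src[-2] - 2*src[-1] + 96*src[0] + 42*src[1]
 *                          - 7*src[2] + 64) >> 7
 *     half-pel:           (-src[-1] + 5*src[0] + 5*src[1] - src[2] + 4) >> 3
 *     three-quarter-pel:  (-7*src[-1] + 42*src[0] + 96*src[1] - 2*src[2]
 *                          - src[3] + 64) >> 7
 *
 * matching the filter taps named in the comments above. */
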
#define QPEL_CAVSVNUM(VOP,OP,ADD,MUL1,MUL2)\
    int w= 2;\
    src -= 2*srcStride;\
    \
    while(w--){\
      __asm__ volatile(\
        "pxor %%mm7, %%mm7          \n\t"\
        "movd (%0), %%mm0           \n\t"\
        "add %2, %0                 \n\t"\
        "movd (%0), %%mm1           \n\t"\
        "add %2, %0                 \n\t"\
        "movd (%0), %%mm2           \n\t"\
        "add %2, %0                 \n\t"\
        "movd (%0), %%mm3           \n\t"\
        "add %2, %0                 \n\t"\
        "movd (%0), %%mm4           \n\t"\
        "add %2, %0                 \n\t"\
        "punpcklbw %%mm7, %%mm0     \n\t"\
        "punpcklbw %%mm7, %%mm1     \n\t"\
        "punpcklbw %%mm7, %%mm2     \n\t"\
        "punpcklbw %%mm7, %%mm3     \n\t"\
        "punpcklbw %%mm7, %%mm4     \n\t"\
        VOP(%%mm0, %%mm1, %%mm2, %%mm3, %%mm4, %%mm5, OP, MUL2)\
        VOP(%%mm1, %%mm2, %%mm3, %%mm4, %%mm5, %%mm0, OP, MUL2)\
        VOP(%%mm2, %%mm3, %%mm4, %%mm5, %%mm0, %%mm1, OP, MUL2)\
        VOP(%%mm3, %%mm4, %%mm5, %%mm0, %%mm1, %%mm2, OP, MUL2)\
        VOP(%%mm4, %%mm5, %%mm0, %%mm1, %%mm2, %%mm3, OP, MUL2)\
        VOP(%%mm5, %%mm0, %%mm1, %%mm2, %%mm3, %%mm4, OP, MUL2)\
        VOP(%%mm0, %%mm1, %%mm2, %%mm3, %%mm4, %%mm5, OP, MUL2)\
        VOP(%%mm1, %%mm2, %%mm3, %%mm4, %%mm5, %%mm0, OP, MUL2)\
        \
        : "+a"(src), "+c"(dst)\
        : "S"((x86_reg)srcStride), "r"((x86_reg)dstStride), "m"(ADD), "m"(MUL1)\
        : "memory"\
     );\
     if(h==16){\
        __asm__ volatile(\
            VOP(%%mm2, %%mm3, %%mm4, %%mm5, %%mm0, %%mm1, OP, MUL2)\
            VOP(%%mm3, %%mm4, %%mm5, %%mm0, %%mm1, %%mm2, OP, MUL2)\
            VOP(%%mm4, %%mm5, %%mm0, %%mm1, %%mm2, %%mm3, OP, MUL2)\
            VOP(%%mm5, %%mm0, %%mm1, %%mm2, %%mm3, %%mm4, OP, MUL2)\
            VOP(%%mm0, %%mm1, %%mm2, %%mm3, %%mm4, %%mm5, OP, MUL2)\
            VOP(%%mm1, %%mm2, %%mm3, %%mm4, %%mm5, %%mm0, OP, MUL2)\
            VOP(%%mm2, %%mm3, %%mm4, %%mm5, %%mm0, %%mm1, OP, MUL2)\
            VOP(%%mm3, %%mm4, %%mm5, %%mm0, %%mm1, %%mm2, OP, MUL2)\
            \
            : "+a"(src), "+c"(dst)\
            : "S"((x86_reg)srcStride), "r"((x86_reg)dstStride), "m"(ADD), "m"(MUL1)\
            : "memory"\
        );\
     }\
     src += 4-(h+5)*srcStride;\
     dst += 4-h*dstStride;\
    }
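
/* Why "src += 4-(h+5)*srcStride": each pass of the while(w--) loop filters a
 * 4-pixel-wide column, loading five priming rows plus one row per output
 * line (h+5 loads in total, each followed by "add %2, %0"), so stepping back
 * (h+5)*srcStride and forward 4 bytes positions src at the top of the next
 * column; dst is rewound the same way over its h rows. */
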
#define QPEL_CAVS(OPNAME, OP, MMX)\
static void OPNAME ## cavs_qpel8_h_ ## MMX(uint8_t *dst, uint8_t *src, int dstStride, int srcStride){\
    int h=8;\
    __asm__ volatile(\
        "pxor %%mm7, %%mm7          \n\t"\
        "movq %5, %%mm6             \n\t"\
        "1:                         \n\t"\
330 "movq (%0), %%mm0 \n\t"\
331 "movq 1(%0), %%mm2 \n\t"\
332 "movq %%mm0, %%mm1 \n\t"\
333 "movq %%mm2, %%mm3 \n\t"\
334 "punpcklbw %%mm7, %%mm0 \n\t"\
335 "punpckhbw %%mm7, %%mm1 \n\t"\
336 "punpcklbw %%mm7, %%mm2 \n\t"\
337 "punpckhbw %%mm7, %%mm3 \n\t"\
338 "paddw %%mm2, %%mm0 \n\t"\
339 "paddw %%mm3, %%mm1 \n\t"\
340 "pmullw %%mm6, %%mm0 \n\t"\
341 "pmullw %%mm6, %%mm1 \n\t"\
342 "movq -1(%0), %%mm2 \n\t"\
343 "movq 2(%0), %%mm4 \n\t"\
344 "movq %%mm2, %%mm3 \n\t"\
345 "movq %%mm4, %%mm5 \n\t"\
346 "punpcklbw %%mm7, %%mm2 \n\t"\
347 "punpckhbw %%mm7, %%mm3 \n\t"\
348 "punpcklbw %%mm7, %%mm4 \n\t"\
349 "punpckhbw %%mm7, %%mm5 \n\t"\
350 "paddw %%mm4, %%mm2 \n\t"\
351 "paddw %%mm3, %%mm5 \n\t"\
352 "psubw %%mm2, %%mm0 \n\t"\
353 "psubw %%mm5, %%mm1 \n\t"\
354 "movq %6, %%mm5 \n\t"\
355 "paddw %%mm5, %%mm0 \n\t"\
356 "paddw %%mm5, %%mm1 \n\t"\
357 "psraw $3, %%mm0 \n\t"\
358 "psraw $3, %%mm1 \n\t"\
359 "packuswb %%mm1, %%mm0 \n\t"\
360 OP(%%mm0, (%1),%%mm5, q) \
365 : "+a"(src), "+c"(dst), "+m"(h)\
366 : "d"((x86_reg)srcStride), "S"((x86_reg)dstStride), "m"(ff_pw_5), "m"(ff_pw_4)\
static inline void OPNAME ## cavs_qpel8or16_v1_ ## MMX(uint8_t *dst, uint8_t *src, int dstStride, int srcStride, int h){\
    QPEL_CAVSVNUM(QPEL_CAVSV1,OP,ff_pw_64,ff_pw_96,ff_pw_42)\
}\
\
static inline void OPNAME ## cavs_qpel8or16_v2_ ## MMX(uint8_t *dst, uint8_t *src, int dstStride, int srcStride, int h){\
    QPEL_CAVSVNUM(QPEL_CAVSV2,OP,ff_pw_4,ff_pw_5,ff_pw_5)\
}\
\
static inline void OPNAME ## cavs_qpel8or16_v3_ ## MMX(uint8_t *dst, uint8_t *src, int dstStride, int srcStride, int h){\
    QPEL_CAVSVNUM(QPEL_CAVSV3,OP,ff_pw_64,ff_pw_96,ff_pw_42)\
}\
\
static void OPNAME ## cavs_qpel8_v1_ ## MMX(uint8_t *dst, uint8_t *src, int dstStride, int srcStride){\
    OPNAME ## cavs_qpel8or16_v1_ ## MMX(dst  , src  , dstStride, srcStride, 8);\
}\
\
static void OPNAME ## cavs_qpel16_v1_ ## MMX(uint8_t *dst, uint8_t *src, int dstStride, int srcStride){\
    OPNAME ## cavs_qpel8or16_v1_ ## MMX(dst  , src  , dstStride, srcStride, 16);\
    OPNAME ## cavs_qpel8or16_v1_ ## MMX(dst+8, src+8, dstStride, srcStride, 16);\
}\
\
static void OPNAME ## cavs_qpel8_v2_ ## MMX(uint8_t *dst, uint8_t *src, int dstStride, int srcStride){\
    OPNAME ## cavs_qpel8or16_v2_ ## MMX(dst  , src  , dstStride, srcStride, 8);\
}\
\
static void OPNAME ## cavs_qpel16_v2_ ## MMX(uint8_t *dst, uint8_t *src, int dstStride, int srcStride){\
    OPNAME ## cavs_qpel8or16_v2_ ## MMX(dst  , src  , dstStride, srcStride, 16);\
    OPNAME ## cavs_qpel8or16_v2_ ## MMX(dst+8, src+8, dstStride, srcStride, 16);\
}\
\
static void OPNAME ## cavs_qpel8_v3_ ## MMX(uint8_t *dst, uint8_t *src, int dstStride, int srcStride){\
    OPNAME ## cavs_qpel8or16_v3_ ## MMX(dst  , src  , dstStride, srcStride, 8);\
}\
\
static void OPNAME ## cavs_qpel16_v3_ ## MMX(uint8_t *dst, uint8_t *src, int dstStride, int srcStride){\
    OPNAME ## cavs_qpel8or16_v3_ ## MMX(dst  , src  , dstStride, srcStride, 16);\
    OPNAME ## cavs_qpel8or16_v3_ ## MMX(dst+8, src+8, dstStride, srcStride, 16);\
}\
\
static void OPNAME ## cavs_qpel16_h_ ## MMX(uint8_t *dst, uint8_t *src, int dstStride, int srcStride){\
    OPNAME ## cavs_qpel8_h_ ## MMX(dst  , src  , dstStride, srcStride);\
    OPNAME ## cavs_qpel8_h_ ## MMX(dst+8, src+8, dstStride, srcStride);\
    src += 8*srcStride;\
    dst += 8*dstStride;\
    OPNAME ## cavs_qpel8_h_ ## MMX(dst  , src  , dstStride, srcStride);\
    OPNAME ## cavs_qpel8_h_ ## MMX(dst+8, src+8, dstStride, srcStride);\
}
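
/* Scalar view of the cavs_qpel8_h loop body above (sketch only): for each
 * of the 8 rows,
 *
 *     dst[x] = clip_uint8((5*(src[x] + src[x+1]) - src[x-1] - src[x+2]
 *                          + 4) >> 3)
 *
 * i.e. the half-pel filter [-1 5 5 -1]/8 with ff_pw_5 as the multiplier (%5)
 * and ff_pw_4 as the rounding term (%6); packuswb performs the clamping. */
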
#define CAVS_MC(OPNAME, SIZE, MMX) \
static void OPNAME ## cavs_qpel ## SIZE ## _mc20_ ## MMX(uint8_t *dst, uint8_t *src, ptrdiff_t stride)\
{\
    OPNAME ## cavs_qpel ## SIZE ## _h_ ## MMX(dst, src, stride, stride);\
}\
\
static void OPNAME ## cavs_qpel ## SIZE ## _mc01_ ## MMX(uint8_t *dst, uint8_t *src, ptrdiff_t stride)\
{\
    OPNAME ## cavs_qpel ## SIZE ## _v1_ ## MMX(dst, src, stride, stride);\
}\
\
static void OPNAME ## cavs_qpel ## SIZE ## _mc02_ ## MMX(uint8_t *dst, uint8_t *src, ptrdiff_t stride)\
{\
    OPNAME ## cavs_qpel ## SIZE ## _v2_ ## MMX(dst, src, stride, stride);\
}\
\
static void OPNAME ## cavs_qpel ## SIZE ## _mc03_ ## MMX(uint8_t *dst, uint8_t *src, ptrdiff_t stride)\
{\
    OPNAME ## cavs_qpel ## SIZE ## _v3_ ## MMX(dst, src, stride, stride);\
}
#define PUT_OP(a,b,temp, size) "mov" #size " " #a ", " #b " \n\t"
#define AVG_3DNOW_OP(a,b,temp, size) \
"mov" #size " " #b ", " #temp " \n\t"\
"pavgusb " #temp ", " #a " \n\t"\
"mov" #size " " #a ", " #b " \n\t"
#define AVG_MMXEXT_OP(a, b, temp, size) \
"mov" #size " " #b ", " #temp " \n\t"\
"pavgb " #temp ", " #a " \n\t"\
"mov" #size " " #a ", " #b " \n\t"
#endif /* (HAVE_MMXEXT_INLINE || HAVE_AMD3DNOW_INLINE) */

#if HAVE_MMX_INLINE
static void put_cavs_qpel8_mc00_mmx(uint8_t *dst, uint8_t *src,
                                    ptrdiff_t stride)
{
    ff_put_pixels8_mmx(dst, src, stride, 8);
}

static void avg_cavs_qpel8_mc00_mmx(uint8_t *dst, uint8_t *src,
                                    ptrdiff_t stride)
{
    ff_avg_pixels8_mmx(dst, src, stride, 8);
}

static void put_cavs_qpel16_mc00_mmx(uint8_t *dst, uint8_t *src,
                                     ptrdiff_t stride)
{
    ff_put_pixels16_mmx(dst, src, stride, 16);
}

static void avg_cavs_qpel16_mc00_mmx(uint8_t *dst, uint8_t *src,
                                     ptrdiff_t stride)
{
    ff_avg_pixels16_mmx(dst, src, stride, 16);
}
static av_cold void cavsdsp_init_mmx(CAVSDSPContext *c,
                                     AVCodecContext *avctx)
{
    c->put_cavs_qpel_pixels_tab[0][0] = put_cavs_qpel16_mc00_mmx;
    c->put_cavs_qpel_pixels_tab[1][0] = put_cavs_qpel8_mc00_mmx;
    c->avg_cavs_qpel_pixels_tab[0][0] = avg_cavs_qpel16_mc00_mmx;
    c->avg_cavs_qpel_pixels_tab[1][0] = avg_cavs_qpel8_mc00_mmx;

    c->cavs_idct8_add = cavs_idct8_add_mmx;
    c->idct_perm      = FF_TRANSPOSE_IDCT_PERM;
}
#endif /* HAVE_MMX_INLINE */

#define DSPFUNC(PFX, IDX, NUM, EXT) \
    c->PFX ## _cavs_qpel_pixels_tab[IDX][ 2] = PFX ## _cavs_qpel ## NUM ## _mc20_ ## EXT; \
    c->PFX ## _cavs_qpel_pixels_tab[IDX][ 4] = PFX ## _cavs_qpel ## NUM ## _mc01_ ## EXT; \
    c->PFX ## _cavs_qpel_pixels_tab[IDX][ 8] = PFX ## _cavs_qpel ## NUM ## _mc02_ ## EXT; \
    c->PFX ## _cavs_qpel_pixels_tab[IDX][12] = PFX ## _cavs_qpel ## NUM ## _mc03_ ## EXT;
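
/* The second index of *_cavs_qpel_pixels_tab appears to encode the
 * quarter-pel position as x + 4*y: mc20 (x=2, y=0) is the horizontal
 * half-pel case, mc01/mc02/mc03 the purely vertical ones (indices 4, 8, 12).
 * Only these and the mc00 copy above are accelerated here; other positions
 * keep the C functions installed by ff_cavsdsp_init(). */
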
#if HAVE_MMXEXT_INLINE
QPEL_CAVS(put_, PUT_OP, mmxext)
QPEL_CAVS(avg_, AVG_MMXEXT_OP, mmxext)

CAVS_MC(put_, 8, mmxext)
CAVS_MC(put_, 16, mmxext)
CAVS_MC(avg_, 8, mmxext)
CAVS_MC(avg_, 16, mmxext)

static av_cold void cavsdsp_init_mmxext(CAVSDSPContext *c,
                                        AVCodecContext *avctx)
{
    DSPFUNC(put, 0, 16, mmxext);
    DSPFUNC(put, 1, 8, mmxext);
    DSPFUNC(avg, 0, 16, mmxext);
    DSPFUNC(avg, 1, 8, mmxext);
}
#endif /* HAVE_MMXEXT_INLINE */
#if HAVE_AMD3DNOW_INLINE
QPEL_CAVS(put_, PUT_OP, 3dnow)
QPEL_CAVS(avg_, AVG_3DNOW_OP, 3dnow)

CAVS_MC(put_, 8, 3dnow)
CAVS_MC(put_, 16, 3dnow)
CAVS_MC(avg_, 8, 3dnow)
CAVS_MC(avg_, 16, 3dnow)

static av_cold void cavsdsp_init_3dnow(CAVSDSPContext *c,
                                       AVCodecContext *avctx)
{
    DSPFUNC(put, 0, 16, 3dnow);
    DSPFUNC(put, 1, 8, 3dnow);
    DSPFUNC(avg, 0, 16, 3dnow);
    DSPFUNC(avg, 1, 8, 3dnow);
}
#endif /* HAVE_AMD3DNOW_INLINE */
av_cold void ff_cavsdsp_init_x86(CAVSDSPContext *c, AVCodecContext *avctx)
{
    int mm_flags = av_get_cpu_flags();

#if HAVE_MMX_INLINE
    if (mm_flags & AV_CPU_FLAG_MMX)
        cavsdsp_init_mmx(c, avctx);
#endif /* HAVE_MMX_INLINE */
#if HAVE_MMXEXT_INLINE
    if (mm_flags & AV_CPU_FLAG_MMXEXT)
        cavsdsp_init_mmxext(c, avctx);
#endif /* HAVE_MMXEXT_INLINE */
#if HAVE_AMD3DNOW_INLINE
    if (mm_flags & AV_CPU_FLAG_3DNOW)
        cavsdsp_init_3dnow(c, avctx);
#endif /* HAVE_AMD3DNOW_INLINE */
}