/*
 * VC-1 and WMV3 - DSP functions MMX-optimized
 * Copyright (c) 2007 Christophe GISQUET <christophe.gisquet@free.fr>
 *
 * Permission is hereby granted, free of charge, to any person
 * obtaining a copy of this software and associated documentation
 * files (the "Software"), to deal in the Software without
 * restriction, including without limitation the rights to use,
 * copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following
 * conditions:
 *
 * The above copyright notice and this permission notice shall be
 * included in all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
 * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
 * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
 * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#include "libavutil/attributes.h"
#include "libavutil/cpu.h"
#include "libavutil/mem.h"
#include "libavutil/mem_internal.h"
#include "libavutil/x86/asm.h"
#include "libavutil/x86/cpu.h"
#include "libavcodec/vc1dsp.h"
#include "constants.h"
#include "fpel.h"
#include "vc1dsp.h"

#if HAVE_6REGS && HAVE_INLINE_ASM && HAVE_MMX_EXTERNAL

void ff_vc1_put_ver_16b_shift2_mmx(int16_t *dst,
                                   const uint8_t *src, x86_reg stride,
                                   int rnd, int64_t shift);
void ff_vc1_put_hor_16b_shift2_mmx(uint8_t *dst, x86_reg stride,
                                   const int16_t *src, int rnd);
void ff_vc1_avg_hor_16b_shift2_mmxext(uint8_t *dst, x86_reg stride,
                                      const int16_t *src, int rnd);

#define OP_PUT(S,D)
#define OP_AVG(S,D) "pavgb " #S ", " #D " \n\t"

/** Add the rounder in mm7 to mm3 and mm4, then shift right to normalize */
#define NORMALIZE_MMX(SHIFT)                                    \
     "paddw     %%mm7, %%mm3           \n\t" /* +bias-r */      \
     "paddw     %%mm7, %%mm4           \n\t" /* +bias-r */      \
     "psraw     "SHIFT", %%mm3         \n\t"                    \
     "psraw     "SHIFT", %%mm4         \n\t"

#define TRANSFER_DO_PACK(OP)                    \
     "packuswb  %%mm4, %%mm3           \n\t"    \
     OP((%2), %%mm3)                            \
     "movq      %%mm3, (%2)            \n\t"

#define TRANSFER_DONT_PACK(OP)                  \
     OP(0(%2), %%mm3)                           \
     OP(8(%2), %%mm4)                           \
     "movq      %%mm3, 0(%2)           \n\t"    \
     "movq      %%mm4, 8(%2)           \n\t"

/** @see MSPEL_FILTER13_CORE for use as UNPACK macro */
#define DO_UNPACK(reg)  "punpcklbw %%mm0, " reg "\n\t"
#define DONT_UNPACK(reg)

/** Compute the rounder 32-r or 8-r and broadcast it to all four words of mm7 */
#define LOAD_ROUNDER_MMX(ROUND)                 \
     "movd      "ROUND", %%mm7         \n\t"    \
     "punpcklwd %%mm7, %%mm7           \n\t"    \
     "punpckldq %%mm7, %%mm7           \n\t"

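/*
 * Illustration (an editor addition, not part of the original file):
 * LOAD_ROUNDER_MMX replicates a 16-bit rounder across all four words of
 * mm7. A plain-C sketch of the same broadcast, assuming the rounder fits
 * in 16 bits:
 *
 *     uint64_t broadcast_rounder(uint16_t r)
 *     {
 *         uint64_t v = r;            // movd:       0 0 0 r
 *         v |= v << 16;              // punpcklwd:  0 0 r r
 *         v |= v << 32;              // punpckldq:  r r r r
 *         return v;
 *     }
 */
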
/**
 * Purely vertical or horizontal 1/2 shift interpolation.
 * Sacrifice mm6 for *9 factor.
 */
#define VC1_SHIFT2(OP, OPNAME)\
static void OPNAME ## vc1_shift2_mmx(uint8_t *dst, const uint8_t *src,\
                                     x86_reg stride, int rnd, x86_reg offset)\
{\
    rnd = 8-rnd;\
    __asm__ volatile(\
        "mov       $8, %%"FF_REG_c"        \n\t"\
        LOAD_ROUNDER_MMX("%5")\
        "movq      "MANGLE(ff_pw_9)", %%mm6\n\t"\
        "1:                                \n\t"\
        "movd      0(%0   ), %%mm3         \n\t"\
        "movd      4(%0   ), %%mm4         \n\t"\
        "movd      0(%0,%2), %%mm1         \n\t"\
        "movd      4(%0,%2), %%mm2         \n\t"\
        "add       %2, %0                  \n\t"\
        "punpcklbw %%mm0, %%mm3            \n\t"\
        "punpcklbw %%mm0, %%mm4            \n\t"\
        "punpcklbw %%mm0, %%mm1            \n\t"\
        "punpcklbw %%mm0, %%mm2            \n\t"\
        "paddw     %%mm1, %%mm3            \n\t"\
        "paddw     %%mm2, %%mm4            \n\t"\
        "movd      0(%0,%3), %%mm1         \n\t"\
        "movd      4(%0,%3), %%mm2         \n\t"\
        "pmullw    %%mm6, %%mm3            \n\t" /* 0,9,9,0*/\
        "pmullw    %%mm6, %%mm4            \n\t" /* 0,9,9,0*/\
        "punpcklbw %%mm0, %%mm1            \n\t"\
        "punpcklbw %%mm0, %%mm2            \n\t"\
        "psubw     %%mm1, %%mm3            \n\t" /*-1,9,9,0*/\
        "psubw     %%mm2, %%mm4            \n\t" /*-1,9,9,0*/\
        "movd      0(%0,%2), %%mm1         \n\t"\
        "movd      4(%0,%2), %%mm2         \n\t"\
        "punpcklbw %%mm0, %%mm1            \n\t"\
        "punpcklbw %%mm0, %%mm2            \n\t"\
        "psubw     %%mm1, %%mm3            \n\t" /*-1,9,9,-1*/\
        "psubw     %%mm2, %%mm4            \n\t" /*-1,9,9,-1*/\
        NORMALIZE_MMX("$4")\
        "packuswb  %%mm4, %%mm3            \n\t"\
        OP((%1), %%mm3)\
        "movq      %%mm3, (%1)             \n\t"\
        "add       %6, %0                  \n\t"\
        "add       %4, %1                  \n\t"\
        "dec       %%"FF_REG_c"            \n\t"\
        "jnz 1b                            \n\t"\
        : "+r"(src),  "+r"(dst)\
        : "r"(offset), "r"(-2*offset), "g"(stride), "m"(rnd),\
          "g"(stride-offset)\
          NAMED_CONSTRAINTS_ADD(ff_pw_9)\
        : "%"FF_REG_c, "memory"\
    );\
}

VC1_SHIFT2(OP_PUT, put_)
VC1_SHIFT2(OP_AVG, avg_)

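/*
 * Reference sketch (an editor addition for clarity, not original code): the
 * MMX loop above applies the VC-1 half-pel filter (-1, 9, 9, -1)/16 along
 * one axis, selected by 'offset' (1 = horizontal, stride = vertical). In
 * plain C this is roughly:
 *
 *     static void vc1_shift2_c_ref(uint8_t *dst, const uint8_t *src,
 *                                  int stride, int rnd, int offset)
 *     {
 *         for (int y = 0; y < 8; y++) {
 *             for (int x = 0; x < 8; x++) {
 *                 int v = -src[x - offset] + 9 * src[x] +
 *                         9 * src[x + offset] - src[x + 2 * offset];
 *                 v = (v + 8 - rnd) >> 4;           // NORMALIZE_MMX("$4")
 *                 dst[x] = av_clip_uint8(v);        // packuswb saturation
 *             }
 *             src += stride;
 *             dst += stride;
 *         }
 *     }
 */
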
/**
 * Core of the 1/4 and 3/4 shift bicubic interpolation.
 *
 * @param UNPACK  Macro unpacking arguments from 8 to 16 bits (can be empty).
 * @param MOVQ    "movd 1" to load packed bytes, or "movq 2" if the data
 *                read is already unpacked.
 * @param A1      Address of 1st tap (beware of unpacked/packed).
 * @param A2      Address of 2nd tap
 * @param A3      Address of 3rd tap
 * @param A4      Address of 4th tap
 */
#define MSPEL_FILTER13_CORE(UNPACK, MOVQ, A1, A2, A3, A4)       \
     MOVQ "*0+"A1", %%mm1       \n\t"                           \
     MOVQ "*4+"A1", %%mm2       \n\t"                           \
     UNPACK("%%mm1")                                            \
     UNPACK("%%mm2")                                            \
     "pmullw    "MANGLE(ff_pw_3)", %%mm1\n\t"                   \
     "pmullw    "MANGLE(ff_pw_3)", %%mm2\n\t"                   \
     MOVQ "*0+"A2", %%mm3       \n\t"                           \
     MOVQ "*4+"A2", %%mm4       \n\t"                           \
     UNPACK("%%mm3")                                            \
     UNPACK("%%mm4")                                            \
     "pmullw    %%mm6, %%mm3    \n\t" /* *18 */                 \
     "pmullw    %%mm6, %%mm4    \n\t" /* *18 */                 \
     "psubw     %%mm1, %%mm3    \n\t" /* 18,-3 */               \
     "psubw     %%mm2, %%mm4    \n\t" /* 18,-3 */               \
     MOVQ "*0+"A4", %%mm1       \n\t"                           \
     MOVQ "*4+"A4", %%mm2       \n\t"                           \
     UNPACK("%%mm1")                                            \
     UNPACK("%%mm2")                                            \
     "psllw     $2, %%mm1       \n\t" /* 4* */                  \
     "psllw     $2, %%mm2       \n\t" /* 4* */                  \
     "psubw     %%mm1, %%mm3    \n\t" /* -4,18,-3 */            \
     "psubw     %%mm2, %%mm4    \n\t" /* -4,18,-3 */            \
     MOVQ "*0+"A3", %%mm1       \n\t"                           \
     MOVQ "*4+"A3", %%mm2       \n\t"                           \
     UNPACK("%%mm1")                                            \
     UNPACK("%%mm2")                                            \
     "pmullw    %%mm5, %%mm1    \n\t" /* *53 */                 \
     "pmullw    %%mm5, %%mm2    \n\t" /* *53 */                 \
     "paddw     %%mm1, %%mm3    \n\t" /* 4,53,18,-3 */          \
     "paddw     %%mm2, %%mm4    \n\t" /* 4,53,18,-3 */

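/*
 * Illustration (an editor addition, not original code): on each 16-bit lane
 * the core above accumulates the VC-1 bicubic taps
 *
 *     result = -3*A1 + 18*A2 + 53*A3 - 4*A4
 *
 * so with A1..A4 bound to pixels p[3]..p[0] (the shift1 ordering), one
 * output value is, in plain C:
 *
 *     int bicubic_shift1_tap(const uint8_t *p, int stride)
 *     {
 *         // taps -4, 53, 18, -3 around the interpolated position
 *         return -4 * p[0 * stride] + 53 * p[1 * stride] +
 *                18 * p[2 * stride] -  3 * p[3 * stride];
 *     }
 *
 * NORMALIZE_MMX then adds the rounder and shifts the sum back down.
 */
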
/**
 * Macro to build the vertical 16-bit version of vc1_put_shift[13].
 * Here, offset=src_stride. The parameters passed as A1 to A4 must use
 * %3 (src_stride) and %4 (3*src_stride).
 *
 * @param  NAME   Either 1 or 3
 * @see MSPEL_FILTER13_CORE for information on A1->A4
 */
#define MSPEL_FILTER13_VER_16B(NAME, A1, A2, A3, A4)                    \
static void                                                             \
vc1_put_ver_16b_ ## NAME ## _mmx(int16_t *dst, const uint8_t *src,      \
                                 x86_reg src_stride,                    \
                                 int rnd, int64_t shift)                \
{                                                                       \
    int h = 8;                                                          \
    src -= src_stride;                                                  \
    __asm__ volatile(                                                   \
        LOAD_ROUNDER_MMX("%5")                                          \
        "movq      "MANGLE(ff_pw_53)", %%mm5\n\t"                       \
        "movq      "MANGLE(ff_pw_18)", %%mm6\n\t"                       \
        ".p2align 3                \n\t"                                \
        "1:                        \n\t"                                \
        MSPEL_FILTER13_CORE(DO_UNPACK, "movd  1", A1, A2, A3, A4)       \
        NORMALIZE_MMX("%6")                                             \
        TRANSFER_DONT_PACK(OP_PUT)                                      \
        /* Last 3 (in fact 4) bytes on the line */                      \
        "movd      8+"A1", %%mm1   \n\t"                                \
        DO_UNPACK("%%mm1")                                              \
        "movq      %%mm1, %%mm3    \n\t"                                \
        "paddw     %%mm1, %%mm1    \n\t"                                \
        "paddw     %%mm3, %%mm1    \n\t" /* 3* */                       \
        "movd      8+"A2", %%mm3   \n\t"                                \
        DO_UNPACK("%%mm3")                                              \
        "pmullw    %%mm6, %%mm3    \n\t" /* *18 */                      \
        "psubw     %%mm1, %%mm3    \n\t" /* 18,-3 */                    \
        "movd      8+"A3", %%mm1   \n\t"                                \
        DO_UNPACK("%%mm1")                                              \
        "pmullw    %%mm5, %%mm1    \n\t" /* *53 */                      \
        "paddw     %%mm1, %%mm3    \n\t" /* 53,18,-3 */                 \
        "movd      8+"A4", %%mm1   \n\t"                                \
        DO_UNPACK("%%mm1")                                              \
        "psllw     $2, %%mm1       \n\t" /* 4* */                       \
        "psubw     %%mm1, %%mm3    \n\t"                                \
        "paddw     %%mm7, %%mm3    \n\t"                                \
        "psraw     %6, %%mm3       \n\t"                                \
        "movq      %%mm3, 16(%2)   \n\t"                                \
        "add       %3, %1          \n\t"                                \
        "add       $24, %2         \n\t"                                \
        "decl      %0              \n\t"                                \
        "jnz 1b                    \n\t"                                \
        : "+r"(h), "+r" (src),  "+r" (dst)                              \
        : "r"(src_stride), "r"(3*src_stride),                           \
          "m"(rnd), "m"(shift)                                          \
          NAMED_CONSTRAINTS_ADD(ff_pw_3,ff_pw_53,ff_pw_18)              \
        : "memory"                                                      \
    );                                                                  \
}

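/*
 * Editor's note: each pass of the loop above emits 12 (8 + 4 spare) 16-bit
 * columns per row ("add $24"), because the following horizontal bicubic
 * pass consumes 3 extra columns beyond the 8 output pixels; the tmp buffer
 * in VC1_MSPEL_MC is sized 12*8 accordingly.
 */
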
/**
 * Macro to build the horizontal 16-bit version of vc1_put_shift[13].
 * Here the data is 16-bit, so the parameters passed as A1 to A4 are plain
 * constant offsets.
 *
 * @param  NAME   Either 1 or 3
 * @see MSPEL_FILTER13_CORE for information on A1->A4
 */
#define MSPEL_FILTER13_HOR_16B(NAME, A1, A2, A3, A4, OP, OPNAME)        \
static void                                                             \
OPNAME ## vc1_hor_16b_ ## NAME ## _mmx(uint8_t *dst, x86_reg stride,    \
                                 const int16_t *src, int rnd)           \
{                                                                       \
    int h = 8;                                                          \
    src -= 1;                                                           \
    rnd -= (-4+53+18-3)*256; /* Add -256 bias */                        \
    __asm__ volatile(                                                   \
        LOAD_ROUNDER_MMX("%4")                                          \
        "movq      "MANGLE(ff_pw_18)", %%mm6   \n\t"                    \
        "movq      "MANGLE(ff_pw_53)", %%mm5   \n\t"                    \
        ".p2align 3                \n\t"                                \
        "1:                        \n\t"                                \
        MSPEL_FILTER13_CORE(DONT_UNPACK, "movq 2", A1, A2, A3, A4)      \
        NORMALIZE_MMX("$7")                                             \
        /* Remove bias */                                               \
        "paddw     "MANGLE(ff_pw_128)", %%mm3  \n\t"                    \
        "paddw     "MANGLE(ff_pw_128)", %%mm4  \n\t"                    \
        TRANSFER_DO_PACK(OP)                                            \
        "add       $24, %1         \n\t"                                \
        "add       %3, %2          \n\t"                                \
        "decl      %0              \n\t"                                \
        "jnz 1b                    \n\t"                                \
        : "+r"(h), "+r" (src),  "+r" (dst)                              \
        : "r"(stride), "m"(rnd)                                         \
          NAMED_CONSTRAINTS_ADD(ff_pw_3,ff_pw_18,ff_pw_53,ff_pw_128)    \
        : "memory"                                                      \
    );                                                                  \
}

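/*
 * A note on the bias above (editor's explanation, inferred from the code):
 * the bicubic taps (-4, 53, 18, -3) sum to 64, so folding -256 per tap
 * (64*256 in total) into the rounder recenters the 16-bit intermediates and
 * helps the pmullw/psubw/paddw chain stay within signed 16-bit range.
 * Since 64*256 is a multiple of 1<<7, the bias survives NORMALIZE_MMX("$7")
 * exactly as -128, which the two paddw ff_pw_128 ("Remove bias") cancel:
 *
 *     ((sum + rnd - 64*256) >> 7) + 128 == (sum + rnd) >> 7
 */
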
/**
 * Macro to build the 8-bit, any-direction version of vc1_put_shift[13].
 * Here, offset=src_stride. The parameters passed as A1 to A4 must use
 * %3 (offset) and %4 (3*offset).
 *
 * @param  NAME   Either 1 or 3
 * @see MSPEL_FILTER13_CORE for information on A1->A4
 */
#define MSPEL_FILTER13_8B(NAME, A1, A2, A3, A4, OP, OPNAME)             \
static void                                                             \
OPNAME ## vc1_## NAME ## _mmx(uint8_t *dst, const uint8_t *src,         \
                        x86_reg stride, int rnd, x86_reg offset)        \
{                                                                       \
    int h = 8;                                                          \
    src -= offset;                                                      \
    rnd = 32-rnd;                                                       \
    __asm__ volatile (                                                  \
        LOAD_ROUNDER_MMX("%6")                                          \
        "movq      "MANGLE(ff_pw_53)", %%mm5       \n\t"                \
        "movq      "MANGLE(ff_pw_18)", %%mm6       \n\t"                \
        ".p2align 3                \n\t"                                \
        "1:                        \n\t"                                \
        MSPEL_FILTER13_CORE(DO_UNPACK, "movd   1", A1, A2, A3, A4)      \
        NORMALIZE_MMX("$6")                                             \
        TRANSFER_DO_PACK(OP)                                            \
        "add       %5, %1          \n\t"                                \
        "add       %5, %2          \n\t"                                \
        "decl      %0              \n\t"                                \
        "jnz 1b                    \n\t"                                \
        : "+r"(h), "+r" (src),  "+r" (dst)                              \
        : "r"(offset), "r"(3*offset), "g"(stride), "m"(rnd)             \
          NAMED_CONSTRAINTS_ADD(ff_pw_53,ff_pw_18,ff_pw_3)              \
        : "memory"                                                      \
    );                                                                  \
}

/** 1/4 shift bicubic interpolation */
MSPEL_FILTER13_8B     (shift1, "0(%1,%4  )", "0(%1,%3,2)", "0(%1,%3  )", "0(%1     )", OP_PUT, put_)
MSPEL_FILTER13_8B     (shift1, "0(%1,%4  )", "0(%1,%3,2)", "0(%1,%3  )", "0(%1     )", OP_AVG, avg_)
MSPEL_FILTER13_VER_16B(shift1, "0(%1,%4  )", "0(%1,%3,2)", "0(%1,%3  )", "0(%1     )")
MSPEL_FILTER13_HOR_16B(shift1, "2*3(%1)", "2*2(%1)", "2*1(%1)", "2*0(%1)", OP_PUT, put_)
MSPEL_FILTER13_HOR_16B(shift1, "2*3(%1)", "2*2(%1)", "2*1(%1)", "2*0(%1)", OP_AVG, avg_)

/** 3/4 shift bicubic interpolation */
MSPEL_FILTER13_8B     (shift3, "0(%1     )", "0(%1,%3  )", "0(%1,%3,2)", "0(%1,%4  )", OP_PUT, put_)
MSPEL_FILTER13_8B     (shift3, "0(%1     )", "0(%1,%3  )", "0(%1,%3,2)", "0(%1,%4  )", OP_AVG, avg_)
MSPEL_FILTER13_VER_16B(shift3, "0(%1     )", "0(%1,%3  )", "0(%1,%3,2)", "0(%1,%4  )")
MSPEL_FILTER13_HOR_16B(shift3, "2*0(%1)", "2*1(%1)", "2*2(%1)", "2*3(%1)", OP_PUT, put_)
MSPEL_FILTER13_HOR_16B(shift3, "2*0(%1)", "2*1(%1)", "2*2(%1)", "2*3(%1)", OP_AVG, avg_)

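/*
 * Editor's note: shift1 and shift3 differ only in how A1..A4 are bound to
 * the four taps. shift1 reads them as (3,2,1,0)*offset and shift3 as
 * (0,1,2,3)*offset, i.e. the same (-4, 53, 18, -3) kernel mirrored, which
 * yields the 1/4- and 3/4-pel positions respectively.
 */
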
typedef void (*vc1_mspel_mc_filter_ver_16bits)(int16_t *dst, const uint8_t *src, x86_reg src_stride, int rnd, int64_t shift);
typedef void (*vc1_mspel_mc_filter_hor_16bits)(uint8_t *dst, x86_reg dst_stride, const int16_t *src, int rnd);
typedef void (*vc1_mspel_mc_filter_8bits)(uint8_t *dst, const uint8_t *src, x86_reg stride, int rnd, x86_reg offset);

/**
 * Interpolate fractional pel values by applying the proper vertical then
 * horizontal filter.
 *
 * @param  dst     Destination buffer for interpolated pels.
 * @param  src     Source buffer.
 * @param  stride  Stride for both src and dst buffers.
 * @param  hmode   Horizontal filter (expressed in quarter-pel units).
 * @param  vmode   Vertical filter (expressed in quarter-pel units).
 * @param  rnd     Rounding bias.
 */
#define VC1_MSPEL_MC(OP, INSTR)\
static void OP ## vc1_mspel_mc(uint8_t *dst, const uint8_t *src, int stride,\
                               int hmode, int vmode, int rnd)\
{\
    static const vc1_mspel_mc_filter_ver_16bits vc1_put_shift_ver_16bits[] =\
         { NULL, vc1_put_ver_16b_shift1_mmx, ff_vc1_put_ver_16b_shift2_mmx, vc1_put_ver_16b_shift3_mmx };\
    static const vc1_mspel_mc_filter_hor_16bits vc1_put_shift_hor_16bits[] =\
         { NULL, OP ## vc1_hor_16b_shift1_mmx, ff_vc1_ ## OP ## hor_16b_shift2_ ## INSTR, OP ## vc1_hor_16b_shift3_mmx };\
    static const vc1_mspel_mc_filter_8bits vc1_put_shift_8bits[] =\
         { NULL, OP ## vc1_shift1_mmx, OP ## vc1_shift2_mmx, OP ## vc1_shift3_mmx };\
\
    __asm__ volatile(\
        "pxor %%mm0, %%mm0         \n\t"\
        ::: "memory"\
    );\
\
    if (vmode) { /* Vertical filter to apply */\
        if (hmode) { /* Horizontal filter to apply, output to tmp */\
            static const int shift_value[] = { 0, 5, 1, 5 };\
            int              shift = (shift_value[hmode]+shift_value[vmode])>>1;\
            int              r;\
            LOCAL_ALIGNED(16, int16_t, tmp, [12*8]);\
\
            r = (1<<(shift-1)) + rnd-1;\
            vc1_put_shift_ver_16bits[vmode](tmp, src-1, stride, r, shift);\
\
            vc1_put_shift_hor_16bits[hmode](dst, stride, tmp+1, 64-rnd);\
            return;\
        }\
        else { /* No horizontal filter, output 8 lines to dst */\
            vc1_put_shift_8bits[vmode](dst, src, stride, 1-rnd, stride);\
            return;\
        }\
    }\
\
    /* Horizontal mode with no vertical mode */\
    vc1_put_shift_8bits[hmode](dst, src, stride, rnd, 1);\
} \
static void OP ## vc1_mspel_mc_16(uint8_t *dst, const uint8_t *src, \
                                  int stride, int hmode, int vmode, int rnd)\
{ \
    OP ## vc1_mspel_mc(dst + 0, src + 0, stride, hmode, vmode, rnd); \
    OP ## vc1_mspel_mc(dst + 8, src + 8, stride, hmode, vmode, rnd); \
    dst += 8*stride; src += 8*stride; \
    OP ## vc1_mspel_mc(dst + 0, src + 0, stride, hmode, vmode, rnd); \
    OP ## vc1_mspel_mc(dst + 8, src + 8, stride, hmode, vmode, rnd); \
}

VC1_MSPEL_MC(put_, mmx)
VC1_MSPEL_MC(avg_, mmxext)

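/*
 * Worked example (an editor addition): for hmode=2, vmode=1 (horizontal
 * half-pel, vertical quarter-pel), shift_value gives shift = (1+5)>>1 = 3,
 * so the vertical shift1 pass writes 16-bit intermediates to tmp with
 * rounder r = (1<<2) + rnd - 1, and the horizontal shift2 pass then filters
 * tmp back down to 8-bit pels with rounder 64-rnd.
 */
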
/** Macro to ease declaration of the bicubic filter interpolation functions */
#define DECLARE_FUNCTION(a, b)                                          \
static void put_vc1_mspel_mc ## a ## b ## _mmx(uint8_t *dst,            \
                                               const uint8_t *src,      \
                                               ptrdiff_t stride,        \
                                               int rnd)                 \
{                                                                       \
     put_vc1_mspel_mc(dst, src, stride, a, b, rnd);                     \
}\
static void avg_vc1_mspel_mc ## a ## b ## _mmxext(uint8_t *dst,         \
                                                  const uint8_t *src,   \
                                                  ptrdiff_t stride,     \
                                                  int rnd)              \
{                                                                       \
     avg_vc1_mspel_mc(dst, src, stride, a, b, rnd);                     \
}\
static void put_vc1_mspel_mc ## a ## b ## _16_mmx(uint8_t *dst,         \
                                                  const uint8_t *src,   \
                                                  ptrdiff_t stride,     \
                                                  int rnd)              \
{                                                                       \
     put_vc1_mspel_mc_16(dst, src, stride, a, b, rnd);                  \
}\
static void avg_vc1_mspel_mc ## a ## b ## _16_mmxext(uint8_t *dst,      \
                                                     const uint8_t *src,\
                                                     ptrdiff_t stride,  \
                                                     int rnd)           \
{                                                                       \
     avg_vc1_mspel_mc_16(dst, src, stride, a, b, rnd);                  \
}

DECLARE_FUNCTION(0, 1)
DECLARE_FUNCTION(0, 2)
DECLARE_FUNCTION(0, 3)

DECLARE_FUNCTION(1, 0)
DECLARE_FUNCTION(1, 1)
DECLARE_FUNCTION(1, 2)
DECLARE_FUNCTION(1, 3)

DECLARE_FUNCTION(2, 0)
DECLARE_FUNCTION(2, 1)
DECLARE_FUNCTION(2, 2)
DECLARE_FUNCTION(2, 3)

DECLARE_FUNCTION(3, 0)
DECLARE_FUNCTION(3, 1)
DECLARE_FUNCTION(3, 2)
DECLARE_FUNCTION(3, 3)

#define FN_ASSIGN(OP, X, Y, INSN) \
    dsp->OP##vc1_mspel_pixels_tab[1][X+4*Y] = OP##vc1_mspel_mc##X##Y##INSN; \
    dsp->OP##vc1_mspel_pixels_tab[0][X+4*Y] = OP##vc1_mspel_mc##X##Y##_16##INSN

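/*
 * Note (an editor addition): vc1_mspel_pixels_tab[1] holds the 8x8 variants
 * and vc1_mspel_pixels_tab[0] the 16x16 ones, indexed by hmode + 4*vmode;
 * slot 0 (the fully pel-aligned case) is handled separately. A caller would,
 * for instance, fetch the 16x16 put function for a horizontal half-pel,
 * vertical quarter-pel position as:
 *
 *     dsp->put_vc1_mspel_pixels_tab[0][2 + 4*1](dst, src, stride, rnd);
 */
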
av_cold void ff_vc1dsp_init_mmx(VC1DSPContext *dsp)
{
    FN_ASSIGN(put_, 0, 1, _mmx);
    FN_ASSIGN(put_, 0, 2, _mmx);
    FN_ASSIGN(put_, 0, 3, _mmx);

    FN_ASSIGN(put_, 1, 0, _mmx);
    FN_ASSIGN(put_, 1, 1, _mmx);
    FN_ASSIGN(put_, 1, 2, _mmx);
    FN_ASSIGN(put_, 1, 3, _mmx);

    FN_ASSIGN(put_, 2, 0, _mmx);
    FN_ASSIGN(put_, 2, 1, _mmx);
    FN_ASSIGN(put_, 2, 2, _mmx);
    FN_ASSIGN(put_, 2, 3, _mmx);

    FN_ASSIGN(put_, 3, 0, _mmx);
    FN_ASSIGN(put_, 3, 1, _mmx);
    FN_ASSIGN(put_, 3, 2, _mmx);
    FN_ASSIGN(put_, 3, 3, _mmx);
}

av_cold void ff_vc1dsp_init_mmxext(VC1DSPContext *dsp)
{
    FN_ASSIGN(avg_, 0, 1, _mmxext);
    FN_ASSIGN(avg_, 0, 2, _mmxext);
    FN_ASSIGN(avg_, 0, 3, _mmxext);

    FN_ASSIGN(avg_, 1, 0, _mmxext);
    FN_ASSIGN(avg_, 1, 1, _mmxext);
    FN_ASSIGN(avg_, 1, 2, _mmxext);
    FN_ASSIGN(avg_, 1, 3, _mmxext);

    FN_ASSIGN(avg_, 2, 0, _mmxext);
    FN_ASSIGN(avg_, 2, 1, _mmxext);
    FN_ASSIGN(avg_, 2, 2, _mmxext);
    FN_ASSIGN(avg_, 2, 3, _mmxext);

    FN_ASSIGN(avg_, 3, 0, _mmxext);
    FN_ASSIGN(avg_, 3, 1, _mmxext);
    FN_ASSIGN(avg_, 3, 2, _mmxext);
    FN_ASSIGN(avg_, 3, 3, _mmxext);
}
#endif /* HAVE_6REGS && HAVE_INLINE_ASM && HAVE_MMX_EXTERNAL */