/*
 * VC-1 and WMV3 - DSP functions MMX-optimized
 * Copyright (c) 2007 Christophe GISQUET <christophe.gisquet@free.fr>
 *
 * Permission is hereby granted, free of charge, to any person
 * obtaining a copy of this software and associated documentation
 * files (the "Software"), to deal in the Software without
 * restriction, including without limitation the rights to use,
 * copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following
 * conditions:
 *
 * The above copyright notice and this permission notice shall be
 * included in all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
 * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
 * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
 * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#include "libavutil/attributes.h"
#include "libavutil/cpu.h"
#include "libavutil/mem.h"
#include "libavutil/x86/asm.h"
#include "libavutil/x86/cpu.h"
#include "libavcodec/vc1dsp.h"
#include "constants.h"
#include "vc1dsp.h"

#if HAVE_6REGS && HAVE_INLINE_ASM && HAVE_MMX_EXTERNAL

void ff_vc1_put_ver_16b_shift2_mmx(int16_t *dst,
                                   const uint8_t *src, x86_reg stride,
                                   int rnd, int64_t shift);

#define OP_PUT(S,D)
#define OP_AVG(S,D) "pavgb " #S ", " #D " \n\t"
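
/* OP is spliced into the store macros below: OP_PUT stores the result as-is,
 * while OP_AVG first averages it with the bytes already at the destination
 * (pavgb), turning a put_ function into its avg_ counterpart. */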

/** Add rounder from mm7 to mm3 and pack result at destination */
#define NORMALIZE_MMX(SHIFT)                                    \
     "paddw     %%mm7, %%mm3           \n\t" /* +bias-r */      \
     "paddw     %%mm7, %%mm4           \n\t" /* +bias-r */      \
     "psraw     "SHIFT", %%mm3         \n\t"                    \
     "psraw     "SHIFT", %%mm4         \n\t"

#define TRANSFER_DO_PACK(OP)                    \
     "packuswb  %%mm4, %%mm3           \n\t"    \
     OP((%2), %%mm3)                            \
     "movq      %%mm3, (%2)            \n\t"

#define TRANSFER_DONT_PACK(OP)                  \
     OP(0(%2), %%mm3)                           \
     OP(8(%2), %%mm4)                           \
     "movq      %%mm3, 0(%2)           \n\t"    \
     "movq      %%mm4, 8(%2)           \n\t"

/** @see MSPEL_FILTER13_CORE for use as UNPACK macro */
#define DO_UNPACK(reg)  "punpcklbw %%mm0, " reg "\n\t"
#define DONT_UNPACK(reg)

/** Compute the rounder 32-r or 8-r and unpack it to mm7 */
#define LOAD_ROUNDER_MMX(ROUND)                 \
     "movd      "ROUND", %%mm7         \n\t"    \
     "punpcklwd %%mm7, %%mm7           \n\t"    \
     "punpckldq %%mm7, %%mm7           \n\t"

/**
 * Data is already unpacked, so some operations can directly be made from
 * memory.
 */
#define VC1_HOR_16b_SHIFT2(OP, OPNAME)\
static void OPNAME ## vc1_hor_16b_shift2_mmx(uint8_t *dst, x86_reg stride,\
                                             const int16_t *src, int rnd)\
{\
    int h = 8;\
\
    src -= 1;\
    rnd -= (-1+9+9-1)*1024; /* Add -1024 bias */\
    __asm__ volatile(\
        LOAD_ROUNDER_MMX("%4")\
        "movq      "MANGLE(ff_pw_128)", %%mm6\n\t"\
        "movq      "MANGLE(ff_pw_9)", %%mm5 \n\t"\
        "1:                            \n\t"\
        "movq      2*0+0(%1), %%mm1    \n\t"\
        "movq      2*0+8(%1), %%mm2    \n\t"\
        "movq      2*1+0(%1), %%mm3    \n\t"\
        "movq      2*1+8(%1), %%mm4    \n\t"\
        "paddw     2*3+0(%1), %%mm1    \n\t"\
        "paddw     2*3+8(%1), %%mm2    \n\t"\
        "paddw     2*2+0(%1), %%mm3    \n\t"\
        "paddw     2*2+8(%1), %%mm4    \n\t"\
        "pmullw    %%mm5, %%mm3        \n\t"\
        "pmullw    %%mm5, %%mm4        \n\t"\
        "psubw     %%mm1, %%mm3        \n\t"\
        "psubw     %%mm2, %%mm4        \n\t"\
        NORMALIZE_MMX("$7")\
        /* Remove bias */\
        "paddw     %%mm6, %%mm3        \n\t"\
        "paddw     %%mm6, %%mm4        \n\t"\
        TRANSFER_DO_PACK(OP)\
        "add       $24, %1             \n\t"\
        "add       %3, %2              \n\t"\
        "decl      %0                  \n\t"\
        "jnz 1b                        \n\t"\
        : "+r"(h), "+r" (src),  "+r" (dst)\
        : "r"(stride), "m"(rnd)\
          NAMED_CONSTRAINTS_ADD(ff_pw_128,ff_pw_9)\
        : "memory"\
    );\
}

VC1_HOR_16b_SHIFT2(OP_PUT, put_)
VC1_HOR_16b_SHIFT2(OP_AVG, avg_)
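
/* These horizontal passes consume the 16-bit intermediates produced by the
 * vertical shift2 pass. Roughly speaking, the -1024 bias folded into the
 * rounder and the +128 (ff_pw_128) correction applied after the shift keep
 * the signed 16-bit values in a range where packuswb saturates correctly. */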

/**
 * Purely vertical or horizontal 1/2 shift interpolation.
 * Sacrifice mm6 for the *9 factor.
 */
#define VC1_SHIFT2(OP, OPNAME)\
static void OPNAME ## vc1_shift2_mmx(uint8_t *dst, const uint8_t *src,\
                                     x86_reg stride, int rnd, x86_reg offset)\
{\
    rnd = 8-rnd;\
    __asm__ volatile(\
        "mov       $8, %%"REG_c"       \n\t"\
        LOAD_ROUNDER_MMX("%5")\
        "movq      "MANGLE(ff_pw_9)", %%mm6\n\t"\
        "1:                            \n\t"\
        "movd      0(%0   ), %%mm3     \n\t"\
        "movd      4(%0   ), %%mm4     \n\t"\
        "movd      0(%0,%2), %%mm1     \n\t"\
        "movd      4(%0,%2), %%mm2     \n\t"\
        "add       %2, %0              \n\t"\
        "punpcklbw %%mm0, %%mm3        \n\t"\
        "punpcklbw %%mm0, %%mm4        \n\t"\
        "punpcklbw %%mm0, %%mm1        \n\t"\
        "punpcklbw %%mm0, %%mm2        \n\t"\
        "paddw     %%mm1, %%mm3        \n\t"\
        "paddw     %%mm2, %%mm4        \n\t"\
        "movd      0(%0,%3), %%mm1     \n\t"\
        "movd      4(%0,%3), %%mm2     \n\t"\
        "pmullw    %%mm6, %%mm3        \n\t" /* 0,9,9,0*/\
        "pmullw    %%mm6, %%mm4        \n\t" /* 0,9,9,0*/\
        "punpcklbw %%mm0, %%mm1        \n\t"\
        "punpcklbw %%mm0, %%mm2        \n\t"\
        "psubw     %%mm1, %%mm3        \n\t" /*-1,9,9,0*/\
        "psubw     %%mm2, %%mm4        \n\t" /*-1,9,9,0*/\
        "movd      0(%0,%2), %%mm1     \n\t"\
        "movd      4(%0,%2), %%mm2     \n\t"\
        "punpcklbw %%mm0, %%mm1        \n\t"\
        "punpcklbw %%mm0, %%mm2        \n\t"\
        "psubw     %%mm1, %%mm3        \n\t" /*-1,9,9,-1*/\
        "psubw     %%mm2, %%mm4        \n\t" /*-1,9,9,-1*/\
        NORMALIZE_MMX("$4")\
        "packuswb  %%mm4, %%mm3        \n\t"\
        OP((%1), %%mm3)\
        "movq      %%mm3, (%1)         \n\t"\
        "add       %6, %0              \n\t"\
        "add       %4, %1              \n\t"\
        "dec       %%"REG_c"           \n\t"\
        "jnz 1b                        \n\t"\
        : "+r"(src),  "+r"(dst)\
        : "r"(offset), "r"(-2*offset), "g"(stride), "m"(rnd),\
          "g"(stride-offset)\
          NAMED_CONSTRAINTS_ADD(ff_pw_9)\
        : "%"REG_c, "memory"\
    );\
}

VC1_SHIFT2(OP_PUT, put_)
VC1_SHIFT2(OP_AVG, avg_)
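
/* For reference, each output pixel of the put_ variant is, in scalar form
 * (p steps by `offset`, i.e. 1 for horizontal or `stride` for vertical,
 * and r is the function's rnd argument):
 *     dst[i] = av_clip_uint8((-p[-1] + 9*p[0] + 9*p[1] - p[2] + 8 - r) >> 4)
 * i.e. the VC-1 bicubic half-pel filter (-1, 9, 9, -1)/16 with rounding
 * control r. */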

/**
 * Core of the 1/4 and 3/4 shift bicubic interpolation.
 *
 * @param UNPACK  Macro unpacking arguments from 8 to 16 bits (can be empty).
 * @param MOVQ    "movd 1" or "movq 2", depending on whether the data to read
 *                is already unpacked.
 * @param A1      Address of 1st tap (beware of unpacked/packed).
 * @param A2      Address of 2nd tap
 * @param A3      Address of 3rd tap
 * @param A4      Address of 4th tap
 */
#define MSPEL_FILTER13_CORE(UNPACK, MOVQ, A1, A2, A3, A4)       \
     MOVQ "*0+"A1", %%mm1       \n\t"                           \
     MOVQ "*4+"A1", %%mm2       \n\t"                           \
     UNPACK("%%mm1")                                            \
     UNPACK("%%mm2")                                            \
     "pmullw    "MANGLE(ff_pw_3)", %%mm1\n\t"                   \
     "pmullw    "MANGLE(ff_pw_3)", %%mm2\n\t"                   \
     MOVQ "*0+"A2", %%mm3       \n\t"                           \
     MOVQ "*4+"A2", %%mm4       \n\t"                           \
     UNPACK("%%mm3")                                            \
     UNPACK("%%mm4")                                            \
     "pmullw    %%mm6, %%mm3    \n\t" /* *18 */                 \
     "pmullw    %%mm6, %%mm4    \n\t" /* *18 */                 \
     "psubw     %%mm1, %%mm3    \n\t" /* 18,-3 */               \
     "psubw     %%mm2, %%mm4    \n\t" /* 18,-3 */               \
     MOVQ "*0+"A4", %%mm1       \n\t"                           \
     MOVQ "*4+"A4", %%mm2       \n\t"                           \
     UNPACK("%%mm1")                                            \
     UNPACK("%%mm2")                                            \
     "psllw     $2, %%mm1       \n\t" /* 4* */                  \
     "psllw     $2, %%mm2       \n\t" /* 4* */                  \
     "psubw     %%mm1, %%mm3    \n\t" /* -4,18,-3 */            \
     "psubw     %%mm2, %%mm4    \n\t" /* -4,18,-3 */            \
     MOVQ "*0+"A3", %%mm1       \n\t"                           \
     MOVQ "*4+"A3", %%mm2       \n\t"                           \
     UNPACK("%%mm1")                                            \
     UNPACK("%%mm2")                                            \
     "pmullw    %%mm5, %%mm1    \n\t" /* *53 */                 \
     "pmullw    %%mm5, %%mm2    \n\t" /* *53 */                 \
     "paddw     %%mm1, %%mm3    \n\t" /* -4,53,18,-3 */         \
     "paddw     %%mm2, %%mm4    \n\t" /* -4,53,18,-3 */

/**
 * Macro to build the vertical 16-bit version of vc1_put_shift[13].
 * Here, offset=src_stride. Parameters passed A1 to A4 must use
 * %3 (src_stride) and %4 (3*src_stride).
 *
 * @param  NAME   Either 1 or 3
 * @see MSPEL_FILTER13_CORE for information on A1->A4
 */
#define MSPEL_FILTER13_VER_16B(NAME, A1, A2, A3, A4)            \
static void                                                     \
vc1_put_ver_16b_ ## NAME ## _mmx(int16_t *dst, const uint8_t *src, \
                                 x86_reg src_stride,            \
                                 int rnd, int64_t shift)        \
{                                                               \
    int h = 8;                                                  \
    src -= src_stride;                                          \
    __asm__ volatile(                                           \
        LOAD_ROUNDER_MMX("%5")                                  \
        "movq      "MANGLE(ff_pw_53)", %%mm5\n\t"               \
        "movq      "MANGLE(ff_pw_18)", %%mm6\n\t"               \
        ".p2align 3                \n\t"                        \
        "1:                        \n\t"                        \
        MSPEL_FILTER13_CORE(DO_UNPACK, "movd 1", A1, A2, A3, A4)\
        NORMALIZE_MMX("%6")                                     \
        TRANSFER_DONT_PACK(OP_PUT)                              \
        /* Last 3 (in fact 4) bytes on the line */              \
        "movd      8+"A1", %%mm1   \n\t"                        \
        DO_UNPACK("%%mm1")                                      \
        "movq      %%mm1, %%mm3    \n\t"                        \
        "paddw     %%mm1, %%mm1    \n\t"                        \
        "paddw     %%mm3, %%mm1    \n\t" /* 3* */               \
        "movd      8+"A2", %%mm3   \n\t"                        \
        DO_UNPACK("%%mm3")                                      \
        "pmullw    %%mm6, %%mm3    \n\t" /* *18 */              \
        "psubw     %%mm1, %%mm3    \n\t" /* 18,-3 */            \
        "movd      8+"A3", %%mm1   \n\t"                        \
        DO_UNPACK("%%mm1")                                      \
        "pmullw    %%mm5, %%mm1    \n\t" /* *53 */              \
        "paddw     %%mm1, %%mm3    \n\t" /* 53,18,-3 */         \
        "movd      8+"A4", %%mm1   \n\t"                        \
        DO_UNPACK("%%mm1")                                      \
        "psllw     $2, %%mm1       \n\t" /* 4* */               \
        "psubw     %%mm1, %%mm3    \n\t"                        \
        "paddw     %%mm7, %%mm3    \n\t"                        \
        "psraw     %6, %%mm3       \n\t"                        \
        "movq      %%mm3, 16(%2)   \n\t"                        \
        "add       %3, %1          \n\t"                        \
        "add       $24, %2         \n\t"                        \
        "decl      %0              \n\t"                        \
        "jnz 1b                    \n\t"                        \
        : "+r"(h), "+r" (src),  "+r" (dst)                      \
        : "r"(src_stride), "r"(3*src_stride),                   \
          "m"(rnd), "m"(shift)                                  \
          NAMED_CONSTRAINTS_ADD(ff_pw_3,ff_pw_53,ff_pw_18)      \
        : "memory"                                              \
    );                                                          \
}

/**
 * Macro to build the horizontal 16-bit version of vc1_put_shift[13].
 * Here the data is already in 16-bit form, so the parameters passed as
 * A1 to A4 can be simple offsets.
 *
 * @param  NAME   Either 1 or 3
 * @see MSPEL_FILTER13_CORE for information on A1->A4
 */
#define MSPEL_FILTER13_HOR_16B(NAME, A1, A2, A3, A4, OP, OPNAME)        \
static void                                                     \
OPNAME ## vc1_hor_16b_ ## NAME ## _mmx(uint8_t *dst, x86_reg stride,   \
                                       const int16_t *src, int rnd)    \
{                                                               \
    int h = 8;                                                  \
    src -= 1;                                                   \
    rnd -= (-4+53+18-3)*256; /* Add -256 bias */                \
    __asm__ volatile(                                           \
        LOAD_ROUNDER_MMX("%4")                                  \
        "movq      "MANGLE(ff_pw_18)", %%mm6   \n\t"            \
        "movq      "MANGLE(ff_pw_53)", %%mm5   \n\t"            \
        ".p2align 3                \n\t"                        \
        "1:                        \n\t"                        \
        MSPEL_FILTER13_CORE(DONT_UNPACK, "movq 2", A1, A2, A3, A4)      \
        NORMALIZE_MMX("$7")                                     \
        /* Remove bias */                                       \
        "paddw     "MANGLE(ff_pw_128)", %%mm3  \n\t"            \
        "paddw     "MANGLE(ff_pw_128)", %%mm4  \n\t"            \
        TRANSFER_DO_PACK(OP)                                    \
        "add       $24, %1         \n\t"                        \
        "add       %3, %2          \n\t"                        \
        "decl      %0              \n\t"                        \
        "jnz 1b                    \n\t"                        \
        : "+r"(h), "+r" (src),  "+r" (dst)                      \
        : "r"(stride), "m"(rnd)                                 \
          NAMED_CONSTRAINTS_ADD(ff_pw_3,ff_pw_18,ff_pw_53,ff_pw_128)    \
        : "memory"                                              \
    );                                                          \
}

/**
 * Macro to build the 8-bit, any direction, version of vc1_put_shift[13].
 * Here, offset=src_stride. Parameters passed A1 to A4 must use
 * %3 (offset) and %4 (3*offset).
 *
 * @param  NAME   Either 1 or 3
 * @see MSPEL_FILTER13_CORE for information on A1->A4
 */
#define MSPEL_FILTER13_8B(NAME, A1, A2, A3, A4, OP, OPNAME)     \
static void                                                     \
OPNAME ## vc1_## NAME ## _mmx(uint8_t *dst, const uint8_t *src, \
                              x86_reg stride, int rnd, x86_reg offset)  \
{                                                               \
    int h = 8;                                                  \
    src -= offset;                                              \
    rnd = 32-rnd;                                               \
    __asm__ volatile (                                          \
        LOAD_ROUNDER_MMX("%6")                                  \
        "movq      "MANGLE(ff_pw_53)", %%mm5       \n\t"        \
        "movq      "MANGLE(ff_pw_18)", %%mm6       \n\t"        \
        ".p2align 3                \n\t"                        \
        "1:                        \n\t"                        \
        MSPEL_FILTER13_CORE(DO_UNPACK, "movd 1", A1, A2, A3, A4)\
        NORMALIZE_MMX("$6")                                     \
        TRANSFER_DO_PACK(OP)                                    \
        "add       %5, %1          \n\t"                        \
        "add       %5, %2          \n\t"                        \
        "decl      %0              \n\t"                        \
        "jnz 1b                    \n\t"                        \
        : "+r"(h), "+r" (src),  "+r" (dst)                      \
        : "r"(offset), "r"(3*offset), "g"(stride), "m"(rnd)     \
          NAMED_CONSTRAINTS_ADD(ff_pw_53,ff_pw_18,ff_pw_3)      \
        : "memory"                                              \
    );                                                          \
}

/** 1/4 shift bicubic interpolation */
MSPEL_FILTER13_8B     (shift1, "0(%1,%4  )", "0(%1,%3,2)", "0(%1,%3  )", "0(%1     )", OP_PUT, put_)
MSPEL_FILTER13_8B     (shift1, "0(%1,%4  )", "0(%1,%3,2)", "0(%1,%3  )", "0(%1     )", OP_AVG, avg_)
MSPEL_FILTER13_VER_16B(shift1, "0(%1,%4  )", "0(%1,%3,2)", "0(%1,%3  )", "0(%1     )")
MSPEL_FILTER13_HOR_16B(shift1, "2*3(%1)", "2*2(%1)", "2*1(%1)", "2*0(%1)", OP_PUT, put_)
MSPEL_FILTER13_HOR_16B(shift1, "2*3(%1)", "2*2(%1)", "2*1(%1)", "2*0(%1)", OP_AVG, avg_)

/** 3/4 shift bicubic interpolation */
MSPEL_FILTER13_8B     (shift3, "0(%1     )", "0(%1,%3  )", "0(%1,%3,2)", "0(%1,%4  )", OP_PUT, put_)
MSPEL_FILTER13_8B     (shift3, "0(%1     )", "0(%1,%3  )", "0(%1,%3,2)", "0(%1,%4  )", OP_AVG, avg_)
MSPEL_FILTER13_VER_16B(shift3, "0(%1     )", "0(%1,%3  )", "0(%1,%3,2)", "0(%1,%4  )")
MSPEL_FILTER13_HOR_16B(shift3, "2*0(%1)", "2*1(%1)", "2*2(%1)", "2*3(%1)", OP_PUT, put_)
MSPEL_FILTER13_HOR_16B(shift3, "2*0(%1)", "2*1(%1)", "2*2(%1)", "2*3(%1)", OP_AVG, avg_)
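
/* With these A1..A4 orderings the shared core yields the VC-1 1/4-pel kernel
 * (-4, 53, 18, -3)/64 for shift1 and its mirror (-3, 18, 53, -4)/64 for
 * shift3, the four taps starting one sample before the current pixel (hence
 * the one-tap src rewind inside each builder macro). */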

typedef void (*vc1_mspel_mc_filter_ver_16bits)(int16_t *dst, const uint8_t *src, x86_reg src_stride, int rnd, int64_t shift);
typedef void (*vc1_mspel_mc_filter_hor_16bits)(uint8_t *dst, x86_reg dst_stride, const int16_t *src, int rnd);
typedef void (*vc1_mspel_mc_filter_8bits)(uint8_t *dst, const uint8_t *src, x86_reg stride, int rnd, x86_reg offset);

/**
 * Interpolate fractional pel values by applying proper vertical then
 * horizontal filter.
 *
 * @param  dst     Destination buffer for interpolated pels.
 * @param  src     Source buffer.
 * @param  stride  Stride for both src and dst buffers.
 * @param  hmode   Horizontal filter (expressed in quarter pixels shift).
 * @param  vmode   Vertical filter (expressed in quarter pixels shift).
 * @param  rnd     Rounding bias.
 */
#define VC1_MSPEL_MC(OP)\
static void OP ## vc1_mspel_mc(uint8_t *dst, const uint8_t *src, int stride,\
                               int hmode, int vmode, int rnd)\
{\
    static const vc1_mspel_mc_filter_ver_16bits vc1_put_shift_ver_16bits[] =\
         { NULL, vc1_put_ver_16b_shift1_mmx, ff_vc1_put_ver_16b_shift2_mmx, vc1_put_ver_16b_shift3_mmx };\
    static const vc1_mspel_mc_filter_hor_16bits vc1_put_shift_hor_16bits[] =\
         { NULL, OP ## vc1_hor_16b_shift1_mmx, OP ## vc1_hor_16b_shift2_mmx, OP ## vc1_hor_16b_shift3_mmx };\
    static const vc1_mspel_mc_filter_8bits vc1_put_shift_8bits[] =\
         { NULL, OP ## vc1_shift1_mmx, OP ## vc1_shift2_mmx, OP ## vc1_shift3_mmx };\
\
    __asm__ volatile(\
        "pxor %%mm0, %%mm0         \n\t"\
        ::: "memory"\
    );\
\
    if (vmode) { /* Vertical filter to apply */\
        if (hmode) { /* Horizontal filter to apply, output to tmp */\
            static const int shift_value[] = { 0, 5, 1, 5 };\
            int              shift = (shift_value[hmode]+shift_value[vmode])>>1;\
            int              r;\
            LOCAL_ALIGNED(16, int16_t, tmp, [12*8]);\
\
            r = (1<<(shift-1)) + rnd-1;\
            vc1_put_shift_ver_16bits[vmode](tmp, src-1, stride, r, shift);\
\
            vc1_put_shift_hor_16bits[hmode](dst, stride, tmp+1, 64-rnd);\
            return;\
        }\
        else { /* No horizontal filter, output 8 lines to dst */\
            vc1_put_shift_8bits[vmode](dst, src, stride, 1-rnd, stride);\
            return;\
        }\
    }\
\
    /* Horizontal mode with no vertical mode */\
    vc1_put_shift_8bits[hmode](dst, src, stride, rnd, 1);\
}\
static void OP ## vc1_mspel_mc_16(uint8_t *dst, const uint8_t *src, \
                                  int stride, int hmode, int vmode, int rnd)\
{\
    OP ## vc1_mspel_mc(dst + 0, src + 0, stride, hmode, vmode, rnd); \
    OP ## vc1_mspel_mc(dst + 8, src + 8, stride, hmode, vmode, rnd); \
    dst += 8*stride; src += 8*stride; \
    OP ## vc1_mspel_mc(dst + 0, src + 0, stride, hmode, vmode, rnd); \
    OP ## vc1_mspel_mc(dst + 8, src + 8, stride, hmode, vmode, rnd); \
}

VC1_MSPEL_MC(put_)
VC1_MSPEL_MC(avg_)
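
/* Dispatch example: hmode=1, vmode=2 first runs ff_vc1_put_ver_16b_shift2_mmx,
 * writing 8 rows of 11 half-pel-filtered columns (stored 12 int16_t wide)
 * into tmp with shift = (5+1)>>1 = 3 and r = (1<<2)+rnd-1, then
 * put_vc1_hor_16b_shift1_mmx applies the 1/4-pel filter horizontally to
 * produce the final 8x8 block in dst. */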

/** Macro to ease declaring bicubic filter interpolation functions */
#define DECLARE_FUNCTION(a, b)                                          \
static void put_vc1_mspel_mc ## a ## b ## _mmx(uint8_t *dst,            \
                                               const uint8_t *src,      \
                                               int stride,              \
                                               int rnd)                 \
{                                                                       \
     put_vc1_mspel_mc(dst, src, stride, a, b, rnd);                     \
}                                                                       \
static void avg_vc1_mspel_mc ## a ## b ## _mmxext(uint8_t *dst,         \
                                                  const uint8_t *src,   \
                                                  int stride,           \
                                                  int rnd)              \
{                                                                       \
     avg_vc1_mspel_mc(dst, src, stride, a, b, rnd);                     \
}                                                                       \
static void put_vc1_mspel_mc ## a ## b ## _16_mmx(uint8_t *dst,         \
                                                  const uint8_t *src,   \
                                                  int stride,           \
                                                  int rnd)              \
{                                                                       \
     put_vc1_mspel_mc_16(dst, src, stride, a, b, rnd);                  \
}                                                                       \
static void avg_vc1_mspel_mc ## a ## b ## _16_mmxext(uint8_t *dst,      \
                                                     const uint8_t *src,\
                                                     int stride,        \
                                                     int rnd)           \
{                                                                       \
     avg_vc1_mspel_mc_16(dst, src, stride, a, b, rnd);                  \
}

DECLARE_FUNCTION(0, 1)
DECLARE_FUNCTION(0, 2)
DECLARE_FUNCTION(0, 3)

DECLARE_FUNCTION(1, 0)
DECLARE_FUNCTION(1, 1)
DECLARE_FUNCTION(1, 2)
DECLARE_FUNCTION(1, 3)

DECLARE_FUNCTION(2, 0)
DECLARE_FUNCTION(2, 1)
DECLARE_FUNCTION(2, 2)
DECLARE_FUNCTION(2, 3)

DECLARE_FUNCTION(3, 0)
DECLARE_FUNCTION(3, 1)
DECLARE_FUNCTION(3, 2)
DECLARE_FUNCTION(3, 3)

static void vc1_inv_trans_4x4_dc_mmxext(uint8_t *dest, int linesize,
                                        int16_t *block)
{
    int dc = block[0];
    dc = (17 * dc +  4) >> 3;
    dc = (17 * dc + 64) >> 7;
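    /* The two steps above are the DC paths of the row and column passes of
     * the 4-point transform (basis coefficient 17), i.e. an overall gain of
     * 17*17/1024 applied to the DC level. */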
491 "movd %0, %%mm0 \n\t"
492 "pshufw $0, %%mm0, %%mm0 \n\t"
493 "pxor %%mm1, %%mm1 \n\t"
494 "psubw %%mm0, %%mm1 \n\t"
495 "packuswb %%mm0, %%mm0 \n\t"
496 "packuswb %%mm1, %%mm1 \n\t"
500 "movd %0, %%mm2 \n\t"
501 "movd %1, %%mm3 \n\t"
502 "movd %2, %%mm4 \n\t"
503 "movd %3, %%mm5 \n\t"
504 "paddusb %%mm0, %%mm2 \n\t"
505 "paddusb %%mm0, %%mm3 \n\t"
506 "paddusb %%mm0, %%mm4 \n\t"
507 "paddusb %%mm0, %%mm5 \n\t"
508 "psubusb %%mm1, %%mm2 \n\t"
509 "psubusb %%mm1, %%mm3 \n\t"
510 "psubusb %%mm1, %%mm4 \n\t"
511 "psubusb %%mm1, %%mm5 \n\t"
512 "movd %%mm2, %0 \n\t"
513 "movd %%mm3, %1 \n\t"
514 "movd %%mm4, %2 \n\t"
515 "movd %%mm5, %3 \n\t"
516 :"+m"(*(uint32_t*)(dest+0*linesize)),
517 "+m"(*(uint32_t*)(dest+1*linesize)),
518 "+m"(*(uint32_t*)(dest+2*linesize)),
519 "+m"(*(uint32_t*)(dest+3*linesize))

static void vc1_inv_trans_4x8_dc_mmxext(uint8_t *dest, int linesize,
                                        int16_t *block)
{
    int dc = block[0];
    dc = (17 * dc +  4) >> 3;
    dc = (12 * dc + 64) >> 7;
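    /* 4-point row pass (coefficient 17) followed by the 8-point column pass
     * (DC coefficient 12): overall DC gain 17*12/1024. */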
530 "movd %0, %%mm0 \n\t"
531 "pshufw $0, %%mm0, %%mm0 \n\t"
532 "pxor %%mm1, %%mm1 \n\t"
533 "psubw %%mm0, %%mm1 \n\t"
534 "packuswb %%mm0, %%mm0 \n\t"
535 "packuswb %%mm1, %%mm1 \n\t"
539 "movd %0, %%mm2 \n\t"
540 "movd %1, %%mm3 \n\t"
541 "movd %2, %%mm4 \n\t"
542 "movd %3, %%mm5 \n\t"
543 "paddusb %%mm0, %%mm2 \n\t"
544 "paddusb %%mm0, %%mm3 \n\t"
545 "paddusb %%mm0, %%mm4 \n\t"
546 "paddusb %%mm0, %%mm5 \n\t"
547 "psubusb %%mm1, %%mm2 \n\t"
548 "psubusb %%mm1, %%mm3 \n\t"
549 "psubusb %%mm1, %%mm4 \n\t"
550 "psubusb %%mm1, %%mm5 \n\t"
551 "movd %%mm2, %0 \n\t"
552 "movd %%mm3, %1 \n\t"
553 "movd %%mm4, %2 \n\t"
554 "movd %%mm5, %3 \n\t"
555 :"+m"(*(uint32_t*)(dest+0*linesize)),
556 "+m"(*(uint32_t*)(dest+1*linesize)),
557 "+m"(*(uint32_t*)(dest+2*linesize)),
558 "+m"(*(uint32_t*)(dest+3*linesize))
562 "movd %0, %%mm2 \n\t"
563 "movd %1, %%mm3 \n\t"
564 "movd %2, %%mm4 \n\t"
565 "movd %3, %%mm5 \n\t"
566 "paddusb %%mm0, %%mm2 \n\t"
567 "paddusb %%mm0, %%mm3 \n\t"
568 "paddusb %%mm0, %%mm4 \n\t"
569 "paddusb %%mm0, %%mm5 \n\t"
570 "psubusb %%mm1, %%mm2 \n\t"
571 "psubusb %%mm1, %%mm3 \n\t"
572 "psubusb %%mm1, %%mm4 \n\t"
573 "psubusb %%mm1, %%mm5 \n\t"
574 "movd %%mm2, %0 \n\t"
575 "movd %%mm3, %1 \n\t"
576 "movd %%mm4, %2 \n\t"
577 "movd %%mm5, %3 \n\t"
578 :"+m"(*(uint32_t*)(dest+0*linesize)),
579 "+m"(*(uint32_t*)(dest+1*linesize)),
580 "+m"(*(uint32_t*)(dest+2*linesize)),
581 "+m"(*(uint32_t*)(dest+3*linesize))

static void vc1_inv_trans_8x4_dc_mmxext(uint8_t *dest, int linesize,
                                        int16_t *block)
{
    int dc = block[0];
    dc = ( 3 * dc +  1) >> 1;
    dc = (17 * dc + 64) >> 7;
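    /* (3*dc+1)>>1 is the 8-point row pass DC gain 12/8 reduced to 3/2;
     * the 4-point column pass then contributes 17/128. */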
592 "movd %0, %%mm0 \n\t"
593 "pshufw $0, %%mm0, %%mm0 \n\t"
594 "pxor %%mm1, %%mm1 \n\t"
595 "psubw %%mm0, %%mm1 \n\t"
596 "packuswb %%mm0, %%mm0 \n\t"
597 "packuswb %%mm1, %%mm1 \n\t"
601 "movq %0, %%mm2 \n\t"
602 "movq %1, %%mm3 \n\t"
603 "movq %2, %%mm4 \n\t"
604 "movq %3, %%mm5 \n\t"
605 "paddusb %%mm0, %%mm2 \n\t"
606 "paddusb %%mm0, %%mm3 \n\t"
607 "paddusb %%mm0, %%mm4 \n\t"
608 "paddusb %%mm0, %%mm5 \n\t"
609 "psubusb %%mm1, %%mm2 \n\t"
610 "psubusb %%mm1, %%mm3 \n\t"
611 "psubusb %%mm1, %%mm4 \n\t"
612 "psubusb %%mm1, %%mm5 \n\t"
613 "movq %%mm2, %0 \n\t"
614 "movq %%mm3, %1 \n\t"
615 "movq %%mm4, %2 \n\t"
616 "movq %%mm5, %3 \n\t"
617 :"+m"(*(uint32_t*)(dest+0*linesize)),
618 "+m"(*(uint32_t*)(dest+1*linesize)),
619 "+m"(*(uint32_t*)(dest+2*linesize)),
620 "+m"(*(uint32_t*)(dest+3*linesize))

static void vc1_inv_trans_8x8_dc_mmxext(uint8_t *dest, int linesize,
                                        int16_t *block)
{
    int dc = block[0];
    dc = (3 * dc +  1) >> 1;
    dc = (3 * dc + 16) >> 5;
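    /* Both passes are 8-point (DC gain 12/8 = 3/2 each); the second step
     * folds the remaining normalization into one shift: 3/32 = 12/128. */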
631 "movd %0, %%mm0 \n\t"
632 "pshufw $0, %%mm0, %%mm0 \n\t"
633 "pxor %%mm1, %%mm1 \n\t"
634 "psubw %%mm0, %%mm1 \n\t"
635 "packuswb %%mm0, %%mm0 \n\t"
636 "packuswb %%mm1, %%mm1 \n\t"
640 "movq %0, %%mm2 \n\t"
641 "movq %1, %%mm3 \n\t"
642 "movq %2, %%mm4 \n\t"
643 "movq %3, %%mm5 \n\t"
644 "paddusb %%mm0, %%mm2 \n\t"
645 "paddusb %%mm0, %%mm3 \n\t"
646 "paddusb %%mm0, %%mm4 \n\t"
647 "paddusb %%mm0, %%mm5 \n\t"
648 "psubusb %%mm1, %%mm2 \n\t"
649 "psubusb %%mm1, %%mm3 \n\t"
650 "psubusb %%mm1, %%mm4 \n\t"
651 "psubusb %%mm1, %%mm5 \n\t"
652 "movq %%mm2, %0 \n\t"
653 "movq %%mm3, %1 \n\t"
654 "movq %%mm4, %2 \n\t"
655 "movq %%mm5, %3 \n\t"
656 :"+m"(*(uint32_t*)(dest+0*linesize)),
657 "+m"(*(uint32_t*)(dest+1*linesize)),
658 "+m"(*(uint32_t*)(dest+2*linesize)),
659 "+m"(*(uint32_t*)(dest+3*linesize))
663 "movq %0, %%mm2 \n\t"
664 "movq %1, %%mm3 \n\t"
665 "movq %2, %%mm4 \n\t"
666 "movq %3, %%mm5 \n\t"
667 "paddusb %%mm0, %%mm2 \n\t"
668 "paddusb %%mm0, %%mm3 \n\t"
669 "paddusb %%mm0, %%mm4 \n\t"
670 "paddusb %%mm0, %%mm5 \n\t"
671 "psubusb %%mm1, %%mm2 \n\t"
672 "psubusb %%mm1, %%mm3 \n\t"
673 "psubusb %%mm1, %%mm4 \n\t"
674 "psubusb %%mm1, %%mm5 \n\t"
675 "movq %%mm2, %0 \n\t"
676 "movq %%mm3, %1 \n\t"
677 "movq %%mm4, %2 \n\t"
678 "movq %%mm5, %3 \n\t"
679 :"+m"(*(uint32_t*)(dest+0*linesize)),
680 "+m"(*(uint32_t*)(dest+1*linesize)),
681 "+m"(*(uint32_t*)(dest+2*linesize)),
682 "+m"(*(uint32_t*)(dest+3*linesize))

#define FN_ASSIGN(OP, X, Y, INSN) \
    dsp->OP##vc1_mspel_pixels_tab[1][X+4*Y] = OP##vc1_mspel_mc##X##Y##INSN; \
    dsp->OP##vc1_mspel_pixels_tab[0][X+4*Y] = OP##vc1_mspel_mc##X##Y##_16##INSN
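
/* The mspel tables are indexed by hmode + 4*vmode: entry [1][...] holds the
 * 8x8 functions built above, entry [0][...] the 16x16 wrappers. Index 0
 * (hmode == vmode == 0, plain pixel copy) is not assigned here. */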

av_cold void ff_vc1dsp_init_mmx(VC1DSPContext *dsp)
{
    FN_ASSIGN(put_, 0, 1, _mmx);
    FN_ASSIGN(put_, 0, 2, _mmx);
    FN_ASSIGN(put_, 0, 3, _mmx);

    FN_ASSIGN(put_, 1, 0, _mmx);
    FN_ASSIGN(put_, 1, 1, _mmx);
    FN_ASSIGN(put_, 1, 2, _mmx);
    FN_ASSIGN(put_, 1, 3, _mmx);

    FN_ASSIGN(put_, 2, 0, _mmx);
    FN_ASSIGN(put_, 2, 1, _mmx);
    FN_ASSIGN(put_, 2, 2, _mmx);
    FN_ASSIGN(put_, 2, 3, _mmx);

    FN_ASSIGN(put_, 3, 0, _mmx);
    FN_ASSIGN(put_, 3, 1, _mmx);
    FN_ASSIGN(put_, 3, 2, _mmx);
    FN_ASSIGN(put_, 3, 3, _mmx);
}

av_cold void ff_vc1dsp_init_mmxext(VC1DSPContext *dsp)
{
    FN_ASSIGN(avg_, 0, 1, _mmxext);
    FN_ASSIGN(avg_, 0, 2, _mmxext);
    FN_ASSIGN(avg_, 0, 3, _mmxext);

    FN_ASSIGN(avg_, 1, 0, _mmxext);
    FN_ASSIGN(avg_, 1, 1, _mmxext);
    FN_ASSIGN(avg_, 1, 2, _mmxext);
    FN_ASSIGN(avg_, 1, 3, _mmxext);

    FN_ASSIGN(avg_, 2, 0, _mmxext);
    FN_ASSIGN(avg_, 2, 1, _mmxext);
    FN_ASSIGN(avg_, 2, 2, _mmxext);
    FN_ASSIGN(avg_, 2, 3, _mmxext);

    FN_ASSIGN(avg_, 3, 0, _mmxext);
    FN_ASSIGN(avg_, 3, 1, _mmxext);
    FN_ASSIGN(avg_, 3, 2, _mmxext);
    FN_ASSIGN(avg_, 3, 3, _mmxext);

    dsp->vc1_inv_trans_8x8_dc = vc1_inv_trans_8x8_dc_mmxext;
    dsp->vc1_inv_trans_4x8_dc = vc1_inv_trans_4x8_dc_mmxext;
    dsp->vc1_inv_trans_8x4_dc = vc1_inv_trans_8x4_dc_mmxext;
    dsp->vc1_inv_trans_4x4_dc = vc1_inv_trans_4x4_dc_mmxext;
}

#endif /* HAVE_6REGS && HAVE_INLINE_ASM && HAVE_MMX_EXTERNAL */