2 * VC-1 and WMV3 - DSP functions MMX-optimized
3 * Copyright (c) 2007 Christophe GISQUET <christophe.gisquet@free.fr>
5 * Permission is hereby granted, free of charge, to any person
6 * obtaining a copy of this software and associated documentation
7 * files (the "Software"), to deal in the Software without
8 * restriction, including without limitation the rights to use,
9 * copy, modify, merge, publish, distribute, sublicense, and/or sell
10 * copies of the Software, and to permit persons to whom the
11 * Software is furnished to do so, subject to the following
14 * The above copyright notice and this permission notice shall be
15 * included in all copies or substantial portions of the Software.
17 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
18 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
19 * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
20 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
21 * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
22 * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
23 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
24 * OTHER DEALINGS IN THE SOFTWARE.
27 #include "libavutil/cpu.h"
28 #include "libavutil/internal.h"
29 #include "libavutil/mem.h"
30 #include "libavutil/x86/asm.h"
31 #include "libavcodec/dsputil.h"
32 #include "dsputil_mmx.h"
33 #include "libavcodec/vc1dsp.h"
38 #define OP_AVG(S,D) "pavgb " #S ", " #D " \n\t"
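/* OP_AVG averages the result with the bytes already at the destination
 * (pavgb); its OP_PUT counterpart is a plain store. The filter macros
 * below are instantiated with both so that each filter exists in a put_
 * and an avg_ variant. */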
40 /** Add the rounder in mm7 to mm3 and mm4 and shift the results down; packing/storing is left to the TRANSFER macros below */
41 #define NORMALIZE_MMX(SHIFT) \
42 "paddw %%mm7, %%mm3 \n\t" /* +bias-r */ \
43 "paddw %%mm7, %%mm4 \n\t" /* +bias-r */ \
44 "psraw "SHIFT", %%mm3 \n\t" \
45 "psraw "SHIFT", %%mm4 \n\t"
47 #define TRANSFER_DO_PACK(OP) \
48 "packuswb %%mm4, %%mm3 \n\t" \
50 "movq %%mm3, (%2) \n\t"
52 #define TRANSFER_DONT_PACK(OP) \
55 "movq %%mm3, 0(%2) \n\t" \
56 "movq %%mm4, 8(%2) \n\t"
58 /** @see MSPEL_FILTER13_CORE for use as UNPACK macro */
59 #define DO_UNPACK(reg) "punpcklbw %%mm0, " reg "\n\t"
60 #define DONT_UNPACK(reg)
62 /** Broadcast the rounder (32-r or 8-r) to all four words of mm7 */
63 #define LOAD_ROUNDER_MMX(ROUND) \
64 "movd "ROUND", %%mm7 \n\t" \
65 "punpcklwd %%mm7, %%mm7 \n\t" \
66 "punpckldq %%mm7, %%mm7 \n\t"
68 #define SHIFT2_LINE(OFF, R0,R1,R2,R3) \
69 "paddw %%mm"#R2", %%mm"#R1" \n\t" \
70 "movd (%0,%3), %%mm"#R0" \n\t" \
71 "pmullw %%mm6, %%mm"#R1" \n\t" \
72 "punpcklbw %%mm0, %%mm"#R0" \n\t" \
73 "movd (%0,%2), %%mm"#R3" \n\t" \
74 "psubw %%mm"#R0", %%mm"#R1" \n\t" \
75 "punpcklbw %%mm0, %%mm"#R3" \n\t" \
76 "paddw %%mm7, %%mm"#R1" \n\t" \
77 "psubw %%mm"#R3", %%mm"#R1" \n\t" \
78 "psraw %4, %%mm"#R1" \n\t" \
79 "movq %%mm"#R1", "#OFF"(%1) \n\t" \
82 /** Sacrificing mm6 makes it possible to pipeline loads from src */
83 static void vc1_put_ver_16b_shift2_mmx(int16_t *dst,
84 const uint8_t *src, x86_reg stride,
85 int rnd, int64_t shift)
88 "mov $3, %%"REG_c" \n\t"
89 LOAD_ROUNDER_MMX("%5")
90 "movq "MANGLE(ff_pw_9)", %%mm6 \n\t"
92 "movd (%0), %%mm2 \n\t"
94 "movd (%0), %%mm3 \n\t"
95 "punpcklbw %%mm0, %%mm2 \n\t"
96 "punpcklbw %%mm0, %%mm3 \n\t"
97 SHIFT2_LINE( 0, 1, 2, 3, 4)
98 SHIFT2_LINE( 24, 2, 3, 4, 1)
99 SHIFT2_LINE( 48, 3, 4, 1, 2)
100 SHIFT2_LINE( 72, 4, 1, 2, 3)
101 SHIFT2_LINE( 96, 1, 2, 3, 4)
102 SHIFT2_LINE(120, 2, 3, 4, 1)
103 SHIFT2_LINE(144, 3, 4, 1, 2)
104 SHIFT2_LINE(168, 4, 1, 2, 3)
109 : "+r"(src), "+r"(dst)
110 : "r"(stride), "r"(-2*stride),
111 "m"(shift), "m"(rnd), "r"(9*stride-4)
117 * Data is already unpacked, so some operations can be done directly from memory.
120 #define VC1_HOR_16b_SHIFT2(OP, OPNAME)\
121 static void OPNAME ## vc1_hor_16b_shift2_mmx(uint8_t *dst, x86_reg stride,\
122 const int16_t *src, int rnd)\
127 rnd -= (-1+9+9-1)*1024; /* Add -1024 bias */\
129 LOAD_ROUNDER_MMX("%4")\
130 "movq "MANGLE(ff_pw_128)", %%mm6\n\t"\
131 "movq "MANGLE(ff_pw_9)", %%mm5 \n\t"\
133 "movq 2*0+0(%1), %%mm1 \n\t"\
134 "movq 2*0+8(%1), %%mm2 \n\t"\
135 "movq 2*1+0(%1), %%mm3 \n\t"\
136 "movq 2*1+8(%1), %%mm4 \n\t"\
137 "paddw 2*3+0(%1), %%mm1 \n\t"\
138 "paddw 2*3+8(%1), %%mm2 \n\t"\
139 "paddw 2*2+0(%1), %%mm3 \n\t"\
140 "paddw 2*2+8(%1), %%mm4 \n\t"\
141 "pmullw %%mm5, %%mm3 \n\t"\
142 "pmullw %%mm5, %%mm4 \n\t"\
143 "psubw %%mm1, %%mm3 \n\t"\
144 "psubw %%mm2, %%mm4 \n\t"\
147 "paddw %%mm6, %%mm3 \n\t"\
148 "paddw %%mm6, %%mm4 \n\t"\
149 TRANSFER_DO_PACK(OP)\
154 : "+r"(h), "+r" (src), "+r" (dst)\
155 : "r"(stride), "m"(rnd)\
160 VC1_HOR_16b_SHIFT2(OP_PUT, put_)
161 VC1_HOR_16b_SHIFT2(OP_AVG, avg_)
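/* Scalar sketch of the horizontal second pass built above (illustration
 * only): the same -1, 9, 9, -1 taps are applied to the 16-bit
 * intermediates, the result is shifted down by 7 and clipped to 8 bits.
 * The -1024 and ff_pw_128 bias corrections of the asm are assumed to be
 * folded into rnd here. */
static inline uint8_t vc1_hor_16b_shift2_sample_c(const int16_t *src, int rnd)
{
    int val = -src[0] + 9 * src[1] + 9 * src[2] - src[3] + rnd;
    return av_clip_uint8(val >> 7);
}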
165 * Purely vertical or horizontal 1/2 shift interpolation.
166 * Sacrifice mm6 for the *9 factor.
168 #define VC1_SHIFT2(OP, OPNAME)\
169 static void OPNAME ## vc1_shift2_mmx(uint8_t *dst, const uint8_t *src,\
170 x86_reg stride, int rnd, x86_reg offset)\
174 "mov $8, %%"REG_c" \n\t"\
175 LOAD_ROUNDER_MMX("%5")\
176 "movq "MANGLE(ff_pw_9)", %%mm6\n\t"\
178 "movd 0(%0 ), %%mm3 \n\t"\
179 "movd 4(%0 ), %%mm4 \n\t"\
180 "movd 0(%0,%2), %%mm1 \n\t"\
181 "movd 4(%0,%2), %%mm2 \n\t"\
183 "punpcklbw %%mm0, %%mm3 \n\t"\
184 "punpcklbw %%mm0, %%mm4 \n\t"\
185 "punpcklbw %%mm0, %%mm1 \n\t"\
186 "punpcklbw %%mm0, %%mm2 \n\t"\
187 "paddw %%mm1, %%mm3 \n\t"\
188 "paddw %%mm2, %%mm4 \n\t"\
189 "movd 0(%0,%3), %%mm1 \n\t"\
190 "movd 4(%0,%3), %%mm2 \n\t"\
191 "pmullw %%mm6, %%mm3 \n\t" /* 0,9,9,0*/\
192 "pmullw %%mm6, %%mm4 \n\t" /* 0,9,9,0*/\
193 "punpcklbw %%mm0, %%mm1 \n\t"\
194 "punpcklbw %%mm0, %%mm2 \n\t"\
195 "psubw %%mm1, %%mm3 \n\t" /*-1,9,9,0*/\
196 "psubw %%mm2, %%mm4 \n\t" /*-1,9,9,0*/\
197 "movd 0(%0,%2), %%mm1 \n\t"\
198 "movd 4(%0,%2), %%mm2 \n\t"\
199 "punpcklbw %%mm0, %%mm1 \n\t"\
200 "punpcklbw %%mm0, %%mm2 \n\t"\
201 "psubw %%mm1, %%mm3 \n\t" /*-1,9,9,-1*/\
202 "psubw %%mm2, %%mm4 \n\t" /*-1,9,9,-1*/\
204 "packuswb %%mm4, %%mm3 \n\t"\
206 "movq %%mm3, (%1) \n\t"\
209 "dec %%"REG_c" \n\t"\
211 : "+r"(src), "+r"(dst)\
212 : "r"(offset), "r"(-2*offset), "g"(stride), "m"(rnd),\
214 : "%"REG_c, "memory"\
218 VC1_SHIFT2(OP_PUT, put_)
219 VC1_SHIFT2(OP_AVG, avg_)
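/* Scalar sketch of the single-pass 1/2-pel filter instantiated above
 * (illustration only): taps -1, 9, 9, -1, divisor 16, rounder 8-rnd.
 * offset is 1 for horizontal filtering or the line stride for vertical
 * filtering, matching the offset argument of the asm versions. */
static inline uint8_t vc1_shift2_sample_c(const uint8_t *src, int offset, int rnd)
{
    int val = -src[-offset] + 9 * src[0] + 9 * src[offset] - src[2 * offset];
    return av_clip_uint8((val + 8 - rnd) >> 4);
}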
222 * Core of the 1/4 and 3/4 shift bicubic interpolation.
224 * @param UNPACK Macro unpacking arguments from 8 to 16 bits (can be empty).
225 * @param MOVQ "movd 1" for packed 8-bit input, "movq 2" if the data read is already unpacked to 16 bits.
226 * @param A1 Address of 1st tap (beware of unpacked/packed).
227 * @param A2 Address of 2nd tap
228 * @param A3 Address of 3rd tap
229 * @param A4 Address of 4th tap
231 #define MSPEL_FILTER13_CORE(UNPACK, MOVQ, A1, A2, A3, A4) \
232 MOVQ "*0+"A1", %%mm1 \n\t" \
233 MOVQ "*4+"A1", %%mm2 \n\t" \
236 "pmullw "MANGLE(ff_pw_3)", %%mm1\n\t" \
237 "pmullw "MANGLE(ff_pw_3)", %%mm2\n\t" \
238 MOVQ "*0+"A2", %%mm3 \n\t" \
239 MOVQ "*4+"A2", %%mm4 \n\t" \
242 "pmullw %%mm6, %%mm3 \n\t" /* *18 */ \
243 "pmullw %%mm6, %%mm4 \n\t" /* *18 */ \
244 "psubw %%mm1, %%mm3 \n\t" /* 18,-3 */ \
245 "psubw %%mm2, %%mm4 \n\t" /* 18,-3 */ \
246 MOVQ "*0+"A4", %%mm1 \n\t" \
247 MOVQ "*4+"A4", %%mm2 \n\t" \
250 "psllw $2, %%mm1 \n\t" /* 4* */ \
251 "psllw $2, %%mm2 \n\t" /* 4* */ \
252 "psubw %%mm1, %%mm3 \n\t" /* -4,18,-3 */ \
253 "psubw %%mm2, %%mm4 \n\t" /* -4,18,-3 */ \
254 MOVQ "*0+"A3", %%mm1 \n\t" \
255 MOVQ "*4+"A3", %%mm2 \n\t" \
258 "pmullw %%mm5, %%mm1 \n\t" /* *53 */ \
259 "pmullw %%mm5, %%mm2 \n\t" /* *53 */ \
260 "paddw %%mm1, %%mm3 \n\t" /* 4,53,18,-3 */ \
261 "paddw %%mm2, %%mm4 \n\t" /* 4,53,18,-3 */
264 * Macro to build the vertical 16-bit version of vc1_put_shift[13].
265 * Here, offset=src_stride. Parameters passed A1 to A4 must use
266 * %3 (src_stride) and %4 (3*src_stride).
268 * @param NAME Either shift1 or shift3
269 * @see MSPEL_FILTER13_CORE for information on A1->A4
271 #define MSPEL_FILTER13_VER_16B(NAME, A1, A2, A3, A4) \
273 vc1_put_ver_16b_ ## NAME ## _mmx(int16_t *dst, const uint8_t *src, \
274 x86_reg src_stride, \
275 int rnd, int64_t shift) \
280 LOAD_ROUNDER_MMX("%5") \
281 "movq "MANGLE(ff_pw_53)", %%mm5\n\t" \
282 "movq "MANGLE(ff_pw_18)", %%mm6\n\t" \
285 MSPEL_FILTER13_CORE(DO_UNPACK, "movd 1", A1, A2, A3, A4) \
286 NORMALIZE_MMX("%6") \
287 TRANSFER_DONT_PACK(OP_PUT) \
288 /* Last 3 (in fact 4) bytes on the line */ \
289 "movd 8+"A1", %%mm1 \n\t" \
291 "movq %%mm1, %%mm3 \n\t" \
292 "paddw %%mm1, %%mm1 \n\t" \
293 "paddw %%mm3, %%mm1 \n\t" /* 3* */ \
294 "movd 8+"A2", %%mm3 \n\t" \
296 "pmullw %%mm6, %%mm3 \n\t" /* *18 */ \
297 "psubw %%mm1, %%mm3 \n\t" /*18,-3 */ \
298 "movd 8+"A3", %%mm1 \n\t" \
300 "pmullw %%mm5, %%mm1 \n\t" /* *53 */ \
301 "paddw %%mm1, %%mm3 \n\t" /*53,18,-3 */ \
302 "movd 8+"A4", %%mm1 \n\t" \
304 "psllw $2, %%mm1 \n\t" /* 4* */ \
305 "psubw %%mm1, %%mm3 \n\t" \
306 "paddw %%mm7, %%mm3 \n\t" \
307 "psraw %6, %%mm3 \n\t" \
308 "movq %%mm3, 16(%2) \n\t" \
313 : "+r"(h), "+r" (src), "+r" (dst) \
314 : "r"(src_stride), "r"(3*src_stride), \
315 "m"(rnd), "m"(shift) \
321 * Macro to build the horizontal 16-bit version of vc1_put_shift[13].
322 * Here the source is already unpacked 16-bit data, so the A1 to A4 parameters are plain constant offsets.
324 * @param NAME Either shift1 or shift3
325 * @see MSPEL_FILTER13_CORE for information on A1->A4
327 #define MSPEL_FILTER13_HOR_16B(NAME, A1, A2, A3, A4, OP, OPNAME) \
329 OPNAME ## vc1_hor_16b_ ## NAME ## _mmx(uint8_t *dst, x86_reg stride, \
330 const int16_t *src, int rnd) \
334 rnd -= (-4+53+18-3)*256; /* Add -256 bias */ \
336 LOAD_ROUNDER_MMX("%4") \
337 "movq "MANGLE(ff_pw_18)", %%mm6 \n\t" \
338 "movq "MANGLE(ff_pw_53)", %%mm5 \n\t" \
341 MSPEL_FILTER13_CORE(DONT_UNPACK, "movq 2", A1, A2, A3, A4) \
342 NORMALIZE_MMX("$7") \
344 "paddw "MANGLE(ff_pw_128)", %%mm3 \n\t" \
345 "paddw "MANGLE(ff_pw_128)", %%mm4 \n\t" \
346 TRANSFER_DO_PACK(OP) \
351 : "+r"(h), "+r" (src), "+r" (dst) \
352 : "r"(stride), "m"(rnd) \
358 * Macro to build the 8-bit, any direction, version of vc1_put_shift[13].
359 * Here, offset is the tap spacing (src_stride for vertical filtering, 1 for horizontal). Parameters passed A1 to A4 must use
360 * %3 (offset) and %4 (3*offset).
362 * @param NAME Either shift1 or shift3
363 * @see MSPEL_FILTER13_CORE for information on A1->A4
365 #define MSPEL_FILTER13_8B(NAME, A1, A2, A3, A4, OP, OPNAME) \
367 OPNAME ## vc1_## NAME ## _mmx(uint8_t *dst, const uint8_t *src, \
368 x86_reg stride, int rnd, x86_reg offset) \
374 LOAD_ROUNDER_MMX("%6") \
375 "movq "MANGLE(ff_pw_53)", %%mm5 \n\t" \
376 "movq "MANGLE(ff_pw_18)", %%mm6 \n\t" \
379 MSPEL_FILTER13_CORE(DO_UNPACK, "movd 1", A1, A2, A3, A4) \
380 NORMALIZE_MMX("$6") \
381 TRANSFER_DO_PACK(OP) \
386 : "+r"(h), "+r" (src), "+r" (dst) \
387 : "r"(offset), "r"(3*offset), "g"(stride), "m"(rnd) \
392 /** 1/4 shift bicubic interpolation */
393 MSPEL_FILTER13_8B (shift1, "0(%1,%4 )", "0(%1,%3,2)", "0(%1,%3 )", "0(%1 )", OP_PUT, put_)
394 MSPEL_FILTER13_8B (shift1, "0(%1,%4 )", "0(%1,%3,2)", "0(%1,%3 )", "0(%1 )", OP_AVG, avg_)
395 MSPEL_FILTER13_VER_16B(shift1, "0(%1,%4 )", "0(%1,%3,2)", "0(%1,%3 )", "0(%1 )")
396 MSPEL_FILTER13_HOR_16B(shift1, "2*3(%1)", "2*2(%1)", "2*1(%1)", "2*0(%1)", OP_PUT, put_)
397 MSPEL_FILTER13_HOR_16B(shift1, "2*3(%1)", "2*2(%1)", "2*1(%1)", "2*0(%1)", OP_AVG, avg_)
399 /** 3/4 shift bicubic interpolation */
400 MSPEL_FILTER13_8B (shift3, "0(%1 )", "0(%1,%3 )", "0(%1,%3,2)", "0(%1,%4 )", OP_PUT, put_)
401 MSPEL_FILTER13_8B (shift3, "0(%1 )", "0(%1,%3 )", "0(%1,%3,2)", "0(%1,%4 )", OP_AVG, avg_)
402 MSPEL_FILTER13_VER_16B(shift3, "0(%1 )", "0(%1,%3 )", "0(%1,%3,2)", "0(%1,%4 )")
403 MSPEL_FILTER13_HOR_16B(shift3, "2*0(%1)", "2*1(%1)", "2*2(%1)", "2*3(%1)", OP_PUT, put_)
404 MSPEL_FILTER13_HOR_16B(shift3, "2*0(%1)", "2*1(%1)", "2*2(%1)", "2*3(%1)", OP_AVG, avg_)
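/* Scalar sketch of the single-pass 8-bit 1/4-pel filter instantiated above
 * (illustration only): weights -4, 53, 18, -3 over four consecutive taps,
 * divisor 64, rounder 32-rnd; the 3/4-pel variant mirrors the weights.
 * Here src points at the first of the four taps and offset is 1 for
 * horizontal filtering or the stride for vertical filtering. */
static inline uint8_t vc1_shift1_sample_c(const uint8_t *src, int offset, int rnd)
{
    int val = -4 * src[0] + 53 * src[offset]
              + 18 * src[2 * offset] - 3 * src[3 * offset];
    return av_clip_uint8((val + 32 - rnd) >> 6);
}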
406 typedef void (*vc1_mspel_mc_filter_ver_16bits)(int16_t *dst, const uint8_t *src, x86_reg src_stride, int rnd, int64_t shift);
407 typedef void (*vc1_mspel_mc_filter_hor_16bits)(uint8_t *dst, x86_reg dst_stride, const int16_t *src, int rnd);
408 typedef void (*vc1_mspel_mc_filter_8bits)(uint8_t *dst, const uint8_t *src, x86_reg stride, int rnd, x86_reg offset);
411 * Interpolate fractional pel values by applying the proper vertical then horizontal filter.
414 * @param dst Destination buffer for interpolated pels.
415 * @param src Source buffer.
416 * @param stride Stride for both src and dst buffers.
417 * @param hmode Horizontal filter (expressed as a quarter-pel shift).
418 * @param vmode Vertical filter (expressed as a quarter-pel shift).
419 * @param rnd Rounding bias.
421 #define VC1_MSPEL_MC(OP)\
422 static void OP ## vc1_mspel_mc(uint8_t *dst, const uint8_t *src, int stride,\
423 int hmode, int vmode, int rnd)\
425 static const vc1_mspel_mc_filter_ver_16bits vc1_put_shift_ver_16bits[] =\
426 { NULL, vc1_put_ver_16b_shift1_mmx, vc1_put_ver_16b_shift2_mmx, vc1_put_ver_16b_shift3_mmx };\
427 static const vc1_mspel_mc_filter_hor_16bits vc1_put_shift_hor_16bits[] =\
428 { NULL, OP ## vc1_hor_16b_shift1_mmx, OP ## vc1_hor_16b_shift2_mmx, OP ## vc1_hor_16b_shift3_mmx };\
429 static const vc1_mspel_mc_filter_8bits vc1_put_shift_8bits[] =\
430 { NULL, OP ## vc1_shift1_mmx, OP ## vc1_shift2_mmx, OP ## vc1_shift3_mmx };\
433 "pxor %%mm0, %%mm0 \n\t"\
437 if (vmode) { /* Vertical filter to apply */\
438 if (hmode) { /* Horizontal filter to apply, output to tmp */\
439 static const int shift_value[] = { 0, 5, 1, 5 };\
440 int shift = (shift_value[hmode]+shift_value[vmode])>>1;\
442 DECLARE_ALIGNED(16, int16_t, tmp)[12*8];\
444 r = (1<<(shift-1)) + rnd-1;\
445 vc1_put_shift_ver_16bits[vmode](tmp, src-1, stride, r, shift);\
447 vc1_put_shift_hor_16bits[hmode](dst, stride, tmp+1, 64-rnd);\
450 else { /* No horizontal filter, output 8 lines to dst */\
451 vc1_put_shift_8bits[vmode](dst, src, stride, 1-rnd, stride);\
456 /* Horizontal mode with no vertical mode */\
457 vc1_put_shift_8bits[hmode](dst, src, stride, rnd, 1);\
463 /** Macro to ease the declaration of the bicubic filter interpolation functions */
464 #define DECLARE_FUNCTION(a, b) \
465 static void put_vc1_mspel_mc ## a ## b ## _mmx(uint8_t *dst, const uint8_t *src, int stride, int rnd) { \
466 put_vc1_mspel_mc(dst, src, stride, a, b, rnd); \
468 static void avg_vc1_mspel_mc ## a ## b ## _mmx2(uint8_t *dst, const uint8_t *src, int stride, int rnd) { \
469 avg_vc1_mspel_mc(dst, src, stride, a, b, rnd); \
472 DECLARE_FUNCTION(0, 1)
473 DECLARE_FUNCTION(0, 2)
474 DECLARE_FUNCTION(0, 3)
476 DECLARE_FUNCTION(1, 0)
477 DECLARE_FUNCTION(1, 1)
478 DECLARE_FUNCTION(1, 2)
479 DECLARE_FUNCTION(1, 3)
481 DECLARE_FUNCTION(2, 0)
482 DECLARE_FUNCTION(2, 1)
483 DECLARE_FUNCTION(2, 2)
484 DECLARE_FUNCTION(2, 3)
486 DECLARE_FUNCTION(3, 0)
487 DECLARE_FUNCTION(3, 1)
488 DECLARE_FUNCTION(3, 2)
489 DECLARE_FUNCTION(3, 3)
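/* For reference, DECLARE_FUNCTION(2, 1) expands to roughly the following
 * pair (hmode = 2, vmode = 1), which ff_vc1dsp_init_mmx() below stores at
 * index hmode + 4*vmode of the mspel function tables:
 *
 *     static void put_vc1_mspel_mc21_mmx(uint8_t *dst, const uint8_t *src,
 *                                        int stride, int rnd)
 *     {
 *         put_vc1_mspel_mc(dst, src, stride, 2, 1, rnd);
 *     }
 *
 * plus the matching avg_vc1_mspel_mc21_mmx2() wrapper. */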
491 static void vc1_inv_trans_4x4_dc_mmx2(uint8_t *dest, int linesize, DCTELEM *block)
494 dc = (17 * dc + 4) >> 3;
495 dc = (17 * dc + 64) >> 7;
497 "movd %0, %%mm0 \n\t"
498 "pshufw $0, %%mm0, %%mm0 \n\t"
499 "pxor %%mm1, %%mm1 \n\t"
500 "psubw %%mm0, %%mm1 \n\t"
501 "packuswb %%mm0, %%mm0 \n\t"
502 "packuswb %%mm1, %%mm1 \n\t"
506 "movd %0, %%mm2 \n\t"
507 "movd %1, %%mm3 \n\t"
508 "movd %2, %%mm4 \n\t"
509 "movd %3, %%mm5 \n\t"
510 "paddusb %%mm0, %%mm2 \n\t"
511 "paddusb %%mm0, %%mm3 \n\t"
512 "paddusb %%mm0, %%mm4 \n\t"
513 "paddusb %%mm0, %%mm5 \n\t"
514 "psubusb %%mm1, %%mm2 \n\t"
515 "psubusb %%mm1, %%mm3 \n\t"
516 "psubusb %%mm1, %%mm4 \n\t"
517 "psubusb %%mm1, %%mm5 \n\t"
518 "movd %%mm2, %0 \n\t"
519 "movd %%mm3, %1 \n\t"
520 "movd %%mm4, %2 \n\t"
521 "movd %%mm5, %3 \n\t"
522 :"+m"(*(uint32_t*)(dest+0*linesize)),
523 "+m"(*(uint32_t*)(dest+1*linesize)),
524 "+m"(*(uint32_t*)(dest+2*linesize)),
525 "+m"(*(uint32_t*)(dest+3*linesize))
529 static void vc1_inv_trans_4x8_dc_mmx2(uint8_t *dest, int linesize, DCTELEM *block)
532 dc = (17 * dc + 4) >> 3;
533 dc = (12 * dc + 64) >> 7;
535 "movd %0, %%mm0 \n\t"
536 "pshufw $0, %%mm0, %%mm0 \n\t"
537 "pxor %%mm1, %%mm1 \n\t"
538 "psubw %%mm0, %%mm1 \n\t"
539 "packuswb %%mm0, %%mm0 \n\t"
540 "packuswb %%mm1, %%mm1 \n\t"
544 "movd %0, %%mm2 \n\t"
545 "movd %1, %%mm3 \n\t"
546 "movd %2, %%mm4 \n\t"
547 "movd %3, %%mm5 \n\t"
548 "paddusb %%mm0, %%mm2 \n\t"
549 "paddusb %%mm0, %%mm3 \n\t"
550 "paddusb %%mm0, %%mm4 \n\t"
551 "paddusb %%mm0, %%mm5 \n\t"
552 "psubusb %%mm1, %%mm2 \n\t"
553 "psubusb %%mm1, %%mm3 \n\t"
554 "psubusb %%mm1, %%mm4 \n\t"
555 "psubusb %%mm1, %%mm5 \n\t"
556 "movd %%mm2, %0 \n\t"
557 "movd %%mm3, %1 \n\t"
558 "movd %%mm4, %2 \n\t"
559 "movd %%mm5, %3 \n\t"
560 :"+m"(*(uint32_t*)(dest+0*linesize)),
561 "+m"(*(uint32_t*)(dest+1*linesize)),
562 "+m"(*(uint32_t*)(dest+2*linesize)),
563 "+m"(*(uint32_t*)(dest+3*linesize))
567 "movd %0, %%mm2 \n\t"
568 "movd %1, %%mm3 \n\t"
569 "movd %2, %%mm4 \n\t"
570 "movd %3, %%mm5 \n\t"
571 "paddusb %%mm0, %%mm2 \n\t"
572 "paddusb %%mm0, %%mm3 \n\t"
573 "paddusb %%mm0, %%mm4 \n\t"
574 "paddusb %%mm0, %%mm5 \n\t"
575 "psubusb %%mm1, %%mm2 \n\t"
576 "psubusb %%mm1, %%mm3 \n\t"
577 "psubusb %%mm1, %%mm4 \n\t"
578 "psubusb %%mm1, %%mm5 \n\t"
579 "movd %%mm2, %0 \n\t"
580 "movd %%mm3, %1 \n\t"
581 "movd %%mm4, %2 \n\t"
582 "movd %%mm5, %3 \n\t"
583 :"+m"(*(uint32_t*)(dest+0*linesize)),
584 "+m"(*(uint32_t*)(dest+1*linesize)),
585 "+m"(*(uint32_t*)(dest+2*linesize)),
586 "+m"(*(uint32_t*)(dest+3*linesize))
590 static void vc1_inv_trans_8x4_dc_mmx2(uint8_t *dest, int linesize, DCTELEM *block)
593 dc = ( 3 * dc + 1) >> 1;
594 dc = (17 * dc + 64) >> 7;
596 "movd %0, %%mm0 \n\t"
597 "pshufw $0, %%mm0, %%mm0 \n\t"
598 "pxor %%mm1, %%mm1 \n\t"
599 "psubw %%mm0, %%mm1 \n\t"
600 "packuswb %%mm0, %%mm0 \n\t"
601 "packuswb %%mm1, %%mm1 \n\t"
605 "movq %0, %%mm2 \n\t"
606 "movq %1, %%mm3 \n\t"
607 "movq %2, %%mm4 \n\t"
608 "movq %3, %%mm5 \n\t"
609 "paddusb %%mm0, %%mm2 \n\t"
610 "paddusb %%mm0, %%mm3 \n\t"
611 "paddusb %%mm0, %%mm4 \n\t"
612 "paddusb %%mm0, %%mm5 \n\t"
613 "psubusb %%mm1, %%mm2 \n\t"
614 "psubusb %%mm1, %%mm3 \n\t"
615 "psubusb %%mm1, %%mm4 \n\t"
616 "psubusb %%mm1, %%mm5 \n\t"
617 "movq %%mm2, %0 \n\t"
618 "movq %%mm3, %1 \n\t"
619 "movq %%mm4, %2 \n\t"
620 "movq %%mm5, %3 \n\t"
621 :"+m"(*(uint32_t*)(dest+0*linesize)),
622 "+m"(*(uint32_t*)(dest+1*linesize)),
623 "+m"(*(uint32_t*)(dest+2*linesize)),
624 "+m"(*(uint32_t*)(dest+3*linesize))
628 static void vc1_inv_trans_8x8_dc_mmx2(uint8_t *dest, int linesize, DCTELEM *block)
631 dc = (3 * dc + 1) >> 1;
632 dc = (3 * dc + 16) >> 5;
634 "movd %0, %%mm0 \n\t"
635 "pshufw $0, %%mm0, %%mm0 \n\t"
636 "pxor %%mm1, %%mm1 \n\t"
637 "psubw %%mm0, %%mm1 \n\t"
638 "packuswb %%mm0, %%mm0 \n\t"
639 "packuswb %%mm1, %%mm1 \n\t"
643 "movq %0, %%mm2 \n\t"
644 "movq %1, %%mm3 \n\t"
645 "movq %2, %%mm4 \n\t"
646 "movq %3, %%mm5 \n\t"
647 "paddusb %%mm0, %%mm2 \n\t"
648 "paddusb %%mm0, %%mm3 \n\t"
649 "paddusb %%mm0, %%mm4 \n\t"
650 "paddusb %%mm0, %%mm5 \n\t"
651 "psubusb %%mm1, %%mm2 \n\t"
652 "psubusb %%mm1, %%mm3 \n\t"
653 "psubusb %%mm1, %%mm4 \n\t"
654 "psubusb %%mm1, %%mm5 \n\t"
655 "movq %%mm2, %0 \n\t"
656 "movq %%mm3, %1 \n\t"
657 "movq %%mm4, %2 \n\t"
658 "movq %%mm5, %3 \n\t"
659 :"+m"(*(uint32_t*)(dest+0*linesize)),
660 "+m"(*(uint32_t*)(dest+1*linesize)),
661 "+m"(*(uint32_t*)(dest+2*linesize)),
662 "+m"(*(uint32_t*)(dest+3*linesize))
666 "movq %0, %%mm2 \n\t"
667 "movq %1, %%mm3 \n\t"
668 "movq %2, %%mm4 \n\t"
669 "movq %3, %%mm5 \n\t"
670 "paddusb %%mm0, %%mm2 \n\t"
671 "paddusb %%mm0, %%mm3 \n\t"
672 "paddusb %%mm0, %%mm4 \n\t"
673 "paddusb %%mm0, %%mm5 \n\t"
674 "psubusb %%mm1, %%mm2 \n\t"
675 "psubusb %%mm1, %%mm3 \n\t"
676 "psubusb %%mm1, %%mm4 \n\t"
677 "psubusb %%mm1, %%mm5 \n\t"
678 "movq %%mm2, %0 \n\t"
679 "movq %%mm3, %1 \n\t"
680 "movq %%mm4, %2 \n\t"
681 "movq %%mm5, %3 \n\t"
682 :"+m"(*(uint32_t*)(dest+0*linesize)),
683 "+m"(*(uint32_t*)(dest+1*linesize)),
684 "+m"(*(uint32_t*)(dest+2*linesize)),
685 "+m"(*(uint32_t*)(dest+3*linesize))
689 #endif /* HAVE_INLINE_ASM */
691 #define LOOP_FILTER(EXT) \
692 void ff_vc1_v_loop_filter4_ ## EXT(uint8_t *src, int stride, int pq); \
693 void ff_vc1_h_loop_filter4_ ## EXT(uint8_t *src, int stride, int pq); \
694 void ff_vc1_v_loop_filter8_ ## EXT(uint8_t *src, int stride, int pq); \
695 void ff_vc1_h_loop_filter8_ ## EXT(uint8_t *src, int stride, int pq); \
697 static void vc1_v_loop_filter16_ ## EXT(uint8_t *src, int stride, int pq) \
699 ff_vc1_v_loop_filter8_ ## EXT(src, stride, pq); \
700 ff_vc1_v_loop_filter8_ ## EXT(src+8, stride, pq); \
703 static void vc1_h_loop_filter16_ ## EXT(uint8_t *src, int stride, int pq) \
705 ff_vc1_h_loop_filter8_ ## EXT(src, stride, pq); \
706 ff_vc1_h_loop_filter8_ ## EXT(src+8*stride, stride, pq); \
714 void ff_vc1_h_loop_filter8_sse4(uint8_t *src, int stride, int pq);
716 static void vc1_h_loop_filter16_sse4(uint8_t *src, int stride, int pq)
718 ff_vc1_h_loop_filter8_sse4(src, stride, pq);
719 ff_vc1_h_loop_filter8_sse4(src+8*stride, stride, pq);
721 #endif /* HAVE_YASM */
723 void ff_put_vc1_chroma_mc8_mmx_nornd (uint8_t *dst, uint8_t *src,
724 int stride, int h, int x, int y);
725 void ff_avg_vc1_chroma_mc8_mmx2_nornd (uint8_t *dst, uint8_t *src,
726 int stride, int h, int x, int y);
727 void ff_avg_vc1_chroma_mc8_3dnow_nornd(uint8_t *dst, uint8_t *src,
728 int stride, int h, int x, int y);
729 void ff_put_vc1_chroma_mc8_ssse3_nornd(uint8_t *dst, uint8_t *src,
730 int stride, int h, int x, int y);
731 void ff_avg_vc1_chroma_mc8_ssse3_nornd(uint8_t *dst, uint8_t *src,
732 int stride, int h, int x, int y);
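/* This init is meant to be called after the C defaults are in place; each
 * AV_CPU_FLAG_* block below only overrides the pointers its instruction
 * set can accelerate. Rough usage sketch (names as in vc1dsp.c):
 *
 *     VC1DSPContext dsp;
 *     ff_vc1dsp_init(&dsp);      // C defaults
 *     ff_vc1dsp_init_mmx(&dsp);  // x86 overrides, keyed on av_get_cpu_flags()
 */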
734 void ff_vc1dsp_init_mmx(VC1DSPContext *dsp)
736 int mm_flags = av_get_cpu_flags();
739 if (mm_flags & AV_CPU_FLAG_MMX) {
740 dsp->put_vc1_mspel_pixels_tab[ 0] = ff_put_vc1_mspel_mc00_mmx;
741 dsp->put_vc1_mspel_pixels_tab[ 4] = put_vc1_mspel_mc01_mmx;
742 dsp->put_vc1_mspel_pixels_tab[ 8] = put_vc1_mspel_mc02_mmx;
743 dsp->put_vc1_mspel_pixels_tab[12] = put_vc1_mspel_mc03_mmx;
745 dsp->put_vc1_mspel_pixels_tab[ 1] = put_vc1_mspel_mc10_mmx;
746 dsp->put_vc1_mspel_pixels_tab[ 5] = put_vc1_mspel_mc11_mmx;
747 dsp->put_vc1_mspel_pixels_tab[ 9] = put_vc1_mspel_mc12_mmx;
748 dsp->put_vc1_mspel_pixels_tab[13] = put_vc1_mspel_mc13_mmx;
750 dsp->put_vc1_mspel_pixels_tab[ 2] = put_vc1_mspel_mc20_mmx;
751 dsp->put_vc1_mspel_pixels_tab[ 6] = put_vc1_mspel_mc21_mmx;
752 dsp->put_vc1_mspel_pixels_tab[10] = put_vc1_mspel_mc22_mmx;
753 dsp->put_vc1_mspel_pixels_tab[14] = put_vc1_mspel_mc23_mmx;
755 dsp->put_vc1_mspel_pixels_tab[ 3] = put_vc1_mspel_mc30_mmx;
756 dsp->put_vc1_mspel_pixels_tab[ 7] = put_vc1_mspel_mc31_mmx;
757 dsp->put_vc1_mspel_pixels_tab[11] = put_vc1_mspel_mc32_mmx;
758 dsp->put_vc1_mspel_pixels_tab[15] = put_vc1_mspel_mc33_mmx;
761 if (mm_flags & AV_CPU_FLAG_MMXEXT) {
762 dsp->avg_vc1_mspel_pixels_tab[ 0] = ff_avg_vc1_mspel_mc00_mmx2;
763 dsp->avg_vc1_mspel_pixels_tab[ 4] = avg_vc1_mspel_mc01_mmx2;
764 dsp->avg_vc1_mspel_pixels_tab[ 8] = avg_vc1_mspel_mc02_mmx2;
765 dsp->avg_vc1_mspel_pixels_tab[12] = avg_vc1_mspel_mc03_mmx2;
767 dsp->avg_vc1_mspel_pixels_tab[ 1] = avg_vc1_mspel_mc10_mmx2;
768 dsp->avg_vc1_mspel_pixels_tab[ 5] = avg_vc1_mspel_mc11_mmx2;
769 dsp->avg_vc1_mspel_pixels_tab[ 9] = avg_vc1_mspel_mc12_mmx2;
770 dsp->avg_vc1_mspel_pixels_tab[13] = avg_vc1_mspel_mc13_mmx2;
772 dsp->avg_vc1_mspel_pixels_tab[ 2] = avg_vc1_mspel_mc20_mmx2;
773 dsp->avg_vc1_mspel_pixels_tab[ 6] = avg_vc1_mspel_mc21_mmx2;
774 dsp->avg_vc1_mspel_pixels_tab[10] = avg_vc1_mspel_mc22_mmx2;
775 dsp->avg_vc1_mspel_pixels_tab[14] = avg_vc1_mspel_mc23_mmx2;
777 dsp->avg_vc1_mspel_pixels_tab[ 3] = avg_vc1_mspel_mc30_mmx2;
778 dsp->avg_vc1_mspel_pixels_tab[ 7] = avg_vc1_mspel_mc31_mmx2;
779 dsp->avg_vc1_mspel_pixels_tab[11] = avg_vc1_mspel_mc32_mmx2;
780 dsp->avg_vc1_mspel_pixels_tab[15] = avg_vc1_mspel_mc33_mmx2;
782 dsp->vc1_inv_trans_8x8_dc = vc1_inv_trans_8x8_dc_mmx2;
783 dsp->vc1_inv_trans_4x8_dc = vc1_inv_trans_4x8_dc_mmx2;
784 dsp->vc1_inv_trans_8x4_dc = vc1_inv_trans_8x4_dc_mmx2;
785 dsp->vc1_inv_trans_4x4_dc = vc1_inv_trans_4x4_dc_mmx2;
787 #endif /* HAVE_INLINE_ASM */
789 #define ASSIGN_LF(EXT) \
790 dsp->vc1_v_loop_filter4 = ff_vc1_v_loop_filter4_ ## EXT; \
791 dsp->vc1_h_loop_filter4 = ff_vc1_h_loop_filter4_ ## EXT; \
792 dsp->vc1_v_loop_filter8 = ff_vc1_v_loop_filter8_ ## EXT; \
793 dsp->vc1_h_loop_filter8 = ff_vc1_h_loop_filter8_ ## EXT; \
794 dsp->vc1_v_loop_filter16 = vc1_v_loop_filter16_ ## EXT; \
795 dsp->vc1_h_loop_filter16 = vc1_h_loop_filter16_ ## EXT
798 if (mm_flags & AV_CPU_FLAG_MMX) {
799 dsp->put_no_rnd_vc1_chroma_pixels_tab[0]= ff_put_vc1_chroma_mc8_mmx_nornd;
802 if (mm_flags & AV_CPU_FLAG_MMXEXT) {
804 dsp->avg_no_rnd_vc1_chroma_pixels_tab[0]= ff_avg_vc1_chroma_mc8_mmx2_nornd;
805 } else if (mm_flags & AV_CPU_FLAG_3DNOW) {
806 dsp->avg_no_rnd_vc1_chroma_pixels_tab[0]= ff_avg_vc1_chroma_mc8_3dnow_nornd;
809 if (mm_flags & AV_CPU_FLAG_SSE2) {
810 dsp->vc1_v_loop_filter8 = ff_vc1_v_loop_filter8_sse2;
811 dsp->vc1_h_loop_filter8 = ff_vc1_h_loop_filter8_sse2;
812 dsp->vc1_v_loop_filter16 = vc1_v_loop_filter16_sse2;
813 dsp->vc1_h_loop_filter16 = vc1_h_loop_filter16_sse2;
815 if (mm_flags & AV_CPU_FLAG_SSSE3) {
817 dsp->put_no_rnd_vc1_chroma_pixels_tab[0]= ff_put_vc1_chroma_mc8_ssse3_nornd;
818 dsp->avg_no_rnd_vc1_chroma_pixels_tab[0]= ff_avg_vc1_chroma_mc8_ssse3_nornd;
820 if (mm_flags & AV_CPU_FLAG_SSE4) {
821 dsp->vc1_h_loop_filter8 = ff_vc1_h_loop_filter8_sse4;
822 dsp->vc1_h_loop_filter16 = vc1_h_loop_filter16_sse4;
824 #endif /* HAVE_YASM */