/*
 * VC-1 and WMV3 - DSP functions MMX-optimized
 * Copyright (c) 2007 Christophe GISQUET <christophe.gisquet@free.fr>
 *
 * Permission is hereby granted, free of charge, to any person
 * obtaining a copy of this software and associated documentation
 * files (the "Software"), to deal in the Software without
 * restriction, including without limitation the rights to use,
 * copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following
 * conditions:
 *
 * The above copyright notice and this permission notice shall be
 * included in all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
 * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
 * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
 * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#include "libavutil/cpu.h"
#include "libavutil/x86_cpu.h"
#include "libavcodec/dsputil.h"
#include "dsputil_mmx.h"

#define OP_PUT(S,D)
#define OP_AVG(S,D) "pavgb " #S ", " #D " \n\t"

/** Add the rounder in mm7 to mm3 and mm4, then shift both right by SHIFT */
#define NORMALIZE_MMX(SHIFT)                    \
     "paddw     %%mm7, %%mm3           \n\t" /* +bias-r */ \
     "paddw     %%mm7, %%mm4           \n\t" /* +bias-r */ \
     "psraw     "SHIFT", %%mm3         \n\t"    \
     "psraw     "SHIFT", %%mm4         \n\t"

#define TRANSFER_DO_PACK(OP)                    \
     "packuswb  %%mm4, %%mm3           \n\t"    \
     OP((%2), %%mm3)                            \
     "movq      %%mm3, (%2)            \n\t"

#define TRANSFER_DONT_PACK(OP)                  \
     OP(0(%2), %%mm3)                           \
     OP(8(%2), %%mm4)                           \
     "movq      %%mm3, 0(%2)           \n\t"    \
     "movq      %%mm4, 8(%2)           \n\t"

/** @see MSPEL_FILTER13_CORE for use as UNPACK macro */
#define DO_UNPACK(reg)  "punpcklbw %%mm0, " reg "\n\t"
#define DONT_UNPACK(reg)

/** Compute the rounder 32-r or 8-r and unpack it to mm7 */
#define LOAD_ROUNDER_MMX(ROUND)                 \
     "movd      "ROUND", %%mm7         \n\t"    \
     "punpcklwd %%mm7, %%mm7           \n\t"    \
     "punpckldq %%mm7, %%mm7           \n\t"

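/* Illustrative scalar sketch (not compiled): the three instructions above
 * simply broadcast the 16-bit rounder to all four words of mm7. */
#if 0
static void load_rounder_ref(uint16_t mm7[4], uint16_t round)
{
    for (int i = 0; i < 4; i++)
        mm7[i] = round;   /* movd + punpcklwd + punpckldq */
}
#endif
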
#define SHIFT2_LINE(OFF, R0,R1,R2,R3)           \
    "paddw     %%mm"#R2", %%mm"#R1"    \n\t"    \
    "movd      (%0,%3), %%mm"#R0"      \n\t"    \
    "pmullw    %%mm6, %%mm"#R1"        \n\t"    \
    "punpcklbw %%mm0, %%mm"#R0"        \n\t"    \
    "movd      (%0,%2), %%mm"#R3"      \n\t"    \
    "psubw     %%mm"#R0", %%mm"#R1"    \n\t"    \
    "punpcklbw %%mm0, %%mm"#R3"        \n\t"    \
    "paddw     %%mm7, %%mm"#R1"        \n\t"    \
    "psubw     %%mm"#R3", %%mm"#R1"    \n\t"    \
    "psraw     %4, %%mm"#R1"           \n\t"    \
    "movq      %%mm"#R1", "#OFF"(%1)   \n\t"    \
    "add       %2, %0                  \n\t"

/** Sacrificing mm6 makes it possible to pipeline loads from src */
static void vc1_put_ver_16b_shift2_mmx(int16_t *dst,
                                       const uint8_t *src, x86_reg stride,
                                       int rnd, int64_t shift)
{
    __asm__ volatile(
        "mov       $3, %%"REG_c"           \n\t"
        LOAD_ROUNDER_MMX("%5")
        "movq      "MANGLE(ff_pw_9)", %%mm6 \n\t"
        "1:                                \n\t"
        "movd      (%0), %%mm2             \n\t"
        "add       %2, %0                  \n\t"
        "movd      (%0), %%mm3             \n\t"
        "punpcklbw %%mm0, %%mm2            \n\t"
        "punpcklbw %%mm0, %%mm3            \n\t"
        SHIFT2_LINE(  0, 1, 2, 3, 4)
        SHIFT2_LINE( 24, 2, 3, 4, 1)
        SHIFT2_LINE( 48, 3, 4, 1, 2)
        SHIFT2_LINE( 72, 4, 1, 2, 3)
        SHIFT2_LINE( 96, 1, 2, 3, 4)
        SHIFT2_LINE(120, 2, 3, 4, 1)
        SHIFT2_LINE(144, 3, 4, 1, 2)
        SHIFT2_LINE(168, 4, 1, 2, 3)
        "sub       %6, %0                  \n\t"
        "add       $8, %1                  \n\t"
        "dec       %%"REG_c"               \n\t"
        "jnz 1b                            \n\t"
        : "+r"(src), "+r"(dst)
        : "r"(stride), "r"(-2*stride),
          "m"(shift), "m"(rnd), "r"(9*stride-4)
        : "%"REG_c, "memory"
    );
}

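/* Scalar sketch (not compiled) of the pass above: a vertical (-1,9,9,-1)
 * half-pel filter producing 16-bit intermediates, 12 samples per row; the
 * rows around src are assumed readable (names are illustrative only). */
#if 0
static void put_ver_16b_shift2_ref(int16_t *dst, const uint8_t *src,
                                   int stride, int rnd, int shift)
{
    for (int y = 0; y < 8; y++)
        for (int x = 0; x < 12; x++)
            dst[y*12+x] = (-  src[(y-1)*stride+x] + 9*src[ y   *stride+x]
                           +9*src[(y+1)*stride+x] -   src[(y+2)*stride+x]
                           + rnd) >> shift;
}
#endif
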
/**
 * Data is already unpacked, so some operations can directly be made from
 * memory.
 */
#define VC1_HOR_16b_SHIFT2(OP, OPNAME)\
static void OPNAME ## vc1_hor_16b_shift2_mmx(uint8_t *dst, x86_reg stride,\
                                             const int16_t *src, int rnd)\
{\
    int h = 8;\
\
    src -= 1;\
    rnd -= (-1+9+9-1)*1024; /* Add -1024 bias */\
    __asm__ volatile(\
        LOAD_ROUNDER_MMX("%4")\
        "movq      "MANGLE(ff_pw_128)", %%mm6\n\t"\
        "movq      "MANGLE(ff_pw_9)", %%mm5 \n\t"\
        "1:                                \n\t"\
        "movq      2*0+0(%1), %%mm1        \n\t"\
        "movq      2*0+8(%1), %%mm2        \n\t"\
        "movq      2*1+0(%1), %%mm3        \n\t"\
        "movq      2*1+8(%1), %%mm4        \n\t"\
        "paddw     2*3+0(%1), %%mm1        \n\t"\
        "paddw     2*3+8(%1), %%mm2        \n\t"\
        "paddw     2*2+0(%1), %%mm3        \n\t"\
        "paddw     2*2+8(%1), %%mm4        \n\t"\
        "pmullw    %%mm5, %%mm3            \n\t"\
        "pmullw    %%mm5, %%mm4            \n\t"\
        "psubw     %%mm1, %%mm3            \n\t"\
        "psubw     %%mm2, %%mm4            \n\t"\
        NORMALIZE_MMX("$7")\
        /* Remove bias */\
        "paddw     %%mm6, %%mm3            \n\t"\
        "paddw     %%mm6, %%mm4            \n\t"\
        TRANSFER_DO_PACK(OP)\
        "add       $24, %1                 \n\t"\
        "add       %3, %2                  \n\t"\
        "dec       %0                      \n\t"\
        "jnz 1b                            \n\t"\
        : "+r"(h), "+r" (src),  "+r" (dst)\
        : "r"(stride), "m"(rnd)\
        : "memory"\
    );\
}

VC1_HOR_16b_SHIFT2(OP_PUT, put_)
VC1_HOR_16b_SHIFT2(OP_AVG, avg_)

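/* Scalar sketch (not compiled) of the pass above. The -1024-per-tap bias
 * folded into rnd, together with the +128 (ff_pw_128) added back after the
 * shift (16*1024 >> 7 == 128), keeps the signed 16-bit arithmetic in range
 * for packuswb; numerically it reduces to a plain rounded filter: */
#if 0
static void hor_16b_shift2_ref(uint8_t *dst, int stride,
                               const int16_t *src, int rnd)
{
    for (int y = 0; y < 8; y++, src += 12, dst += stride)
        for (int x = 0; x < 8; x++) {
            int v = (-src[x-1] + 9*src[x] + 9*src[x+1] - src[x+2] + rnd) >> 7;
            dst[x] = v > 255 ? 255 : v < 0 ? 0 : v;
        }
}
#endif
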
/**
 * Purely vertical or horizontal 1/2 shift interpolation.
 * Sacrifice mm6 for the *9 factor.
 */
#define VC1_SHIFT2(OP, OPNAME)\
static void OPNAME ## vc1_shift2_mmx(uint8_t *dst, const uint8_t *src,\
                                     x86_reg stride, int rnd, x86_reg offset)\
{\
    rnd = 8-rnd;\
    __asm__ volatile(\
        "mov       $8, %%"REG_c"           \n\t"\
        LOAD_ROUNDER_MMX("%5")\
        "movq      "MANGLE(ff_pw_9)", %%mm6\n\t"\
        "1:                                \n\t"\
        "movd      0(%0   ), %%mm3         \n\t"\
        "movd      4(%0   ), %%mm4         \n\t"\
        "movd      0(%0,%2), %%mm1         \n\t"\
        "movd      4(%0,%2), %%mm2         \n\t"\
        "add       %2, %0                  \n\t"\
        "punpcklbw %%mm0, %%mm3            \n\t"\
        "punpcklbw %%mm0, %%mm4            \n\t"\
        "punpcklbw %%mm0, %%mm1            \n\t"\
        "punpcklbw %%mm0, %%mm2            \n\t"\
        "paddw     %%mm1, %%mm3            \n\t"\
        "paddw     %%mm2, %%mm4            \n\t"\
        "movd      0(%0,%3), %%mm1         \n\t"\
        "movd      4(%0,%3), %%mm2         \n\t"\
        "pmullw    %%mm6, %%mm3            \n\t" /* 0,9,9,0*/\
        "pmullw    %%mm6, %%mm4            \n\t" /* 0,9,9,0*/\
        "punpcklbw %%mm0, %%mm1            \n\t"\
        "punpcklbw %%mm0, %%mm2            \n\t"\
        "psubw     %%mm1, %%mm3            \n\t" /*-1,9,9,0*/\
        "psubw     %%mm2, %%mm4            \n\t" /*-1,9,9,0*/\
        "movd      0(%0,%2), %%mm1         \n\t"\
        "movd      4(%0,%2), %%mm2         \n\t"\
        "punpcklbw %%mm0, %%mm1            \n\t"\
        "punpcklbw %%mm0, %%mm2            \n\t"\
        "psubw     %%mm1, %%mm3            \n\t" /*-1,9,9,-1*/\
        "psubw     %%mm2, %%mm4            \n\t" /*-1,9,9,-1*/\
        NORMALIZE_MMX("$4")\
        "packuswb  %%mm4, %%mm3            \n\t"\
        OP((%1), %%mm3)\
        "movq      %%mm3, (%1)             \n\t"\
        "add       %6, %0                  \n\t"\
        "add       %4, %1                  \n\t"\
        "dec       %%"REG_c"               \n\t"\
        "jnz 1b                            \n\t"\
        : "+r"(src), "+r"(dst)\
        : "r"(offset), "r"(-2*offset), "g"(stride), "m"(rnd),\
          "g"(stride-offset)\
        : "%"REG_c, "memory"\
    );\
}

VC1_SHIFT2(OP_PUT, put_)
VC1_SHIFT2(OP_AVG, avg_)

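/* Scalar sketch (not compiled) of OPNAME ## vc1_shift2_mmx: one
 * (-1,9,9,-1)/16 half-pel pass, vertical when offset == stride and
 * horizontal when offset == 1; rnd is the rounding-control bit. */
#if 0
static void shift2_ref(uint8_t *dst, const uint8_t *src,
                       int stride, int rnd, int offset)
{
    rnd = 8 - rnd;                     /* the rounder really used is 8-r */
    for (int y = 0; y < 8; y++, src += stride, dst += stride)
        for (int x = 0; x < 8; x++) {
            int v = (-src[x-offset]   + 9*src[x]
                     +9*src[x+offset] -   src[x+2*offset] + rnd) >> 4;
            dst[x] = v > 255 ? 255 : v < 0 ? 0 : v;
        }
}
#endif
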
/**
 * Core of the 1/4 and 3/4 shift bicubic interpolation.
 *
 * @param UNPACK Macro unpacking arguments from 8 to 16 bits (can be empty).
 * @param MOVQ   "movd 1" when the data must still be unpacked, or "movq 2"
 *               when it is already 16 bits: the load instruction plus the
 *               byte scale applied to the A1..A4 offsets.
 * @param A1     Address of 1st tap (beware of unpacked/packed).
 * @param A2     Address of 2nd tap
 * @param A3     Address of 3rd tap
 * @param A4     Address of 4th tap
 */
#define MSPEL_FILTER13_CORE(UNPACK, MOVQ, A1, A2, A3, A4)       \
     MOVQ "*0+"A1", %%mm1       \n\t"                           \
     MOVQ "*4+"A1", %%mm2       \n\t"                           \
     UNPACK("%%mm1")                                            \
     UNPACK("%%mm2")                                            \
     "pmullw    "MANGLE(ff_pw_3)", %%mm1\n\t"                   \
     "pmullw    "MANGLE(ff_pw_3)", %%mm2\n\t"                   \
     MOVQ "*0+"A2", %%mm3       \n\t"                           \
     MOVQ "*4+"A2", %%mm4       \n\t"                           \
     UNPACK("%%mm3")                                            \
     UNPACK("%%mm4")                                            \
     "pmullw    %%mm6, %%mm3    \n\t" /* *18 */                 \
     "pmullw    %%mm6, %%mm4    \n\t" /* *18 */                 \
     "psubw     %%mm1, %%mm3    \n\t" /* 18,-3 */               \
     "psubw     %%mm2, %%mm4    \n\t" /* 18,-3 */               \
     MOVQ "*0+"A4", %%mm1       \n\t"                           \
     MOVQ "*4+"A4", %%mm2       \n\t"                           \
     UNPACK("%%mm1")                                            \
     UNPACK("%%mm2")                                            \
     "psllw     $2, %%mm1       \n\t" /* 4* */                  \
     "psllw     $2, %%mm2       \n\t" /* 4* */                  \
     "psubw     %%mm1, %%mm3    \n\t" /* -4,18,-3 */            \
     "psubw     %%mm2, %%mm4    \n\t" /* -4,18,-3 */            \
     MOVQ "*0+"A3", %%mm1       \n\t"                           \
     MOVQ "*4+"A3", %%mm2       \n\t"                           \
     UNPACK("%%mm1")                                            \
     UNPACK("%%mm2")                                            \
     "pmullw    %%mm5, %%mm1    \n\t" /* *53 */                 \
     "pmullw    %%mm5, %%mm2    \n\t" /* *53 */                 \
     "paddw     %%mm1, %%mm3    \n\t" /* -4,53,18,-3 */         \
     "paddw     %%mm2, %%mm4    \n\t" /* -4,53,18,-3 */

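/* Net effect of the core above, per 16-bit lane (the taps sum to 64):
 *
 *     mm3/mm4 = -3*A1 + 18*A2 + 53*A3 - 4*A4
 */
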
/**
 * Macro to build the vertical 16bits version of vc1_put_shift[13].
 * Here, offset=src_stride. Parameters passed A1 to A4 must use
 * %3 (src_stride) and %4 (3*src_stride).
 *
 * @param NAME Either 1 or 3
 * @see MSPEL_FILTER13_CORE for information on A1->A4
 */
#define MSPEL_FILTER13_VER_16B(NAME, A1, A2, A3, A4)            \
static void                                                     \
vc1_put_ver_16b_ ## NAME ## _mmx(int16_t *dst, const uint8_t *src,  \
                                 x86_reg src_stride,            \
                                 int rnd, int64_t shift)        \
{                                                               \
    int h = 8;                                                  \
    src -= src_stride;                                          \
    __asm__ volatile(                                           \
        LOAD_ROUNDER_MMX("%5")                                  \
        "movq      "MANGLE(ff_pw_53)", %%mm5\n\t"               \
        "movq      "MANGLE(ff_pw_18)", %%mm6\n\t"               \
        "1:                        \n\t"                        \
        MSPEL_FILTER13_CORE(DO_UNPACK, "movd 1", A1, A2, A3, A4)\
        NORMALIZE_MMX("%6")                                     \
        TRANSFER_DONT_PACK(OP_PUT)                              \
        /* Last 3 (in fact 4) bytes on the line */              \
        "movd      8+"A1", %%mm1   \n\t"                        \
        DO_UNPACK("%%mm1")                                      \
        "movq      %%mm1, %%mm3    \n\t"                        \
        "paddw     %%mm1, %%mm1    \n\t"                        \
        "paddw     %%mm3, %%mm1    \n\t" /* 3* */               \
        "movd      8+"A2", %%mm3   \n\t"                        \
        DO_UNPACK("%%mm3")                                      \
        "pmullw    %%mm6, %%mm3    \n\t" /* *18 */              \
        "psubw     %%mm1, %%mm3    \n\t" /* 18,-3 */            \
        "movd      8+"A3", %%mm1   \n\t"                        \
        DO_UNPACK("%%mm1")                                      \
        "pmullw    %%mm5, %%mm1    \n\t" /* *53 */              \
        "paddw     %%mm1, %%mm3    \n\t" /* 53,18,-3 */         \
        "movd      8+"A4", %%mm1   \n\t"                        \
        DO_UNPACK("%%mm1")                                      \
        "psllw     $2, %%mm1       \n\t" /* 4* */               \
        "psubw     %%mm1, %%mm3    \n\t"                        \
        "paddw     %%mm7, %%mm3    \n\t"                        \
        "psraw     %6, %%mm3       \n\t"                        \
        "movq      %%mm3, 16(%2)   \n\t"                        \
        "add       %3, %1          \n\t"                        \
        "add       $24, %2         \n\t"                        \
        "dec       %0              \n\t"                        \
        "jnz 1b                    \n\t"                        \
        : "+r"(h), "+r" (src),  "+r" (dst)                      \
        : "r"(src_stride), "r"(3*src_stride),                   \
          "m"(rnd), "m"(shift)                                  \
        : "memory"                                              \
    );                                                          \
}

/**
 * Macro to build the horizontal 16bits version of vc1_put_shift[13].
 * Here, offset=16bits, so parameters passed A1 to A4 should be simple.
 *
 * @param NAME Either 1 or 3
 * @see MSPEL_FILTER13_CORE for information on A1->A4
 */
#define MSPEL_FILTER13_HOR_16B(NAME, A1, A2, A3, A4, OP, OPNAME)\
static void                                                     \
OPNAME ## vc1_hor_16b_ ## NAME ## _mmx(uint8_t *dst, x86_reg stride,  \
                                       const int16_t *src, int rnd)   \
{                                                               \
    int h = 8;                                                  \
    src -= 1;                                                   \
    rnd -= (-4+53+18-3)*256; /* Add -256 bias */                \
    __asm__ volatile(                                           \
        LOAD_ROUNDER_MMX("%4")                                  \
        "movq      "MANGLE(ff_pw_18)", %%mm6   \n\t"            \
        "movq      "MANGLE(ff_pw_53)", %%mm5   \n\t"            \
        "1:                        \n\t"                        \
        MSPEL_FILTER13_CORE(DONT_UNPACK, "movq 2", A1, A2, A3, A4)\
        NORMALIZE_MMX("$7")                                     \
        /* Remove bias */                                       \
        "paddw     "MANGLE(ff_pw_128)", %%mm3  \n\t"            \
        "paddw     "MANGLE(ff_pw_128)", %%mm4  \n\t"            \
        TRANSFER_DO_PACK(OP)                                    \
        "add       $24, %1         \n\t"                        \
        "add       %3, %2          \n\t"                        \
        "dec       %0              \n\t"                        \
        "jnz 1b                    \n\t"                        \
        : "+r"(h), "+r" (src),  "+r" (dst)                      \
        : "r"(stride), "m"(rnd)                                 \
        : "memory"                                              \
    );                                                          \
}

/**
 * Macro to build the 8bits, any direction, version of vc1_put_shift[13].
 * Here, offset=src_stride. Parameters passed A1 to A4 must use
 * %3 (offset) and %4 (3*offset).
 *
 * @param NAME Either 1 or 3
 * @see MSPEL_FILTER13_CORE for information on A1->A4
 */
#define MSPEL_FILTER13_8B(NAME, A1, A2, A3, A4, OP, OPNAME)     \
static void                                                     \
OPNAME ## vc1_## NAME ## _mmx(uint8_t *dst, const uint8_t *src, \
                              x86_reg stride, int rnd, x86_reg offset) \
{                                                               \
    int h = 8;                                                  \
    src -= offset;                                              \
    rnd = 32-rnd;                                               \
    __asm__ volatile (                                          \
        LOAD_ROUNDER_MMX("%6")                                  \
        "movq      "MANGLE(ff_pw_53)", %%mm5       \n\t"        \
        "movq      "MANGLE(ff_pw_18)", %%mm6       \n\t"        \
        "1:                        \n\t"                        \
        MSPEL_FILTER13_CORE(DO_UNPACK, "movd 1", A1, A2, A3, A4)\
        NORMALIZE_MMX("$6")                                     \
        TRANSFER_DO_PACK(OP)                                    \
        "add       %5, %1          \n\t"                        \
        "add       %5, %2          \n\t"                        \
        "dec       %0              \n\t"                        \
        "jnz 1b                    \n\t"                        \
        : "+r"(h), "+r" (src),  "+r" (dst)                      \
        : "r"(offset), "r"(3*offset), "g"(stride), "m"(rnd)     \
        : "memory"                                              \
    );                                                          \
}

/** 1/4 shift bicubic interpolation */
MSPEL_FILTER13_8B     (shift1, "0(%1,%4  )", "0(%1,%3,2)", "0(%1,%3  )", "0(%1     )", OP_PUT, put_)
MSPEL_FILTER13_8B     (shift1, "0(%1,%4  )", "0(%1,%3,2)", "0(%1,%3  )", "0(%1     )", OP_AVG, avg_)
MSPEL_FILTER13_VER_16B(shift1, "0(%1,%4  )", "0(%1,%3,2)", "0(%1,%3  )", "0(%1     )")
MSPEL_FILTER13_HOR_16B(shift1, "2*3(%1)", "2*2(%1)", "2*1(%1)", "2*0(%1)", OP_PUT, put_)
MSPEL_FILTER13_HOR_16B(shift1, "2*3(%1)", "2*2(%1)", "2*1(%1)", "2*0(%1)", OP_AVG, avg_)

/** 3/4 shift bicubic interpolation */
MSPEL_FILTER13_8B     (shift3, "0(%1     )", "0(%1,%3  )", "0(%1,%3,2)", "0(%1,%4  )", OP_PUT, put_)
MSPEL_FILTER13_8B     (shift3, "0(%1     )", "0(%1,%3  )", "0(%1,%3,2)", "0(%1,%4  )", OP_AVG, avg_)
MSPEL_FILTER13_VER_16B(shift3, "0(%1     )", "0(%1,%3  )", "0(%1,%3,2)", "0(%1,%4  )")
MSPEL_FILTER13_HOR_16B(shift3, "2*0(%1)", "2*1(%1)", "2*2(%1)", "2*3(%1)", OP_PUT, put_)
MSPEL_FILTER13_HOR_16B(shift3, "2*0(%1)", "2*1(%1)", "2*2(%1)", "2*3(%1)", OP_AVG, avg_)

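/* Sketch (not compiled) of what the shift1/shift3 instantiations compute
 * per output pixel: the same bicubic kernel with the tap order mirrored
 * (plus rounding and a shift of 6 in the 8-bit versions). Names are
 * illustrative only. */
#if 0
static int bicubic_ref(const uint8_t *p, int step, int shift3)
{
    return shift3 ? -3*p[-step] + 18*p[0] + 53*p[step] - 4*p[2*step]
                  : -4*p[-step] + 53*p[0] + 18*p[step] - 3*p[2*step];
}
#endif
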
typedef void (*vc1_mspel_mc_filter_ver_16bits)(int16_t *dst, const uint8_t *src, x86_reg src_stride, int rnd, int64_t shift);
typedef void (*vc1_mspel_mc_filter_hor_16bits)(uint8_t *dst, x86_reg dst_stride, const int16_t *src, int rnd);
typedef void (*vc1_mspel_mc_filter_8bits)(uint8_t *dst, const uint8_t *src, x86_reg stride, int rnd, x86_reg offset);

/**
 * Interpolate fractional pel values by applying proper vertical then
 * horizontal filter.
 *
 * @param dst    Destination buffer for interpolated pels.
 * @param src    Source buffer.
 * @param stride Stride for both src and dst buffers.
 * @param hmode  Horizontal filter (expressed in quarter pixels shift).
 * @param vmode  Vertical filter.
 * @param rnd    Rounding bias.
 */
#define VC1_MSPEL_MC(OP)\
static void OP ## vc1_mspel_mc(uint8_t *dst, const uint8_t *src, int stride,\
                               int hmode, int vmode, int rnd)\
{\
    static const vc1_mspel_mc_filter_ver_16bits vc1_put_shift_ver_16bits[] =\
         { NULL, vc1_put_ver_16b_shift1_mmx, vc1_put_ver_16b_shift2_mmx, vc1_put_ver_16b_shift3_mmx };\
    static const vc1_mspel_mc_filter_hor_16bits vc1_put_shift_hor_16bits[] =\
         { NULL, OP ## vc1_hor_16b_shift1_mmx, OP ## vc1_hor_16b_shift2_mmx, OP ## vc1_hor_16b_shift3_mmx };\
    static const vc1_mspel_mc_filter_8bits vc1_put_shift_8bits[] =\
         { NULL, OP ## vc1_shift1_mmx, OP ## vc1_shift2_mmx, OP ## vc1_shift3_mmx };\
\
    __asm__ volatile(\
        "pxor %%mm0, %%mm0         \n\t"\
        ::: "memory"\
    );\
\
    if (vmode) { /* Vertical filter to apply */\
        if (hmode) { /* Horizontal filter to apply, output to tmp */\
            static const int shift_value[] = { 0, 5, 1, 5 };\
            int    shift = (shift_value[hmode]+shift_value[vmode])>>1;\
            int    r;\
            DECLARE_ALIGNED(16, int16_t, tmp)[12*8];\
\
            r = (1<<(shift-1)) + rnd-1;\
            vc1_put_shift_ver_16bits[vmode](tmp, src-1, stride, r, shift);\
\
            vc1_put_shift_hor_16bits[hmode](dst, stride, tmp+1, 64-rnd);\
            return;\
        }\
        else { /* No horizontal filter, output 8 lines to dst */\
            vc1_put_shift_8bits[vmode](dst, src, stride, 1-rnd, stride);\
            return;\
        }\
    }\
\
    /* Horizontal mode with no vertical mode */\
    vc1_put_shift_8bits[hmode](dst, src, stride, rnd, 1);\
}

VC1_MSPEL_MC(put_)
VC1_MSPEL_MC(avg_)

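/* Worked example (illustrative) of the two-pass arithmetic above: for
 * hmode == vmode == 2 (half-pel both ways), shift_value[] gives
 * shift = (1+1)>>1 = 1, so the vertical pass stores
 * ((-a+9*b+9*c-d) + r) >> 1 as 16-bit data with r = (1<<(shift-1))+rnd-1,
 * and the horizontal pass then shifts by 7 with rounder 64-rnd: a combined
 * scale of 16*16 = 2^(1+7). */
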
/** Macro to ease bicubic filter interpolation functions declarations */
#define DECLARE_FUNCTION(a, b)                                          \
static void put_vc1_mspel_mc ## a ## b ## _mmx(uint8_t *dst, const uint8_t *src, int stride, int rnd) { \
     put_vc1_mspel_mc(dst, src, stride, a, b, rnd);                     \
}                                                                       \
static void avg_vc1_mspel_mc ## a ## b ## _mmx2(uint8_t *dst, const uint8_t *src, int stride, int rnd) { \
     avg_vc1_mspel_mc(dst, src, stride, a, b, rnd);                     \
}

DECLARE_FUNCTION(0, 1)
DECLARE_FUNCTION(0, 2)
DECLARE_FUNCTION(0, 3)

DECLARE_FUNCTION(1, 0)
DECLARE_FUNCTION(1, 1)
DECLARE_FUNCTION(1, 2)
DECLARE_FUNCTION(1, 3)

DECLARE_FUNCTION(2, 0)
DECLARE_FUNCTION(2, 1)
DECLARE_FUNCTION(2, 2)
DECLARE_FUNCTION(2, 3)

DECLARE_FUNCTION(3, 0)
DECLARE_FUNCTION(3, 1)
DECLARE_FUNCTION(3, 2)
DECLARE_FUNCTION(3, 3)

static void vc1_inv_trans_4x4_dc_mmx2(uint8_t *dest, int linesize, DCTELEM *block)
{
    int dc = block[0];
    dc = (17 * dc +  4) >> 3;
    dc = (17 * dc + 64) >> 7;
    __asm__ volatile(
        "movd          %0, %%mm0   \n\t"
        "pshufw $0, %%mm0, %%mm0   \n\t"
        "pxor       %%mm1, %%mm1   \n\t"
        "psubw      %%mm0, %%mm1   \n\t"
        "packuswb   %%mm0, %%mm0   \n\t"
        "packuswb   %%mm1, %%mm1   \n\t"
        ::"r"(dc)
    );
    __asm__ volatile(
        "movd          %0, %%mm2   \n\t"
        "movd          %1, %%mm3   \n\t"
        "movd          %2, %%mm4   \n\t"
        "movd          %3, %%mm5   \n\t"
        "paddusb    %%mm0, %%mm2   \n\t"
        "paddusb    %%mm0, %%mm3   \n\t"
        "paddusb    %%mm0, %%mm4   \n\t"
        "paddusb    %%mm0, %%mm5   \n\t"
        "psubusb    %%mm1, %%mm2   \n\t"
        "psubusb    %%mm1, %%mm3   \n\t"
        "psubusb    %%mm1, %%mm4   \n\t"
        "psubusb    %%mm1, %%mm5   \n\t"
        "movd       %%mm2, %0      \n\t"
        "movd       %%mm3, %1      \n\t"
        "movd       %%mm4, %2      \n\t"
        "movd       %%mm5, %3      \n\t"
        :"+m"(*(uint32_t*)(dest+0*linesize)),
         "+m"(*(uint32_t*)(dest+1*linesize)),
         "+m"(*(uint32_t*)(dest+2*linesize)),
         "+m"(*(uint32_t*)(dest+3*linesize))
    );
}

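/* How the DC add works in these _dc functions (scalar sketch, not
 * compiled): mm0 holds max(dc,0) and mm1 holds max(-dc,0) as packed bytes
 * (packuswb saturates negative words to 0), so paddusb followed by
 * psubusb performs a signed add of dc with unsigned saturation to
 * [0,255]. */
#if 0
static void add_dc_ref(uint8_t *dest, int linesize, int dc, int w, int h)
{
    for (int y = 0; y < h; y++, dest += linesize)
        for (int x = 0; x < w; x++) {
            int v = dest[x] + dc;
            dest[x] = v > 255 ? 255 : v < 0 ? 0 : v;
        }
}
#endif
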
static void vc1_inv_trans_4x8_dc_mmx2(uint8_t *dest, int linesize, DCTELEM *block)
{
    int dc = block[0];
    dc = (17 * dc +  4) >> 3;
    dc = (12 * dc + 64) >> 7;
    __asm__ volatile(
        "movd          %0, %%mm0   \n\t"
        "pshufw $0, %%mm0, %%mm0   \n\t"
        "pxor       %%mm1, %%mm1   \n\t"
        "psubw      %%mm0, %%mm1   \n\t"
        "packuswb   %%mm0, %%mm0   \n\t"
        "packuswb   %%mm1, %%mm1   \n\t"
        ::"r"(dc)
    );
    __asm__ volatile(
        "movd          %0, %%mm2   \n\t"
        "movd          %1, %%mm3   \n\t"
        "movd          %2, %%mm4   \n\t"
        "movd          %3, %%mm5   \n\t"
        "paddusb    %%mm0, %%mm2   \n\t"
        "paddusb    %%mm0, %%mm3   \n\t"
        "paddusb    %%mm0, %%mm4   \n\t"
        "paddusb    %%mm0, %%mm5   \n\t"
        "psubusb    %%mm1, %%mm2   \n\t"
        "psubusb    %%mm1, %%mm3   \n\t"
        "psubusb    %%mm1, %%mm4   \n\t"
        "psubusb    %%mm1, %%mm5   \n\t"
        "movd       %%mm2, %0      \n\t"
        "movd       %%mm3, %1      \n\t"
        "movd       %%mm4, %2      \n\t"
        "movd       %%mm5, %3      \n\t"
        :"+m"(*(uint32_t*)(dest+0*linesize)),
         "+m"(*(uint32_t*)(dest+1*linesize)),
         "+m"(*(uint32_t*)(dest+2*linesize)),
         "+m"(*(uint32_t*)(dest+3*linesize))
    );
    dest += 4*linesize;
    __asm__ volatile(
        "movd          %0, %%mm2   \n\t"
        "movd          %1, %%mm3   \n\t"
        "movd          %2, %%mm4   \n\t"
        "movd          %3, %%mm5   \n\t"
        "paddusb    %%mm0, %%mm2   \n\t"
        "paddusb    %%mm0, %%mm3   \n\t"
        "paddusb    %%mm0, %%mm4   \n\t"
        "paddusb    %%mm0, %%mm5   \n\t"
        "psubusb    %%mm1, %%mm2   \n\t"
        "psubusb    %%mm1, %%mm3   \n\t"
        "psubusb    %%mm1, %%mm4   \n\t"
        "psubusb    %%mm1, %%mm5   \n\t"
        "movd       %%mm2, %0      \n\t"
        "movd       %%mm3, %1      \n\t"
        "movd       %%mm4, %2      \n\t"
        "movd       %%mm5, %3      \n\t"
        :"+m"(*(uint32_t*)(dest+0*linesize)),
         "+m"(*(uint32_t*)(dest+1*linesize)),
         "+m"(*(uint32_t*)(dest+2*linesize)),
         "+m"(*(uint32_t*)(dest+3*linesize))
    );
}

static void vc1_inv_trans_8x4_dc_mmx2(uint8_t *dest, int linesize, DCTELEM *block)
{
    int dc = block[0];
    dc = ( 3 * dc +  1) >> 1;
    dc = (17 * dc + 64) >> 7;
    __asm__ volatile(
        "movd          %0, %%mm0   \n\t"
        "pshufw $0, %%mm0, %%mm0   \n\t"
        "pxor       %%mm1, %%mm1   \n\t"
        "psubw      %%mm0, %%mm1   \n\t"
        "packuswb   %%mm0, %%mm0   \n\t"
        "packuswb   %%mm1, %%mm1   \n\t"
        ::"r"(dc)
    );
    __asm__ volatile(
        "movq          %0, %%mm2   \n\t"
        "movq          %1, %%mm3   \n\t"
        "movq          %2, %%mm4   \n\t"
        "movq          %3, %%mm5   \n\t"
        "paddusb    %%mm0, %%mm2   \n\t"
        "paddusb    %%mm0, %%mm3   \n\t"
        "paddusb    %%mm0, %%mm4   \n\t"
        "paddusb    %%mm0, %%mm5   \n\t"
        "psubusb    %%mm1, %%mm2   \n\t"
        "psubusb    %%mm1, %%mm3   \n\t"
        "psubusb    %%mm1, %%mm4   \n\t"
        "psubusb    %%mm1, %%mm5   \n\t"
        "movq       %%mm2, %0      \n\t"
        "movq       %%mm3, %1      \n\t"
        "movq       %%mm4, %2      \n\t"
        "movq       %%mm5, %3      \n\t"
        :"+m"(*(uint64_t*)(dest+0*linesize)),
         "+m"(*(uint64_t*)(dest+1*linesize)),
         "+m"(*(uint64_t*)(dest+2*linesize)),
         "+m"(*(uint64_t*)(dest+3*linesize))
    );
}

static void vc1_inv_trans_8x8_dc_mmx2(uint8_t *dest, int linesize, DCTELEM *block)
{
    int dc = block[0];
    dc = (3 * dc +  1) >> 1;
    dc = (3 * dc + 16) >> 5;
    __asm__ volatile(
        "movd          %0, %%mm0   \n\t"
        "pshufw $0, %%mm0, %%mm0   \n\t"
        "pxor       %%mm1, %%mm1   \n\t"
        "psubw      %%mm0, %%mm1   \n\t"
        "packuswb   %%mm0, %%mm0   \n\t"
        "packuswb   %%mm1, %%mm1   \n\t"
        ::"r"(dc)
    );
    __asm__ volatile(
        "movq          %0, %%mm2   \n\t"
        "movq          %1, %%mm3   \n\t"
        "movq          %2, %%mm4   \n\t"
        "movq          %3, %%mm5   \n\t"
        "paddusb    %%mm0, %%mm2   \n\t"
        "paddusb    %%mm0, %%mm3   \n\t"
        "paddusb    %%mm0, %%mm4   \n\t"
        "paddusb    %%mm0, %%mm5   \n\t"
        "psubusb    %%mm1, %%mm2   \n\t"
        "psubusb    %%mm1, %%mm3   \n\t"
        "psubusb    %%mm1, %%mm4   \n\t"
        "psubusb    %%mm1, %%mm5   \n\t"
        "movq       %%mm2, %0      \n\t"
        "movq       %%mm3, %1      \n\t"
        "movq       %%mm4, %2      \n\t"
        "movq       %%mm5, %3      \n\t"
        :"+m"(*(uint64_t*)(dest+0*linesize)),
         "+m"(*(uint64_t*)(dest+1*linesize)),
         "+m"(*(uint64_t*)(dest+2*linesize)),
         "+m"(*(uint64_t*)(dest+3*linesize))
    );
    dest += 4*linesize;
    __asm__ volatile(
        "movq          %0, %%mm2   \n\t"
        "movq          %1, %%mm3   \n\t"
        "movq          %2, %%mm4   \n\t"
        "movq          %3, %%mm5   \n\t"
        "paddusb    %%mm0, %%mm2   \n\t"
        "paddusb    %%mm0, %%mm3   \n\t"
        "paddusb    %%mm0, %%mm4   \n\t"
        "paddusb    %%mm0, %%mm5   \n\t"
        "psubusb    %%mm1, %%mm2   \n\t"
        "psubusb    %%mm1, %%mm3   \n\t"
        "psubusb    %%mm1, %%mm4   \n\t"
        "psubusb    %%mm1, %%mm5   \n\t"
        "movq       %%mm2, %0      \n\t"
        "movq       %%mm3, %1      \n\t"
        "movq       %%mm4, %2      \n\t"
        "movq       %%mm5, %3      \n\t"
        :"+m"(*(uint64_t*)(dest+0*linesize)),
         "+m"(*(uint64_t*)(dest+1*linesize)),
         "+m"(*(uint64_t*)(dest+2*linesize)),
         "+m"(*(uint64_t*)(dest+3*linesize))
    );
}

#define LOOP_FILTER(EXT) \
void ff_vc1_v_loop_filter4_ ## EXT(uint8_t *src, int stride, int pq); \
void ff_vc1_h_loop_filter4_ ## EXT(uint8_t *src, int stride, int pq); \
void ff_vc1_v_loop_filter8_ ## EXT(uint8_t *src, int stride, int pq); \
void ff_vc1_h_loop_filter8_ ## EXT(uint8_t *src, int stride, int pq); \
\
static void vc1_v_loop_filter16_ ## EXT(uint8_t *src, int stride, int pq) \
{ \
    ff_vc1_v_loop_filter8_ ## EXT(src,   stride, pq); \
    ff_vc1_v_loop_filter8_ ## EXT(src+8, stride, pq); \
} \
\
static void vc1_h_loop_filter16_ ## EXT(uint8_t *src, int stride, int pq) \
{ \
    ff_vc1_h_loop_filter8_ ## EXT(src,          stride, pq); \
    ff_vc1_h_loop_filter8_ ## EXT(src+8*stride, stride, pq); \
}

#if HAVE_YASM
LOOP_FILTER(mmx)
LOOP_FILTER(mmx2)
LOOP_FILTER(sse2)
LOOP_FILTER(ssse3)

void ff_vc1_h_loop_filter8_sse4(uint8_t *src, int stride, int pq);

static void vc1_h_loop_filter16_sse4(uint8_t *src, int stride, int pq)
{
    ff_vc1_h_loop_filter8_sse4(src,          stride, pq);
    ff_vc1_h_loop_filter8_sse4(src+8*stride, stride, pq);
}
#endif

void ff_vc1dsp_init_mmx(DSPContext* dsp, AVCodecContext *avctx) {
    int mm_flags = av_get_cpu_flags();

    dsp->put_vc1_mspel_pixels_tab[ 0] = ff_put_vc1_mspel_mc00_mmx;
    dsp->put_vc1_mspel_pixels_tab[ 4] = put_vc1_mspel_mc01_mmx;
    dsp->put_vc1_mspel_pixels_tab[ 8] = put_vc1_mspel_mc02_mmx;
    dsp->put_vc1_mspel_pixels_tab[12] = put_vc1_mspel_mc03_mmx;

    dsp->put_vc1_mspel_pixels_tab[ 1] = put_vc1_mspel_mc10_mmx;
    dsp->put_vc1_mspel_pixels_tab[ 5] = put_vc1_mspel_mc11_mmx;
    dsp->put_vc1_mspel_pixels_tab[ 9] = put_vc1_mspel_mc12_mmx;
    dsp->put_vc1_mspel_pixels_tab[13] = put_vc1_mspel_mc13_mmx;

    dsp->put_vc1_mspel_pixels_tab[ 2] = put_vc1_mspel_mc20_mmx;
    dsp->put_vc1_mspel_pixels_tab[ 6] = put_vc1_mspel_mc21_mmx;
    dsp->put_vc1_mspel_pixels_tab[10] = put_vc1_mspel_mc22_mmx;
    dsp->put_vc1_mspel_pixels_tab[14] = put_vc1_mspel_mc23_mmx;

    dsp->put_vc1_mspel_pixels_tab[ 3] = put_vc1_mspel_mc30_mmx;
    dsp->put_vc1_mspel_pixels_tab[ 7] = put_vc1_mspel_mc31_mmx;
    dsp->put_vc1_mspel_pixels_tab[11] = put_vc1_mspel_mc32_mmx;
    dsp->put_vc1_mspel_pixels_tab[15] = put_vc1_mspel_mc33_mmx;

    if (mm_flags & AV_CPU_FLAG_MMX2){
        dsp->avg_vc1_mspel_pixels_tab[ 0] = ff_avg_vc1_mspel_mc00_mmx2;
        dsp->avg_vc1_mspel_pixels_tab[ 4] = avg_vc1_mspel_mc01_mmx2;
        dsp->avg_vc1_mspel_pixels_tab[ 8] = avg_vc1_mspel_mc02_mmx2;
        dsp->avg_vc1_mspel_pixels_tab[12] = avg_vc1_mspel_mc03_mmx2;

        dsp->avg_vc1_mspel_pixels_tab[ 1] = avg_vc1_mspel_mc10_mmx2;
        dsp->avg_vc1_mspel_pixels_tab[ 5] = avg_vc1_mspel_mc11_mmx2;
        dsp->avg_vc1_mspel_pixels_tab[ 9] = avg_vc1_mspel_mc12_mmx2;
        dsp->avg_vc1_mspel_pixels_tab[13] = avg_vc1_mspel_mc13_mmx2;

        dsp->avg_vc1_mspel_pixels_tab[ 2] = avg_vc1_mspel_mc20_mmx2;
        dsp->avg_vc1_mspel_pixels_tab[ 6] = avg_vc1_mspel_mc21_mmx2;
        dsp->avg_vc1_mspel_pixels_tab[10] = avg_vc1_mspel_mc22_mmx2;
        dsp->avg_vc1_mspel_pixels_tab[14] = avg_vc1_mspel_mc23_mmx2;

        dsp->avg_vc1_mspel_pixels_tab[ 3] = avg_vc1_mspel_mc30_mmx2;
        dsp->avg_vc1_mspel_pixels_tab[ 7] = avg_vc1_mspel_mc31_mmx2;
        dsp->avg_vc1_mspel_pixels_tab[11] = avg_vc1_mspel_mc32_mmx2;
        dsp->avg_vc1_mspel_pixels_tab[15] = avg_vc1_mspel_mc33_mmx2;

        dsp->vc1_inv_trans_8x8_dc = vc1_inv_trans_8x8_dc_mmx2;
        dsp->vc1_inv_trans_4x8_dc = vc1_inv_trans_4x8_dc_mmx2;
        dsp->vc1_inv_trans_8x4_dc = vc1_inv_trans_8x4_dc_mmx2;
        dsp->vc1_inv_trans_4x4_dc = vc1_inv_trans_4x4_dc_mmx2;
    }

#define ASSIGN_LF(EXT) \
        dsp->vc1_v_loop_filter4  = ff_vc1_v_loop_filter4_ ## EXT; \
        dsp->vc1_h_loop_filter4  = ff_vc1_h_loop_filter4_ ## EXT; \
        dsp->vc1_v_loop_filter8  = ff_vc1_v_loop_filter8_ ## EXT; \
        dsp->vc1_h_loop_filter8  = ff_vc1_h_loop_filter8_ ## EXT; \
        dsp->vc1_v_loop_filter16 = vc1_v_loop_filter16_ ## EXT; \
        dsp->vc1_h_loop_filter16 = vc1_h_loop_filter16_ ## EXT

#if HAVE_YASM
    if (mm_flags & AV_CPU_FLAG_MMX) {
        ASSIGN_LF(mmx);
    }
    if (mm_flags & AV_CPU_FLAG_MMX2) {
        ASSIGN_LF(mmx2);
    }
    if (mm_flags & AV_CPU_FLAG_SSE2) {
        dsp->vc1_v_loop_filter8  = ff_vc1_v_loop_filter8_sse2;
        dsp->vc1_h_loop_filter8  = ff_vc1_h_loop_filter8_sse2;
        dsp->vc1_v_loop_filter16 = vc1_v_loop_filter16_sse2;
        dsp->vc1_h_loop_filter16 = vc1_h_loop_filter16_sse2;
    }
    if (mm_flags & AV_CPU_FLAG_SSSE3) {
        ASSIGN_LF(ssse3);
    }
    if (mm_flags & AV_CPU_FLAG_SSE4) {
        dsp->vc1_h_loop_filter8  = ff_vc1_h_loop_filter8_sse4;
        dsp->vc1_h_loop_filter16 = vc1_h_loop_filter16_sse4;
    }
#endif
}