/*
 * VC-1 and WMV3 - DSP functions MMX-optimized
 * Copyright (c) 2007 Christophe GISQUET <christophe.gisquet@free.fr>
 *
 * Permission is hereby granted, free of charge, to any person
 * obtaining a copy of this software and associated documentation
 * files (the "Software"), to deal in the Software without
 * restriction, including without limitation the rights to use,
 * copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following
 * conditions:
 *
 * The above copyright notice and this permission notice shall be
 * included in all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
 * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
 * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
 * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
/** Add rounder from mm7 to mm3 and pack result at destination */
#define NORMALIZE_MMX(SHIFT)                        \
    "paddw     %%mm7, %%mm3    \n\t" /* +bias-r */  \
    "paddw     %%mm7, %%mm4    \n\t" /* +bias-r */  \
    "psraw     "SHIFT", %%mm3  \n\t"                \
    "psraw     "SHIFT", %%mm4  \n\t"

#define TRANSFER_DO_PACK                            \
    "packuswb  %%mm4, %%mm3    \n\t"                \
    "movq      %%mm3, (%2)     \n\t"

#define TRANSFER_DONT_PACK                          \
    "movq      %%mm3, 0(%2)    \n\t"                \
    "movq      %%mm4, 8(%2)    \n\t"
/** @see MSPEL_FILTER13_CORE for use as UNPACK macro */
#define DO_UNPACK(reg) "punpcklbw %%mm0, " reg "\n\t"
#define DONT_UNPACK(reg)
/** Compute the rounder 32-r or 8-r and unpack it to mm7 */
#define LOAD_ROUNDER_MMX(ROUND)                     \
    "movd      "ROUND", %%mm7  \n\t"                \
    "punpcklwd %%mm7, %%mm7    \n\t"                \
    "punpckldq %%mm7, %%mm7    \n\t"
#define SHIFT2_LINE(OFF, R0,R1,R2,R3)               \
    "paddw     %%mm"#R2", %%mm"#R1"    \n\t"        \
    "movd      (%1,%4), %%mm"#R0"      \n\t"        \
    "pmullw    %%mm6, %%mm"#R1"        \n\t"        \
    "punpcklbw %%mm0, %%mm"#R0"        \n\t"        \
    "movd      (%1,%3), %%mm"#R3"      \n\t"        \
    "psubw     %%mm"#R0", %%mm"#R1"    \n\t"        \
    "punpcklbw %%mm0, %%mm"#R3"        \n\t"        \
    "paddw     %%mm7, %%mm"#R1"        \n\t"        \
    "psubw     %%mm"#R3", %%mm"#R1"    \n\t"        \
    "psraw     %5, %%mm"#R1"           \n\t"        \
    "movq      %%mm"#R1", "#OFF"(%2)   \n\t"        \

DECLARE_ALIGNED_16(static const uint64_t, fact_9) = 0x0009000900090009ULL;
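/*
 * Scalar sketch (illustrative, not from the original file) of the 1/2-pel
 * filter wired up by SHIFT2_LINE and fact_9: taps (-1, 9, 9, -1), with bias
 * and rounding folded by the caller into `rnd` and `shift`.
 */
static inline int vc1_shift2_tap_sketch(int a, int b, int c, int d,
                                        int rnd, int shift)
{
    return (-a + 9 * b + 9 * c - d + rnd) >> shift;
}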
/** Sacrificing mm6 allows pipelining of loads from src */
static void vc1_put_ver_16b_shift2_mmx(int16_t *dst,
                                       const uint8_t *src, long int stride,
                                       int rnd, int64_t shift)
    LOAD_ROUNDER_MMX("%6")
    "movd      (%1), %%mm2     \n\t"
    "movd      (%1), %%mm3     \n\t"
    "punpcklbw %%mm0, %%mm2    \n\t"
    "punpcklbw %%mm0, %%mm3    \n\t"
    SHIFT2_LINE(  0, 1, 2, 3, 4)
    SHIFT2_LINE( 24, 2, 3, 4, 1)
    SHIFT2_LINE( 48, 3, 4, 1, 2)
    SHIFT2_LINE( 72, 4, 1, 2, 3)
    SHIFT2_LINE( 96, 1, 2, 3, 4)
    SHIFT2_LINE(120, 2, 3, 4, 1)
    SHIFT2_LINE(144, 3, 4, 1, 2)
    SHIFT2_LINE(168, 4, 1, 2, 3)
    : "+g"(w), "+r"(src), "+r"(dst)
    : "r"(stride), "r"(-2*stride), "m"(shift),
      "m"(rnd), "m"(fact_9), "g"(9*stride-4)
/** To remove the bias, allowing use of 16-bit MMX arithmetic */
DECLARE_ALIGNED_16(static const uint64_t, ff_pw_128) = 0x0080008000800080ULL;

/**
 * Data is already unpacked, so some operations can directly be made from
 * memory.
 */
static void vc1_put_hor_16b_shift2_mmx(uint8_t *dst, long int stride,
                                       const int16_t *src, int rnd)
    rnd -= (-1+9+9-1)*1024; /* Add -1024 bias */
    LOAD_ROUNDER_MMX("%4")
    "movq      %6, %%mm6        \n\t"
    "movq      %5, %%mm5        \n\t"
    "movq      2*0+0(%1), %%mm1 \n\t"
    "movq      2*0+8(%1), %%mm2 \n\t"
    "movq      2*1+0(%1), %%mm3 \n\t"
    "movq      2*1+8(%1), %%mm4 \n\t"
    "paddw     2*3+0(%1), %%mm1 \n\t"
    "paddw     2*3+8(%1), %%mm2 \n\t"
    "paddw     2*2+0(%1), %%mm3 \n\t"
    "paddw     2*2+8(%1), %%mm4 \n\t"
    "pmullw    %%mm5, %%mm3     \n\t"
    "pmullw    %%mm5, %%mm4     \n\t"
    "psubw     %%mm1, %%mm3     \n\t"
    "psubw     %%mm2, %%mm4     \n\t"
    "paddw     %%mm6, %%mm3     \n\t"
    "paddw     %%mm6, %%mm4     \n\t"
    : "+g"(h), "+r" (src), "+r" (dst)
    : "g"(stride), "m"(rnd), "m"(fact_9), "m"(ff_pw_128)
/**
 * Purely vertical or horizontal 1/2 shift interpolation.
 * Sacrifice mm6 for the *9 factor.
 */
static void vc1_put_shift2_mmx(uint8_t *dst, const uint8_t *src,
                               long int stride, int rnd, long int offset)
    LOAD_ROUNDER_MMX("%6")
    "movq      %8, %%mm6       \n\t"
    "movd      0(%1   ), %%mm3 \n\t"
    "movd      4(%1   ), %%mm4 \n\t"
    "movd      0(%1,%3), %%mm1 \n\t"
    "movd      4(%1,%3), %%mm2 \n\t"
    "punpcklbw %%mm0, %%mm3    \n\t"
    "punpcklbw %%mm0, %%mm4    \n\t"
    "punpcklbw %%mm0, %%mm1    \n\t"
    "punpcklbw %%mm0, %%mm2    \n\t"
    "paddw     %%mm1, %%mm3    \n\t"
    "paddw     %%mm2, %%mm4    \n\t"
    "movd      0(%1,%4), %%mm1 \n\t"
    "movd      4(%1,%4), %%mm2 \n\t"
    "pmullw    %%mm6, %%mm3    \n\t" /* 0,9,9,0 */
    "pmullw    %%mm6, %%mm4    \n\t" /* 0,9,9,0 */
    "punpcklbw %%mm0, %%mm1    \n\t"
    "punpcklbw %%mm0, %%mm2    \n\t"
    "psubw     %%mm1, %%mm3    \n\t" /* -1,9,9,0 */
    "psubw     %%mm2, %%mm4    \n\t" /* -1,9,9,0 */
    "movd      0(%1,%3), %%mm1 \n\t"
    "movd      4(%1,%3), %%mm2 \n\t"
    "punpcklbw %%mm0, %%mm1    \n\t"
    "punpcklbw %%mm0, %%mm2    \n\t"
    "psubw     %%mm1, %%mm3    \n\t" /* -1,9,9,-1 */
    "psubw     %%mm2, %%mm4    \n\t" /* -1,9,9,-1 */
    : "+g"(h), "+r"(src), "+r"(dst)
    : "r"(offset), "r"(-2*offset), "g"(stride), "m"(rnd),
      "g"(stride-offset), "m"(fact_9)
/**
 * Filter coefficients made global so that all the 1/4 and 3/4 shift
 * interpolation functions can access them.
 */
DECLARE_ALIGNED_16(static const uint64_t, fact_53) = 0x0035003500350035ULL;
DECLARE_ALIGNED_16(static const uint64_t, fact_18) = 0x0012001200120012ULL;
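/*
 * Scalar sketch (illustrative, not from the original file) of the bicubic
 * quarter-pel filter these constants implement: taps (-4, 53, 18, -3) for
 * the 1/4 shift, mirrored to (-3, 18, 53, -4) for the 3/4 shift. The taps
 * sum to 64, hence the final shift of 6 in the 8-bit output paths.
 */
static inline int vc1_shift1_tap_sketch(int a, int b, int c, int d,
                                        int rnd, int shift)
{
    return (-4 * a + 53 * b + 18 * c - 3 * d + rnd) >> shift;
}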
/**
 * Core of the 1/4 and 3/4 shift bicubic interpolation.
 *
 * @param UNPACK Macro unpacking arguments from 8 to 16 bits (can be empty).
 * @param MOVQ "movd 1" when the data still needs unpacking, "movq 2" when
 *             it is already unpacked.
 * @param A1 Address of 1st tap (beware of unpacked/packed).
 * @param A2 Address of 2nd tap
 * @param A3 Address of 3rd tap
 * @param A4 Address of 4th tap
 * @param POS Multiplier applied to the 1st tap (ff_pw_3 in the callers below).
 */
#define MSPEL_FILTER13_CORE(UNPACK, MOVQ, A1, A2, A3, A4, POS) \
    MOVQ "*0+"A1", %%mm1       \n\t"                \
    MOVQ "*4+"A1", %%mm2       \n\t"                \
    UNPACK("%%mm1")                                 \
    UNPACK("%%mm2")                                 \
    "pmullw    "POS", %%mm1    \n\t" /* *3 */       \
    "pmullw    "POS", %%mm2    \n\t" /* *3 */       \
    MOVQ "*0+"A2", %%mm3       \n\t"                \
    MOVQ "*4+"A2", %%mm4       \n\t"                \
    UNPACK("%%mm3")                                 \
    UNPACK("%%mm4")                                 \
    "pmullw    %%mm6, %%mm3    \n\t" /* *18 */      \
    "pmullw    %%mm6, %%mm4    \n\t" /* *18 */      \
    "psubw     %%mm1, %%mm3    \n\t" /* 18,-3 */    \
    "psubw     %%mm2, %%mm4    \n\t" /* 18,-3 */    \
    MOVQ "*0+"A4", %%mm1       \n\t"                \
    MOVQ "*4+"A4", %%mm2       \n\t"                \
    UNPACK("%%mm1")                                 \
    UNPACK("%%mm2")                                 \
    "psllw     $2, %%mm1       \n\t" /* 4* */       \
    "psllw     $2, %%mm2       \n\t" /* 4* */       \
    "psubw     %%mm1, %%mm3    \n\t" /* -4,18,-3 */ \
    "psubw     %%mm2, %%mm4    \n\t" /* -4,18,-3 */ \
    MOVQ "*0+"A3", %%mm1       \n\t"                \
    MOVQ "*4+"A3", %%mm2       \n\t"                \
    UNPACK("%%mm1")                                 \
    UNPACK("%%mm2")                                 \
    "pmullw    %%mm5, %%mm1    \n\t" /* *53 */      \
    "pmullw    %%mm5, %%mm2    \n\t" /* *53 */      \
    "paddw     %%mm1, %%mm3    \n\t" /* -4,53,18,-3 */ \
    "paddw     %%mm2, %%mm4    \n\t" /* -4,53,18,-3 */
/**
 * Macro to build the vertical 16-bit version of vc1_put_shift[13].
 * Here, offset=src_stride. Parameters passed A1 to A4 must use
 * %3 (src_stride) and %4 (3*src_stride).
 *
 * @param NAME Either 1 or 3
 * @see MSPEL_FILTER13_CORE for information on A1->A4
 */
#define MSPEL_FILTER13_VER_16B(NAME, A1, A2, A3, A4)                \
static void                                                         \
vc1_put_ver_16b_ ## NAME ## _mmx(int16_t *dst, const uint8_t *src, \
                                 long int src_stride,               \
                                 int rnd, int64_t shift)            \
    LOAD_ROUNDER_MMX("%5")                                          \
    "movq      %7, %%mm5       \n\t"                                \
    "movq      %8, %%mm6       \n\t"                                \
    MSPEL_FILTER13_CORE(DO_UNPACK, "movd 1", A1, A2, A3, A4, "%9")  \
    NORMALIZE_MMX("%6")                                             \
    TRANSFER_DONT_PACK                                              \
    /* Last 3 (in fact 4) bytes on the line */                      \
    "movd      8+"A1", %%mm1   \n\t"                                \
    DO_UNPACK("%%mm1")                                              \
    "movq      %%mm1, %%mm3    \n\t"                                \
    "paddw     %%mm1, %%mm1    \n\t"                                \
    "paddw     %%mm3, %%mm1    \n\t" /* 3* */                       \
    "movd      8+"A2", %%mm3   \n\t"                                \
    DO_UNPACK("%%mm3")                                              \
    "pmullw    %%mm6, %%mm3    \n\t" /* *18 */                      \
    "psubw     %%mm1, %%mm3    \n\t" /* 18,-3 */                    \
    "movd      8+"A3", %%mm1   \n\t"                                \
    DO_UNPACK("%%mm1")                                              \
    "pmullw    %%mm5, %%mm1    \n\t" /* *53 */                      \
    "paddw     %%mm1, %%mm3    \n\t" /* 53,18,-3 */                 \
    "movd      8+"A4", %%mm1   \n\t"                                \
    DO_UNPACK("%%mm1")                                              \
    "psllw     $2, %%mm1       \n\t" /* 4* */                       \
    "psubw     %%mm1, %%mm3    \n\t"                                \
    "paddw     %%mm7, %%mm3    \n\t"                                \
    "psraw     %6, %%mm3       \n\t"                                \
    "movq      %%mm3, 16(%2)   \n\t"                                \
    : "+g"(h), "+r" (src), "+r" (dst)                               \
    : "r"(src_stride), "r"(3*src_stride),                           \
      "m"(rnd), "m"(shift),                                         \
      "m"(fact_53), "m"(fact_18), "m"(ff_pw_3)                      \
/**
 * Macro to build the horizontal 16-bit version of vc1_put_shift[13].
 * Here, offsets are 16-bit, so parameters passed A1 to A4 should be simple.
 *
 * @param NAME Either 1 or 3
 * @see MSPEL_FILTER13_CORE for information on A1->A4
 */
#define MSPEL_FILTER13_HOR_16B(NAME, A1, A2, A3, A4)                \
static void                                                         \
vc1_put_hor_16b_ ## NAME ## _mmx(uint8_t *dst, long int stride,     \
                                 const int16_t *src, int rnd)       \
    rnd -= (-4+58+13-3)*256; /* Add -256 bias */                    \
    LOAD_ROUNDER_MMX("%4")                                          \
    "movq      %6, %%mm6       \n\t"                                \
    "movq      %5, %%mm5       \n\t"                                \
    MSPEL_FILTER13_CORE(DONT_UNPACK, "movq 2", A1, A2, A3, A4, "%8") \
    NORMALIZE_MMX("$7")                                             \
    /* Remove bias */                                               \
    "paddw     %7, %%mm3       \n\t"                                \
    "paddw     %7, %%mm4       \n\t"                                \
    TRANSFER_DO_PACK                                                \
    : "+g"(h), "+r" (src), "+r" (dst)                               \
    : "g"(stride), "m"(rnd), "m"(fact_53), "m"(fact_18),            \
      "m"(ff_pw_128), "m"(ff_pw_3)                                  \
/**
 * Macro to build the 8-bit, any-direction version of vc1_put_shift[13].
 * Here, offset=src_stride. Parameters passed A1 to A4 must use
 * %3 (offset) and %4 (3*offset).
 *
 * @param NAME Either 1 or 3
 * @see MSPEL_FILTER13_CORE for information on A1->A4
 */
#define MSPEL_FILTER13_8B(NAME, A1, A2, A3, A4)                     \
static void                                                         \
vc1_put_ ## NAME ## _mmx(uint8_t *dst, const uint8_t *src,          \
                         long int stride, int rnd, long int offset) \
    LOAD_ROUNDER_MMX("%6")                                          \
    "movq      %7, %%mm5       \n\t"                                \
    "movq      %8, %%mm6       \n\t"                                \
    MSPEL_FILTER13_CORE(DO_UNPACK, "movd 1", A1, A2, A3, A4, "%9")  \
    NORMALIZE_MMX("$6")                                             \
    TRANSFER_DO_PACK                                                \
    : "+g"(h), "+r"(src), "+r"(dst)                                 \
    : "r"(offset), "r"(3*offset), "g"(stride), "m"(rnd),            \
      "m"(fact_53), "m"(fact_18), "m"(ff_pw_3)                      \
/** 1/4 shift bicubic interpolation */
MSPEL_FILTER13_8B     (shift1, "0(%1,%4 )", "0(%1,%3,2)", "0(%1,%3 )", "0(%1 )")
MSPEL_FILTER13_VER_16B(shift1, "0(%1,%4 )", "0(%1,%3,2)", "0(%1,%3 )", "0(%1 )")
MSPEL_FILTER13_HOR_16B(shift1, "2*3(%1)", "2*2(%1)", "2*1(%1)", "2*0(%1)")

/** 3/4 shift bicubic interpolation */
MSPEL_FILTER13_8B     (shift3, "0(%1 )", "0(%1,%3 )", "0(%1,%3,2)", "0(%1,%4 )")
MSPEL_FILTER13_VER_16B(shift3, "0(%1 )", "0(%1,%3 )", "0(%1,%3,2)", "0(%1,%4 )")
MSPEL_FILTER13_HOR_16B(shift3, "2*0(%1)", "2*1(%1)", "2*2(%1)", "2*3(%1)")
typedef void (*vc1_mspel_mc_filter_ver_16bits)(int16_t *dst, const uint8_t *src, long int src_stride, int rnd, int64_t shift);
typedef void (*vc1_mspel_mc_filter_hor_16bits)(uint8_t *dst, long int dst_stride, const int16_t *src, int rnd);
typedef void (*vc1_mspel_mc_filter_8bits)(uint8_t *dst, const uint8_t *src, long int stride, int rnd, long int offset);
/**
 * Interpolates fractional pel values by applying the proper vertical then
 * horizontal filter.
 *
 * @param dst Destination buffer for interpolated pels.
 * @param src Source buffer.
 * @param stride Stride for both src and dst buffers.
 * @param hmode Horizontal filter (expressed in quarter-pel shifts).
 * @param vmode Vertical filter.
 * @param rnd Rounding bias.
 */
static void vc1_mspel_mc(uint8_t *dst, const uint8_t *src, int stride,
                         int hmode, int vmode, int rnd)
{
    static const vc1_mspel_mc_filter_ver_16bits vc1_put_shift_ver_16bits[] =
        { NULL, vc1_put_ver_16b_shift1_mmx, vc1_put_ver_16b_shift2_mmx, vc1_put_ver_16b_shift3_mmx };
    static const vc1_mspel_mc_filter_hor_16bits vc1_put_shift_hor_16bits[] =
        { NULL, vc1_put_hor_16b_shift1_mmx, vc1_put_hor_16b_shift2_mmx, vc1_put_hor_16b_shift3_mmx };
    static const vc1_mspel_mc_filter_8bits vc1_put_shift_8bits[] =
        { NULL, vc1_put_shift1_mmx, vc1_put_shift2_mmx, vc1_put_shift3_mmx };

    asm volatile(
        "pxor %%mm0, %%mm0         \n\t"
        ::: "memory"
    );

    if (vmode) { /* Vertical filter to apply */
        if (hmode) { /* Horizontal filter to apply, output to tmp */
            static const int shift_value[] = { 0, 5, 1, 5 };
            int shift = (shift_value[hmode]+shift_value[vmode])>>1;
            int r;
            DECLARE_ALIGNED_16(int16_t, tmp[12*8]);

            r = (1<<(shift-1)) + rnd-1;
            vc1_put_shift_ver_16bits[vmode](tmp, src-1, stride, r, shift);

            vc1_put_shift_hor_16bits[hmode](dst, stride, tmp+1, 64-rnd);
            return;
        }
        else { /* No horizontal filter, output 8 lines to dst */
            vc1_put_shift_8bits[vmode](dst, src, stride, 1-rnd, stride);
            return;
        }
    }

    /* Horizontal mode with no vertical mode */
    vc1_put_shift_8bits[hmode](dst, src, stride, rnd, 1);
}
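/*
 * Illustrative scalar sketch (an assumption, not part of this file) of the
 * two-pass scheme vc1_mspel_mc drives: a vertical 4-tap pass kept in 16-bit
 * precision, then a horizontal 4-tap pass packing back to 8 bits. The tap
 * arrays stand in for the shift1/shift2/shift3 coefficient sets; dimensions
 * and rounding are simplified.
 */
static void vc1_mspel_mc_sketch(uint8_t *dst, const uint8_t *src, int stride,
                                const int taps_v[4], const int taps_h[4],
                                int rnd1, int shift1, int rnd2, int shift2)
{
    int16_t tmp[8][11]; /* 8 rows of 8 + 3 extra columns for the taps */
    int x, y, v;

    /* Pass 1: vertical filter over rows y-1..y+2, unclamped 16-bit output */
    for (y = 0; y < 8; y++)
        for (x = 0; x < 11; x++) {
            const uint8_t *p = src - 1 + x + y*stride;
            tmp[y][x] = (taps_v[0]*p[-stride] + taps_v[1]*p[0] +
                         taps_v[2]*p[ stride] + taps_v[3]*p[2*stride] +
                         rnd1) >> shift1;
        }

    /* Pass 2: horizontal filter on the intermediates, clamped to 8 bits */
    for (y = 0; y < 8; y++)
        for (x = 0; x < 8; x++) {
            v = (taps_h[0]*tmp[y][x]   + taps_h[1]*tmp[y][x+1] +
                 taps_h[2]*tmp[y][x+2] + taps_h[3]*tmp[y][x+3] +
                 rnd2) >> shift2;
            dst[y*stride + x] = v < 0 ? 0 : v > 255 ? 255 : v;
        }
}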
static void put_vc1_mspel_mc00_mmx(uint8_t *dst, const uint8_t *src, int stride, int rnd) {
    put_pixels8_mmx(dst, src, stride, 8);
}
/** Macro to ease declaration of the bicubic filter interpolation functions */
#define DECLARE_FUNCTION(a, b) \
static void put_vc1_mspel_mc ## a ## b ## _mmx(uint8_t *dst, const uint8_t *src, int stride, int rnd) { \
    vc1_mspel_mc(dst, src, stride, a, b, rnd); \
}
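/*
 * For illustration (not in the original file): DECLARE_FUNCTION(1, 2)
 * expands to
 *
 *   static void put_vc1_mspel_mc12_mmx(uint8_t *dst, const uint8_t *src,
 *                                      int stride, int rnd) {
 *       vc1_mspel_mc(dst, src, stride, 1, 2, rnd);
 *   }
 */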
DECLARE_FUNCTION(0, 1)
DECLARE_FUNCTION(0, 2)
DECLARE_FUNCTION(0, 3)

DECLARE_FUNCTION(1, 0)
DECLARE_FUNCTION(1, 1)
DECLARE_FUNCTION(1, 2)
DECLARE_FUNCTION(1, 3)

DECLARE_FUNCTION(2, 0)
DECLARE_FUNCTION(2, 1)
DECLARE_FUNCTION(2, 2)
DECLARE_FUNCTION(2, 3)

DECLARE_FUNCTION(3, 0)
DECLARE_FUNCTION(3, 1)
DECLARE_FUNCTION(3, 2)
DECLARE_FUNCTION(3, 3)
void ff_vc1dsp_init_mmx(DSPContext* dsp, AVCodecContext *avctx) {
    dsp->put_vc1_mspel_pixels_tab[ 0] = put_vc1_mspel_mc00_mmx;
    dsp->put_vc1_mspel_pixels_tab[ 4] = put_vc1_mspel_mc01_mmx;
    dsp->put_vc1_mspel_pixels_tab[ 8] = put_vc1_mspel_mc02_mmx;
    dsp->put_vc1_mspel_pixels_tab[12] = put_vc1_mspel_mc03_mmx;

    dsp->put_vc1_mspel_pixels_tab[ 1] = put_vc1_mspel_mc10_mmx;
    dsp->put_vc1_mspel_pixels_tab[ 5] = put_vc1_mspel_mc11_mmx;
    dsp->put_vc1_mspel_pixels_tab[ 9] = put_vc1_mspel_mc12_mmx;
    dsp->put_vc1_mspel_pixels_tab[13] = put_vc1_mspel_mc13_mmx;

    dsp->put_vc1_mspel_pixels_tab[ 2] = put_vc1_mspel_mc20_mmx;
    dsp->put_vc1_mspel_pixels_tab[ 6] = put_vc1_mspel_mc21_mmx;
    dsp->put_vc1_mspel_pixels_tab[10] = put_vc1_mspel_mc22_mmx;
    dsp->put_vc1_mspel_pixels_tab[14] = put_vc1_mspel_mc23_mmx;

    dsp->put_vc1_mspel_pixels_tab[ 3] = put_vc1_mspel_mc30_mmx;
    dsp->put_vc1_mspel_pixels_tab[ 7] = put_vc1_mspel_mc31_mmx;
    dsp->put_vc1_mspel_pixels_tab[11] = put_vc1_mspel_mc32_mmx;
    dsp->put_vc1_mspel_pixels_tab[15] = put_vc1_mspel_mc33_mmx;
}