/*
 * VC-1 and WMV3 decoder - DSP functions
 * Copyright (c) 2006 Konstantin Shishkov
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

/**
 * @file
 * VC-1 and WMV3 decoder
 */
#include "libavutil/avassert.h"
#include "libavutil/common.h"
#include "libavutil/intreadwrite.h"
#include "h264chroma.h"
#include "rnd_avg.h"
#include "vc1dsp.h"

/* Apply overlap transform to horizontal edge */
static void vc1_v_overlap_c(uint8_t *src, int stride)
{
    int i;
    int a, b, c, d;
    int d1, d2;
    int rnd = 1;
    for (i = 0; i < 8; i++) {
        a = src[-2 * stride];
        b = src[-stride];
        c = src[0];
        d = src[stride];

        d1 = (a - d + 3 + rnd) >> 3;
        d2 = (a - d + b - c + 4 - rnd) >> 3;

        src[-2 * stride] = a - d1;
        src[-stride]     = av_clip_uint8(b - d2);
        src[0]           = av_clip_uint8(c + d2);
        src[stride]      = d + d1;
        src++;
        rnd = !rnd;
    }
}

/* Apply overlap transform to vertical edge */
static void vc1_h_overlap_c(uint8_t *src, int stride)
{
    int i;
    int a, b, c, d;
    int d1, d2;
    int rnd = 1;
    for (i = 0; i < 8; i++) {
        a = src[-2];
        b = src[-1];
        c = src[0];
        d = src[1];

        d1 = (a - d + 3 + rnd) >> 3;
        d2 = (a - d + b - c + 4 - rnd) >> 3;

        src[-2] = a - d1;
        src[-1] = av_clip_uint8(b - d2);
        src[0]  = av_clip_uint8(c + d2);
        src[1]  = d + d1;
        src += stride;
        rnd = !rnd;
    }
}

static void vc1_v_s_overlap_c(int16_t *top, int16_t *bottom)
{
    int i;
    int a, b, c, d;
    int d1, d2;
    int rnd1 = 4, rnd2 = 3;
    for (i = 0; i < 8; i++) {
        a = top[48];
        b = top[56];
        c = bottom[0];
        d = bottom[8];
        d1 = a - d;
        d2 = a - d + b - c;

        top[48]   = ((a << 3) - d1 + rnd1) >> 3;
        top[56]   = ((b << 3) - d2 + rnd2) >> 3;
        bottom[0] = ((c << 3) + d2 + rnd1) >> 3;
        bottom[8] = ((d << 3) + d1 + rnd2) >> 3;

        bottom++;
        top++;
        rnd2 = 7 - rnd2;
        rnd1 = 7 - rnd1;
    }
}

static void vc1_h_s_overlap_c(int16_t *left, int16_t *right)
{
    int i;
    int a, b, c, d;
    int d1, d2;
    int rnd1 = 4, rnd2 = 3;
    for (i = 0; i < 8; i++) {
        a = left[6];
        b = left[7];
        c = right[0];
        d = right[1];
        d1 = a - d;
        d2 = a - d + b - c;

        left[6]  = ((a << 3) - d1 + rnd1) >> 3;
        left[7]  = ((b << 3) - d2 + rnd2) >> 3;
        right[0] = ((c << 3) + d2 + rnd1) >> 3;
        right[1] = ((d << 3) + d1 + rnd2) >> 3;

        right += 8;
        left  += 8;
        rnd2   = 7 - rnd2;
        rnd1   = 7 - rnd1;
    }
}

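/*
 * Note on the overlap smoothing above: all four variants apply the same
 * 4-tap smoothing across a block edge to the two pixels on either side
 * (a, b | c, d).  The 8-bit versions work in place on reconstructed
 * pixels and alternate the rounding term between 1 and 0 on every
 * column/row; the "_s_" versions work on the 16-bit block buffers,
 * keeping 3 extra bits of precision ((x << 3) followed by a single
 * >> 3) and alternating the rounding constants 4 and 3.
 */
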
/**
 * VC-1 in-loop deblocking filter for one line
 * @param src pointer to the pixel just past the edge (src[-stride] and src[0] straddle it)
 * @param stride block stride
 * @param pq block quantizer
 * @return whether other 3 pairs should be filtered or not
 */
static av_always_inline int vc1_filter_line(uint8_t *src, int stride, int pq)
{
    int a0 = (2 * (src[-2 * stride] - src[1 * stride]) -
              5 * (src[-1 * stride] - src[0 * stride]) + 4) >> 3;
    int a0_sign = a0 >> 31;        /* Store sign */

    a0 = (a0 ^ a0_sign) - a0_sign; /* a0 = FFABS(a0); */
    if (a0 < pq) {
        int a1 = FFABS((2 * (src[-4 * stride] - src[-1 * stride]) -
                        5 * (src[-3 * stride] - src[-2 * stride]) + 4) >> 3);
        int a2 = FFABS((2 * (src[ 0 * stride] - src[ 3 * stride]) -
                        5 * (src[ 1 * stride] - src[ 2 * stride]) + 4) >> 3);
        if (a1 < a0 || a2 < a0) {
            int clip      = src[-1 * stride] - src[0 * stride];
            int clip_sign = clip >> 31;

            clip = ((clip ^ clip_sign) - clip_sign) >> 1;
            if (clip) {
                int a3     = FFMIN(a1, a2);
                int d      = 5 * (a3 - a0);
                int d_sign = (d >> 31);

                d       = ((d ^ d_sign) - d_sign) >> 3;
                d_sign ^= a0_sign;

                if (d_sign ^ clip_sign)
                    d = 0;
                else {
                    d = FFMIN(d, clip);
                    d = (d ^ d_sign) - d_sign; /* Restore sign */
                    src[-1 * stride] = av_clip_uint8(src[-1 * stride] - d);
                    src[ 0 * stride] = av_clip_uint8(src[ 0 * stride] + d);
                }
                return 1;
            }
        }
    }
    return 0;
}

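/*
 * The sign/abs tricks above avoid branches: for any 32-bit int x,
 * sign = x >> 31 is 0 for x >= 0 and -1 (all bits set) for x < 0, so
 * (x ^ sign) - sign == FFABS(x).  Example: x = -5 -> sign = -1,
 * (x ^ -1) = 4, 4 - (-1) = 5.  The final correction d is limited to
 * half the pixel step across the edge (clip) and only applied when the
 * derived sign matches the sign of that step.
 */
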
/**
 * VC-1 in-loop deblocking filter
 * @param src pointer to the first pixel of the edge to filter
 * @param step distance between horizontally adjacent elements
 * @param stride distance between vertically adjacent elements
 * @param len edge length to filter (4, 8 or 16 pixels)
 * @param pq block quantizer
 */
static inline void vc1_loop_filter(uint8_t *src, int step, int stride,
                                   int len, int pq)
{
    int i;
    int filt3;

    for (i = 0; i < len; i += 4) {
        filt3 = vc1_filter_line(src + 2 * step, stride, pq);
        if (filt3) {
            vc1_filter_line(src + 0 * step, stride, pq);
            vc1_filter_line(src + 1 * step, stride, pq);
            vc1_filter_line(src + 3 * step, stride, pq);
        }
        src += step * 4;
    }
}

static void vc1_v_loop_filter4_c(uint8_t *src, int stride, int pq)
{
    vc1_loop_filter(src, 1, stride, 4, pq);
}

static void vc1_h_loop_filter4_c(uint8_t *src, int stride, int pq)
{
    vc1_loop_filter(src, stride, 1, 4, pq);
}

static void vc1_v_loop_filter8_c(uint8_t *src, int stride, int pq)
{
    vc1_loop_filter(src, 1, stride, 8, pq);
}

static void vc1_h_loop_filter8_c(uint8_t *src, int stride, int pq)
{
    vc1_loop_filter(src, stride, 1, 8, pq);
}

static void vc1_v_loop_filter16_c(uint8_t *src, int stride, int pq)
{
    vc1_loop_filter(src, 1, stride, 16, pq);
}

static void vc1_h_loop_filter16_c(uint8_t *src, int stride, int pq)
{
    vc1_loop_filter(src, stride, 1, 16, pq);
}

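/*
 * The six wrappers above are the entry points stored in VC1DSPContext:
 * "v" filters a horizontal edge (vertically adjacent pixels), "h" a
 * vertical edge.  16-pixel edges are processed as four independent
 * 4-pixel segments, each gated by its own third-line decision.
 */
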
/* Do inverse transform on 8x8 block */
static void vc1_inv_trans_8x8_dc_c(uint8_t *dest, int linesize, int16_t *block)
{
    int i;
    int dc = block[0];

    dc = (3 * dc +  1) >> 1;
    dc = (3 * dc + 16) >> 5;

    for (i = 0; i < 8; i++) {
        dest[0] = av_clip_uint8(dest[0] + dc);
        dest[1] = av_clip_uint8(dest[1] + dc);
        dest[2] = av_clip_uint8(dest[2] + dc);
        dest[3] = av_clip_uint8(dest[3] + dc);
        dest[4] = av_clip_uint8(dest[4] + dc);
        dest[5] = av_clip_uint8(dest[5] + dc);
        dest[6] = av_clip_uint8(dest[6] + dc);
        dest[7] = av_clip_uint8(dest[7] + dc);
        dest += linesize;
    }
}

static void vc1_inv_trans_8x8_c(int16_t block[64])
{
    int i;
    register int t1, t2, t3, t4, t5, t6, t7, t8;
    int16_t *src, *dst, temp[64];

    src = block;
    dst = temp;
    for (i = 0; i < 8; i++) {
        t1 = 12 * (src[ 0] + src[32]) + 4;
        t2 = 12 * (src[ 0] - src[32]) + 4;
        t3 = 16 * src[16] +  6 * src[48];
        t4 =  6 * src[16] - 16 * src[48];

        t5 = t1 + t3;
        t6 = t2 + t4;
        t7 = t2 - t4;
        t8 = t1 - t3;

        t1 = 16 * src[ 8] + 15 * src[24] +  9 * src[40] +  4 * src[56];
        t2 = 15 * src[ 8] -  4 * src[24] - 16 * src[40] -  9 * src[56];
        t3 =  9 * src[ 8] - 16 * src[24] +  4 * src[40] + 15 * src[56];
        t4 =  4 * src[ 8] -  9 * src[24] + 15 * src[40] - 16 * src[56];

        dst[0] = (t5 + t1) >> 3;
        dst[1] = (t6 + t2) >> 3;
        dst[2] = (t7 + t3) >> 3;
        dst[3] = (t8 + t4) >> 3;
        dst[4] = (t8 - t4) >> 3;
        dst[5] = (t7 - t3) >> 3;
        dst[6] = (t6 - t2) >> 3;
        dst[7] = (t5 - t1) >> 3;

        src += 1;
        dst += 8;
    }

    src = temp;
    dst = block;
    for (i = 0; i < 8; i++) {
        t1 = 12 * (src[ 0] + src[32]) + 64;
        t2 = 12 * (src[ 0] - src[32]) + 64;
        t3 = 16 * src[16] +  6 * src[48];
        t4 =  6 * src[16] - 16 * src[48];

        t5 = t1 + t3;
        t6 = t2 + t4;
        t7 = t2 - t4;
        t8 = t1 - t3;

        t1 = 16 * src[ 8] + 15 * src[24] +  9 * src[40] +  4 * src[56];
        t2 = 15 * src[ 8] -  4 * src[24] - 16 * src[40] -  9 * src[56];
        t3 =  9 * src[ 8] - 16 * src[24] +  4 * src[40] + 15 * src[56];
        t4 =  4 * src[ 8] -  9 * src[24] + 15 * src[40] - 16 * src[56];

        dst[ 0] = (t5 + t1) >> 7;
        dst[ 8] = (t6 + t2) >> 7;
        dst[16] = (t7 + t3) >> 7;
        dst[24] = (t8 + t4) >> 7;
        dst[32] = (t8 - t4 + 1) >> 7;
        dst[40] = (t7 - t3 + 1) >> 7;
        dst[48] = (t6 - t2 + 1) >> 7;
        dst[56] = (t5 - t1 + 1) >> 7;

        src++;
        dst++;
    }
}

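/*
 * vc1_inv_trans_8x8_c implements the VC-1 8x8 inverse transform as two
 * 1-D passes over the coefficient block: rows first into a temporary
 * buffer (bias +4, >> 3), then columns back into the block (bias +64,
 * >> 7, with an extra +1 on the lower half for the asymmetric rounding
 * the bitstream format requires).  The *_dc_c variants handle the
 * common case of a single DC coefficient by folding both passes into
 * two scalar multiplications and adding the result to the destination.
 */
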
/* Do inverse transform on 8x4 part of block */
static void vc1_inv_trans_8x4_dc_c(uint8_t *dest, int linesize, int16_t *block)
{
    int i;
    int dc = block[0];

    dc = (3  * dc +  1) >> 1;
    dc = (17 * dc + 64) >> 7;

    for (i = 0; i < 4; i++) {
        dest[0] = av_clip_uint8(dest[0] + dc);
        dest[1] = av_clip_uint8(dest[1] + dc);
        dest[2] = av_clip_uint8(dest[2] + dc);
        dest[3] = av_clip_uint8(dest[3] + dc);
        dest[4] = av_clip_uint8(dest[4] + dc);
        dest[5] = av_clip_uint8(dest[5] + dc);
        dest[6] = av_clip_uint8(dest[6] + dc);
        dest[7] = av_clip_uint8(dest[7] + dc);
        dest += linesize;
    }
}

static void vc1_inv_trans_8x4_c(uint8_t *dest, int linesize, int16_t *block)
{
    int i;
    register int t1, t2, t3, t4, t5, t6, t7, t8;
    int16_t *src, *dst;

    src = block;
    dst = block;

    for (i = 0; i < 4; i++) {
        t1 = 12 * (src[0] + src[4]) + 4;
        t2 = 12 * (src[0] - src[4]) + 4;
        t3 = 16 * src[2] +  6 * src[6];
        t4 =  6 * src[2] - 16 * src[6];

        t5 = t1 + t3;
        t6 = t2 + t4;
        t7 = t2 - t4;
        t8 = t1 - t3;

        t1 = 16 * src[1] + 15 * src[3] +  9 * src[5] +  4 * src[7];
        t2 = 15 * src[1] -  4 * src[3] - 16 * src[5] -  9 * src[7];
        t3 =  9 * src[1] - 16 * src[3] +  4 * src[5] + 15 * src[7];
        t4 =  4 * src[1] -  9 * src[3] + 15 * src[5] - 16 * src[7];

        dst[0] = (t5 + t1) >> 3;
        dst[1] = (t6 + t2) >> 3;
        dst[2] = (t7 + t3) >> 3;
        dst[3] = (t8 + t4) >> 3;
        dst[4] = (t8 - t4) >> 3;
        dst[5] = (t7 - t3) >> 3;
        dst[6] = (t6 - t2) >> 3;
        dst[7] = (t5 - t1) >> 3;

        src += 8;
        dst += 8;
    }

    src = block;
    for (i = 0; i < 8; i++) {
        t1 = 17 * (src[ 0] + src[16]) + 64;
        t2 = 17 * (src[ 0] - src[16]) + 64;
        t3 = 22 * src[ 8] + 10 * src[24];
        t4 = 22 * src[24] - 10 * src[ 8];

        dest[0 * linesize] = av_clip_uint8(dest[0 * linesize] + ((t1 + t3) >> 7));
        dest[1 * linesize] = av_clip_uint8(dest[1 * linesize] + ((t2 - t4) >> 7));
        dest[2 * linesize] = av_clip_uint8(dest[2 * linesize] + ((t2 + t4) >> 7));
        dest[3 * linesize] = av_clip_uint8(dest[3 * linesize] + ((t1 - t3) >> 7));

        src++;
        dest++;
    }
}

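/*
 * The non-square transforms (8x4 above, 4x8 below) combine an 8-point
 * pass in one direction with a 4-point pass (the {17, 22, 10} kernel)
 * in the other.  Only the final, column pass adds the result into the
 * destination picture; the row pass stays in the coefficient buffer.
 */
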
/* Do inverse transform on 4x8 parts of block */
static void vc1_inv_trans_4x8_dc_c(uint8_t *dest, int linesize, int16_t *block)
{
    int i;
    int dc = block[0];

    dc = (17 * dc +  4) >> 3;
    dc = (12 * dc + 64) >> 7;

    for (i = 0; i < 8; i++) {
        dest[0] = av_clip_uint8(dest[0] + dc);
        dest[1] = av_clip_uint8(dest[1] + dc);
        dest[2] = av_clip_uint8(dest[2] + dc);
        dest[3] = av_clip_uint8(dest[3] + dc);
        dest += linesize;
    }
}

static void vc1_inv_trans_4x8_c(uint8_t *dest, int linesize, int16_t *block)
{
    int i;
    register int t1, t2, t3, t4, t5, t6, t7, t8;
    int16_t *src, *dst;

    src = block;
    dst = block;

    for (i = 0; i < 8; i++) {
        t1 = 17 * (src[0] + src[2]) + 4;
        t2 = 17 * (src[0] - src[2]) + 4;
        t3 = 22 * src[1] + 10 * src[3];
        t4 = 22 * src[3] - 10 * src[1];

        dst[0] = (t1 + t3) >> 3;
        dst[1] = (t2 - t4) >> 3;
        dst[2] = (t2 + t4) >> 3;
        dst[3] = (t1 - t3) >> 3;

        src += 8;
        dst += 8;
    }

    src = block;
    for (i = 0; i < 4; i++) {
        t1 = 12 * (src[ 0] + src[32]) + 64;
        t2 = 12 * (src[ 0] - src[32]) + 64;
        t3 = 16 * src[16] +  6 * src[48];
        t4 =  6 * src[16] - 16 * src[48];

        t5 = t1 + t3;
        t6 = t2 + t4;
        t7 = t2 - t4;
        t8 = t1 - t3;

        t1 = 16 * src[ 8] + 15 * src[24] +  9 * src[40] +  4 * src[56];
        t2 = 15 * src[ 8] -  4 * src[24] - 16 * src[40] -  9 * src[56];
        t3 =  9 * src[ 8] - 16 * src[24] +  4 * src[40] + 15 * src[56];
        t4 =  4 * src[ 8] -  9 * src[24] + 15 * src[40] - 16 * src[56];

        dest[0 * linesize] = av_clip_uint8(dest[0 * linesize] + ((t5 + t1) >> 7));
        dest[1 * linesize] = av_clip_uint8(dest[1 * linesize] + ((t6 + t2) >> 7));
        dest[2 * linesize] = av_clip_uint8(dest[2 * linesize] + ((t7 + t3) >> 7));
        dest[3 * linesize] = av_clip_uint8(dest[3 * linesize] + ((t8 + t4) >> 7));
        dest[4 * linesize] = av_clip_uint8(dest[4 * linesize] + ((t8 - t4 + 1) >> 7));
        dest[5 * linesize] = av_clip_uint8(dest[5 * linesize] + ((t7 - t3 + 1) >> 7));
        dest[6 * linesize] = av_clip_uint8(dest[6 * linesize] + ((t6 - t2 + 1) >> 7));
        dest[7 * linesize] = av_clip_uint8(dest[7 * linesize] + ((t5 - t1 + 1) >> 7));

        src++;
        dest++;
    }
}

/* Do inverse transform on 4x4 part of block */
static void vc1_inv_trans_4x4_dc_c(uint8_t *dest, int linesize, int16_t *block)
{
    int i;
    int dc = block[0];

    dc = (17 * dc +  4) >> 3;
    dc = (17 * dc + 64) >> 7;

    for (i = 0; i < 4; i++) {
        dest[0] = av_clip_uint8(dest[0] + dc);
        dest[1] = av_clip_uint8(dest[1] + dc);
        dest[2] = av_clip_uint8(dest[2] + dc);
        dest[3] = av_clip_uint8(dest[3] + dc);
        dest += linesize;
    }
}

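/*
 * Worked example for the DC-only path: with block[0] = 10 the 4x4 case
 * computes dc = (17 * 10 + 4) >> 3 = 21, then dc = (17 * 21 + 64) >> 7
 * = 3, roughly 10 * 289 / 1024, which is exactly what the full 4x4
 * transform below produces for a block whose only nonzero coefficient
 * is the DC one.
 */
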
static void vc1_inv_trans_4x4_c(uint8_t *dest, int linesize, int16_t *block)
{
    int i;
    register int t1, t2, t3, t4;
    int16_t *src, *dst;

    src = block;
    dst = block;
    for (i = 0; i < 4; i++) {
        t1 = 17 * (src[0] + src[2]) + 4;
        t2 = 17 * (src[0] - src[2]) + 4;
        t3 = 22 * src[1] + 10 * src[3];
        t4 = 22 * src[3] - 10 * src[1];

        dst[0] = (t1 + t3) >> 3;
        dst[1] = (t2 - t4) >> 3;
        dst[2] = (t2 + t4) >> 3;
        dst[3] = (t1 - t3) >> 3;

        src += 8;
        dst += 8;
    }

    src = block;
    for (i = 0; i < 4; i++) {
        t1 = 17 * (src[0] + src[16]) + 64;
        t2 = 17 * (src[0] - src[16]) + 64;
        t3 = 22 * src[8] + 10 * src[24];
        t4 = 22 * src[24] - 10 * src[8];

        dest[0 * linesize] = av_clip_uint8(dest[0 * linesize] + ((t1 + t3) >> 7));
        dest[1 * linesize] = av_clip_uint8(dest[1 * linesize] + ((t2 - t4) >> 7));
        dest[2 * linesize] = av_clip_uint8(dest[2 * linesize] + ((t2 + t4) >> 7));
        dest[3 * linesize] = av_clip_uint8(dest[3 * linesize] + ((t1 - t3) >> 7));

        src++;
        dest++;
    }
}

/* motion compensation functions */

/* Filter in case of 2 filters */
#define VC1_MSPEL_FILTER_16B(DIR, TYPE) \
static av_always_inline int vc1_mspel_ ## DIR ## _filter_16bits(const TYPE *src, \
                                                                 int stride, \
                                                                 int mode) \
{ \
    switch (mode) { \
    case 0: /* no shift - should not occur */ \
        return 0; \
    case 1: /* 1/4 shift */ \
        return -4 * src[-stride] + 53 * src[0] + \
               18 * src[stride]  -  3 * src[stride * 2]; \
    case 2: /* 1/2 shift */ \
        return -1 * src[-stride] +  9 * src[0] + \
                9 * src[stride]  -  1 * src[stride * 2]; \
    case 3: /* 3/4 shift */ \
        return -3 * src[-stride] + 18 * src[0] + \
               53 * src[stride]  -  4 * src[stride * 2]; \
    } \
    return 0; /* should not occur */ \
}

VC1_MSPEL_FILTER_16B(ver, uint8_t)
VC1_MSPEL_FILTER_16B(hor, int16_t)

/* Filter used to interpolate fractional pel values */
static av_always_inline int vc1_mspel_filter(const uint8_t *src, int stride,
                                             int mode, int r)
{
    switch (mode) {
    case 0: /* no shift */
        return src[0];
    case 1: /* 1/4 shift */
        return (-4 * src[-stride] + 53 * src[0] +
                18 * src[stride] - 3 * src[stride * 2] + 32 - r) >> 6;
    case 2: /* 1/2 shift */
        return (-1 * src[-stride] + 9 * src[0] +
                9 * src[stride] - 1 * src[stride * 2] + 8 - r) >> 4;
    case 3: /* 3/4 shift */
        return (-3 * src[-stride] + 18 * src[0] +
                53 * src[stride] - 4 * src[stride * 2] + 32 - r) >> 6;
    }
    return 0; // should not occur
}

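/*
 * The bicubic taps used above sum to a power of two, so the result is
 * normalized with a plain shift: {-4, 53, 18, -3} and {-3, 18, 53, -4}
 * sum to 64 (>> 6) for the quarter-pel positions, {-1, 9, 9, -1} sums
 * to 16 (>> 4) for the half-pel position.  The caller-supplied r,
 * derived from VC-1's rounding-control state, is subtracted from the
 * bias before the shift.
 */
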
/* Function used to do motion compensation with bicubic interpolation */
#define VC1_MSPEL_MC(OP, OP4, OPNAME) \
static av_always_inline void OPNAME ## vc1_mspel_mc(uint8_t *dst, \
                                                    const uint8_t *src, \
                                                    ptrdiff_t stride, \
                                                    int hmode, \
                                                    int vmode, \
                                                    int rnd) \
{ \
    int i, j; \
 \
    if (vmode) { /* Vertical filter to apply */ \
        int r; \
 \
        if (hmode) { /* Horizontal filter also needed, filter vertically into tmp first */ \
            static const int shift_value[] = { 0, 5, 1, 5 }; \
            int shift = (shift_value[hmode] + shift_value[vmode]) >> 1; \
            int16_t tmp[11 * 8], *tptr = tmp; \
 \
            r = (1 << (shift - 1)) + rnd - 1; \
 \
            src -= 1; \
            for (j = 0; j < 8; j++) { \
                for (i = 0; i < 11; i++) \
                    tptr[i] = (vc1_mspel_ver_filter_16bits(src + i, stride, vmode) + r) >> shift; \
                src  += stride; \
                tptr += 11; \
            } \
 \
            r    = 64 - rnd; \
            tptr = tmp + 1; \
            for (j = 0; j < 8; j++) { \
                for (i = 0; i < 8; i++) \
                    OP(dst[i], (vc1_mspel_hor_filter_16bits(tptr + i, 1, hmode) + r) >> 7); \
                dst  += stride; \
                tptr += 11; \
            } \
 \
            return; \
        } else { /* No horizontal filter, output 8 lines to dst */ \
            r = 1 - rnd; \
 \
            for (j = 0; j < 8; j++) { \
                for (i = 0; i < 8; i++) \
                    OP(dst[i], vc1_mspel_filter(src + i, stride, vmode, r)); \
                src += stride; \
                dst += stride; \
            } \
            return; \
        } \
    } \
 \
    /* Horizontal mode with no vertical mode */ \
    for (j = 0; j < 8; j++) { \
        for (i = 0; i < 8; i++) \
            OP(dst[i], vc1_mspel_filter(src + i, 1, hmode, rnd)); \
        dst += stride; \
        src += stride; \
    } \
} \
static void OPNAME ## pixels8x8_c(uint8_t *block, const uint8_t *pixels, \
                                  ptrdiff_t line_size, int rnd) \
{ \
    int i; \
 \
    for (i = 0; i < 8; i++) { \
        OP4(*(uint32_t *)(block),     AV_RN32(pixels)); \
        OP4(*(uint32_t *)(block + 4), AV_RN32(pixels + 4)); \
        pixels += line_size; \
        block  += line_size; \
    } \
}

#define op_put(a, b)  a = av_clip_uint8(b)
#define op_avg(a, b)  a = (a + av_clip_uint8(b) + 1) >> 1
#define op4_avg(a, b) a = rnd_avg32(a, b)
#define op4_put(a, b) a = b

VC1_MSPEL_MC(op_put, op4_put, put_)
VC1_MSPEL_MC(op_avg, op4_avg, avg_)

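/*
 * op_put/op_avg (and the 4-pixel-at-a-time op4_* counterparts used by
 * the unfiltered copy) are the only difference between the put_ and
 * avg_ instantiations: "put" overwrites the destination, "avg" rounds
 * the new value against what is already there, which is what the
 * decoder uses when averaging a second prediction (e.g. B frames).
 */
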
/* pixel functions - really are entry points to vc1_mspel_mc */

#define PUT_VC1_MSPEL(a, b) \
static void put_vc1_mspel_mc ## a ## b ## _c(uint8_t *dst, \
                                             const uint8_t *src, \
                                             ptrdiff_t stride, int rnd) \
{ \
    put_vc1_mspel_mc(dst, src, stride, a, b, rnd); \
} \
static void avg_vc1_mspel_mc ## a ## b ## _c(uint8_t *dst, \
                                             const uint8_t *src, \
                                             ptrdiff_t stride, int rnd) \
{ \
    avg_vc1_mspel_mc(dst, src, stride, a, b, rnd); \
}

PUT_VC1_MSPEL(1, 0)
PUT_VC1_MSPEL(2, 0)
PUT_VC1_MSPEL(3, 0)

PUT_VC1_MSPEL(0, 1)
PUT_VC1_MSPEL(1, 1)
PUT_VC1_MSPEL(2, 1)
PUT_VC1_MSPEL(3, 1)

PUT_VC1_MSPEL(0, 2)
PUT_VC1_MSPEL(1, 2)
PUT_VC1_MSPEL(2, 2)
PUT_VC1_MSPEL(3, 2)

PUT_VC1_MSPEL(0, 3)
PUT_VC1_MSPEL(1, 3)
PUT_VC1_MSPEL(2, 3)
PUT_VC1_MSPEL(3, 3)

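/*
 * In put/avg_vc1_mspel_mcAB_c, A is the horizontal and B the vertical
 * quarter-pel phase (0..3); mc00, the integer-pel case, is served by
 * the plain pixels8x8 copy instead.  The mspel_pixels_tab tables filled
 * in ff_vc1dsp_init() below are indexed as A + 4 * B.
 */
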
#define chroma_mc(a) \
    ((A * src[a] + B * src[a + 1] + \
      C * src[stride + a] + D * src[stride + a + 1] + 32 - 4) >> 6)
static void put_no_rnd_vc1_chroma_mc8_c(uint8_t *dst /* align 8 */,
                                        uint8_t *src /* align 1 */,
                                        int stride, int h, int x, int y)
{
    const int A = (8 - x) * (8 - y);
    const int B = (x) * (8 - y);
    const int C = (8 - x) * (y);
    const int D = (x) * (y);
    int i;

    av_assert2(x < 8 && y < 8 && x >= 0 && y >= 0);

    for (i = 0; i < h; i++) {
        dst[0] = chroma_mc(0);
        dst[1] = chroma_mc(1);
        dst[2] = chroma_mc(2);
        dst[3] = chroma_mc(3);
        dst[4] = chroma_mc(4);
        dst[5] = chroma_mc(5);
        dst[6] = chroma_mc(6);
        dst[7] = chroma_mc(7);
        dst += stride;
        src += stride;
    }
}

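/*
 * chroma_mc() is a bilinear interpolation with eighth-pel weights:
 * A + B + C + D = 64 for any x, y in 0..7, so >> 6 renormalizes.  The
 * "no_rnd" name refers to the 32 - 4 = 28 bias, the alternative chroma
 * rounding VC-1 selects through its rounding-control flag; the default
 * rounding case is handled elsewhere through the shared H.264 chroma
 * functions (hence the h264chroma.h include at the top of this file).
 */
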
static void put_no_rnd_vc1_chroma_mc4_c(uint8_t *dst, uint8_t *src,
                                        int stride, int h, int x, int y)
{
    const int A = (8 - x) * (8 - y);
    const int B = (x) * (8 - y);
    const int C = (8 - x) * (y);
    const int D = (x) * (y);
    int i;

    av_assert2(x < 8 && y < 8 && x >= 0 && y >= 0);

    for (i = 0; i < h; i++) {
        dst[0] = chroma_mc(0);
        dst[1] = chroma_mc(1);
        dst[2] = chroma_mc(2);
        dst[3] = chroma_mc(3);
        dst += stride;
        src += stride;
    }
}

#define avg2(a, b) (((a) + (b) + 1) >> 1)
static void avg_no_rnd_vc1_chroma_mc8_c(uint8_t *dst /* align 8 */,
                                        uint8_t *src /* align 1 */,
                                        int stride, int h, int x, int y)
{
    const int A = (8 - x) * (8 - y);
    const int B = (x) * (8 - y);
    const int C = (8 - x) * (y);
    const int D = (x) * (y);
    int i;

    av_assert2(x < 8 && y < 8 && x >= 0 && y >= 0);

    for (i = 0; i < h; i++) {
        dst[0] = avg2(dst[0], chroma_mc(0));
        dst[1] = avg2(dst[1], chroma_mc(1));
        dst[2] = avg2(dst[2], chroma_mc(2));
        dst[3] = avg2(dst[3], chroma_mc(3));
        dst[4] = avg2(dst[4], chroma_mc(4));
        dst[5] = avg2(dst[5], chroma_mc(5));
        dst[6] = avg2(dst[6], chroma_mc(6));
        dst[7] = avg2(dst[7], chroma_mc(7));
        dst += stride;
        src += stride;
    }
}

static void avg_no_rnd_vc1_chroma_mc4_c(uint8_t *dst /* align 8 */,
                                        uint8_t *src /* align 1 */,
                                        int stride, int h, int x, int y)
{
    const int A = (8 - x) * (8 - y);
    const int B = (    x) * (8 - y);
    const int C = (8 - x) * (    y);
    const int D = (    x) * (    y);
    int i;

    av_assert2(x < 8 && y < 8 && x >= 0 && y >= 0);

    for (i = 0; i < h; i++) {
        dst[0] = avg2(dst[0], chroma_mc(0));
        dst[1] = avg2(dst[1], chroma_mc(1));
        dst[2] = avg2(dst[2], chroma_mc(2));
        dst[3] = avg2(dst[3], chroma_mc(3));
        dst += stride;
        src += stride;
    }
}

#if CONFIG_WMV3IMAGE_DECODER || CONFIG_VC1IMAGE_DECODER

static void sprite_h_c(uint8_t *dst, const uint8_t *src, int offset,
                       int advance, int count)
{
    while (count--) {
        int a = src[(offset >> 16)];
        int b = src[(offset >> 16) + 1];
        *dst++  = a + ((b - a) * (offset & 0xFFFF) >> 16);
        offset += advance;
    }
}

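/*
 * Sprite scaling works in 16.16 fixed point: the integer part of
 * "offset" selects the source sample pair, the fractional part blends
 * linearly between them, and "advance" is the per-output-pixel step.
 */
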
static av_always_inline void sprite_v_template(uint8_t *dst,
                                               const uint8_t *src1a,
                                               const uint8_t *src1b,
                                               int offset1,
                                               int two_sprites,
                                               const uint8_t *src2a,
                                               const uint8_t *src2b,
                                               int offset2,
                                               int alpha, int scaled,
                                               int width)
{
    int a1, b1, a2, b2;

    while (width--) {
        a1 = *src1a++;
        if (scaled) {
            b1 = *src1b++;
            a1 = a1 + ((b1 - a1) * offset1 >> 16);
        }
        if (two_sprites) {
            a2 = *src2a++;
            if (scaled > 1) {
                b2 = *src2b++;
                a2 = a2 + ((b2 - a2) * offset2 >> 16);
            }
            a1 = a1 + ((a2 - a1) * alpha >> 16);
        }
        *dst++ = a1;
    }
}

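/*
 * The wrappers below are the specializations actually exported:
 * "scaled" controls whether the line pairs (src1a/src1b, src2a/src2b)
 * are blended vertically with offset1/offset2, "two_sprites" whether a
 * second sprite is alpha-blended on top.  Because sprite_v_template is
 * av_always_inline, the compiler drops the dead branches in each
 * specialization.
 */
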
static void sprite_v_single_c(uint8_t *dst, const uint8_t *src1a,
                              const uint8_t *src1b,
                              int offset, int width)
{
    sprite_v_template(dst, src1a, src1b, offset, 0, NULL, NULL, 0, 0, 1, width);
}

static void sprite_v_double_noscale_c(uint8_t *dst, const uint8_t *src1a,
                                      const uint8_t *src2a,
                                      int alpha, int width)
{
    sprite_v_template(dst, src1a, NULL, 0, 1, src2a, NULL, 0, alpha, 0, width);
}

static void sprite_v_double_onescale_c(uint8_t *dst,
                                       const uint8_t *src1a,
                                       const uint8_t *src1b,
                                       int offset1,
                                       const uint8_t *src2a,
                                       int alpha, int width)
{
    sprite_v_template(dst, src1a, src1b, offset1, 1, src2a, NULL, 0, alpha, 1,
                      width);
}

static void sprite_v_double_twoscale_c(uint8_t *dst,
                                       const uint8_t *src1a,
                                       const uint8_t *src1b,
                                       int offset1,
                                       const uint8_t *src2a,
                                       const uint8_t *src2b,
                                       int offset2,
                                       int alpha,
                                       int width)
{
    sprite_v_template(dst, src1a, src1b, offset1, 1, src2a, src2b, offset2,
                      alpha, 2, width);
}

#endif /* CONFIG_WMV3IMAGE_DECODER || CONFIG_VC1IMAGE_DECODER */

av_cold void ff_vc1dsp_init(VC1DSPContext *dsp)
{
    dsp->vc1_inv_trans_8x8    = vc1_inv_trans_8x8_c;
    dsp->vc1_inv_trans_4x8    = vc1_inv_trans_4x8_c;
    dsp->vc1_inv_trans_8x4    = vc1_inv_trans_8x4_c;
    dsp->vc1_inv_trans_4x4    = vc1_inv_trans_4x4_c;
    dsp->vc1_inv_trans_8x8_dc = vc1_inv_trans_8x8_dc_c;
    dsp->vc1_inv_trans_4x8_dc = vc1_inv_trans_4x8_dc_c;
    dsp->vc1_inv_trans_8x4_dc = vc1_inv_trans_8x4_dc_c;
    dsp->vc1_inv_trans_4x4_dc = vc1_inv_trans_4x4_dc_c;

    dsp->vc1_h_overlap        = vc1_h_overlap_c;
    dsp->vc1_v_overlap        = vc1_v_overlap_c;
    dsp->vc1_h_s_overlap      = vc1_h_s_overlap_c;
    dsp->vc1_v_s_overlap      = vc1_v_s_overlap_c;

    dsp->vc1_v_loop_filter4   = vc1_v_loop_filter4_c;
    dsp->vc1_h_loop_filter4   = vc1_h_loop_filter4_c;
    dsp->vc1_v_loop_filter8   = vc1_v_loop_filter8_c;
    dsp->vc1_h_loop_filter8   = vc1_h_loop_filter8_c;
    dsp->vc1_v_loop_filter16  = vc1_v_loop_filter16_c;
    dsp->vc1_h_loop_filter16  = vc1_h_loop_filter16_c;

    dsp->put_vc1_mspel_pixels_tab[0]  = put_pixels8x8_c;
    dsp->put_vc1_mspel_pixels_tab[1]  = put_vc1_mspel_mc10_c;
    dsp->put_vc1_mspel_pixels_tab[2]  = put_vc1_mspel_mc20_c;
    dsp->put_vc1_mspel_pixels_tab[3]  = put_vc1_mspel_mc30_c;
    dsp->put_vc1_mspel_pixels_tab[4]  = put_vc1_mspel_mc01_c;
    dsp->put_vc1_mspel_pixels_tab[5]  = put_vc1_mspel_mc11_c;
    dsp->put_vc1_mspel_pixels_tab[6]  = put_vc1_mspel_mc21_c;
    dsp->put_vc1_mspel_pixels_tab[7]  = put_vc1_mspel_mc31_c;
    dsp->put_vc1_mspel_pixels_tab[8]  = put_vc1_mspel_mc02_c;
    dsp->put_vc1_mspel_pixels_tab[9]  = put_vc1_mspel_mc12_c;
    dsp->put_vc1_mspel_pixels_tab[10] = put_vc1_mspel_mc22_c;
    dsp->put_vc1_mspel_pixels_tab[11] = put_vc1_mspel_mc32_c;
    dsp->put_vc1_mspel_pixels_tab[12] = put_vc1_mspel_mc03_c;
    dsp->put_vc1_mspel_pixels_tab[13] = put_vc1_mspel_mc13_c;
    dsp->put_vc1_mspel_pixels_tab[14] = put_vc1_mspel_mc23_c;
    dsp->put_vc1_mspel_pixels_tab[15] = put_vc1_mspel_mc33_c;

    dsp->avg_vc1_mspel_pixels_tab[0]  = avg_pixels8x8_c;
    dsp->avg_vc1_mspel_pixels_tab[1]  = avg_vc1_mspel_mc10_c;
    dsp->avg_vc1_mspel_pixels_tab[2]  = avg_vc1_mspel_mc20_c;
    dsp->avg_vc1_mspel_pixels_tab[3]  = avg_vc1_mspel_mc30_c;
    dsp->avg_vc1_mspel_pixels_tab[4]  = avg_vc1_mspel_mc01_c;
    dsp->avg_vc1_mspel_pixels_tab[5]  = avg_vc1_mspel_mc11_c;
    dsp->avg_vc1_mspel_pixels_tab[6]  = avg_vc1_mspel_mc21_c;
    dsp->avg_vc1_mspel_pixels_tab[7]  = avg_vc1_mspel_mc31_c;
    dsp->avg_vc1_mspel_pixels_tab[8]  = avg_vc1_mspel_mc02_c;
    dsp->avg_vc1_mspel_pixels_tab[9]  = avg_vc1_mspel_mc12_c;
    dsp->avg_vc1_mspel_pixels_tab[10] = avg_vc1_mspel_mc22_c;
    dsp->avg_vc1_mspel_pixels_tab[11] = avg_vc1_mspel_mc32_c;
    dsp->avg_vc1_mspel_pixels_tab[12] = avg_vc1_mspel_mc03_c;
    dsp->avg_vc1_mspel_pixels_tab[13] = avg_vc1_mspel_mc13_c;
    dsp->avg_vc1_mspel_pixels_tab[14] = avg_vc1_mspel_mc23_c;
    dsp->avg_vc1_mspel_pixels_tab[15] = avg_vc1_mspel_mc33_c;

    dsp->put_no_rnd_vc1_chroma_pixels_tab[0] = put_no_rnd_vc1_chroma_mc8_c;
    dsp->avg_no_rnd_vc1_chroma_pixels_tab[0] = avg_no_rnd_vc1_chroma_mc8_c;
    dsp->put_no_rnd_vc1_chroma_pixels_tab[1] = put_no_rnd_vc1_chroma_mc4_c;
    dsp->avg_no_rnd_vc1_chroma_pixels_tab[1] = avg_no_rnd_vc1_chroma_mc4_c;

#if CONFIG_WMV3IMAGE_DECODER || CONFIG_VC1IMAGE_DECODER
    dsp->sprite_h                 = sprite_h_c;
    dsp->sprite_v_single          = sprite_v_single_c;
    dsp->sprite_v_double_noscale  = sprite_v_double_noscale_c;
    dsp->sprite_v_double_onescale = sprite_v_double_onescale_c;
    dsp->sprite_v_double_twoscale = sprite_v_double_twoscale_c;
#endif /* CONFIG_WMV3IMAGE_DECODER || CONFIG_VC1IMAGE_DECODER */

    if (ARCH_AARCH64)
        ff_vc1dsp_init_aarch64(dsp);
    if (ARCH_ARM)
        ff_vc1dsp_init_arm(dsp);
    if (ARCH_PPC)
        ff_vc1dsp_init_ppc(dsp);
    if (ARCH_X86)
        ff_vc1dsp_init_x86(dsp);
}
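
/*
 * Illustrative usage sketch (not code from this file): the decoder fills
 * a VC1DSPContext once and then calls through the function pointers, so
 * any architecture-specific versions installed above are picked up
 * transparently, e.g.
 *
 *     VC1DSPContext dsp;
 *     ff_vc1dsp_init(&dsp);
 *     dsp.vc1_inv_trans_8x8(block);                // in-place 8x8 inverse transform
 *     dsp.vc1_v_loop_filter8(dest, linesize, pq);  // deblock one 8-pixel edge
 */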