/*
 * VC-1 and WMV3 decoder - DSP functions
 * Copyright (c) 2006 Konstantin Shishkov
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

/**
 * @file
 * VC-1 and WMV3 decoder
 */
#include "libavutil/avassert.h"
#include "libavutil/common.h"
#include "libavutil/intreadwrite.h"
#include "h264chroma.h"
#include "rnd_avg.h"
#include "vc1dsp.h"
/* Apply overlap transform to horizontal edge */
static void vc1_v_overlap_c(uint8_t *src, int stride)
{
    int i;
    int a, b, c, d;
    int d1, d2;
    int rnd = 1;
    for (i = 0; i < 8; i++) {
        a = src[-2 * stride];
        b = src[-stride];
        c = src[0];
        d = src[stride];
        d1 = (a - d + 3 + rnd) >> 3;
        d2 = (a - d + b - c + 4 - rnd) >> 3;

        src[-2 * stride] = a - d1;
        src[-stride]     = av_clip_uint8(b - d2);
        src[0]           = av_clip_uint8(c + d2);
        src[stride]      = d + d1;
        src++;
        rnd = !rnd;
    }
}
/* Apply overlap transform to vertical edge */
static void vc1_h_overlap_c(uint8_t *src, int stride)
{
    int i;
    int a, b, c, d;
    int d1, d2;
    int rnd = 1;
    for (i = 0; i < 8; i++) {
        a = src[-2];
        b = src[-1];
        c = src[0];
        d = src[1];
        d1 = (a - d + 3 + rnd) >> 3;
        d2 = (a - d + b - c + 4 - rnd) >> 3;

        src[-2] = a - d1;
        src[-1] = av_clip_uint8(b - d2);
        src[0]  = av_clip_uint8(c + d2);
        src[1]  = d + d1;
        src += stride;
        rnd = !rnd;
    }
}
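
/* Overlap smoothing across a horizontal edge in the 16-bit coefficient
 * domain: "top" and "bottom" are vertically adjacent 8x8 blocks, and the two
 * rows on each side of the edge (top[48]/top[56], bottom[0]/bottom[8]) are
 * filtered with alternating rounding constants 4 and 3. */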
static void vc1_v_s_overlap_c(int16_t *top, int16_t *bottom)
{
    int i;
    int a, b, c, d;
    int d1, d2;
    int rnd1 = 4, rnd2 = 3;
    for (i = 0; i < 8; i++) {
        a = top[48];
        b = top[56];
        c = bottom[0];
        d = bottom[8];
        d1 = a - d;
        d2 = a - d + b - c;

        top[48]   = ((a << 3) - d1 + rnd1) >> 3;
        top[56]   = ((b << 3) - d2 + rnd2) >> 3;
        bottom[0] = ((c << 3) + d2 + rnd1) >> 3;
        bottom[8] = ((d << 3) + d1 + rnd2) >> 3;

        top++;
        bottom++;
        FFSWAP(int, rnd1, rnd2);
    }
}
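
/* Same smoothing across a vertical edge: "left" and "right" are horizontally
 * adjacent 8x8 blocks, so the last two columns of the left block (offsets 6
 * and 7) and the first two columns of the right block (offsets 0 and 1) are
 * filtered. */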
static void vc1_h_s_overlap_c(int16_t *left, int16_t *right)
{
    int i;
    int a, b, c, d;
    int d1, d2;
    int rnd1 = 4, rnd2 = 3;
    for (i = 0; i < 8; i++) {
        a = left[6];
        b = left[7];
        c = right[0];
        d = right[1];
        d1 = a - d;
        d2 = a - d + b - c;

        left[6]  = ((a << 3) - d1 + rnd1) >> 3;
        left[7]  = ((b << 3) - d2 + rnd2) >> 3;
        right[0] = ((c << 3) + d2 + rnd1) >> 3;
        right[1] = ((d << 3) + d1 + rnd2) >> 3;

        left  += 8;
        right += 8;
        FFSWAP(int, rnd1, rnd2);
    }
}
/**
 * VC-1 in-loop deblocking filter for one line
 * @param src pointer to the first pixel past the block edge; the filter reads
 *            4 pixels on each side of the edge (src[-4 * stride] .. src[3 * stride])
 * @param stride step from one pixel to the next across the edge
 * @param pq block quantizer
 * @return whether the other 3 pixel pairs of the segment should be filtered as well
 */
static av_always_inline int vc1_filter_line(uint8_t *src, int stride, int pq)
{
    int a0 = (2 * (src[-2 * stride] - src[1 * stride]) -
              5 * (src[-1 * stride] - src[0 * stride]) + 4) >> 3;
    int a0_sign = a0 >> 31;        /* Store sign */

    a0 = (a0 ^ a0_sign) - a0_sign; /* a0 = FFABS(a0); */
    if (a0 < pq) {
        int a1 = FFABS((2 * (src[-4 * stride] - src[-1 * stride]) -
                        5 * (src[-3 * stride] - src[-2 * stride]) + 4) >> 3);
        int a2 = FFABS((2 * (src[ 0 * stride] - src[ 3 * stride]) -
                        5 * (src[ 1 * stride] - src[ 2 * stride]) + 4) >> 3);
        if (a1 < a0 || a2 < a0) {
            int clip      = src[-1 * stride] - src[0 * stride];
            int clip_sign = clip >> 31;

            clip = ((clip ^ clip_sign) - clip_sign) >> 1;
            if (clip) {
                int a3     = FFMIN(a1, a2);
                int d      = 5 * (a3 - a0);
                int d_sign = (d >> 31);

                d = ((d ^ d_sign) - d_sign) >> 3;
                /* ... */
                if (d_sign ^ clip_sign)
                    d = 0;
                else {
                    d = FFMIN(d, clip);
                    d = (d ^ d_sign) - d_sign; /* Restore sign */
                    src[-1 * stride] = av_clip_uint8(src[-1 * stride] - d);
                    src[ 0 * stride] = av_clip_uint8(src[ 0 * stride] + d);
                }
            }
        }
    }
    /* ... */
}
/**
 * VC-1 in-loop deblocking filter
 * @param src pointer to the first pixel of the edge to filter
 * @param step distance between adjacent pixels along the edge
 * @param stride distance between adjacent pixels across the edge
 * @param len edge length to filter (4 or 8 pixels)
 * @param pq block quantizer
 */
static inline void vc1_loop_filter(uint8_t *src, int step, int stride,
                                   int len, int pq)
{
    int i;
    int filt3;

    for (i = 0; i < len; i += 4) {
        filt3 = vc1_filter_line(src + 2 * step, stride, pq);
        if (filt3) {
            vc1_filter_line(src + 0 * step, stride, pq);
            vc1_filter_line(src + 1 * step, stride, pq);
            vc1_filter_line(src + 3 * step, stride, pq);
        }
        src += 4 * step;
    }
}
static void vc1_v_loop_filter4_c(uint8_t *src, int stride, int pq)
{
    vc1_loop_filter(src, 1, stride, 4, pq);
}

static void vc1_h_loop_filter4_c(uint8_t *src, int stride, int pq)
{
    vc1_loop_filter(src, stride, 1, 4, pq);
}

static void vc1_v_loop_filter8_c(uint8_t *src, int stride, int pq)
{
    vc1_loop_filter(src, 1, stride, 8, pq);
}

static void vc1_h_loop_filter8_c(uint8_t *src, int stride, int pq)
{
    vc1_loop_filter(src, stride, 1, 8, pq);
}

static void vc1_v_loop_filter16_c(uint8_t *src, int stride, int pq)
{
    vc1_loop_filter(src, 1, stride, 16, pq);
}

static void vc1_h_loop_filter16_c(uint8_t *src, int stride, int pq)
{
    vc1_loop_filter(src, stride, 1, 16, pq);
}
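
/* The *_dc variants below cover the common case where only the DC coefficient
 * of a block is non-zero: the inverse transform then degenerates to adding a
 * single scaled value to every pixel of the block. */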
/* Do inverse transform on 8x8 block */
static void vc1_inv_trans_8x8_dc_c(uint8_t *dest, int linesize, int16_t *block)
{
    int i;
    int dc = block[0];

    dc = (3 * dc +  1) >> 1;
    dc = (3 * dc + 16) >> 5;

    for (i = 0; i < 8; i++) {
        dest[0] = av_clip_uint8(dest[0] + dc);
        dest[1] = av_clip_uint8(dest[1] + dc);
        dest[2] = av_clip_uint8(dest[2] + dc);
        dest[3] = av_clip_uint8(dest[3] + dc);
        dest[4] = av_clip_uint8(dest[4] + dc);
        dest[5] = av_clip_uint8(dest[5] + dc);
        dest[6] = av_clip_uint8(dest[6] + dc);
        dest[7] = av_clip_uint8(dest[7] + dc);
        dest += linesize;
    }
}
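
/* Full 8x8 inverse transform: one 8-point pass into temp[] (rounding constant
 * 4, >> 3), then the transposed 8-point pass back into block[] (rounding
 * constant 64, >> 7, with an extra +1 on the last four outputs). */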
static void vc1_inv_trans_8x8_c(int16_t block[64])
{
    int i;
    register int t1, t2, t3, t4, t5, t6, t7, t8;
    int16_t *src, *dst, temp[64];

    src = block;
    dst = temp;
    for (i = 0; i < 8; i++) {
        t1 = 12 * (src[ 0] + src[32]) + 4;
        t2 = 12 * (src[ 0] - src[32]) + 4;
        t3 = 16 * src[16] +  6 * src[48];
        t4 =  6 * src[16] - 16 * src[48];

        t5 = t1 + t3;
        t6 = t2 + t4;
        t7 = t2 - t4;
        t8 = t1 - t3;

        t1 = 16 * src[ 8] + 15 * src[24] +  9 * src[40] +  4 * src[56];
        t2 = 15 * src[ 8] -  4 * src[24] - 16 * src[40] -  9 * src[56];
        t3 =  9 * src[ 8] - 16 * src[24] +  4 * src[40] + 15 * src[56];
        t4 =  4 * src[ 8] -  9 * src[24] + 15 * src[40] - 16 * src[56];

        dst[0] = (t5 + t1) >> 3;
        dst[1] = (t6 + t2) >> 3;
        dst[2] = (t7 + t3) >> 3;
        dst[3] = (t8 + t4) >> 3;
        dst[4] = (t8 - t4) >> 3;
        dst[5] = (t7 - t3) >> 3;
        dst[6] = (t6 - t2) >> 3;
        dst[7] = (t5 - t1) >> 3;

        src += 1;
        dst += 8;
    }

    src = temp;
    dst = block;
    for (i = 0; i < 8; i++) {
        t1 = 12 * (src[ 0] + src[32]) + 64;
        t2 = 12 * (src[ 0] - src[32]) + 64;
        t3 = 16 * src[16] +  6 * src[48];
        t4 =  6 * src[16] - 16 * src[48];

        t5 = t1 + t3;
        t6 = t2 + t4;
        t7 = t2 - t4;
        t8 = t1 - t3;

        t1 = 16 * src[ 8] + 15 * src[24] +  9 * src[40] +  4 * src[56];
        t2 = 15 * src[ 8] -  4 * src[24] - 16 * src[40] -  9 * src[56];
        t3 =  9 * src[ 8] - 16 * src[24] +  4 * src[40] + 15 * src[56];
        t4 =  4 * src[ 8] -  9 * src[24] + 15 * src[40] - 16 * src[56];

        dst[ 0] = (t5 + t1) >> 7;
        dst[ 8] = (t6 + t2) >> 7;
        dst[16] = (t7 + t3) >> 7;
        dst[24] = (t8 + t4) >> 7;
        dst[32] = (t8 - t4 + 1) >> 7;
        dst[40] = (t7 - t3 + 1) >> 7;
        dst[48] = (t6 - t2 + 1) >> 7;
        dst[56] = (t5 - t1 + 1) >> 7;

        src++;
        dst++;
    }
}
/* Do inverse transform on 8x4 part of block */
static void vc1_inv_trans_8x4_dc_c(uint8_t *dest, int linesize, int16_t *block)
{
    int i;
    int dc = block[0];

    dc = ( 3 * dc +  1) >> 1;
    dc = (17 * dc + 64) >> 7;

    for (i = 0; i < 4; i++) {
        dest[0] = av_clip_uint8(dest[0] + dc);
        dest[1] = av_clip_uint8(dest[1] + dc);
        dest[2] = av_clip_uint8(dest[2] + dc);
        dest[3] = av_clip_uint8(dest[3] + dc);
        dest[4] = av_clip_uint8(dest[4] + dc);
        dest[5] = av_clip_uint8(dest[5] + dc);
        dest[6] = av_clip_uint8(dest[6] + dc);
        dest[7] = av_clip_uint8(dest[7] + dc);
        dest += linesize;
    }
}
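
/* 8x4 inverse transform: the 8-point pass is applied to each of the 4 rows in
 * place, then the 4-point transform (coefficients 17, 22, 10) runs down each
 * column and the result is added directly to the destination pixels. */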
static void vc1_inv_trans_8x4_c(uint8_t *dest, int linesize, int16_t *block)
{
    int i;
    register int t1, t2, t3, t4, t5, t6, t7, t8;
    int16_t *src, *dst;

    src = block;
    dst = block;

    for (i = 0; i < 4; i++) {
        t1 = 12 * (src[0] + src[4]) + 4;
        t2 = 12 * (src[0] - src[4]) + 4;
        t3 = 16 * src[2] +  6 * src[6];
        t4 =  6 * src[2] - 16 * src[6];

        t5 = t1 + t3;
        t6 = t2 + t4;
        t7 = t2 - t4;
        t8 = t1 - t3;

        t1 = 16 * src[1] + 15 * src[3] +  9 * src[5] +  4 * src[7];
        t2 = 15 * src[1] -  4 * src[3] - 16 * src[5] -  9 * src[7];
        t3 =  9 * src[1] - 16 * src[3] +  4 * src[5] + 15 * src[7];
        t4 =  4 * src[1] -  9 * src[3] + 15 * src[5] - 16 * src[7];

        dst[0] = (t5 + t1) >> 3;
        dst[1] = (t6 + t2) >> 3;
        dst[2] = (t7 + t3) >> 3;
        dst[3] = (t8 + t4) >> 3;
        dst[4] = (t8 - t4) >> 3;
        dst[5] = (t7 - t3) >> 3;
        dst[6] = (t6 - t2) >> 3;
        dst[7] = (t5 - t1) >> 3;

        src += 8;
        dst += 8;
    }

    src = block;
    for (i = 0; i < 8; i++) {
        t1 = 17 * (src[ 0] + src[16]) + 64;
        t2 = 17 * (src[ 0] - src[16]) + 64;
        t3 = 22 * src[ 8] + 10 * src[24];
        t4 = 22 * src[24] - 10 * src[ 8];

        dest[0 * linesize] = av_clip_uint8(dest[0 * linesize] + ((t1 + t3) >> 7));
        dest[1 * linesize] = av_clip_uint8(dest[1 * linesize] + ((t2 - t4) >> 7));
        dest[2 * linesize] = av_clip_uint8(dest[2 * linesize] + ((t2 + t4) >> 7));
        dest[3 * linesize] = av_clip_uint8(dest[3 * linesize] + ((t1 - t3) >> 7));

        src++;
        dest++;
    }
}
/* Do inverse transform on 4x8 parts of block */
static void vc1_inv_trans_4x8_dc_c(uint8_t *dest, int linesize, int16_t *block)
{
    int i;
    int dc = block[0];

    dc = (17 * dc +  4) >> 3;
    dc = (12 * dc + 64) >> 7;

    for (i = 0; i < 8; i++) {
        dest[0] = av_clip_uint8(dest[0] + dc);
        dest[1] = av_clip_uint8(dest[1] + dc);
        dest[2] = av_clip_uint8(dest[2] + dc);
        dest[3] = av_clip_uint8(dest[3] + dc);
        dest += linesize;
    }
}
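
/* 4x8 inverse transform: the 4-point pass is applied to each of the 8 rows in
 * place, then the 8-point pass runs down each of the 4 columns and is added to
 * the destination. */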
static void vc1_inv_trans_4x8_c(uint8_t *dest, int linesize, int16_t *block)
{
    int i;
    register int t1, t2, t3, t4, t5, t6, t7, t8;
    int16_t *src, *dst;

    src = block;
    dst = block;

    for (i = 0; i < 8; i++) {
        t1 = 17 * (src[0] + src[2]) + 4;
        t2 = 17 * (src[0] - src[2]) + 4;
        t3 = 22 * src[1] + 10 * src[3];
        t4 = 22 * src[3] - 10 * src[1];

        dst[0] = (t1 + t3) >> 3;
        dst[1] = (t2 - t4) >> 3;
        dst[2] = (t2 + t4) >> 3;
        dst[3] = (t1 - t3) >> 3;

        src += 8;
        dst += 8;
    }

    src = block;
    for (i = 0; i < 4; i++) {
        t1 = 12 * (src[ 0] + src[32]) + 64;
        t2 = 12 * (src[ 0] - src[32]) + 64;
        t3 = 16 * src[16] +  6 * src[48];
        t4 =  6 * src[16] - 16 * src[48];

        t5 = t1 + t3;
        t6 = t2 + t4;
        t7 = t2 - t4;
        t8 = t1 - t3;

        t1 = 16 * src[ 8] + 15 * src[24] +  9 * src[40] +  4 * src[56];
        t2 = 15 * src[ 8] -  4 * src[24] - 16 * src[40] -  9 * src[56];
        t3 =  9 * src[ 8] - 16 * src[24] +  4 * src[40] + 15 * src[56];
        t4 =  4 * src[ 8] -  9 * src[24] + 15 * src[40] - 16 * src[56];

        dest[0 * linesize] = av_clip_uint8(dest[0 * linesize] + ((t5 + t1) >> 7));
        dest[1 * linesize] = av_clip_uint8(dest[1 * linesize] + ((t6 + t2) >> 7));
        dest[2 * linesize] = av_clip_uint8(dest[2 * linesize] + ((t7 + t3) >> 7));
        dest[3 * linesize] = av_clip_uint8(dest[3 * linesize] + ((t8 + t4) >> 7));
        dest[4 * linesize] = av_clip_uint8(dest[4 * linesize] + ((t8 - t4 + 1) >> 7));
        dest[5 * linesize] = av_clip_uint8(dest[5 * linesize] + ((t7 - t3 + 1) >> 7));
        dest[6 * linesize] = av_clip_uint8(dest[6 * linesize] + ((t6 - t2 + 1) >> 7));
        dest[7 * linesize] = av_clip_uint8(dest[7 * linesize] + ((t5 - t1 + 1) >> 7));

        src++;
        dest++;
    }
}
/* Do inverse transform on 4x4 part of block */
static void vc1_inv_trans_4x4_dc_c(uint8_t *dest, int linesize, int16_t *block)
{
    int i;
    int dc = block[0];

    dc = (17 * dc +  4) >> 3;
    dc = (17 * dc + 64) >> 7;

    for (i = 0; i < 4; i++) {
        dest[0] = av_clip_uint8(dest[0] + dc);
        dest[1] = av_clip_uint8(dest[1] + dc);
        dest[2] = av_clip_uint8(dest[2] + dc);
        dest[3] = av_clip_uint8(dest[3] + dc);
        dest += linesize;
    }
}
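
/* 4x4 inverse transform: 4-point pass on the rows in place, then 4-point pass
 * down the columns, added to the destination. */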
static void vc1_inv_trans_4x4_c(uint8_t *dest, int linesize, int16_t *block)
{
    int i;
    register int t1, t2, t3, t4;
    int16_t *src, *dst;

    src = block;
    dst = block;

    for (i = 0; i < 4; i++) {
        t1 = 17 * (src[0] + src[2]) + 4;
        t2 = 17 * (src[0] - src[2]) + 4;
        t3 = 22 * src[1] + 10 * src[3];
        t4 = 22 * src[3] - 10 * src[1];

        dst[0] = (t1 + t3) >> 3;
        dst[1] = (t2 - t4) >> 3;
        dst[2] = (t2 + t4) >> 3;
        dst[3] = (t1 - t3) >> 3;

        src += 8;
        dst += 8;
    }

    src = block;
    for (i = 0; i < 4; i++) {
        t1 = 17 * (src[0] + src[16]) + 64;
        t2 = 17 * (src[0] - src[16]) + 64;
        t3 = 22 * src[8] + 10 * src[24];
        t4 = 22 * src[24] - 10 * src[8];

        dest[0 * linesize] = av_clip_uint8(dest[0 * linesize] + ((t1 + t3) >> 7));
        dest[1 * linesize] = av_clip_uint8(dest[1 * linesize] + ((t2 - t4) >> 7));
        dest[2 * linesize] = av_clip_uint8(dest[2 * linesize] + ((t2 + t4) >> 7));
        dest[3 * linesize] = av_clip_uint8(dest[3 * linesize] + ((t1 - t3) >> 7));

        src++;
        dest++;
    }
}
/* motion compensation functions */

/* 1-D MSPEL filter kept at 16-bit precision, used when both a horizontal and
 * a vertical filter have to be applied */
#define VC1_MSPEL_FILTER_16B(DIR, TYPE) \
static av_always_inline int vc1_mspel_ ## DIR ## _filter_16bits(const TYPE *src, \
                                                                 int stride, \
                                                                 int mode) \
{ \
    switch (mode) { \
    case 0: /* no shift - should not occur */ \
        return 0; \
    case 1: /* 1/4 shift */ \
        return -4 * src[-stride] + 53 * src[0] + \
               18 * src[stride] - 3 * src[stride * 2]; \
    case 2: /* 1/2 shift */ \
        return -1 * src[-stride] + 9 * src[0] + \
                9 * src[stride] - 1 * src[stride * 2]; \
    case 3: /* 3/4 shift */ \
        return -3 * src[-stride] + 18 * src[0] + \
               53 * src[stride] - 4 * src[stride * 2]; \
    } \
    return 0; /* should not occur */ \
}
VC1_MSPEL_FILTER_16B(ver, uint8_t)
VC1_MSPEL_FILTER_16B(hor, int16_t)
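
/* The three filter modes are the VC-1 bicubic taps for the 1/4, 1/2 and 3/4
 * pel positions: (-4, 53, 18, -3), (-1, 9, 9, -1) and (-3, 18, 53, -4). The
 * 1/2-pel taps sum to 16 while the others sum to 64, which is why
 * vc1_mspel_filter() below normalizes with different shifts (4 vs 6). */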
/* Filter used to interpolate fractional pel values */
static av_always_inline int vc1_mspel_filter(const uint8_t *src, int stride,
                                             int mode, int r)
{
    switch (mode) {
    case 0: // no shift
        return src[0];
    case 1: // 1/4 shift
        return (-4 * src[-stride] + 53 * src[0] +
                18 * src[stride] - 3 * src[stride * 2] + 32 - r) >> 6;
    case 2: // 1/2 shift
        return (-1 * src[-stride] + 9 * src[0] +
                 9 * src[stride] - 1 * src[stride * 2] + 8 - r) >> 4;
    case 3: // 3/4 shift
        return (-3 * src[-stride] + 18 * src[0] +
                53 * src[stride] - 4 * src[stride * 2] + 32 - r) >> 6;
    }
    return 0; // should not occur
}
/* Function used to do motion compensation with bicubic interpolation */
#define VC1_MSPEL_MC(OP, OP4, OPNAME) \
static av_always_inline void OPNAME ## vc1_mspel_mc(uint8_t *dst, \
                                                    const uint8_t *src, \
                                                    ptrdiff_t stride, \
                                                    int hmode, \
                                                    int vmode, \
                                                    int rnd) \
{ \
    int i, j; \
 \
    if (vmode) { /* a vertical filter is needed */ \
        int r; \
 \
        if (hmode) { /* horizontal filter needed too: vertical pass goes to tmp first */ \
            static const int shift_value[] = { 0, 5, 1, 5 }; \
            int shift = (shift_value[hmode] + shift_value[vmode]) >> 1; \
            int16_t tmp[11 * 8], *tptr = tmp; \
 \
            r = (1 << (shift - 1)) + rnd - 1; \
 \
            src -= 1; \
            for (j = 0; j < 8; j++) { \
                for (i = 0; i < 11; i++) \
                    tptr[i] = (vc1_mspel_ver_filter_16bits(src + i, stride, vmode) + r) >> shift; \
                src  += stride; \
                tptr += 11; \
            } \
 \
            r    = 64 - rnd; \
            tptr = tmp + 1; \
            for (j = 0; j < 8; j++) { \
                for (i = 0; i < 8; i++) \
                    OP(dst[i], (vc1_mspel_hor_filter_16bits(tptr + i, 1, hmode) + r) >> 7); \
                dst  += stride; \
                tptr += 11; \
            } \
 \
            return; \
        } else { /* No horizontal filter, output 8 lines to dst */ \
            r = 1 - rnd; \
 \
            for (j = 0; j < 8; j++) { \
                for (i = 0; i < 8; i++) \
                    OP(dst[i], vc1_mspel_filter(src + i, stride, vmode, r)); \
                src += stride; \
                dst += stride; \
            } \
            return; \
        } \
    } \
 \
    /* Horizontal mode with no vertical mode */ \
    for (j = 0; j < 8; j++) { \
        for (i = 0; i < 8; i++) \
            OP(dst[i], vc1_mspel_filter(src + i, 1, hmode, rnd)); \
        dst += stride; \
        src += stride; \
    } \
} \
static void OPNAME ## pixels8x8_c(uint8_t *block, const uint8_t *pixels, ptrdiff_t line_size, int rnd){ \
    int i; \
    for (i = 0; i < 8; i++) { \
        OP4(*(uint32_t*)(block    ), AV_RN32(pixels    )); \
        OP4(*(uint32_t*)(block + 4), AV_RN32(pixels + 4)); \
        pixels += line_size; \
        block  += line_size; \
    } \
}
#define op_put(a, b)  a = av_clip_uint8(b)
#define op_avg(a, b)  a = (a + av_clip_uint8(b) + 1) >> 1
#define op4_avg(a, b) a = rnd_avg32(a, b)
#define op4_put(a, b) a = b

VC1_MSPEL_MC(op_put, op4_put, put_)
VC1_MSPEL_MC(op_avg, op4_avg, avg_)
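
/* Both a put_ and an avg_ variant of every MSPEL function are generated from
 * the same template: op_put stores the clipped result, op_avg rounds it
 * against the pixel already present in the destination. */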
/* pixel functions - really are entry points to vc1_mspel_mc */

#define PUT_VC1_MSPEL(a, b) \
static void put_vc1_mspel_mc ## a ## b ## _c(uint8_t *dst, \
                                             const uint8_t *src, \
                                             ptrdiff_t stride, int rnd) \
{ \
    put_vc1_mspel_mc(dst, src, stride, a, b, rnd); \
} \
static void avg_vc1_mspel_mc ## a ## b ## _c(uint8_t *dst, \
                                             const uint8_t *src, \
                                             ptrdiff_t stride, int rnd) \
{ \
    avg_vc1_mspel_mc(dst, src, stride, a, b, rnd); \
}
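
/* In the generated names put/avg_vc1_mspel_mcXY_c, X is the horizontal and Y
 * the vertical quarter-pel phase handed to vc1_mspel_mc() as hmode/vmode. */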
#define chroma_mc(a) \
    ((A * src[a] + B * src[a + 1] + \
      C * src[stride + a] + D * src[stride + a + 1] + 32 - 4) >> 6)
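
/* Bilinear chroma interpolation: the weights A..D sum to 64, and the bias of
 * 32 - 4 = 28 (instead of 32) is what gives these functions the "_no_rnd_"
 * (no rounding) behaviour their names refer to. */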
static void put_no_rnd_vc1_chroma_mc8_c(uint8_t *dst /* align 8 */,
                                        uint8_t *src /* align 1 */,
                                        int stride, int h, int x, int y)
{
    const int A = (8 - x) * (8 - y);
    const int B = (    x) * (8 - y);
    const int C = (8 - x) * (    y);
    const int D = (    x) * (    y);
    int i;

    av_assert2(x < 8 && y < 8 && x >= 0 && y >= 0);

    for (i = 0; i < h; i++) {
        dst[0] = chroma_mc(0);
        dst[1] = chroma_mc(1);
        dst[2] = chroma_mc(2);
        dst[3] = chroma_mc(3);
        dst[4] = chroma_mc(4);
        dst[5] = chroma_mc(5);
        dst[6] = chroma_mc(6);
        dst[7] = chroma_mc(7);
        dst += stride;
        src += stride;
    }
}
static void put_no_rnd_vc1_chroma_mc4_c(uint8_t *dst, uint8_t *src,
                                        int stride, int h, int x, int y)
{
    const int A = (8 - x) * (8 - y);
    const int B = (    x) * (8 - y);
    const int C = (8 - x) * (    y);
    const int D = (    x) * (    y);
    int i;

    av_assert2(x < 8 && y < 8 && x >= 0 && y >= 0);

    for (i = 0; i < h; i++) {
        dst[0] = chroma_mc(0);
        dst[1] = chroma_mc(1);
        dst[2] = chroma_mc(2);
        dst[3] = chroma_mc(3);
        dst += stride;
        src += stride;
    }
}
#define avg2(a, b) (((a) + (b) + 1) >> 1)
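
/* avg2() folds the interpolated value into the pixel already in dst with
 * rounding, giving the averaging versions of the two functions above. */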
static void avg_no_rnd_vc1_chroma_mc8_c(uint8_t *dst /* align 8 */,
                                        uint8_t *src /* align 1 */,
                                        int stride, int h, int x, int y)
{
    const int A = (8 - x) * (8 - y);
    const int B = (    x) * (8 - y);
    const int C = (8 - x) * (    y);
    const int D = (    x) * (    y);
    int i;

    av_assert2(x < 8 && y < 8 && x >= 0 && y >= 0);

    for (i = 0; i < h; i++) {
        dst[0] = avg2(dst[0], chroma_mc(0));
        dst[1] = avg2(dst[1], chroma_mc(1));
        dst[2] = avg2(dst[2], chroma_mc(2));
        dst[3] = avg2(dst[3], chroma_mc(3));
        dst[4] = avg2(dst[4], chroma_mc(4));
        dst[5] = avg2(dst[5], chroma_mc(5));
        dst[6] = avg2(dst[6], chroma_mc(6));
        dst[7] = avg2(dst[7], chroma_mc(7));
        dst += stride;
        src += stride;
    }
}
static void avg_no_rnd_vc1_chroma_mc4_c(uint8_t *dst /* align 8 */,
                                        uint8_t *src /* align 1 */,
                                        int stride, int h, int x, int y)
{
    const int A = (8 - x) * (8 - y);
    const int B = (    x) * (8 - y);
    const int C = (8 - x) * (    y);
    const int D = (    x) * (    y);
    int i;

    av_assert2(x < 8 && y < 8 && x >= 0 && y >= 0);

    for (i = 0; i < h; i++) {
        dst[0] = avg2(dst[0], chroma_mc(0));
        dst[1] = avg2(dst[1], chroma_mc(1));
        dst[2] = avg2(dst[2], chroma_mc(2));
        dst[3] = avg2(dst[3], chroma_mc(3));
        dst += stride;
        src += stride;
    }
}
#if CONFIG_WMV3IMAGE_DECODER || CONFIG_VC1IMAGE_DECODER

static void sprite_h_c(uint8_t *dst, const uint8_t *src, int offset,
                       int advance, int count)
{
    while (count--) {
        int a = src[(offset >> 16)];
        int b = src[(offset >> 16) + 1];
        *dst++ = a + ((b - a) * (offset & 0xFFFF) >> 16);
        offset += advance;
    }
}
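
/* Vertical sprite interpolation template: each output pixel blends two source
 * lines of the first sprite (src1a/src1b) by a 16.16 fixed-point offset,
 * optionally does the same for a second sprite (src2a/src2b), and finally
 * mixes the two sprites with the 16.16 "alpha" weight. The wrappers below pin
 * the unused arguments for the single/double and scaled/unscaled cases. */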
static av_always_inline void sprite_v_template(uint8_t *dst,
                                               const uint8_t *src1a,
                                               const uint8_t *src1b,
                                               int offset1,
                                               int two_sprites,
                                               const uint8_t *src2a,
                                               const uint8_t *src2b,
                                               int offset2,
                                               int alpha, int scaled,
                                               int width)
{
    /* ... */
            a1 = a1 + ((b1 - a1) * offset1 >> 16);
    /* ... */
            a2 = a2 + ((b2 - a2) * offset2 >> 16);
    /* ... */
            a1 = a1 + ((a2 - a1) * alpha >> 16);
    /* ... */
}
static void sprite_v_single_c(uint8_t *dst, const uint8_t *src1a,
                              const uint8_t *src1b,
                              int offset, int width)
{
    sprite_v_template(dst, src1a, src1b, offset, 0, NULL, NULL, 0, 0, 1, width);
}

static void sprite_v_double_noscale_c(uint8_t *dst, const uint8_t *src1a,
                                      const uint8_t *src2a,
                                      int alpha, int width)
{
    sprite_v_template(dst, src1a, NULL, 0, 1, src2a, NULL, 0, alpha, 0, width);
}

static void sprite_v_double_onescale_c(uint8_t *dst,
                                       const uint8_t *src1a,
                                       const uint8_t *src1b,
                                       int offset1,
                                       const uint8_t *src2a,
                                       int alpha, int width)
{
    sprite_v_template(dst, src1a, src1b, offset1, 1, src2a, NULL, 0, alpha, 1,
                      width);
}

static void sprite_v_double_twoscale_c(uint8_t *dst,
                                       const uint8_t *src1a,
                                       const uint8_t *src1b,
                                       int offset1,
                                       const uint8_t *src2a,
                                       const uint8_t *src2b,
                                       int offset2,
                                       int alpha, int width)
{
    sprite_v_template(dst, src1a, src1b, offset1, 1, src2a, src2b, offset2,
                      alpha, 2, width);
}
#endif /* CONFIG_WMV3IMAGE_DECODER || CONFIG_VC1IMAGE_DECODER */
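
/*
 * Fill the VC1DSPContext with the C implementations below; the
 * architecture-specific init functions called at the end may then replace
 * individual pointers with optimized versions. A caller would use it roughly
 * like this (illustrative sketch only):
 *
 *     VC1DSPContext dsp;
 *     ff_vc1dsp_init(&dsp);
 *     dsp.vc1_inv_trans_8x8(block);
 *     dsp.vc1_v_loop_filter8(dest, linesize, pq);
 */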
av_cold void ff_vc1dsp_init(VC1DSPContext *dsp)
{
    dsp->vc1_inv_trans_8x8    = vc1_inv_trans_8x8_c;
    dsp->vc1_inv_trans_4x8    = vc1_inv_trans_4x8_c;
    dsp->vc1_inv_trans_8x4    = vc1_inv_trans_8x4_c;
    dsp->vc1_inv_trans_4x4    = vc1_inv_trans_4x4_c;
    dsp->vc1_inv_trans_8x8_dc = vc1_inv_trans_8x8_dc_c;
    dsp->vc1_inv_trans_4x8_dc = vc1_inv_trans_4x8_dc_c;
    dsp->vc1_inv_trans_8x4_dc = vc1_inv_trans_8x4_dc_c;
    dsp->vc1_inv_trans_4x4_dc = vc1_inv_trans_4x4_dc_c;

    dsp->vc1_h_overlap   = vc1_h_overlap_c;
    dsp->vc1_v_overlap   = vc1_v_overlap_c;
    dsp->vc1_h_s_overlap = vc1_h_s_overlap_c;
    dsp->vc1_v_s_overlap = vc1_v_s_overlap_c;

    dsp->vc1_v_loop_filter4  = vc1_v_loop_filter4_c;
    dsp->vc1_h_loop_filter4  = vc1_h_loop_filter4_c;
    dsp->vc1_v_loop_filter8  = vc1_v_loop_filter8_c;
    dsp->vc1_h_loop_filter8  = vc1_h_loop_filter8_c;
    dsp->vc1_v_loop_filter16 = vc1_v_loop_filter16_c;
    dsp->vc1_h_loop_filter16 = vc1_h_loop_filter16_c;

    dsp->put_vc1_mspel_pixels_tab[0]  = put_pixels8x8_c;
    dsp->put_vc1_mspel_pixels_tab[1]  = put_vc1_mspel_mc10_c;
    dsp->put_vc1_mspel_pixels_tab[2]  = put_vc1_mspel_mc20_c;
    dsp->put_vc1_mspel_pixels_tab[3]  = put_vc1_mspel_mc30_c;
    dsp->put_vc1_mspel_pixels_tab[4]  = put_vc1_mspel_mc01_c;
    dsp->put_vc1_mspel_pixels_tab[5]  = put_vc1_mspel_mc11_c;
    dsp->put_vc1_mspel_pixels_tab[6]  = put_vc1_mspel_mc21_c;
    dsp->put_vc1_mspel_pixels_tab[7]  = put_vc1_mspel_mc31_c;
    dsp->put_vc1_mspel_pixels_tab[8]  = put_vc1_mspel_mc02_c;
    dsp->put_vc1_mspel_pixels_tab[9]  = put_vc1_mspel_mc12_c;
    dsp->put_vc1_mspel_pixels_tab[10] = put_vc1_mspel_mc22_c;
    dsp->put_vc1_mspel_pixels_tab[11] = put_vc1_mspel_mc32_c;
    dsp->put_vc1_mspel_pixels_tab[12] = put_vc1_mspel_mc03_c;
    dsp->put_vc1_mspel_pixels_tab[13] = put_vc1_mspel_mc13_c;
    dsp->put_vc1_mspel_pixels_tab[14] = put_vc1_mspel_mc23_c;
    dsp->put_vc1_mspel_pixels_tab[15] = put_vc1_mspel_mc33_c;

    dsp->avg_vc1_mspel_pixels_tab[0]  = avg_pixels8x8_c;
    dsp->avg_vc1_mspel_pixels_tab[1]  = avg_vc1_mspel_mc10_c;
    dsp->avg_vc1_mspel_pixels_tab[2]  = avg_vc1_mspel_mc20_c;
    dsp->avg_vc1_mspel_pixels_tab[3]  = avg_vc1_mspel_mc30_c;
    dsp->avg_vc1_mspel_pixels_tab[4]  = avg_vc1_mspel_mc01_c;
    dsp->avg_vc1_mspel_pixels_tab[5]  = avg_vc1_mspel_mc11_c;
    dsp->avg_vc1_mspel_pixels_tab[6]  = avg_vc1_mspel_mc21_c;
    dsp->avg_vc1_mspel_pixels_tab[7]  = avg_vc1_mspel_mc31_c;
    dsp->avg_vc1_mspel_pixels_tab[8]  = avg_vc1_mspel_mc02_c;
    dsp->avg_vc1_mspel_pixels_tab[9]  = avg_vc1_mspel_mc12_c;
    dsp->avg_vc1_mspel_pixels_tab[10] = avg_vc1_mspel_mc22_c;
    dsp->avg_vc1_mspel_pixels_tab[11] = avg_vc1_mspel_mc32_c;
    dsp->avg_vc1_mspel_pixels_tab[12] = avg_vc1_mspel_mc03_c;
    dsp->avg_vc1_mspel_pixels_tab[13] = avg_vc1_mspel_mc13_c;
    dsp->avg_vc1_mspel_pixels_tab[14] = avg_vc1_mspel_mc23_c;
    dsp->avg_vc1_mspel_pixels_tab[15] = avg_vc1_mspel_mc33_c;

    dsp->put_no_rnd_vc1_chroma_pixels_tab[0] = put_no_rnd_vc1_chroma_mc8_c;
    dsp->avg_no_rnd_vc1_chroma_pixels_tab[0] = avg_no_rnd_vc1_chroma_mc8_c;
    dsp->put_no_rnd_vc1_chroma_pixels_tab[1] = put_no_rnd_vc1_chroma_mc4_c;
    dsp->avg_no_rnd_vc1_chroma_pixels_tab[1] = avg_no_rnd_vc1_chroma_mc4_c;

#if CONFIG_WMV3IMAGE_DECODER || CONFIG_VC1IMAGE_DECODER
    dsp->sprite_h                 = sprite_h_c;
    dsp->sprite_v_single          = sprite_v_single_c;
    dsp->sprite_v_double_noscale  = sprite_v_double_noscale_c;
    dsp->sprite_v_double_onescale = sprite_v_double_onescale_c;
    dsp->sprite_v_double_twoscale = sprite_v_double_twoscale_c;
#endif /* CONFIG_WMV3IMAGE_DECODER || CONFIG_VC1IMAGE_DECODER */
    if (ARCH_AARCH64)
        ff_vc1dsp_init_aarch64(dsp);
    if (ARCH_ARM)
        ff_vc1dsp_init_arm(dsp);
    if (ARCH_PPC)
        ff_vc1dsp_init_ppc(dsp);
    if (ARCH_X86)
        ff_vc1dsp_init_x86(dsp);
}