/*
 * VC-1 and WMV3 decoder - DSP functions
 * Copyright (c) 2006 Konstantin Shishkov
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

/**
 * @file
 * VC-1 and WMV3 decoder
 */
#include "libavutil/avassert.h"
#include "libavutil/common.h"
#include "libavutil/intreadwrite.h"
#include "h264chroma.h"
#include "qpeldsp.h"
#include "rnd_avg.h"
#include "vc1dsp.h"
#include "startcode.h"

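/*
 * Overlap smoothing: VC-1 filters the boundary between neighbouring intra
 * blocks with a small 4-tap transform to reduce blocking artifacts. Each
 * edge involves two pixels on either side of the boundary (a, b | c, d
 * below), and the rounding term alternates between positions so the bias
 * averages out.
 */
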
/* Apply overlap transform to horizontal edge */
static void vc1_v_overlap_c(uint8_t *src, int stride)
{
    int i;
    int a, b, c, d;
    int d1, d2;
    int rnd = 1;
    for (i = 0; i < 8; i++) {
        a  = src[-2 * stride];
        b  = src[-stride];
        c  = src[0];
        d  = src[stride];
        d1 = (a - d + 3 + rnd) >> 3;
        d2 = (a - d + b - c + 4 - rnd) >> 3;

        src[-2 * stride] = a - d1;
        src[-stride]     = av_clip_uint8(b - d2);
        src[0]           = av_clip_uint8(c + d2);
        src[stride]      = d + d1;
        src++;
        rnd = !rnd;
    }
}

/* Apply overlap transform to vertical edge */
static void vc1_h_overlap_c(uint8_t *src, int stride)
{
    int i;
    int a, b, c, d;
    int d1, d2;
    int rnd = 1;
    for (i = 0; i < 8; i++) {
        a  = src[-2];
        b  = src[-1];
        c  = src[0];
        d  = src[1];
        d1 = (a - d + 3 + rnd) >> 3;
        d2 = (a - d + b - c + 4 - rnd) >> 3;

        src[-2] = a - d1;
        src[-1] = av_clip_uint8(b - d2);
        src[0]  = av_clip_uint8(c + d2);
        src[1]  = d + d1;
        src += stride;
        rnd = !rnd;
    }
}

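/*
 * The *_s_* variants below apply the same smoothing to 16-bit coefficients
 * (scaled by 8) instead of pixels, which keeps precision when a block edge
 * is filtered in both directions before the final clip to 8 bits.
 */
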
static void vc1_v_s_overlap_c(int16_t *top, int16_t *bottom)
{
    int i;
    int a, b, c, d;
    int d1, d2;
    int rnd1 = 4, rnd2 = 3;
    for (i = 0; i < 8; i++) {
        a  = top[48];
        b  = top[56];
        c  = bottom[0];
        d  = bottom[8];
        d1 = a - d;
        d2 = a - d + b - c;

        top[48]   = ((a * 8) - d1 + rnd1) >> 3;
        top[56]   = ((b * 8) - d2 + rnd2) >> 3;
        bottom[0] = ((c * 8) + d2 + rnd1) >> 3;
        bottom[8] = ((d * 8) + d1 + rnd2) >> 3;

        bottom++;
        top++;
        rnd2 = 7 - rnd2;
        rnd1 = 7 - rnd1;
    }
}

static void vc1_h_s_overlap_c(int16_t *left, int16_t *right,
                              int left_stride, int right_stride, int flags)
{
    int i;
    int a, b, c, d;
    int d1, d2;
    int rnd1 = flags & 2 ? 3 : 4;
    int rnd2 = 7 - rnd1;
    for (i = 0; i < 8; i++) {
        a  = left[6];
        b  = left[7];
        c  = right[0];
        d  = right[1];
        d1 = a - d;
        d2 = a - d + b - c;

        left[6]  = ((a * 8) - d1 + rnd1) >> 3;
        left[7]  = ((b * 8) - d2 + rnd2) >> 3;
        right[0] = ((c * 8) + d2 + rnd1) >> 3;
        right[1] = ((d * 8) + d1 + rnd2) >> 3;

        right += right_stride;
        left  += left_stride;
        if (flags & 1) {
            rnd2 = 7 - rnd2;
            rnd1 = 7 - rnd1;
        }
    }
}

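/*
 * In-loop deblocking. For each 4-pixel segment the filter examines the
 * third line first; only if that line needs filtering are the other three
 * lines processed. The sign/abs arithmetic below is deliberately
 * branchless: `x >> 31` produces the sign mask and `(x ^ sign) - sign`
 * the absolute value.
 */
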
/**
 * VC-1 in-loop deblocking filter for one line
 * @param src pointer to the pixel pair straddling the block edge
 * @param stride distance between the pixels that straddle the edge
 * @param pq block quantizer
 * @return whether the other 3 pairs should be filtered or not
 */
static av_always_inline int vc1_filter_line(uint8_t *src, int stride, int pq)
{
    int a0 = (2 * (src[-2 * stride] - src[1 * stride]) -
              5 * (src[-1 * stride] - src[0 * stride]) + 4) >> 3;
    int a0_sign = a0 >> 31;        /* Store sign */

    a0 = (a0 ^ a0_sign) - a0_sign; /* a0 = FFABS(a0); */
    if (a0 < pq) {
        int a1 = FFABS((2 * (src[-4 * stride] - src[-1 * stride]) -
                        5 * (src[-3 * stride] - src[-2 * stride]) + 4) >> 3);
        int a2 = FFABS((2 * (src[ 0 * stride] - src[ 3 * stride]) -
                        5 * (src[ 1 * stride] - src[ 2 * stride]) + 4) >> 3);
        if (a1 < a0 || a2 < a0) {
            int clip      = src[-1 * stride] - src[0 * stride];
            int clip_sign = clip >> 31;

            clip = ((clip ^ clip_sign) - clip_sign) >> 1;
            if (clip) {
                int a3     = FFMIN(a1, a2);
                int d      = 5 * (a3 - a0);
                int d_sign = (d >> 31);

                d = ((d ^ d_sign) - d_sign) >> 3;

                if (d_sign ^ clip_sign)
                    d = 0;
                else {
                    d = FFMIN(d, clip);
                    d = (d ^ d_sign) - d_sign; /* Restore sign */
                    src[-1 * stride] = av_clip_uint8(src[-1 * stride] - d);
                    src[ 0 * stride] = av_clip_uint8(src[ 0 * stride] + d);
                }
                return 1;
            }
        }
    }
    return 0;
}

/**
 * VC-1 in-loop deblocking filter
 * @param src pointer to the first pixel of the edge
 * @param step distance between horizontally adjacent elements
 * @param stride distance between vertically adjacent elements
 * @param len edge length to filter (4, 8 or 16 pixels)
 * @param pq block quantizer
 */
static inline void vc1_loop_filter(uint8_t *src, int step, int stride,
                                   int len, int pq)
{
    int i;
    int filt3;

    for (i = 0; i < len; i += 4) {
        filt3 = vc1_filter_line(src + 2 * step, stride, pq);
        if (filt3) {
            vc1_filter_line(src + 0 * step, stride, pq);
            vc1_filter_line(src + 1 * step, stride, pq);
            vc1_filter_line(src + 3 * step, stride, pq);
        }
        src += step * 4;
    }
}

static void vc1_v_loop_filter4_c(uint8_t *src, int stride, int pq)
{
    vc1_loop_filter(src, 1, stride, 4, pq);
}

static void vc1_h_loop_filter4_c(uint8_t *src, int stride, int pq)
{
    vc1_loop_filter(src, stride, 1, 4, pq);
}

static void vc1_v_loop_filter8_c(uint8_t *src, int stride, int pq)
{
    vc1_loop_filter(src, 1, stride, 8, pq);
}

static void vc1_h_loop_filter8_c(uint8_t *src, int stride, int pq)
{
    vc1_loop_filter(src, stride, 1, 8, pq);
}

static void vc1_v_loop_filter16_c(uint8_t *src, int stride, int pq)
{
    vc1_loop_filter(src, 1, stride, 16, pq);
}

static void vc1_h_loop_filter16_c(uint8_t *src, int stride, int pq)
{
    vc1_loop_filter(src, stride, 1, 16, pq);
}

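/*
 * The *_dc shortcuts below handle blocks whose only nonzero coefficient is
 * the DC one: the whole inverse transform then collapses to adding a single
 * constant to every pixel, with the two scaling steps mirroring the row and
 * column passes of the full transform.
 */
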
/* Do inverse transform on 8x8 block */
static void vc1_inv_trans_8x8_dc_c(uint8_t *dest, ptrdiff_t stride, int16_t *block)
{
    int i;
    int dc = block[0];

    dc = (3 * dc +  1) >> 1;
    dc = (3 * dc + 16) >> 5;

    for (i = 0; i < 8; i++) {
        dest[0] = av_clip_uint8(dest[0] + dc);
        dest[1] = av_clip_uint8(dest[1] + dc);
        dest[2] = av_clip_uint8(dest[2] + dc);
        dest[3] = av_clip_uint8(dest[3] + dc);
        dest[4] = av_clip_uint8(dest[4] + dc);
        dest[5] = av_clip_uint8(dest[5] + dc);
        dest[6] = av_clip_uint8(dest[6] + dc);
        dest[7] = av_clip_uint8(dest[7] + dc);
        dest += stride;
    }
}

static void vc1_inv_trans_8x8_c(int16_t block[64])
{
    int i;
    register int t1, t2, t3, t4, t5, t6, t7, t8;
    int16_t *src, *dst, temp[64];

    src = block;
    dst = temp;
    for (i = 0; i < 8; i++) {
        t1 = 12 * (src[ 0] + src[32]) + 4;
        t2 = 12 * (src[ 0] - src[32]) + 4;
        t3 = 16 * src[16] +  6 * src[48];
        t4 =  6 * src[16] - 16 * src[48];

        t5 = t1 + t3;
        t6 = t2 + t4;
        t7 = t2 - t4;
        t8 = t1 - t3;

        t1 = 16 * src[ 8] + 15 * src[24] +  9 * src[40] +  4 * src[56];
        t2 = 15 * src[ 8] -  4 * src[24] - 16 * src[40] -  9 * src[56];
        t3 =  9 * src[ 8] - 16 * src[24] +  4 * src[40] + 15 * src[56];
        t4 =  4 * src[ 8] -  9 * src[24] + 15 * src[40] - 16 * src[56];

        dst[0] = (t5 + t1) >> 3;
        dst[1] = (t6 + t2) >> 3;
        dst[2] = (t7 + t3) >> 3;
        dst[3] = (t8 + t4) >> 3;
        dst[4] = (t8 - t4) >> 3;
        dst[5] = (t7 - t3) >> 3;
        dst[6] = (t6 - t2) >> 3;
        dst[7] = (t5 - t1) >> 3;

        src += 8;
        dst += 8;
    }

    src = temp;
    dst = block;
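    /*
     * Second (column) pass. The lower four outputs take an extra +1 before
     * the shift: VC-1 specifies slightly different rounding for the two
     * halves of the column so the overall bias cancels out.
     */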
    for (i = 0; i < 8; i++) {
        t1 = 12 * (src[ 0] + src[32]) + 64;
        t2 = 12 * (src[ 0] - src[32]) + 64;
        t3 = 16 * src[16] +  6 * src[48];
        t4 =  6 * src[16] - 16 * src[48];

        t5 = t1 + t3;
        t6 = t2 + t4;
        t7 = t2 - t4;
        t8 = t1 - t3;

        t1 = 16 * src[ 8] + 15 * src[24] +  9 * src[40] +  4 * src[56];
        t2 = 15 * src[ 8] -  4 * src[24] - 16 * src[40] -  9 * src[56];
        t3 =  9 * src[ 8] - 16 * src[24] +  4 * src[40] + 15 * src[56];
        t4 =  4 * src[ 8] -  9 * src[24] + 15 * src[40] - 16 * src[56];

        dst[ 0] = (t5 + t1) >> 7;
        dst[ 8] = (t6 + t2) >> 7;
        dst[16] = (t7 + t3) >> 7;
        dst[24] = (t8 + t4) >> 7;
        dst[32] = (t8 - t4 + 1) >> 7;
        dst[40] = (t7 - t3 + 1) >> 7;
        dst[48] = (t6 - t2 + 1) >> 7;
        dst[56] = (t5 - t1 + 1) >> 7;

        src++;
        dst++;
    }
}

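/*
 * Unlike the 8x8 transform above, which works purely on coefficients, the
 * rectangular and 4x4 transforms below fold their final stage into the
 * output write: the result is added to the prediction in `dest` and
 * clipped to 8 bits.
 */
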
/* Do inverse transform on 8x4 part of block */
static void vc1_inv_trans_8x4_dc_c(uint8_t *dest, ptrdiff_t stride, int16_t *block)
{
    int i;
    int dc = block[0];

    dc = ( 3 * dc +  1) >> 1;
    dc = (17 * dc + 64) >> 7;

    for (i = 0; i < 4; i++) {
        dest[0] = av_clip_uint8(dest[0] + dc);
        dest[1] = av_clip_uint8(dest[1] + dc);
        dest[2] = av_clip_uint8(dest[2] + dc);
        dest[3] = av_clip_uint8(dest[3] + dc);
        dest[4] = av_clip_uint8(dest[4] + dc);
        dest[5] = av_clip_uint8(dest[5] + dc);
        dest[6] = av_clip_uint8(dest[6] + dc);
        dest[7] = av_clip_uint8(dest[7] + dc);
        dest += stride;
    }
}

static void vc1_inv_trans_8x4_c(uint8_t *dest, ptrdiff_t stride, int16_t *block)
{
    int i;
    register int t1, t2, t3, t4, t5, t6, t7, t8;
    int16_t *src, *dst;

    src = block;
    dst = block;

    for (i = 0; i < 4; i++) {
        t1 = 12 * (src[0] + src[4]) + 4;
        t2 = 12 * (src[0] - src[4]) + 4;
        t3 = 16 * src[2] +  6 * src[6];
        t4 =  6 * src[2] - 16 * src[6];

        t5 = t1 + t3;
        t6 = t2 + t4;
        t7 = t2 - t4;
        t8 = t1 - t3;

        t1 = 16 * src[1] + 15 * src[3] +  9 * src[5] +  4 * src[7];
        t2 = 15 * src[1] -  4 * src[3] - 16 * src[5] -  9 * src[7];
        t3 =  9 * src[1] - 16 * src[3] +  4 * src[5] + 15 * src[7];
        t4 =  4 * src[1] -  9 * src[3] + 15 * src[5] - 16 * src[7];

        dst[0] = (t5 + t1) >> 3;
        dst[1] = (t6 + t2) >> 3;
        dst[2] = (t7 + t3) >> 3;
        dst[3] = (t8 + t4) >> 3;
        dst[4] = (t8 - t4) >> 3;
        dst[5] = (t7 - t3) >> 3;
        dst[6] = (t6 - t2) >> 3;
        dst[7] = (t5 - t1) >> 3;

        src += 8;
        dst += 8;
    }

    src = block;
    for (i = 0; i < 8; i++) {
        t1 = 17 * (src[ 0] + src[16]) + 64;
        t2 = 17 * (src[ 0] - src[16]) + 64;
        t3 = 22 * src[ 8] + 10 * src[24];
        t4 = 22 * src[24] - 10 * src[ 8];

        dest[0 * stride] = av_clip_uint8(dest[0 * stride] + ((t1 + t3) >> 7));
        dest[1 * stride] = av_clip_uint8(dest[1 * stride] + ((t2 - t4) >> 7));
        dest[2 * stride] = av_clip_uint8(dest[2 * stride] + ((t2 + t4) >> 7));
        dest[3 * stride] = av_clip_uint8(dest[3 * stride] + ((t1 - t3) >> 7));

        src++;
        dest++;
    }
}

/* Do inverse transform on 4x8 parts of block */
static void vc1_inv_trans_4x8_dc_c(uint8_t *dest, ptrdiff_t stride, int16_t *block)
{
    int i;
    int dc = block[0];

    dc = (17 * dc +  4) >> 3;
    dc = (12 * dc + 64) >> 7;

    for (i = 0; i < 8; i++) {
        dest[0] = av_clip_uint8(dest[0] + dc);
        dest[1] = av_clip_uint8(dest[1] + dc);
        dest[2] = av_clip_uint8(dest[2] + dc);
        dest[3] = av_clip_uint8(dest[3] + dc);
        dest += stride;
    }
}

static void vc1_inv_trans_4x8_c(uint8_t *dest, ptrdiff_t stride, int16_t *block)
{
    int i;
    register int t1, t2, t3, t4, t5, t6, t7, t8;
    int16_t *src, *dst;

    src = block;
    dst = block;

    for (i = 0; i < 8; i++) {
        t1 = 17 * (src[0] + src[2]) + 4;
        t2 = 17 * (src[0] - src[2]) + 4;
        t3 = 22 * src[1] + 10 * src[3];
        t4 = 22 * src[3] - 10 * src[1];

        dst[0] = (t1 + t3) >> 3;
        dst[1] = (t2 - t4) >> 3;
        dst[2] = (t2 + t4) >> 3;
        dst[3] = (t1 - t3) >> 3;

        src += 8;
        dst += 8;
    }

    src = block;
    for (i = 0; i < 4; i++) {
        t1 = 12 * (src[ 0] + src[32]) + 64;
        t2 = 12 * (src[ 0] - src[32]) + 64;
        t3 = 16 * src[16] +  6 * src[48];
        t4 =  6 * src[16] - 16 * src[48];

        t5 = t1 + t3;
        t6 = t2 + t4;
        t7 = t2 - t4;
        t8 = t1 - t3;

        t1 = 16 * src[ 8] + 15 * src[24] +  9 * src[40] +  4 * src[56];
        t2 = 15 * src[ 8] -  4 * src[24] - 16 * src[40] -  9 * src[56];
        t3 =  9 * src[ 8] - 16 * src[24] +  4 * src[40] + 15 * src[56];
        t4 =  4 * src[ 8] -  9 * src[24] + 15 * src[40] - 16 * src[56];

        dest[0 * stride] = av_clip_uint8(dest[0 * stride] + ((t5 + t1) >> 7));
        dest[1 * stride] = av_clip_uint8(dest[1 * stride] + ((t6 + t2) >> 7));
        dest[2 * stride] = av_clip_uint8(dest[2 * stride] + ((t7 + t3) >> 7));
        dest[3 * stride] = av_clip_uint8(dest[3 * stride] + ((t8 + t4) >> 7));
        dest[4 * stride] = av_clip_uint8(dest[4 * stride] + ((t8 - t4 + 1) >> 7));
        dest[5 * stride] = av_clip_uint8(dest[5 * stride] + ((t7 - t3 + 1) >> 7));
        dest[6 * stride] = av_clip_uint8(dest[6 * stride] + ((t6 - t2 + 1) >> 7));
        dest[7 * stride] = av_clip_uint8(dest[7 * stride] + ((t5 - t1 + 1) >> 7));

        src++;
        dest++;
    }
}

/* Do inverse transform on 4x4 part of block */
static void vc1_inv_trans_4x4_dc_c(uint8_t *dest, ptrdiff_t stride, int16_t *block)
{
    int i;
    int dc = block[0];

    dc = (17 * dc +  4) >> 3;
    dc = (17 * dc + 64) >> 7;

    for (i = 0; i < 4; i++) {
        dest[0] = av_clip_uint8(dest[0] + dc);
        dest[1] = av_clip_uint8(dest[1] + dc);
        dest[2] = av_clip_uint8(dest[2] + dc);
        dest[3] = av_clip_uint8(dest[3] + dc);
        dest += stride;
    }
}

static void vc1_inv_trans_4x4_c(uint8_t *dest, ptrdiff_t stride, int16_t *block)
{
    int i;
    register int t1, t2, t3, t4;
    int16_t *src, *dst;

    src = block;
    dst = block;
    for (i = 0; i < 4; i++) {
        t1 = 17 * (src[0] + src[2]) + 4;
        t2 = 17 * (src[0] - src[2]) + 4;
        t3 = 22 * src[1] + 10 * src[3];
        t4 = 22 * src[3] - 10 * src[1];

        dst[0] = (t1 + t3) >> 3;
        dst[1] = (t2 - t4) >> 3;
        dst[2] = (t2 + t4) >> 3;
        dst[3] = (t1 - t3) >> 3;

        src += 8;
        dst += 8;
    }

    src = block;
    for (i = 0; i < 4; i++) {
        t1 = 17 * (src[0] + src[16]) + 64;
        t2 = 17 * (src[0] - src[16]) + 64;
        t3 = 22 * src[8] + 10 * src[24];
        t4 = 22 * src[24] - 10 * src[8];

        dest[0 * stride] = av_clip_uint8(dest[0 * stride] + ((t1 + t3) >> 7));
        dest[1 * stride] = av_clip_uint8(dest[1 * stride] + ((t2 - t4) >> 7));
        dest[2 * stride] = av_clip_uint8(dest[2 * stride] + ((t2 + t4) >> 7));
        dest[3 * stride] = av_clip_uint8(dest[3 * stride] + ((t1 - t3) >> 7));

        src++;
        dest++;
    }
}

/* motion compensation functions */

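/*
 * Quarter-pel luma interpolation uses a separable 4-tap bicubic filter.
 * The tap sets are (-4, 53, 18, -3)/64 for a 1/4-pel shift,
 * (-1, 9, 9, -1)/16 for 1/2 pel and the mirrored (-3, 18, 53, -4)/64 for
 * 3/4 pel; `rnd` is VC-1's rounding-control bit, which nudges the rounding
 * constant up or down.
 */
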
/* Intermediate 16-bit filter, used when both a horizontal and a vertical
 * filter have to be applied */
#define VC1_MSPEL_FILTER_16B(DIR, TYPE)                                       \
static av_always_inline int vc1_mspel_ ## DIR ## _filter_16bits(const TYPE *src, \
                                                                int stride,   \
                                                                int mode)     \
{                                                                             \
    switch (mode) {                                                           \
    case 0: /* no shift - should not occur */                                 \
        return 0;                                                             \
    case 1: /* 1/4 shift */                                                   \
        return -4 * src[-stride] + 53 * src[0] +                              \
               18 * src[stride] - 3 * src[stride * 2];                        \
    case 2: /* 1/2 shift */                                                   \
        return -1 * src[-stride] + 9 * src[0] +                               \
                9 * src[stride] - 1 * src[stride * 2];                        \
    case 3: /* 3/4 shift */                                                   \
        return -3 * src[-stride] + 18 * src[0] +                              \
               53 * src[stride] - 4 * src[stride * 2];                        \
    }                                                                         \
    return 0; /* should not occur */                                          \
}

VC1_MSPEL_FILTER_16B(ver, uint8_t)
VC1_MSPEL_FILTER_16B(hor, int16_t)

/* Filter used to interpolate fractional pel values */
static av_always_inline int vc1_mspel_filter(const uint8_t *src, int stride,
                                             int mode, int r)
{
    switch (mode) {
    case 0: // no shift
        return src[0];
    case 1: // 1/4 shift
        return (-4 * src[-stride] + 53 * src[0] +
                18 * src[stride] - 3 * src[stride * 2] + 32 - r) >> 6;
    case 2: // 1/2 shift
        return (-1 * src[-stride] + 9 * src[0] +
                 9 * src[stride] - 1 * src[stride * 2] + 8 - r) >> 4;
    case 3: // 3/4 shift
        return (-3 * src[-stride] + 18 * src[0] +
                53 * src[stride] - 4 * src[stride * 2] + 32 - r) >> 6;
    }
    return 0; // should not occur
}

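/*
 * For diagonal (hmode && vmode) positions the interpolation runs in two
 * passes with 16-bit intermediates: the vertical pass is pre-shifted by
 * `shift` so that, together with the final >> 7, the total normalization
 * equals the product of the two filter gains (64 * 64 = 1 << 12 for
 * quarter-pel taps, 16 * 16 = 1 << 8 for the half-pel taps).
 */
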
/* Function used to do motion compensation with bicubic interpolation */
#define VC1_MSPEL_MC(OP, OP4, OPNAME)                                         \
static av_always_inline void OPNAME ## vc1_mspel_mc(uint8_t *dst,             \
                                                    const uint8_t *src,       \
                                                    ptrdiff_t stride,         \
                                                    int hmode,                \
                                                    int vmode,                \
                                                    int rnd)                  \
{                                                                             \
    int i, j;                                                                 \
                                                                              \
    if (vmode) { /* there is a vertical filter component */                   \
        int r;                                                                \
                                                                              \
        if (hmode) { /* both components: vertical pass into tmp, then horizontal pass */ \
            static const int shift_value[] = { 0, 5, 1, 5 };                  \
            int shift = (shift_value[hmode] + shift_value[vmode]) >> 1;       \
            int16_t tmp[11 * 8], *tptr = tmp;                                 \
                                                                              \
            r = (1 << (shift - 1)) + rnd - 1;                                 \
                                                                              \
            src -= 1;                                                         \
            for (j = 0; j < 8; j++) {                                         \
                for (i = 0; i < 11; i++)                                      \
                    tptr[i] = (vc1_mspel_ver_filter_16bits(src + i, stride, vmode) + r) >> shift; \
                src  += stride;                                               \
                tptr += 11;                                                   \
            }                                                                 \
                                                                              \
            r    = 64 - rnd;                                                  \
            tptr = tmp + 1;                                                   \
            for (j = 0; j < 8; j++) {                                         \
                for (i = 0; i < 8; i++)                                       \
                    OP(dst[i], (vc1_mspel_hor_filter_16bits(tptr + i, 1, hmode) + r) >> 7); \
                dst  += stride;                                               \
                tptr += 11;                                                   \
            }                                                                 \
                                                                              \
            return;                                                           \
        } else { /* no horizontal filter, output 8 lines to dst */            \
            r = 1 - rnd;                                                      \
                                                                              \
            for (j = 0; j < 8; j++) {                                         \
                for (i = 0; i < 8; i++)                                       \
                    OP(dst[i], vc1_mspel_filter(src + i, stride, vmode, r));  \
                src += stride;                                                \
                dst += stride;                                                \
            }                                                                 \
            return;                                                           \
        }                                                                     \
    }                                                                         \
                                                                              \
    /* horizontal mode with no vertical mode */                               \
    for (j = 0; j < 8; j++) {                                                 \
        for (i = 0; i < 8; i++)                                               \
            OP(dst[i], vc1_mspel_filter(src + i, 1, hmode, rnd));             \
        dst += stride;                                                        \
        src += stride;                                                        \
    }                                                                         \
}                                                                             \
static av_always_inline void OPNAME ## vc1_mspel_mc_16(uint8_t *dst,          \
                                                       const uint8_t *src,    \
                                                       ptrdiff_t stride,      \
                                                       int hmode,             \
                                                       int vmode,             \
                                                       int rnd)               \
{                                                                             \
    int i, j;                                                                 \
                                                                              \
    if (vmode) { /* there is a vertical filter component */                   \
        int r;                                                                \
                                                                              \
        if (hmode) { /* both components: vertical pass into tmp, then horizontal pass */ \
            static const int shift_value[] = { 0, 5, 1, 5 };                  \
            int shift = (shift_value[hmode] + shift_value[vmode]) >> 1;       \
            int16_t tmp[19 * 16], *tptr = tmp;                                \
                                                                              \
            r = (1 << (shift - 1)) + rnd - 1;                                 \
                                                                              \
            src -= 1;                                                         \
            for (j = 0; j < 16; j++) {                                        \
                for (i = 0; i < 19; i++)                                      \
                    tptr[i] = (vc1_mspel_ver_filter_16bits(src + i, stride, vmode) + r) >> shift; \
                src  += stride;                                               \
                tptr += 19;                                                   \
            }                                                                 \
                                                                              \
            r    = 64 - rnd;                                                  \
            tptr = tmp + 1;                                                   \
            for (j = 0; j < 16; j++) {                                        \
                for (i = 0; i < 16; i++)                                      \
                    OP(dst[i], (vc1_mspel_hor_filter_16bits(tptr + i, 1, hmode) + r) >> 7); \
                dst  += stride;                                               \
                tptr += 19;                                                   \
            }                                                                 \
                                                                              \
            return;                                                           \
        } else { /* no horizontal filter, output 16 lines to dst */           \
            r = 1 - rnd;                                                      \
                                                                              \
            for (j = 0; j < 16; j++) {                                        \
                for (i = 0; i < 16; i++)                                      \
                    OP(dst[i], vc1_mspel_filter(src + i, stride, vmode, r));  \
                src += stride;                                                \
                dst += stride;                                                \
            }                                                                 \
            return;                                                           \
        }                                                                     \
    }                                                                         \
                                                                              \
    /* horizontal mode with no vertical mode */                               \
    for (j = 0; j < 16; j++) {                                                \
        for (i = 0; i < 16; i++)                                              \
            OP(dst[i], vc1_mspel_filter(src + i, 1, hmode, rnd));             \
        dst += stride;                                                        \
        src += stride;                                                        \
    }                                                                         \
}                                                                             \
static void OPNAME ## pixels8x8_c(uint8_t *block, const uint8_t *pixels, ptrdiff_t line_size, int rnd){\
    int i;\
    for(i=0; i<8; i++){\
        OP4(*(uint32_t*)(block  ), AV_RN32(pixels  ));\
        OP4(*(uint32_t*)(block+4), AV_RN32(pixels+4));\
        pixels += line_size;\
        block  += line_size;\
    }\
}\
static void OPNAME ## pixels16x16_c(uint8_t *block, const uint8_t *pixels, ptrdiff_t line_size, int rnd){\
    int i;\
    for(i=0; i<16; i++){\
        OP4(*(uint32_t*)(block   ), AV_RN32(pixels   ));\
        OP4(*(uint32_t*)(block+ 4), AV_RN32(pixels+ 4));\
        OP4(*(uint32_t*)(block+ 8), AV_RN32(pixels+ 8));\
        OP4(*(uint32_t*)(block+12), AV_RN32(pixels+12));\
        pixels += line_size;\
        block  += line_size;\
    }\
}

#define op_put(a, b)  (a) = av_clip_uint8(b)
#define op_avg(a, b)  (a) = ((a) + av_clip_uint8(b) + 1) >> 1
#define op4_avg(a, b) (a) = rnd_avg32(a, b)
#define op4_put(a, b) (a) = (b)

VC1_MSPEL_MC(op_put, op4_put, put_)
VC1_MSPEL_MC(op_avg, op4_avg, avg_)

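/*
 * Two expansions of VC1_MSPEL_MC generate the full function family: the
 * put_ variants store the interpolated value, the avg_ variants average it
 * with what is already in the destination; the OP4 variants do the same
 * four pixels at a time for the plain copy helpers above.
 */
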
/* pixel functions - really are entry points to vc1_mspel_mc */

#define PUT_VC1_MSPEL(a, b)                                                   \
static void put_vc1_mspel_mc ## a ## b ## _c(uint8_t *dst,                    \
                                             const uint8_t *src,              \
                                             ptrdiff_t stride, int rnd)       \
{                                                                             \
    put_vc1_mspel_mc(dst, src, stride, a, b, rnd);                            \
}                                                                             \
static void avg_vc1_mspel_mc ## a ## b ## _c(uint8_t *dst,                    \
                                             const uint8_t *src,              \
                                             ptrdiff_t stride, int rnd)       \
{                                                                             \
    avg_vc1_mspel_mc(dst, src, stride, a, b, rnd);                            \
}                                                                             \
static void put_vc1_mspel_mc ## a ## b ## _16_c(uint8_t *dst,                 \
                                                const uint8_t *src,           \
                                                ptrdiff_t stride, int rnd)    \
{                                                                             \
    put_vc1_mspel_mc_16(dst, src, stride, a, b, rnd);                         \
}                                                                             \
static void avg_vc1_mspel_mc ## a ## b ## _16_c(uint8_t *dst,                 \
                                                const uint8_t *src,           \
                                                ptrdiff_t stride, int rnd)    \
{                                                                             \
    avg_vc1_mspel_mc_16(dst, src, stride, a, b, rnd);                         \
}

PUT_VC1_MSPEL(1, 0)
PUT_VC1_MSPEL(2, 0)
PUT_VC1_MSPEL(3, 0)

PUT_VC1_MSPEL(0, 1)
PUT_VC1_MSPEL(1, 1)
PUT_VC1_MSPEL(2, 1)
PUT_VC1_MSPEL(3, 1)

PUT_VC1_MSPEL(0, 2)
PUT_VC1_MSPEL(1, 2)
PUT_VC1_MSPEL(2, 2)
PUT_VC1_MSPEL(3, 2)

PUT_VC1_MSPEL(0, 3)
PUT_VC1_MSPEL(1, 3)
PUT_VC1_MSPEL(2, 3)
PUT_VC1_MSPEL(3, 3)

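/*
 * Chroma motion compensation is plain bilinear interpolation at 1/8-pel
 * precision; the four weights A..D always sum to 64. These are the
 * "no rounding" variants, so the bias added before the >> 6 is 28
 * (32 - 4) instead of the usual 32.
 */
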
#define chroma_mc(a) \
    ((A * src[a] + B * src[a + 1] + \
      C * src[stride + a] + D * src[stride + a + 1] + 32 - 4) >> 6)
static void put_no_rnd_vc1_chroma_mc8_c(uint8_t *dst /* align 8 */,
                                        uint8_t *src /* align 1 */,
                                        ptrdiff_t stride, int h, int x, int y)
{
    const int A = (8 - x) * (8 - y);
    const int B =     (x) * (8 - y);
    const int C = (8 - x) *     (y);
    const int D =     (x) *     (y);
    int i;

    av_assert2(x < 8 && y < 8 && x >= 0 && y >= 0);

    for (i = 0; i < h; i++) {
        dst[0] = chroma_mc(0);
        dst[1] = chroma_mc(1);
        dst[2] = chroma_mc(2);
        dst[3] = chroma_mc(3);
        dst[4] = chroma_mc(4);
        dst[5] = chroma_mc(5);
        dst[6] = chroma_mc(6);
        dst[7] = chroma_mc(7);
        dst += stride;
        src += stride;
    }
}

static void put_no_rnd_vc1_chroma_mc4_c(uint8_t *dst, uint8_t *src,
                                        ptrdiff_t stride, int h, int x, int y)
{
    const int A = (8 - x) * (8 - y);
    const int B =     (x) * (8 - y);
    const int C = (8 - x) *     (y);
    const int D =     (x) *     (y);
    int i;

    av_assert2(x < 8 && y < 8 && x >= 0 && y >= 0);

    for (i = 0; i < h; i++) {
        dst[0] = chroma_mc(0);
        dst[1] = chroma_mc(1);
        dst[2] = chroma_mc(2);
        dst[3] = chroma_mc(3);
        dst += stride;
        src += stride;
    }
}

#define avg2(a, b) (((a) + (b) + 1) >> 1)
static void avg_no_rnd_vc1_chroma_mc8_c(uint8_t *dst /* align 8 */,
                                        uint8_t *src /* align 1 */,
                                        ptrdiff_t stride, int h, int x, int y)
{
    const int A = (8 - x) * (8 - y);
    const int B =     (x) * (8 - y);
    const int C = (8 - x) *     (y);
    const int D =     (x) *     (y);
    int i;

    av_assert2(x < 8 && y < 8 && x >= 0 && y >= 0);

    for (i = 0; i < h; i++) {
        dst[0] = avg2(dst[0], chroma_mc(0));
        dst[1] = avg2(dst[1], chroma_mc(1));
        dst[2] = avg2(dst[2], chroma_mc(2));
        dst[3] = avg2(dst[3], chroma_mc(3));
        dst[4] = avg2(dst[4], chroma_mc(4));
        dst[5] = avg2(dst[5], chroma_mc(5));
        dst[6] = avg2(dst[6], chroma_mc(6));
        dst[7] = avg2(dst[7], chroma_mc(7));
        dst += stride;
        src += stride;
    }
}

static void avg_no_rnd_vc1_chroma_mc4_c(uint8_t *dst /* align 8 */,
                                        uint8_t *src /* align 1 */,
                                        ptrdiff_t stride, int h, int x, int y)
{
    const int A = (8 - x) * (8 - y);
    const int B = (    x) * (8 - y);
    const int C = (8 - x) * (    y);
    const int D = (    x) * (    y);
    int i;

    av_assert2(x < 8 && y < 8 && x >= 0 && y >= 0);

    for (i = 0; i < h; i++) {
        dst[0] = avg2(dst[0], chroma_mc(0));
        dst[1] = avg2(dst[1], chroma_mc(1));
        dst[2] = avg2(dst[2], chroma_mc(2));
        dst[3] = avg2(dst[3], chroma_mc(3));
        dst += stride;
        src += stride;
    }
}

#if CONFIG_WMV3IMAGE_DECODER || CONFIG_VC1IMAGE_DECODER

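/*
 * Sprite (global motion) helpers for the WMV3/VC-1 image decoders.
 * Offsets are 16.16 fixed point: the integer part selects the source
 * sample pair, the fractional part blends linearly between them, and
 * `alpha` blends between two sprites in the same format.
 */
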
static void sprite_h_c(uint8_t *dst, const uint8_t *src, int offset,
                       int advance, int count)
{
    while (count--) {
        int a = src[(offset >> 16)];
        int b = src[(offset >> 16) + 1];
        *dst++ = a + ((b - a) * (offset & 0xFFFF) >> 16);
        offset += advance;
    }
}

static av_always_inline void sprite_v_template(uint8_t *dst,
                                               const uint8_t *src1a,
                                               const uint8_t *src1b,
                                               int offset1,
                                               int two_sprites,
                                               const uint8_t *src2a,
                                               const uint8_t *src2b,
                                               int offset2,
                                               int alpha, int scaled,
                                               int width)
{
    int a1, b1, a2, b2;

    while (width--) {
        a1 = *src1a++;
        if (scaled) {
            b1 = *src1b++;
            a1 = a1 + ((b1 - a1) * offset1 >> 16);
        }
        if (two_sprites) {
            a2 = *src2a++;
            if (scaled > 1) {
                b2 = *src2b++;
                a2 = a2 + ((b2 - a2) * offset2 >> 16);
            }
            a1 = a1 + ((a2 - a1) * alpha >> 16);
        }
        *dst++ = a1;
    }
}

static void sprite_v_single_c(uint8_t *dst, const uint8_t *src1a,
                              const uint8_t *src1b,
                              int offset, int width)
{
    sprite_v_template(dst, src1a, src1b, offset, 0, NULL, NULL, 0, 0, 1, width);
}

static void sprite_v_double_noscale_c(uint8_t *dst, const uint8_t *src1a,
                                      const uint8_t *src2a,
                                      int alpha, int width)
{
    sprite_v_template(dst, src1a, NULL, 0, 1, src2a, NULL, 0, alpha, 0, width);
}

static void sprite_v_double_onescale_c(uint8_t *dst,
                                       const uint8_t *src1a,
                                       const uint8_t *src1b,
                                       int offset1,
                                       const uint8_t *src2a,
                                       int alpha, int width)
{
    sprite_v_template(dst, src1a, src1b, offset1, 1, src2a, NULL, 0, alpha, 1,
                      width);
}

static void sprite_v_double_twoscale_c(uint8_t *dst,
                                       const uint8_t *src1a,
                                       const uint8_t *src1b,
                                       int offset1,
                                       const uint8_t *src2a,
                                       const uint8_t *src2b,
                                       int offset2,
                                       int alpha, int width)
{
    sprite_v_template(dst, src1a, src1b, offset1, 1, src2a, src2b, offset2,
                      alpha, 2, width);
}

#endif /* CONFIG_WMV3IMAGE_DECODER || CONFIG_VC1IMAGE_DECODER */

#define FN_ASSIGN(X, Y) \
    dsp->put_vc1_mspel_pixels_tab[1][X+4*Y] = put_vc1_mspel_mc##X##Y##_c; \
    dsp->put_vc1_mspel_pixels_tab[0][X+4*Y] = put_vc1_mspel_mc##X##Y##_16_c; \
    dsp->avg_vc1_mspel_pixels_tab[1][X+4*Y] = avg_vc1_mspel_mc##X##Y##_c; \
    dsp->avg_vc1_mspel_pixels_tab[0][X+4*Y] = avg_vc1_mspel_mc##X##Y##_16_c

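/*
 * A minimal usage sketch (the decoder's real call site lives in vc1dec.c):
 *
 *     VC1DSPContext dsp;
 *     ff_vc1dsp_init(&dsp);
 *     dsp.vc1_inv_trans_8x8(block);
 *
 * The C functions installed below are the baseline; the arch-specific init
 * calls at the end of ff_vc1dsp_init() override individual entries where
 * faster implementations exist.
 */
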
av_cold void ff_vc1dsp_init(VC1DSPContext *dsp)
{
    dsp->vc1_inv_trans_8x8    = vc1_inv_trans_8x8_c;
    dsp->vc1_inv_trans_4x8    = vc1_inv_trans_4x8_c;
    dsp->vc1_inv_trans_8x4    = vc1_inv_trans_8x4_c;
    dsp->vc1_inv_trans_4x4    = vc1_inv_trans_4x4_c;
    dsp->vc1_inv_trans_8x8_dc = vc1_inv_trans_8x8_dc_c;
    dsp->vc1_inv_trans_4x8_dc = vc1_inv_trans_4x8_dc_c;
    dsp->vc1_inv_trans_8x4_dc = vc1_inv_trans_8x4_dc_c;
    dsp->vc1_inv_trans_4x4_dc = vc1_inv_trans_4x4_dc_c;

    dsp->vc1_h_overlap   = vc1_h_overlap_c;
    dsp->vc1_v_overlap   = vc1_v_overlap_c;
    dsp->vc1_h_s_overlap = vc1_h_s_overlap_c;
    dsp->vc1_v_s_overlap = vc1_v_s_overlap_c;

    dsp->vc1_v_loop_filter4  = vc1_v_loop_filter4_c;
    dsp->vc1_h_loop_filter4  = vc1_h_loop_filter4_c;
    dsp->vc1_v_loop_filter8  = vc1_v_loop_filter8_c;
    dsp->vc1_h_loop_filter8  = vc1_h_loop_filter8_c;
    dsp->vc1_v_loop_filter16 = vc1_v_loop_filter16_c;
    dsp->vc1_h_loop_filter16 = vc1_h_loop_filter16_c;

    dsp->put_vc1_mspel_pixels_tab[0][0] = put_pixels16x16_c;
    dsp->avg_vc1_mspel_pixels_tab[0][0] = avg_pixels16x16_c;
    dsp->put_vc1_mspel_pixels_tab[1][0] = put_pixels8x8_c;
    dsp->avg_vc1_mspel_pixels_tab[1][0] = avg_pixels8x8_c;
    FN_ASSIGN(0, 1);
    FN_ASSIGN(0, 2);
    FN_ASSIGN(0, 3);

    FN_ASSIGN(1, 0);
    FN_ASSIGN(1, 1);
    FN_ASSIGN(1, 2);
    FN_ASSIGN(1, 3);

    FN_ASSIGN(2, 0);
    FN_ASSIGN(2, 1);
    FN_ASSIGN(2, 2);
    FN_ASSIGN(2, 3);

    FN_ASSIGN(3, 0);
    FN_ASSIGN(3, 1);
    FN_ASSIGN(3, 2);
    FN_ASSIGN(3, 3);

    dsp->put_no_rnd_vc1_chroma_pixels_tab[0] = put_no_rnd_vc1_chroma_mc8_c;
    dsp->avg_no_rnd_vc1_chroma_pixels_tab[0] = avg_no_rnd_vc1_chroma_mc8_c;
    dsp->put_no_rnd_vc1_chroma_pixels_tab[1] = put_no_rnd_vc1_chroma_mc4_c;
    dsp->avg_no_rnd_vc1_chroma_pixels_tab[1] = avg_no_rnd_vc1_chroma_mc4_c;

#if CONFIG_WMV3IMAGE_DECODER || CONFIG_VC1IMAGE_DECODER
    dsp->sprite_h                 = sprite_h_c;
    dsp->sprite_v_single          = sprite_v_single_c;
    dsp->sprite_v_double_noscale  = sprite_v_double_noscale_c;
    dsp->sprite_v_double_onescale = sprite_v_double_onescale_c;
    dsp->sprite_v_double_twoscale = sprite_v_double_twoscale_c;
#endif /* CONFIG_WMV3IMAGE_DECODER || CONFIG_VC1IMAGE_DECODER */

    dsp->startcode_find_candidate = ff_startcode_find_candidate_c;

    if (ARCH_AARCH64)
        ff_vc1dsp_init_aarch64(dsp);
    if (ARCH_ARM)
        ff_vc1dsp_init_arm(dsp);
    if (ARCH_PPC)
        ff_vc1dsp_init_ppc(dsp);
    if (ARCH_X86)
        ff_vc1dsp_init_x86(dsp);
    if (ARCH_MIPS)
        ff_vc1dsp_init_mips(dsp);
}