/*
 * VP9 compatible video decoder
 *
 * Copyright (C) 2013 Ronald S. Bultje <rsbultje gmail com>
 * Copyright (C) 2013 Clément Bœsch <u pkh me>
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */
24 #include "libavutil/common.h"
25 #include "bit_depth_template.c"
28 // FIXME see whether we can merge parts of this (perhaps at least 4x4 and 8x8)
29 // back with h264pred.[ch]
31 static void vert_4x4_c(uint8_t *_dst, ptrdiff_t stride,
32 const uint8_t *left, const uint8_t *_top)
34 pixel *dst = (pixel *) _dst;
35 const pixel *top = (const pixel *) _top;
36 pixel4 p4 = AV_RN4PA(top);
38 stride /= sizeof(pixel);
39 AV_WN4PA(dst + stride * 0, p4);
40 AV_WN4PA(dst + stride * 1, p4);
41 AV_WN4PA(dst + stride * 2, p4);
42 AV_WN4PA(dst + stride * 3, p4);
45 static void vert_8x8_c(uint8_t *_dst, ptrdiff_t stride,
46 const uint8_t *left, const uint8_t *_top)
48 pixel *dst = (pixel *) _dst;
49 const pixel *top = (const pixel *) _top;
50 pixel4 p4a = AV_RN4PA(top + 0);
51 pixel4 p4b = AV_RN4PA(top + 4);
54 stride /= sizeof(pixel);
55 for (y = 0; y < 8; y++) {
56 AV_WN4PA(dst + 0, p4a);
57 AV_WN4PA(dst + 4, p4b);
62 static void vert_16x16_c(uint8_t *_dst, ptrdiff_t stride,
63 const uint8_t *left, const uint8_t *_top)
65 pixel *dst = (pixel *) _dst;
66 const pixel *top = (const pixel *) _top;
67 pixel4 p4a = AV_RN4PA(top + 0);
68 pixel4 p4b = AV_RN4PA(top + 4);
69 pixel4 p4c = AV_RN4PA(top + 8);
70 pixel4 p4d = AV_RN4PA(top + 12);
73 stride /= sizeof(pixel);
74 for (y = 0; y < 16; y++) {
75 AV_WN4PA(dst + 0, p4a);
76 AV_WN4PA(dst + 4, p4b);
77 AV_WN4PA(dst + 8, p4c);
78 AV_WN4PA(dst + 12, p4d);
83 static void vert_32x32_c(uint8_t *_dst, ptrdiff_t stride,
84 const uint8_t *left, const uint8_t *_top)
86 pixel *dst = (pixel *) _dst;
87 const pixel *top = (const pixel *) _top;
88 pixel4 p4a = AV_RN4PA(top + 0);
89 pixel4 p4b = AV_RN4PA(top + 4);
90 pixel4 p4c = AV_RN4PA(top + 8);
91 pixel4 p4d = AV_RN4PA(top + 12);
92 pixel4 p4e = AV_RN4PA(top + 16);
93 pixel4 p4f = AV_RN4PA(top + 20);
94 pixel4 p4g = AV_RN4PA(top + 24);
95 pixel4 p4h = AV_RN4PA(top + 28);
98 stride /= sizeof(pixel);
99 for (y = 0; y < 32; y++) {
100 AV_WN4PA(dst + 0, p4a);
101 AV_WN4PA(dst + 4, p4b);
102 AV_WN4PA(dst + 8, p4c);
103 AV_WN4PA(dst + 12, p4d);
104 AV_WN4PA(dst + 16, p4e);
105 AV_WN4PA(dst + 20, p4f);
106 AV_WN4PA(dst + 24, p4g);
107 AV_WN4PA(dst + 28, p4h);
112 static void hor_4x4_c(uint8_t *_dst, ptrdiff_t stride,
113 const uint8_t *_left, const uint8_t *top)
115 pixel *dst = (pixel *) _dst;
116 const pixel *left = (const pixel *) _left;
118 stride /= sizeof(pixel);
119 AV_WN4PA(dst + stride * 0, PIXEL_SPLAT_X4(left[3]));
120 AV_WN4PA(dst + stride * 1, PIXEL_SPLAT_X4(left[2]));
121 AV_WN4PA(dst + stride * 2, PIXEL_SPLAT_X4(left[1]));
122 AV_WN4PA(dst + stride * 3, PIXEL_SPLAT_X4(left[0]));
125 static void hor_8x8_c(uint8_t *_dst, ptrdiff_t stride,
126 const uint8_t *_left, const uint8_t *top)
128 pixel *dst = (pixel *) _dst;
129 const pixel *left = (const pixel *) _left;
132 stride /= sizeof(pixel);
133 for (y = 0; y < 8; y++) {
134 pixel4 p4 = PIXEL_SPLAT_X4(left[7 - y]);
136 AV_WN4PA(dst + 0, p4);
137 AV_WN4PA(dst + 4, p4);
142 static void hor_16x16_c(uint8_t *_dst, ptrdiff_t stride,
143 const uint8_t *_left, const uint8_t *top)
145 pixel *dst = (pixel *) _dst;
146 const pixel *left = (const pixel *) _left;
149 stride /= sizeof(pixel);
150 for (y = 0; y < 16; y++) {
151 pixel4 p4 = PIXEL_SPLAT_X4(left[15 - y]);
153 AV_WN4PA(dst + 0, p4);
154 AV_WN4PA(dst + 4, p4);
155 AV_WN4PA(dst + 8, p4);
156 AV_WN4PA(dst + 12, p4);
161 static void hor_32x32_c(uint8_t *_dst, ptrdiff_t stride,
162 const uint8_t *_left, const uint8_t *top)
164 pixel *dst = (pixel *) _dst;
165 const pixel *left = (const pixel *) _left;
168 stride /= sizeof(pixel);
169 for (y = 0; y < 32; y++) {
170 pixel4 p4 = PIXEL_SPLAT_X4(left[31 - y]);
172 AV_WN4PA(dst + 0, p4);
173 AV_WN4PA(dst + 4, p4);
174 AV_WN4PA(dst + 8, p4);
175 AV_WN4PA(dst + 12, p4);
176 AV_WN4PA(dst + 16, p4);
177 AV_WN4PA(dst + 20, p4);
178 AV_WN4PA(dst + 24, p4);
179 AV_WN4PA(dst + 28, p4);
184 static void tm_4x4_c(uint8_t *_dst, ptrdiff_t stride,
185 const uint8_t *_left, const uint8_t *_top)
187 pixel *dst = (pixel *) _dst;
188 const pixel *left = (const pixel *) _left;
189 const pixel *top = (const pixel *) _top;
192 stride /= sizeof(pixel);
193 for (y = 0; y < 4; y++) {
194 int l_m_tl = left[3 - y] - tl;
196 dst[0] = av_clip_pixel(top[0] + l_m_tl);
197 dst[1] = av_clip_pixel(top[1] + l_m_tl);
198 dst[2] = av_clip_pixel(top[2] + l_m_tl);
199 dst[3] = av_clip_pixel(top[3] + l_m_tl);
204 static void tm_8x8_c(uint8_t *_dst, ptrdiff_t stride,
205 const uint8_t *_left, const uint8_t *_top)
207 pixel *dst = (pixel *) _dst;
208 const pixel *left = (const pixel *) _left;
209 const pixel *top = (const pixel *) _top;
212 stride /= sizeof(pixel);
213 for (y = 0; y < 8; y++) {
214 int l_m_tl = left[7 - y] - tl;
216 dst[0] = av_clip_pixel(top[0] + l_m_tl);
217 dst[1] = av_clip_pixel(top[1] + l_m_tl);
218 dst[2] = av_clip_pixel(top[2] + l_m_tl);
219 dst[3] = av_clip_pixel(top[3] + l_m_tl);
220 dst[4] = av_clip_pixel(top[4] + l_m_tl);
221 dst[5] = av_clip_pixel(top[5] + l_m_tl);
222 dst[6] = av_clip_pixel(top[6] + l_m_tl);
223 dst[7] = av_clip_pixel(top[7] + l_m_tl);
228 static void tm_16x16_c(uint8_t *_dst, ptrdiff_t stride,
229 const uint8_t *_left, const uint8_t *_top)
231 pixel *dst = (pixel *) _dst;
232 const pixel *left = (const pixel *) _left;
233 const pixel *top = (const pixel *) _top;
236 stride /= sizeof(pixel);
237 for (y = 0; y < 16; y++) {
238 int l_m_tl = left[15 - y] - tl;
240 dst[ 0] = av_clip_pixel(top[ 0] + l_m_tl);
241 dst[ 1] = av_clip_pixel(top[ 1] + l_m_tl);
242 dst[ 2] = av_clip_pixel(top[ 2] + l_m_tl);
243 dst[ 3] = av_clip_pixel(top[ 3] + l_m_tl);
244 dst[ 4] = av_clip_pixel(top[ 4] + l_m_tl);
245 dst[ 5] = av_clip_pixel(top[ 5] + l_m_tl);
246 dst[ 6] = av_clip_pixel(top[ 6] + l_m_tl);
247 dst[ 7] = av_clip_pixel(top[ 7] + l_m_tl);
248 dst[ 8] = av_clip_pixel(top[ 8] + l_m_tl);
249 dst[ 9] = av_clip_pixel(top[ 9] + l_m_tl);
250 dst[10] = av_clip_pixel(top[10] + l_m_tl);
251 dst[11] = av_clip_pixel(top[11] + l_m_tl);
252 dst[12] = av_clip_pixel(top[12] + l_m_tl);
253 dst[13] = av_clip_pixel(top[13] + l_m_tl);
254 dst[14] = av_clip_pixel(top[14] + l_m_tl);
255 dst[15] = av_clip_pixel(top[15] + l_m_tl);
260 static void tm_32x32_c(uint8_t *_dst, ptrdiff_t stride,
261 const uint8_t *_left, const uint8_t *_top)
263 pixel *dst = (pixel *) _dst;
264 const pixel *left = (const pixel *) _left;
265 const pixel *top = (const pixel *) _top;
268 stride /= sizeof(pixel);
269 for (y = 0; y < 32; y++) {
270 int l_m_tl = left[31 - y] - tl;
272 dst[ 0] = av_clip_pixel(top[ 0] + l_m_tl);
273 dst[ 1] = av_clip_pixel(top[ 1] + l_m_tl);
274 dst[ 2] = av_clip_pixel(top[ 2] + l_m_tl);
275 dst[ 3] = av_clip_pixel(top[ 3] + l_m_tl);
276 dst[ 4] = av_clip_pixel(top[ 4] + l_m_tl);
277 dst[ 5] = av_clip_pixel(top[ 5] + l_m_tl);
278 dst[ 6] = av_clip_pixel(top[ 6] + l_m_tl);
279 dst[ 7] = av_clip_pixel(top[ 7] + l_m_tl);
280 dst[ 8] = av_clip_pixel(top[ 8] + l_m_tl);
281 dst[ 9] = av_clip_pixel(top[ 9] + l_m_tl);
282 dst[10] = av_clip_pixel(top[10] + l_m_tl);
283 dst[11] = av_clip_pixel(top[11] + l_m_tl);
284 dst[12] = av_clip_pixel(top[12] + l_m_tl);
285 dst[13] = av_clip_pixel(top[13] + l_m_tl);
286 dst[14] = av_clip_pixel(top[14] + l_m_tl);
287 dst[15] = av_clip_pixel(top[15] + l_m_tl);
288 dst[16] = av_clip_pixel(top[16] + l_m_tl);
289 dst[17] = av_clip_pixel(top[17] + l_m_tl);
290 dst[18] = av_clip_pixel(top[18] + l_m_tl);
291 dst[19] = av_clip_pixel(top[19] + l_m_tl);
292 dst[20] = av_clip_pixel(top[20] + l_m_tl);
293 dst[21] = av_clip_pixel(top[21] + l_m_tl);
294 dst[22] = av_clip_pixel(top[22] + l_m_tl);
295 dst[23] = av_clip_pixel(top[23] + l_m_tl);
296 dst[24] = av_clip_pixel(top[24] + l_m_tl);
297 dst[25] = av_clip_pixel(top[25] + l_m_tl);
298 dst[26] = av_clip_pixel(top[26] + l_m_tl);
299 dst[27] = av_clip_pixel(top[27] + l_m_tl);
300 dst[28] = av_clip_pixel(top[28] + l_m_tl);
301 dst[29] = av_clip_pixel(top[29] + l_m_tl);
302 dst[30] = av_clip_pixel(top[30] + l_m_tl);
303 dst[31] = av_clip_pixel(top[31] + l_m_tl);
308 static void dc_4x4_c(uint8_t *_dst, ptrdiff_t stride,
309 const uint8_t *_left, const uint8_t *_top)
311 pixel *dst = (pixel *) _dst;
312 const pixel *left = (const pixel *) _left;
313 const pixel *top = (const pixel *) _top;
314 pixel4 dc = PIXEL_SPLAT_X4((left[0] + left[1] + left[2] + left[3] +
315 top[0] + top[1] + top[2] + top[3] + 4) >> 3);
317 stride /= sizeof(pixel);
318 AV_WN4PA(dst + stride * 0, dc);
319 AV_WN4PA(dst + stride * 1, dc);
320 AV_WN4PA(dst + stride * 2, dc);
321 AV_WN4PA(dst + stride * 3, dc);
324 static void dc_8x8_c(uint8_t *_dst, ptrdiff_t stride,
325 const uint8_t *_left, const uint8_t *_top)
327 pixel *dst = (pixel *) _dst;
328 const pixel *left = (const pixel *) _left;
329 const pixel *top = (const pixel *) _top;
330 pixel4 dc = PIXEL_SPLAT_X4
331 ((left[0] + left[1] + left[2] + left[3] + left[4] + left[5] +
332 left[6] + left[7] + top[0] + top[1] + top[2] + top[3] +
333 top[4] + top[5] + top[6] + top[7] + 8) >> 4);
336 stride /= sizeof(pixel);
337 for (y = 0; y < 8; y++) {
338 AV_WN4PA(dst + 0, dc);
339 AV_WN4PA(dst + 4, dc);
344 static void dc_16x16_c(uint8_t *_dst, ptrdiff_t stride,
345 const uint8_t *_left, const uint8_t *_top)
347 pixel *dst = (pixel *) _dst;
348 const pixel *left = (const pixel *) _left;
349 const pixel *top = (const pixel *) _top;
350 pixel4 dc = PIXEL_SPLAT_X4
351 ((left[0] + left[1] + left[2] + left[3] + left[4] + left[5] + left[6] +
352 left[7] + left[8] + left[9] + left[10] + left[11] + left[12] +
353 left[13] + left[14] + left[15] + top[0] + top[1] + top[2] + top[3] +
354 top[4] + top[5] + top[6] + top[7] + top[8] + top[9] + top[10] +
355 top[11] + top[12] + top[13] + top[14] + top[15] + 16) >> 5);
358 stride /= sizeof(pixel);
359 for (y = 0; y < 16; y++) {
360 AV_WN4PA(dst + 0, dc);
361 AV_WN4PA(dst + 4, dc);
362 AV_WN4PA(dst + 8, dc);
363 AV_WN4PA(dst + 12, dc);
368 static void dc_32x32_c(uint8_t *_dst, ptrdiff_t stride,
369 const uint8_t *_left, const uint8_t *_top)
371 pixel *dst = (pixel *) _dst;
372 const pixel *left = (const pixel *) _left;
373 const pixel *top = (const pixel *) _top;
374 pixel4 dc = PIXEL_SPLAT_X4
375 ((left[0] + left[1] + left[2] + left[3] + left[4] + left[5] + left[6] +
376 left[7] + left[8] + left[9] + left[10] + left[11] + left[12] +
377 left[13] + left[14] + left[15] + left[16] + left[17] + left[18] +
378 left[19] + left[20] + left[21] + left[22] + left[23] + left[24] +
379 left[25] + left[26] + left[27] + left[28] + left[29] + left[30] +
380 left[31] + top[0] + top[1] + top[2] + top[3] + top[4] + top[5] +
381 top[6] + top[7] + top[8] + top[9] + top[10] + top[11] + top[12] +
382 top[13] + top[14] + top[15] + top[16] + top[17] + top[18] + top[19] +
383 top[20] + top[21] + top[22] + top[23] + top[24] + top[25] + top[26] +
384 top[27] + top[28] + top[29] + top[30] + top[31] + 32) >> 6);
387 stride /= sizeof(pixel);
388 for (y = 0; y < 32; y++) {
389 AV_WN4PA(dst + 0, dc);
390 AV_WN4PA(dst + 4, dc);
391 AV_WN4PA(dst + 8, dc);
392 AV_WN4PA(dst + 12, dc);
393 AV_WN4PA(dst + 16, dc);
394 AV_WN4PA(dst + 20, dc);
395 AV_WN4PA(dst + 24, dc);
396 AV_WN4PA(dst + 28, dc);
401 static void dc_left_4x4_c(uint8_t *_dst, ptrdiff_t stride,
402 const uint8_t *_left, const uint8_t *top)
404 pixel *dst = (pixel *) _dst;
405 const pixel *left = (const pixel *) _left;
406 pixel4 dc = PIXEL_SPLAT_X4((left[0] + left[1] + left[2] + left[3] + 2) >> 2);
408 stride /= sizeof(pixel);
409 AV_WN4PA(dst + stride * 0, dc);
410 AV_WN4PA(dst + stride * 1, dc);
411 AV_WN4PA(dst + stride * 2, dc);
412 AV_WN4PA(dst + stride * 3, dc);
415 static void dc_left_8x8_c(uint8_t *_dst, ptrdiff_t stride,
416 const uint8_t *_left, const uint8_t *top)
418 pixel *dst = (pixel *) _dst;
419 const pixel *left = (const pixel *) _left;
420 pixel4 dc = PIXEL_SPLAT_X4
421 ((left[0] + left[1] + left[2] + left[3] +
422 left[4] + left[5] + left[6] + left[7] + 4) >> 3);
425 stride /= sizeof(pixel);
426 for (y = 0; y < 8; y++) {
427 AV_WN4PA(dst + 0, dc);
428 AV_WN4PA(dst + 4, dc);
433 static void dc_left_16x16_c(uint8_t *_dst, ptrdiff_t stride,
434 const uint8_t *_left, const uint8_t *top)
436 pixel *dst = (pixel *) _dst;
437 const pixel *left = (const pixel *) _left;
438 pixel4 dc = PIXEL_SPLAT_X4
439 ((left[0] + left[1] + left[2] + left[3] + left[4] + left[5] +
440 left[6] + left[7] + left[8] + left[9] + left[10] + left[11] +
441 left[12] + left[13] + left[14] + left[15] + 8) >> 4);
444 stride /= sizeof(pixel);
445 for (y = 0; y < 16; y++) {
446 AV_WN4PA(dst + 0, dc);
447 AV_WN4PA(dst + 4, dc);
448 AV_WN4PA(dst + 8, dc);
449 AV_WN4PA(dst + 12, dc);
454 static void dc_left_32x32_c(uint8_t *_dst, ptrdiff_t stride,
455 const uint8_t *_left, const uint8_t *top)
457 pixel *dst = (pixel *) _dst;
458 const pixel *left = (const pixel *) _left;
459 pixel4 dc = PIXEL_SPLAT_X4
460 ((left[0] + left[1] + left[2] + left[3] + left[4] + left[5] +
461 left[6] + left[7] + left[8] + left[9] + left[10] + left[11] +
462 left[12] + left[13] + left[14] + left[15] + left[16] + left[17] +
463 left[18] + left[19] + left[20] + left[21] + left[22] + left[23] +
464 left[24] + left[25] + left[26] + left[27] + left[28] + left[29] +
465 left[30] + left[31] + 16) >> 5);
468 stride /= sizeof(pixel);
469 for (y = 0; y < 32; y++) {
470 AV_WN4PA(dst + 0, dc);
471 AV_WN4PA(dst + 4, dc);
472 AV_WN4PA(dst + 8, dc);
473 AV_WN4PA(dst + 12, dc);
474 AV_WN4PA(dst + 16, dc);
475 AV_WN4PA(dst + 20, dc);
476 AV_WN4PA(dst + 24, dc);
477 AV_WN4PA(dst + 28, dc);
482 static void dc_top_4x4_c(uint8_t *_dst, ptrdiff_t stride,
483 const uint8_t *left, const uint8_t *_top)
485 pixel *dst = (pixel *) _dst;
486 const pixel *top = (const pixel *) _top;
487 pixel4 dc = PIXEL_SPLAT_X4((top[0] + top[1] + top[2] + top[3] + 2) >> 2);
489 stride /= sizeof(pixel);
490 AV_WN4PA(dst + stride * 0, dc);
491 AV_WN4PA(dst + stride * 1, dc);
492 AV_WN4PA(dst + stride * 2, dc);
493 AV_WN4PA(dst + stride * 3, dc);
496 static void dc_top_8x8_c(uint8_t *_dst, ptrdiff_t stride,
497 const uint8_t *left, const uint8_t *_top)
499 pixel *dst = (pixel *) _dst;
500 const pixel *top = (const pixel *) _top;
501 pixel4 dc = PIXEL_SPLAT_X4
502 ((top[0] + top[1] + top[2] + top[3] +
503 top[4] + top[5] + top[6] + top[7] + 4) >> 3);
506 stride /= sizeof(pixel);
507 for (y = 0; y < 8; y++) {
508 AV_WN4PA(dst + 0, dc);
509 AV_WN4PA(dst + 4, dc);
514 static void dc_top_16x16_c(uint8_t *_dst, ptrdiff_t stride,
515 const uint8_t *left, const uint8_t *_top)
517 pixel *dst = (pixel *) _dst;
518 const pixel *top = (const pixel *) _top;
519 pixel4 dc = PIXEL_SPLAT_X4
520 ((top[0] + top[1] + top[2] + top[3] + top[4] + top[5] +
521 top[6] + top[7] + top[8] + top[9] + top[10] + top[11] +
522 top[12] + top[13] + top[14] + top[15] + 8) >> 4);
525 stride /= sizeof(pixel);
526 for (y = 0; y < 16; y++) {
527 AV_WN4PA(dst + 0, dc);
528 AV_WN4PA(dst + 4, dc);
529 AV_WN4PA(dst + 8, dc);
530 AV_WN4PA(dst + 12, dc);
535 static void dc_top_32x32_c(uint8_t *_dst, ptrdiff_t stride,
536 const uint8_t *left, const uint8_t *_top)
538 pixel *dst = (pixel *) _dst;
539 const pixel *top = (const pixel *) _top;
540 pixel4 dc = PIXEL_SPLAT_X4
541 ((top[0] + top[1] + top[2] + top[3] + top[4] + top[5] +
542 top[6] + top[7] + top[8] + top[9] + top[10] + top[11] +
543 top[12] + top[13] + top[14] + top[15] + top[16] + top[17] +
544 top[18] + top[19] + top[20] + top[21] + top[22] + top[23] +
545 top[24] + top[25] + top[26] + top[27] + top[28] + top[29] +
546 top[30] + top[31] + 16) >> 5);
549 stride /= sizeof(pixel);
550 for (y = 0; y < 32; y++) {
551 AV_WN4PA(dst + 0, dc);
552 AV_WN4PA(dst + 4, dc);
553 AV_WN4PA(dst + 8, dc);
554 AV_WN4PA(dst + 12, dc);
555 AV_WN4PA(dst + 16, dc);
556 AV_WN4PA(dst + 20, dc);
557 AV_WN4PA(dst + 24, dc);
558 AV_WN4PA(dst + 28, dc);
563 static void dc_128_4x4_c(uint8_t *_dst, ptrdiff_t stride,
564 const uint8_t *left, const uint8_t *top)
566 pixel *dst = (pixel *) _dst;
567 pixel4 val = PIXEL_SPLAT_X4(128 << (BIT_DEPTH - 8));
569 stride /= sizeof(pixel);
570 AV_WN4PA(dst + stride * 0, val);
571 AV_WN4PA(dst + stride * 1, val);
572 AV_WN4PA(dst + stride * 2, val);
573 AV_WN4PA(dst + stride * 3, val);
576 static void dc_128_8x8_c(uint8_t *_dst, ptrdiff_t stride,
577 const uint8_t *left, const uint8_t *top)
579 pixel *dst = (pixel *) _dst;
580 pixel4 val = PIXEL_SPLAT_X4(128 << (BIT_DEPTH - 8));
583 stride /= sizeof(pixel);
584 for (y = 0; y < 8; y++) {
585 AV_WN4PA(dst + 0, val);
586 AV_WN4PA(dst + 4, val);
591 static void dc_128_16x16_c(uint8_t *_dst, ptrdiff_t stride,
592 const uint8_t *left, const uint8_t *top)
594 pixel *dst = (pixel *) _dst;
595 pixel4 val = PIXEL_SPLAT_X4(128 << (BIT_DEPTH - 8));
598 stride /= sizeof(pixel);
599 for (y = 0; y < 16; y++) {
600 AV_WN4PA(dst + 0, val);
601 AV_WN4PA(dst + 4, val);
602 AV_WN4PA(dst + 8, val);
603 AV_WN4PA(dst + 12, val);
608 static void dc_128_32x32_c(uint8_t *_dst, ptrdiff_t stride,
609 const uint8_t *left, const uint8_t *top)
611 pixel *dst = (pixel *) _dst;
612 pixel4 val = PIXEL_SPLAT_X4(128 << (BIT_DEPTH - 8));
615 stride /= sizeof(pixel);
616 for (y = 0; y < 32; y++) {
617 AV_WN4PA(dst + 0, val);
618 AV_WN4PA(dst + 4, val);
619 AV_WN4PA(dst + 8, val);
620 AV_WN4PA(dst + 12, val);
621 AV_WN4PA(dst + 16, val);
622 AV_WN4PA(dst + 20, val);
623 AV_WN4PA(dst + 24, val);
624 AV_WN4PA(dst + 28, val);
629 static void dc_127_4x4_c(uint8_t *_dst, ptrdiff_t stride,
630 const uint8_t *left, const uint8_t *top)
632 pixel *dst = (pixel *) _dst;
633 pixel4 val = PIXEL_SPLAT_X4((128 << (BIT_DEPTH - 8)) - 1);
635 stride /= sizeof(pixel);
636 AV_WN4PA(dst + stride * 0, val);
637 AV_WN4PA(dst + stride * 1, val);
638 AV_WN4PA(dst + stride * 2, val);
639 AV_WN4PA(dst + stride * 3, val);}
641 static void dc_127_8x8_c(uint8_t *_dst, ptrdiff_t stride,
642 const uint8_t *left, const uint8_t *top)
644 pixel *dst = (pixel *) _dst;
645 pixel4 val = PIXEL_SPLAT_X4((128 << (BIT_DEPTH - 8)) - 1);
648 stride /= sizeof(pixel);
649 for (y = 0; y < 8; y++) {
650 AV_WN4PA(dst + 0, val);
651 AV_WN4PA(dst + 4, val);
656 static void dc_127_16x16_c(uint8_t *_dst, ptrdiff_t stride,
657 const uint8_t *left, const uint8_t *top)
659 pixel *dst = (pixel *) _dst;
660 pixel4 val = PIXEL_SPLAT_X4((128 << (BIT_DEPTH - 8)) - 1);
663 stride /= sizeof(pixel);
664 for (y = 0; y < 16; y++) {
665 AV_WN4PA(dst + 0, val);
666 AV_WN4PA(dst + 4, val);
667 AV_WN4PA(dst + 8, val);
668 AV_WN4PA(dst + 12, val);
673 static void dc_127_32x32_c(uint8_t *_dst, ptrdiff_t stride,
674 const uint8_t *left, const uint8_t *top)
676 pixel *dst = (pixel *) _dst;
677 pixel4 val = PIXEL_SPLAT_X4((128 << (BIT_DEPTH - 8)) - 1);
680 stride /= sizeof(pixel);
681 for (y = 0; y < 32; y++) {
682 AV_WN4PA(dst + 0, val);
683 AV_WN4PA(dst + 4, val);
684 AV_WN4PA(dst + 8, val);
685 AV_WN4PA(dst + 12, val);
686 AV_WN4PA(dst + 16, val);
687 AV_WN4PA(dst + 20, val);
688 AV_WN4PA(dst + 24, val);
689 AV_WN4PA(dst + 28, val);
694 static void dc_129_4x4_c(uint8_t *_dst, ptrdiff_t stride,
695 const uint8_t *left, const uint8_t *top)
697 pixel *dst = (pixel *) _dst;
698 pixel4 val = PIXEL_SPLAT_X4((128 << (BIT_DEPTH - 8)) + 1);
700 stride /= sizeof(pixel);
701 AV_WN4PA(dst + stride * 0, val);
702 AV_WN4PA(dst + stride * 1, val);
703 AV_WN4PA(dst + stride * 2, val);
704 AV_WN4PA(dst + stride * 3, val);
707 static void dc_129_8x8_c(uint8_t *_dst, ptrdiff_t stride,
708 const uint8_t *left, const uint8_t *top)
710 pixel *dst = (pixel *) _dst;
711 pixel4 val = PIXEL_SPLAT_X4((128 << (BIT_DEPTH - 8)) + 1);
714 stride /= sizeof(pixel);
715 for (y = 0; y < 8; y++) {
716 AV_WN4PA(dst + 0, val);
717 AV_WN4PA(dst + 4, val);
722 static void dc_129_16x16_c(uint8_t *_dst, ptrdiff_t stride,
723 const uint8_t *left, const uint8_t *top)
725 pixel *dst = (pixel *) _dst;
726 pixel4 val = PIXEL_SPLAT_X4((128 << (BIT_DEPTH - 8)) + 1);
729 stride /= sizeof(pixel);
730 for (y = 0; y < 16; y++) {
731 AV_WN4PA(dst + 0, val);
732 AV_WN4PA(dst + 4, val);
733 AV_WN4PA(dst + 8, val);
734 AV_WN4PA(dst + 12, val);
739 static void dc_129_32x32_c(uint8_t *_dst, ptrdiff_t stride,
740 const uint8_t *left, const uint8_t *top)
742 pixel *dst = (pixel *) _dst;
743 pixel4 val = PIXEL_SPLAT_X4((128 << (BIT_DEPTH - 8)) + 1);
746 stride /= sizeof(pixel);
747 for (y = 0; y < 32; y++) {
748 AV_WN4PA(dst + 0, val);
749 AV_WN4PA(dst + 4, val);
750 AV_WN4PA(dst + 8, val);
751 AV_WN4PA(dst + 12, val);
752 AV_WN4PA(dst + 16, val);
753 AV_WN4PA(dst + 20, val);
754 AV_WN4PA(dst + 24, val);
755 AV_WN4PA(dst + 28, val);
761 #define memset_bpc memset
763 static inline void memset_bpc(uint16_t *dst, int val, int len) {
765 for (n = 0; n < len; n++) {
771 #define DST(x, y) dst[(x) + (y) * stride]
773 static void diag_downleft_4x4_c(uint8_t *_dst, ptrdiff_t stride,
774 const uint8_t *left, const uint8_t *_top)
776 pixel *dst = (pixel *) _dst;
777 const pixel *top = (const pixel *) _top;
778 int a0 = top[0], a1 = top[1], a2 = top[2], a3 = top[3],
779 a4 = top[4], a5 = top[5], a6 = top[6], a7 = top[7];
781 stride /= sizeof(pixel);
782 DST(0,0) = (a0 + a1 * 2 + a2 + 2) >> 2;
783 DST(1,0) = DST(0,1) = (a1 + a2 * 2 + a3 + 2) >> 2;
784 DST(2,0) = DST(1,1) = DST(0,2) = (a2 + a3 * 2 + a4 + 2) >> 2;
785 DST(3,0) = DST(2,1) = DST(1,2) = DST(0,3) = (a3 + a4 * 2 + a5 + 2) >> 2;
786 DST(3,1) = DST(2,2) = DST(1,3) = (a4 + a5 * 2 + a6 + 2) >> 2;
787 DST(3,2) = DST(2,3) = (a5 + a6 * 2 + a7 + 2) >> 2;
788 DST(3,3) = a7; // note: this is different from vp8 and such
791 #define def_diag_downleft(size) \
792 static void diag_downleft_##size##x##size##_c(uint8_t *_dst, ptrdiff_t stride, \
793 const uint8_t *left, const uint8_t *_top) \
795 pixel *dst = (pixel *) _dst; \
796 const pixel *top = (const pixel *) _top; \
800 stride /= sizeof(pixel); \
801 for (i = 0; i < size - 2; i++) \
802 v[i] = (top[i] + top[i + 1] * 2 + top[i + 2] + 2) >> 2; \
803 v[size - 2] = (top[size - 2] + top[size - 1] * 3 + 2) >> 2; \
805 for (j = 0; j < size; j++) { \
806 memcpy(dst + j*stride, v + j, (size - 1 - j) * sizeof(pixel)); \
807 memset_bpc(dst + j*stride + size - 1 - j, top[size - 1], j + 1); \
812 def_diag_downleft(16)
813 def_diag_downleft(32)
815 static void diag_downright_4x4_c(uint8_t *_dst, ptrdiff_t stride,
816 const uint8_t *_left, const uint8_t *_top)
818 pixel *dst = (pixel *) _dst;
819 const pixel *top = (const pixel *) _top;
820 const pixel *left = (const pixel *) _left;
821 int tl = top[-1], a0 = top[0], a1 = top[1], a2 = top[2], a3 = top[3],
822 l0 = left[3], l1 = left[2], l2 = left[1], l3 = left[0];
824 stride /= sizeof(pixel);
825 DST(0,3) = (l1 + l2 * 2 + l3 + 2) >> 2;
826 DST(0,2) = DST(1,3) = (l0 + l1 * 2 + l2 + 2) >> 2;
827 DST(0,1) = DST(1,2) = DST(2,3) = (tl + l0 * 2 + l1 + 2) >> 2;
828 DST(0,0) = DST(1,1) = DST(2,2) = DST(3,3) = (l0 + tl * 2 + a0 + 2) >> 2;
829 DST(1,0) = DST(2,1) = DST(3,2) = (tl + a0 * 2 + a1 + 2) >> 2;
830 DST(2,0) = DST(3,1) = (a0 + a1 * 2 + a2 + 2) >> 2;
831 DST(3,0) = (a1 + a2 * 2 + a3 + 2) >> 2;
834 #define def_diag_downright(size) \
835 static void diag_downright_##size##x##size##_c(uint8_t *_dst, ptrdiff_t stride, \
836 const uint8_t *_left, const uint8_t *_top) \
838 pixel *dst = (pixel *) _dst; \
839 const pixel *top = (const pixel *) _top; \
840 const pixel *left = (const pixel *) _left; \
842 pixel v[size + size - 1]; \
844 stride /= sizeof(pixel); \
845 for (i = 0; i < size - 2; i++) { \
846 v[i ] = (left[i] + left[i + 1] * 2 + left[i + 2] + 2) >> 2; \
847 v[size + 1 + i] = (top[i] + top[i + 1] * 2 + top[i + 2] + 2) >> 2; \
849 v[size - 2] = (left[size - 2] + left[size - 1] * 2 + top[-1] + 2) >> 2; \
850 v[size - 1] = (left[size - 1] + top[-1] * 2 + top[ 0] + 2) >> 2; \
851 v[size ] = (top[-1] + top[0] * 2 + top[ 1] + 2) >> 2; \
853 for (j = 0; j < size; j++) \
854 memcpy(dst + j*stride, v + size - 1 - j, size * sizeof(pixel)); \
857 def_diag_downright(8)
858 def_diag_downright(16)
859 def_diag_downright(32)
861 static void vert_right_4x4_c(uint8_t *_dst, ptrdiff_t stride,
862 const uint8_t *_left, const uint8_t *_top)
864 pixel *dst = (pixel *) _dst;
865 const pixel *top = (const pixel *) _top;
866 const pixel *left = (const pixel *) _left;
867 int tl = top[-1], a0 = top[0], a1 = top[1], a2 = top[2], a3 = top[3],
868 l0 = left[3], l1 = left[2], l2 = left[1];
870 stride /= sizeof(pixel);
871 DST(0,3) = (l0 + l1 * 2 + l2 + 2) >> 2;
872 DST(0,2) = (tl + l0 * 2 + l1 + 2) >> 2;
873 DST(0,0) = DST(1,2) = (tl + a0 + 1) >> 1;
874 DST(0,1) = DST(1,3) = (l0 + tl * 2 + a0 + 2) >> 2;
875 DST(1,0) = DST(2,2) = (a0 + a1 + 1) >> 1;
876 DST(1,1) = DST(2,3) = (tl + a0 * 2 + a1 + 2) >> 2;
877 DST(2,0) = DST(3,2) = (a1 + a2 + 1) >> 1;
878 DST(2,1) = DST(3,3) = (a0 + a1 * 2 + a2 + 2) >> 2;
879 DST(3,0) = (a2 + a3 + 1) >> 1;
880 DST(3,1) = (a1 + a2 * 2 + a3 + 2) >> 2;
883 #define def_vert_right(size) \
884 static void vert_right_##size##x##size##_c(uint8_t *_dst, ptrdiff_t stride, \
885 const uint8_t *_left, const uint8_t *_top) \
887 pixel *dst = (pixel *) _dst; \
888 const pixel *top = (const pixel *) _top; \
889 const pixel *left = (const pixel *) _left; \
891 pixel ve[size + size/2 - 1], vo[size + size/2 - 1]; \
893 stride /= sizeof(pixel); \
894 for (i = 0; i < size/2 - 2; i++) { \
895 vo[i] = (left[i*2 + 3] + left[i*2 + 2] * 2 + left[i*2 + 1] + 2) >> 2; \
896 ve[i] = (left[i*2 + 4] + left[i*2 + 3] * 2 + left[i*2 + 2] + 2) >> 2; \
898 vo[size/2 - 2] = (left[size - 1] + left[size - 2] * 2 + left[size - 3] + 2) >> 2; \
899 ve[size/2 - 2] = (top[-1] + left[size - 1] * 2 + left[size - 2] + 2) >> 2; \
901 ve[size/2 - 1] = (top[-1] + top[0] + 1) >> 1; \
902 vo[size/2 - 1] = (left[size - 1] + top[-1] * 2 + top[0] + 2) >> 2; \
903 for (i = 0; i < size - 1; i++) { \
904 ve[size/2 + i] = (top[i] + top[i + 1] + 1) >> 1; \
905 vo[size/2 + i] = (top[i - 1] + top[i] * 2 + top[i + 1] + 2) >> 2; \
908 for (j = 0; j < size / 2; j++) { \
909 memcpy(dst + j*2 *stride, ve + size/2 - 1 - j, size * sizeof(pixel)); \
910 memcpy(dst + (j*2 + 1)*stride, vo + size/2 - 1 - j, size * sizeof(pixel)); \
918 static void hor_down_4x4_c(uint8_t *_dst, ptrdiff_t stride,
919 const uint8_t *_left, const uint8_t *_top)
921 pixel *dst = (pixel *) _dst;
922 const pixel *top = (const pixel *) _top;
923 const pixel *left = (const pixel *) _left;
924 int l0 = left[3], l1 = left[2], l2 = left[1], l3 = left[0],
925 tl = top[-1], a0 = top[0], a1 = top[1], a2 = top[2];
927 stride /= sizeof(pixel);
928 DST(2,0) = (tl + a0 * 2 + a1 + 2) >> 2;
929 DST(3,0) = (a0 + a1 * 2 + a2 + 2) >> 2;
930 DST(0,0) = DST(2,1) = (tl + l0 + 1) >> 1;
931 DST(1,0) = DST(3,1) = (a0 + tl * 2 + l0 + 2) >> 2;
932 DST(0,1) = DST(2,2) = (l0 + l1 + 1) >> 1;
933 DST(1,1) = DST(3,2) = (tl + l0 * 2 + l1 + 2) >> 2;
934 DST(0,2) = DST(2,3) = (l1 + l2 + 1) >> 1;
935 DST(1,2) = DST(3,3) = (l0 + l1 * 2 + l2 + 2) >> 2;
936 DST(0,3) = (l2 + l3 + 1) >> 1;
937 DST(1,3) = (l1 + l2 * 2 + l3 + 2) >> 2;
940 #define def_hor_down(size) \
941 static void hor_down_##size##x##size##_c(uint8_t *_dst, ptrdiff_t stride, \
942 const uint8_t *_left, const uint8_t *_top) \
944 pixel *dst = (pixel *) _dst; \
945 const pixel *top = (const pixel *) _top; \
946 const pixel *left = (const pixel *) _left; \
948 pixel v[size * 3 - 2]; \
950 stride /= sizeof(pixel); \
951 for (i = 0; i < size - 2; i++) { \
952 v[i*2 ] = (left[i + 1] + left[i + 0] + 1) >> 1; \
953 v[i*2 + 1] = (left[i + 2] + left[i + 1] * 2 + left[i + 0] + 2) >> 2; \
954 v[size*2 + i] = (top[i - 1] + top[i] * 2 + top[i + 1] + 2) >> 2; \
956 v[size*2 - 2] = (top[-1] + left[size - 1] + 1) >> 1; \
957 v[size*2 - 4] = (left[size - 1] + left[size - 2] + 1) >> 1; \
958 v[size*2 - 1] = (top[0] + top[-1] * 2 + left[size - 1] + 2) >> 2; \
959 v[size*2 - 3] = (top[-1] + left[size - 1] * 2 + left[size - 2] + 2) >> 2; \
961 for (j = 0; j < size; j++) \
962 memcpy(dst + j*stride, v + size*2 - 2 - j*2, size * sizeof(pixel)); \
969 static void vert_left_4x4_c(uint8_t *_dst, ptrdiff_t stride,
970 const uint8_t *left, const uint8_t *_top)
972 pixel *dst = (pixel *) _dst;
973 const pixel *top = (const pixel *) _top;
974 int a0 = top[0], a1 = top[1], a2 = top[2], a3 = top[3],
975 a4 = top[4], a5 = top[5], a6 = top[6];
977 stride /= sizeof(pixel);
978 DST(0,0) = (a0 + a1 + 1) >> 1;
979 DST(0,1) = (a0 + a1 * 2 + a2 + 2) >> 2;
980 DST(1,0) = DST(0,2) = (a1 + a2 + 1) >> 1;
981 DST(1,1) = DST(0,3) = (a1 + a2 * 2 + a3 + 2) >> 2;
982 DST(2,0) = DST(1,2) = (a2 + a3 + 1) >> 1;
983 DST(2,1) = DST(1,3) = (a2 + a3 * 2 + a4 + 2) >> 2;
984 DST(3,0) = DST(2,2) = (a3 + a4 + 1) >> 1;
985 DST(3,1) = DST(2,3) = (a3 + a4 * 2 + a5 + 2) >> 2;
986 DST(3,2) = (a4 + a5 + 1) >> 1;
987 DST(3,3) = (a4 + a5 * 2 + a6 + 2) >> 2;
990 #define def_vert_left(size) \
991 static void vert_left_##size##x##size##_c(uint8_t *_dst, ptrdiff_t stride, \
992 const uint8_t *left, const uint8_t *_top) \
994 pixel *dst = (pixel *) _dst; \
995 const pixel *top = (const pixel *) _top; \
997 pixel ve[size - 1], vo[size - 1]; \
999 stride /= sizeof(pixel); \
1000 for (i = 0; i < size - 2; i++) { \
1001 ve[i] = (top[i] + top[i + 1] + 1) >> 1; \
1002 vo[i] = (top[i] + top[i + 1] * 2 + top[i + 2] + 2) >> 2; \
1004 ve[size - 2] = (top[size - 2] + top[size - 1] + 1) >> 1; \
1005 vo[size - 2] = (top[size - 2] + top[size - 1] * 3 + 2) >> 2; \
1007 for (j = 0; j < size / 2; j++) { \
1008 memcpy(dst + j*2 * stride, ve + j, (size - j - 1) * sizeof(pixel)); \
1009 memset_bpc(dst + j*2 * stride + size - j - 1, top[size - 1], j + 1); \
1010 memcpy(dst + (j*2 + 1) * stride, vo + j, (size - j - 1) * sizeof(pixel)); \
1011 memset_bpc(dst + (j*2 + 1) * stride + size - j - 1, top[size - 1], j + 1); \
1019 static void hor_up_4x4_c(uint8_t *_dst, ptrdiff_t stride,
1020 const uint8_t *_left, const uint8_t *top)
1022 pixel *dst = (pixel *) _dst;
1023 const pixel *left = (const pixel *) _left;
1024 int l0 = left[0], l1 = left[1], l2 = left[2], l3 = left[3];
1026 stride /= sizeof(pixel);
1027 DST(0,0) = (l0 + l1 + 1) >> 1;
1028 DST(1,0) = (l0 + l1 * 2 + l2 + 2) >> 2;
1029 DST(0,1) = DST(2,0) = (l1 + l2 + 1) >> 1;
1030 DST(1,1) = DST(3,0) = (l1 + l2 * 2 + l3 + 2) >> 2;
1031 DST(0,2) = DST(2,1) = (l2 + l3 + 1) >> 1;
1032 DST(1,2) = DST(3,1) = (l2 + l3 * 3 + 2) >> 2;
1033 DST(0,3) = DST(1,3) = DST(2,2) = DST(2,3) = DST(3,2) = DST(3,3) = l3;
1036 #define def_hor_up(size) \
1037 static void hor_up_##size##x##size##_c(uint8_t *_dst, ptrdiff_t stride, \
1038 const uint8_t *_left, const uint8_t *top) \
1040 pixel *dst = (pixel *) _dst; \
1041 const pixel *left = (const pixel *) _left; \
1043 pixel v[size*2 - 2]; \
1045 stride /= sizeof(pixel); \
1046 for (i = 0; i < size - 2; i++) { \
1047 v[i*2 ] = (left[i] + left[i + 1] + 1) >> 1; \
1048 v[i*2 + 1] = (left[i] + left[i + 1] * 2 + left[i + 2] + 2) >> 2; \
1050 v[size*2 - 4] = (left[size - 2] + left[size - 1] + 1) >> 1; \
1051 v[size*2 - 3] = (left[size - 2] + left[size - 1] * 3 + 2) >> 2; \
1053 for (j = 0; j < size / 2; j++) \
1054 memcpy(dst + j*stride, v + j*2, size * sizeof(pixel)); \
1055 for (j = size / 2; j < size; j++) { \
1056 memcpy(dst + j*stride, v + j*2, (size*2 - 2 - j*2) * sizeof(pixel)); \
1057 memset_bpc(dst + j*stride + size*2 - 2 - j*2, left[size - 1], \
/* Wires all 15 VP9 intra prediction modes for each transform size into the
 * VP9DSPContext function-pointer table. The init_intra_pred helper macro
 * expands one full row of assignments per (tx, sz) pair.
 * NOTE(review): sampled listing — the function's braces and blank lines
 * (orig. 1069, 1086, 1091, 1093) are missing from view. */
1068 static av_cold void vp9dsp_intrapred_init(VP9DSPContext *dsp)
1070 #define init_intra_pred(tx, sz) \
1071 dsp->intra_pred[tx][VERT_PRED] = vert_##sz##_c; \
1072 dsp->intra_pred[tx][HOR_PRED] = hor_##sz##_c; \
1073 dsp->intra_pred[tx][DC_PRED] = dc_##sz##_c; \
1074 dsp->intra_pred[tx][DIAG_DOWN_LEFT_PRED] = diag_downleft_##sz##_c; \
1075 dsp->intra_pred[tx][DIAG_DOWN_RIGHT_PRED] = diag_downright_##sz##_c; \
1076 dsp->intra_pred[tx][VERT_RIGHT_PRED] = vert_right_##sz##_c; \
1077 dsp->intra_pred[tx][HOR_DOWN_PRED] = hor_down_##sz##_c; \
1078 dsp->intra_pred[tx][VERT_LEFT_PRED] = vert_left_##sz##_c; \
1079 dsp->intra_pred[tx][HOR_UP_PRED] = hor_up_##sz##_c; \
1080 dsp->intra_pred[tx][TM_VP8_PRED] = tm_##sz##_c; \
1081 dsp->intra_pred[tx][LEFT_DC_PRED] = dc_left_##sz##_c; \
1082 dsp->intra_pred[tx][TOP_DC_PRED] = dc_top_##sz##_c; \
1083 dsp->intra_pred[tx][DC_128_PRED] = dc_128_##sz##_c; \
1084 dsp->intra_pred[tx][DC_127_PRED] = dc_127_##sz##_c; \
1085 dsp->intra_pred[tx][DC_129_PRED] = dc_129_##sz##_c
1087 init_intra_pred(TX_4X4, 4x4);
1088 init_intra_pred(TX_8X8, 8x8);
1089 init_intra_pred(TX_16X16, 16x16);
1090 init_intra_pred(TX_32X32, 32x32);
1092 #undef init_intra_pred
/* itxfm_wrapper generates a full 2-D inverse-transform-and-add function:
 * column pass (type_a) into tmp[], then row pass (type_b) added into dst,
 * with a fast path for DC-only blocks (eob == 1) when has_dconly is set.
 * The coefficient buffer is cleared after the first pass, as VP9 requires.
 * 11585/16384 ~= sqrt(2)/2 in Q14; (1 << 13) is the Q14 rounding constant.
 * itxfm_wrap instantiates all four DCT/ADST row-column combinations.
 * NOTE(review): sampled listing — several macro lines (braces, loop bodies,
 * else-branches, orig. 1097, 1099-1100, 1103, 1108, 1112, 1114-1119, 1127,
 * 1129-1133) are missing from view; visible text kept byte-identical. */
1095 #define itxfm_wrapper(type_a, type_b, sz, bits, has_dconly) \
1096 static void type_a##_##type_b##_##sz##x##sz##_add_c(uint8_t *_dst, \
1098 int16_t *_block, int eob) \
1101 pixel *dst = (pixel *) _dst; \
1102 dctcoef *block = (dctcoef *) _block, tmp[sz * sz], out[sz]; \
1104 stride /= sizeof(pixel); \
1105 if (has_dconly && eob == 1) { \
1106 const int t = (((block[0] * 11585 + (1 << 13)) >> 14) \
1107 * 11585 + (1 << 13)) >> 14; \
1109 for (i = 0; i < sz; i++) { \
1110 for (j = 0; j < sz; j++) \
1111 dst[j * stride] = av_clip_pixel(dst[j * stride] + \
1113 (t + (1 << (bits - 1))) >> bits : \
1120 for (i = 0; i < sz; i++) \
1121 type_a##sz##_1d(block + i, sz, tmp + i * sz, 0); \
1122 memset(block, 0, sz * sz * sizeof(*block)); \
1123 for (i = 0; i < sz; i++) { \
1124 type_b##sz##_1d(tmp + i, sz, out, 1); \
1125 for (j = 0; j < sz; j++) \
1126 dst[j * stride] = av_clip_pixel(dst[j * stride] + \
1128 (out[j] + (1 << (bits - 1))) >> bits : \
1134 #define itxfm_wrap(sz, bits) \
1135 itxfm_wrapper(idct, idct, sz, bits, 1) \
1136 itxfm_wrapper(iadst, idct, sz, bits, 0) \
1137 itxfm_wrapper(idct, iadst, sz, bits, 0) \
1138 itxfm_wrapper(iadst, iadst, sz, bits, 0)
/* IN(x): strided read of input coefficient x, widened to dctint. */
1140 #define IN(x) ((dctint) in[(x) * stride])
/* 1-D 4-point inverse DCT, Q14 fixed point (cos/sin constants scaled by
 * 16384; (1 << 13) rounds). t0/t1 are the even butterfly, t2/t3 the odd.
 * NOTE(review): sampled listing — the brace lines and the final out[0..3]
 * butterfly stores (orig. 1151-1156) are missing from view. */
1142 static av_always_inline void idct4_1d(const dctcoef *in, ptrdiff_t stride,
1143 dctcoef *out, int pass)
1145 dctint t0, t1, t2, t3;
1147 t0 = ((IN(0) + IN(2)) * 11585 + (1 << 13)) >> 14;
1148 t1 = ((IN(0) - IN(2)) * 11585 + (1 << 13)) >> 14;
1149 t2 = (IN(1) * 6270 - IN(3) * 15137 + (1 << 13)) >> 14;
1150 t3 = (IN(1) * 15137 + IN(3) * 6270 + (1 << 13)) >> 14;
/* 1-D 4-point inverse ADST (asymmetric DST), Q14 fixed point. Only three
 * multiplier rows are needed; out[3] reuses t0+t1-t3 per the VP9 reference.
 * NOTE(review): sampled listing — braces, declarations and the line
 * computing t3 (orig. 1160-1162, 1166-1167) are missing from view. */
1158 static av_always_inline void iadst4_1d(const dctcoef *in, ptrdiff_t stride,
1159 dctcoef *out, int pass)
1163 t0 = 5283 * IN(0) + 15212 * IN(2) + 9929 * IN(3);
1164 t1 = 9929 * IN(0) - 5283 * IN(2) - 15212 * IN(3);
1165 t2 = 13377 * (IN(0) - IN(2) + IN(3));
1168 out[0] = (t0 + t3 + (1 << 13)) >> 14;
1169 out[1] = (t1 + t3 + (1 << 13)) >> 14;
1170 out[2] = (t2 + (1 << 13)) >> 14;
1171 out[3] = (t0 + t1 - t3 + (1 << 13)) >> 14;
/* 1-D 8-point inverse DCT, Q14 fixed point: even half (t0a-t3a) is the
 * embedded 4-point IDCT; odd half (t4a-t7a) uses the stage-1 rotations,
 * then t5/t6 are the sqrt(1/2) butterfly of stage 2.
 * NOTE(review): sampled listing — braces, intermediate butterflies and the
 * final out[] stores (orig. 1189-1198, 1201-1210) are missing from view. */
1176 static av_always_inline void idct8_1d(const dctcoef *in, ptrdiff_t stride,
1177 dctcoef *out, int pass)
1179 dctint t0, t0a, t1, t1a, t2, t2a, t3, t3a, t4, t4a, t5, t5a, t6, t6a, t7, t7a;
1181 t0a = ((IN(0) + IN(4)) * 11585 + (1 << 13)) >> 14;
1182 t1a = ((IN(0) - IN(4)) * 11585 + (1 << 13)) >> 14;
1183 t2a = (IN(2) * 6270 - IN(6) * 15137 + (1 << 13)) >> 14;
1184 t3a = (IN(2) * 15137 + IN(6) * 6270 + (1 << 13)) >> 14;
1185 t4a = (IN(1) * 3196 - IN(7) * 16069 + (1 << 13)) >> 14;
1186 t5a = (IN(5) * 13623 - IN(3) * 9102 + (1 << 13)) >> 14;
1187 t6a = (IN(5) * 9102 + IN(3) * 13623 + (1 << 13)) >> 14;
1188 t7a = (IN(1) * 16069 + IN(7) * 3196 + (1 << 13)) >> 14;
1199 t5 = ((t6a - t5a) * 11585 + (1 << 13)) >> 14;
1200 t6 = ((t6a + t5a) * 11585 + (1 << 13)) >> 14;
/* 1-D 8-point inverse ADST, Q14 fixed point. Three stages: initial
 * rotations (t0a-t7a), add/sub with rounding, a 15137/6270 rotation pass,
 * then final sqrt(1/2) butterflies. Negated outputs (out[1],[3],[5],[7])
 * implement the alternating-sign property of the ADST basis.
 * NOTE(review): sampled listing — braces and a few store lines
 * (orig. 1239-1240, 1242-1244, 1249, 1254) are missing from view. */
1212 static av_always_inline void iadst8_1d(const dctcoef *in, ptrdiff_t stride,
1213 dctcoef *out, int pass)
1215 dctint t0, t0a, t1, t1a, t2, t2a, t3, t3a, t4, t4a, t5, t5a, t6, t6a, t7, t7a;
1217 t0a = 16305 * IN(7) + 1606 * IN(0);
1218 t1a = 1606 * IN(7) - 16305 * IN(0);
1219 t2a = 14449 * IN(5) + 7723 * IN(2);
1220 t3a = 7723 * IN(5) - 14449 * IN(2);
1221 t4a = 10394 * IN(3) + 12665 * IN(4);
1222 t5a = 12665 * IN(3) - 10394 * IN(4);
1223 t6a = 4756 * IN(1) + 15679 * IN(6);
1224 t7a = 15679 * IN(1) - 4756 * IN(6);
1226 t0 = (t0a + t4a + (1 << 13)) >> 14;
1227 t1 = (t1a + t5a + (1 << 13)) >> 14;
1228 t2 = (t2a + t6a + (1 << 13)) >> 14;
1229 t3 = (t3a + t7a + (1 << 13)) >> 14;
1230 t4 = (t0a - t4a + (1 << 13)) >> 14;
1231 t5 = (t1a - t5a + (1 << 13)) >> 14;
1232 t6 = (t2a - t6a + (1 << 13)) >> 14;
1233 t7 = (t3a - t7a + (1 << 13)) >> 14;
1235 t4a = 15137 * t4 + 6270 * t5;
1236 t5a = 6270 * t4 - 15137 * t5;
1237 t6a = 15137 * t7 - 6270 * t6;
1238 t7a = 6270 * t7 + 15137 * t6;
1241 out[7] = -(t1 + t3);
1245 out[1] = -((t4a + t6a + (1 << 13)) >> 14);
1246 out[6] = (t5a + t7a + (1 << 13)) >> 14;
1247 t6 = (t4a - t6a + (1 << 13)) >> 14;
1248 t7 = (t5a - t7a + (1 << 13)) >> 14;
1250 out[3] = -(((t2 + t3) * 11585 + (1 << 13)) >> 14);
1251 out[4] = ((t2 - t3) * 11585 + (1 << 13)) >> 14;
1252 out[2] = ((t6 + t7) * 11585 + (1 << 13)) >> 14;
1253 out[5] = -(((t6 - t7) * 11585 + (1 << 13)) >> 14);
/* 1-D 16-point inverse DCT, Q14 fixed point. Even half (t0a-t7a) embeds the
 * 8-point IDCT; t8a-t15a are the stage-1 odd rotations; later stages do the
 * 6270/15137 rotations and sqrt(1/2) butterflies before the final mirror-
 * symmetric out[k] = even +/- odd stores.
 * NOTE(review): sampled listing — several butterfly stages and some out[]
 * stores (orig. 1281-1298, 1305-1322, 1327, 1332, 1334-1337, 1339, 1344)
 * are missing from view; visible text kept byte-identical. */
1258 static av_always_inline void idct16_1d(const dctcoef *in, ptrdiff_t stride,
1259 dctcoef *out, int pass)
1261 dctint t0, t1, t2, t3, t4, t5, t6, t7, t8, t9, t10, t11, t12, t13, t14, t15;
1262 dctint t0a, t1a, t2a, t3a, t4a, t5a, t6a, t7a;
1263 dctint t8a, t9a, t10a, t11a, t12a, t13a, t14a, t15a;
1265 t0a = ((IN(0) + IN(8)) * 11585 + (1 << 13)) >> 14;
1266 t1a = ((IN(0) - IN(8)) * 11585 + (1 << 13)) >> 14;
1267 t2a = (IN(4) * 6270 - IN(12) * 15137 + (1 << 13)) >> 14;
1268 t3a = (IN(4) * 15137 + IN(12) * 6270 + (1 << 13)) >> 14;
1269 t4a = (IN(2) * 3196 - IN(14) * 16069 + (1 << 13)) >> 14;
1270 t7a = (IN(2) * 16069 + IN(14) * 3196 + (1 << 13)) >> 14;
1271 t5a = (IN(10) * 13623 - IN(6) * 9102 + (1 << 13)) >> 14;
1272 t6a = (IN(10) * 9102 + IN(6) * 13623 + (1 << 13)) >> 14;
1273 t8a = (IN(1) * 1606 - IN(15) * 16305 + (1 << 13)) >> 14;
1274 t15a = (IN(1) * 16305 + IN(15) * 1606 + (1 << 13)) >> 14;
1275 t9a = (IN(9) * 12665 - IN(7) * 10394 + (1 << 13)) >> 14;
1276 t14a = (IN(9) * 10394 + IN(7) * 12665 + (1 << 13)) >> 14;
1277 t10a = (IN(5) * 7723 - IN(11) * 14449 + (1 << 13)) >> 14;
1278 t13a = (IN(5) * 14449 + IN(11) * 7723 + (1 << 13)) >> 14;
1279 t11a = (IN(13) * 15679 - IN(3) * 4756 + (1 << 13)) >> 14;
1280 t12a = (IN(13) * 4756 + IN(3) * 15679 + (1 << 13)) >> 14;
1299 t5a = ((t6 - t5) * 11585 + (1 << 13)) >> 14;
1300 t6a = ((t6 + t5) * 11585 + (1 << 13)) >> 14;
1301 t9a = ( t14 * 6270 - t9 * 15137 + (1 << 13)) >> 14;
1302 t14a = ( t14 * 15137 + t9 * 6270 + (1 << 13)) >> 14;
1303 t10a = (-(t13 * 15137 + t10 * 6270) + (1 << 13)) >> 14;
1304 t13a = ( t13 * 6270 - t10 * 15137 + (1 << 13)) >> 14;
1323 t10a = ((t13 - t10) * 11585 + (1 << 13)) >> 14;
1324 t13a = ((t13 + t10) * 11585 + (1 << 13)) >> 14;
1325 t11 = ((t12a - t11a) * 11585 + (1 << 13)) >> 14;
1326 t12 = ((t12a + t11a) * 11585 + (1 << 13)) >> 14;
1328 out[ 0] = t0a + t15a;
1329 out[ 1] = t1a + t14;
1330 out[ 2] = t2a + t13a;
1331 out[ 3] = t3a + t12;
1333 out[ 5] = t5 + t10a;
1338 out[10] = t5 - t10a;
1340 out[12] = t3a - t12;
1341 out[13] = t2a - t13a;
1342 out[14] = t1a - t14;
1343 out[15] = t0a - t15a;
/* 1-D 16-point inverse ADST, Q14 fixed point: stage-1 rotations with the
 * 16-point ADST constants, rounded add/sub stage, 16069/3196 and 9102/13623
 * rotations, 15137/6270 rotations, then final sqrt(1/2) butterflies. The
 * negated outputs implement the ADST basis' alternating signs.
 * NOTE(review): sampled listing — braces and several intermediate lines
 * (orig. 1395-1403 t0..t7 recombination, 1421-1425, 1432-1433, 1438) are
 * missing from view; visible text kept byte-identical. */
1346 static av_always_inline void iadst16_1d(const dctcoef *in, ptrdiff_t stride,
1347 dctcoef *out, int pass)
1349 dctint t0, t1, t2, t3, t4, t5, t6, t7, t8, t9, t10, t11, t12, t13, t14, t15;
1350 dctint t0a, t1a, t2a, t3a, t4a, t5a, t6a, t7a;
1351 dctint t8a, t9a, t10a, t11a, t12a, t13a, t14a, t15a;
1353 t0 = IN(15) * 16364 + IN(0) * 804;
1354 t1 = IN(15) * 804 - IN(0) * 16364;
1355 t2 = IN(13) * 15893 + IN(2) * 3981;
1356 t3 = IN(13) * 3981 - IN(2) * 15893;
1357 t4 = IN(11) * 14811 + IN(4) * 7005;
1358 t5 = IN(11) * 7005 - IN(4) * 14811;
1359 t6 = IN(9) * 13160 + IN(6) * 9760;
1360 t7 = IN(9) * 9760 - IN(6) * 13160;
1361 t8 = IN(7) * 11003 + IN(8) * 12140;
1362 t9 = IN(7) * 12140 - IN(8) * 11003;
1363 t10 = IN(5) * 8423 + IN(10) * 14053;
1364 t11 = IN(5) * 14053 - IN(10) * 8423;
1365 t12 = IN(3) * 5520 + IN(12) * 15426;
1366 t13 = IN(3) * 15426 - IN(12) * 5520;
1367 t14 = IN(1) * 2404 + IN(14) * 16207;
1368 t15 = IN(1) * 16207 - IN(14) * 2404;
1370 t0a = (t0 + t8 + (1 << 13)) >> 14;
1371 t1a = (t1 + t9 + (1 << 13)) >> 14;
1372 t2a = (t2 + t10 + (1 << 13)) >> 14;
1373 t3a = (t3 + t11 + (1 << 13)) >> 14;
1374 t4a = (t4 + t12 + (1 << 13)) >> 14;
1375 t5a = (t5 + t13 + (1 << 13)) >> 14;
1376 t6a = (t6 + t14 + (1 << 13)) >> 14;
1377 t7a = (t7 + t15 + (1 << 13)) >> 14;
1378 t8a = (t0 - t8 + (1 << 13)) >> 14;
1379 t9a = (t1 - t9 + (1 << 13)) >> 14;
1380 t10a = (t2 - t10 + (1 << 13)) >> 14;
1381 t11a = (t3 - t11 + (1 << 13)) >> 14;
1382 t12a = (t4 - t12 + (1 << 13)) >> 14;
1383 t13a = (t5 - t13 + (1 << 13)) >> 14;
1384 t14a = (t6 - t14 + (1 << 13)) >> 14;
1385 t15a = (t7 - t15 + (1 << 13)) >> 14;
1387 t8 = t8a * 16069 + t9a * 3196;
1388 t9 = t8a * 3196 - t9a * 16069;
1389 t10 = t10a * 9102 + t11a * 13623;
1390 t11 = t10a * 13623 - t11a * 9102;
1391 t12 = t13a * 16069 - t12a * 3196;
1392 t13 = t13a * 3196 + t12a * 16069;
1393 t14 = t15a * 9102 - t14a * 13623;
1394 t15 = t15a * 13623 + t14a * 9102;
1404 t8a = (t8 + t12 + (1 << 13)) >> 14;
1405 t9a = (t9 + t13 + (1 << 13)) >> 14;
1406 t10a = (t10 + t14 + (1 << 13)) >> 14;
1407 t11a = (t11 + t15 + (1 << 13)) >> 14;
1408 t12a = (t8 - t12 + (1 << 13)) >> 14;
1409 t13a = (t9 - t13 + (1 << 13)) >> 14;
1410 t14a = (t10 - t14 + (1 << 13)) >> 14;
1411 t15a = (t11 - t15 + (1 << 13)) >> 14;
1413 t4a = t4 * 15137 + t5 * 6270;
1414 t5a = t4 * 6270 - t5 * 15137;
1415 t6a = t7 * 15137 - t6 * 6270;
1416 t7a = t7 * 6270 + t6 * 15137;
1417 t12 = t12a * 15137 + t13a * 6270;
1418 t13 = t12a * 6270 - t13a * 15137;
1419 t14 = t15a * 15137 - t14a * 6270;
1420 t15 = t15a * 6270 + t14a * 15137;
1423 out[15] = -(t1 + t3);
1426 out[ 3] = -((t4a + t6a + (1 << 13)) >> 14);
1427 out[12] = (t5a + t7a + (1 << 13)) >> 14;
1428 t6 = (t4a - t6a + (1 << 13)) >> 14;
1429 t7 = (t5a - t7a + (1 << 13)) >> 14;
1430 out[ 1] = -(t8a + t10a);
1431 out[14] = t9a + t11a;
1434 out[ 2] = (t12 + t14 + (1 << 13)) >> 14;
1435 out[13] = -((t13 + t15 + (1 << 13)) >> 14);
1436 t14a = (t12 - t14 + (1 << 13)) >> 14;
1437 t15a = (t13 - t15 + (1 << 13)) >> 14;
1439 out[ 7] = ((t2a + t3a) * -11585 + (1 << 13)) >> 14;
1440 out[ 8] = ((t2a - t3a) * 11585 + (1 << 13)) >> 14;
1441 out[ 4] = ((t7 + t6) * 11585 + (1 << 13)) >> 14;
1442 out[11] = ((t7 - t6) * 11585 + (1 << 13)) >> 14;
1443 out[ 6] = ((t11 + t10) * 11585 + (1 << 13)) >> 14;
1444 out[ 9] = ((t11 - t10) * 11585 + (1 << 13)) >> 14;
1445 out[ 5] = ((t14a + t15a) * -11585 + (1 << 13)) >> 14;
1446 out[10] = ((t14a - t15a) * 11585 + (1 << 13)) >> 14;
/* 1-D 32-point inverse DCT, Q14 fixed point. The first 16 rotations embed
 * the 16-point IDCT (even inputs); t16a-t31a are the stage-1 odd rotations
 * for the new 32-point terms. Later stages apply the 3196/16069, 13623/9102
 * and 6270/15137 rotations plus sqrt(1/2) butterflies, ending with the
 * mirror-symmetric out[k] = even +/- odd stores.
 * NOTE(review): sampled listing — several whole stages (orig. 1534-1567,
 * 1580-1613) and a few stores (orig. 1623, 1625, 1627, 1650, 1652) are
 * missing from view; visible text kept byte-identical. */
1451 static av_always_inline void idct32_1d(const dctcoef *in, ptrdiff_t stride,
1452 dctcoef *out, int pass)
1454 dctint t0a = ((IN(0) + IN(16)) * 11585 + (1 << 13)) >> 14;
1455 dctint t1a = ((IN(0) - IN(16)) * 11585 + (1 << 13)) >> 14;
1456 dctint t2a = (IN( 8) * 6270 - IN(24) * 15137 + (1 << 13)) >> 14;
1457 dctint t3a = (IN( 8) * 15137 + IN(24) * 6270 + (1 << 13)) >> 14;
1458 dctint t4a = (IN( 4) * 3196 - IN(28) * 16069 + (1 << 13)) >> 14;
1459 dctint t7a = (IN( 4) * 16069 + IN(28) * 3196 + (1 << 13)) >> 14;
1460 dctint t5a = (IN(20) * 13623 - IN(12) * 9102 + (1 << 13)) >> 14;
1461 dctint t6a = (IN(20) * 9102 + IN(12) * 13623 + (1 << 13)) >> 14;
1462 dctint t8a = (IN( 2) * 1606 - IN(30) * 16305 + (1 << 13)) >> 14;
1463 dctint t15a = (IN( 2) * 16305 + IN(30) * 1606 + (1 << 13)) >> 14;
1464 dctint t9a = (IN(18) * 12665 - IN(14) * 10394 + (1 << 13)) >> 14;
1465 dctint t14a = (IN(18) * 10394 + IN(14) * 12665 + (1 << 13)) >> 14;
1466 dctint t10a = (IN(10) * 7723 - IN(22) * 14449 + (1 << 13)) >> 14;
1467 dctint t13a = (IN(10) * 14449 + IN(22) * 7723 + (1 << 13)) >> 14;
1468 dctint t11a = (IN(26) * 15679 - IN( 6) * 4756 + (1 << 13)) >> 14;
1469 dctint t12a = (IN(26) * 4756 + IN( 6) * 15679 + (1 << 13)) >> 14;
1470 dctint t16a = (IN( 1) * 804 - IN(31) * 16364 + (1 << 13)) >> 14;
1471 dctint t31a = (IN( 1) * 16364 + IN(31) * 804 + (1 << 13)) >> 14;
1472 dctint t17a = (IN(17) * 12140 - IN(15) * 11003 + (1 << 13)) >> 14;
1473 dctint t30a = (IN(17) * 11003 + IN(15) * 12140 + (1 << 13)) >> 14;
1474 dctint t18a = (IN( 9) * 7005 - IN(23) * 14811 + (1 << 13)) >> 14;
1475 dctint t29a = (IN( 9) * 14811 + IN(23) * 7005 + (1 << 13)) >> 14;
1476 dctint t19a = (IN(25) * 15426 - IN( 7) * 5520 + (1 << 13)) >> 14;
1477 dctint t28a = (IN(25) * 5520 + IN( 7) * 15426 + (1 << 13)) >> 14;
1478 dctint t20a = (IN( 5) * 3981 - IN(27) * 15893 + (1 << 13)) >> 14;
1479 dctint t27a = (IN( 5) * 15893 + IN(27) * 3981 + (1 << 13)) >> 14;
1480 dctint t21a = (IN(21) * 14053 - IN(11) * 8423 + (1 << 13)) >> 14;
1481 dctint t26a = (IN(21) * 8423 + IN(11) * 14053 + (1 << 13)) >> 14;
1482 dctint t22a = (IN(13) * 9760 - IN(19) * 13160 + (1 << 13)) >> 14;
1483 dctint t25a = (IN(13) * 13160 + IN(19) * 9760 + (1 << 13)) >> 14;
1484 dctint t23a = (IN(29) * 16207 - IN( 3) * 2404 + (1 << 13)) >> 14;
1485 dctint t24a = (IN(29) * 2404 + IN( 3) * 16207 + (1 << 13)) >> 14;
/* stage 2: plain add/sub butterflies of adjacent rotation pairs */
1487 dctint t0 = t0a + t3a;
1488 dctint t1 = t1a + t2a;
1489 dctint t2 = t1a - t2a;
1490 dctint t3 = t0a - t3a;
1491 dctint t4 = t4a + t5a;
1492 dctint t5 = t4a - t5a;
1493 dctint t6 = t7a - t6a;
1494 dctint t7 = t7a + t6a;
1495 dctint t8 = t8a + t9a;
1496 dctint t9 = t8a - t9a;
1497 dctint t10 = t11a - t10a;
1498 dctint t11 = t11a + t10a;
1499 dctint t12 = t12a + t13a;
1500 dctint t13 = t12a - t13a;
1501 dctint t14 = t15a - t14a;
1502 dctint t15 = t15a + t14a;
1503 dctint t16 = t16a + t17a;
1504 dctint t17 = t16a - t17a;
1505 dctint t18 = t19a - t18a;
1506 dctint t19 = t19a + t18a;
1507 dctint t20 = t20a + t21a;
1508 dctint t21 = t20a - t21a;
1509 dctint t22 = t23a - t22a;
1510 dctint t23 = t23a + t22a;
1511 dctint t24 = t24a + t25a;
1512 dctint t25 = t24a - t25a;
1513 dctint t26 = t27a - t26a;
1514 dctint t27 = t27a + t26a;
1515 dctint t28 = t28a + t29a;
1516 dctint t29 = t28a - t29a;
1517 dctint t30 = t31a - t30a;
1518 dctint t31 = t31a + t30a;
/* stage 3: secondary rotations on the odd-half terms */
1520 t5a = ((t6 - t5) * 11585 + (1 << 13)) >> 14;
1521 t6a = ((t6 + t5) * 11585 + (1 << 13)) >> 14;
1522 t9a = ( t14 * 6270 - t9 * 15137 + (1 << 13)) >> 14;
1523 t14a = ( t14 * 15137 + t9 * 6270 + (1 << 13)) >> 14;
1524 t10a = (-(t13 * 15137 + t10 * 6270) + (1 << 13)) >> 14;
1525 t13a = ( t13 * 6270 - t10 * 15137 + (1 << 13)) >> 14;
1526 t17a = ( t30 * 3196 - t17 * 16069 + (1 << 13)) >> 14;
1527 t30a = ( t30 * 16069 + t17 * 3196 + (1 << 13)) >> 14;
1528 t18a = (-(t29 * 16069 + t18 * 3196) + (1 << 13)) >> 14;
1529 t29a = ( t29 * 3196 - t18 * 16069 + (1 << 13)) >> 14;
1530 t21a = ( t26 * 13623 - t21 * 9102 + (1 << 13)) >> 14;
1531 t26a = ( t26 * 9102 + t21 * 13623 + (1 << 13)) >> 14;
1532 t22a = (-(t25 * 9102 + t22 * 13623) + (1 << 13)) >> 14;
1533 t25a = ( t25 * 13623 - t22 * 9102 + (1 << 13)) >> 14;
/* later stages (intermediate butterflies not visible in this chunk) */
1568 t10a = ((t13 - t10) * 11585 + (1 << 13)) >> 14;
1569 t13a = ((t13 + t10) * 11585 + (1 << 13)) >> 14;
1570 t11 = ((t12a - t11a) * 11585 + (1 << 13)) >> 14;
1571 t12 = ((t12a + t11a) * 11585 + (1 << 13)) >> 14;
1572 t18a = ( t29 * 6270 - t18 * 15137 + (1 << 13)) >> 14;
1573 t29a = ( t29 * 15137 + t18 * 6270 + (1 << 13)) >> 14;
1574 t19 = ( t28a * 6270 - t19a * 15137 + (1 << 13)) >> 14;
1575 t28 = ( t28a * 15137 + t19a * 6270 + (1 << 13)) >> 14;
1576 t20 = (-(t27a * 15137 + t20a * 6270) + (1 << 13)) >> 14;
1577 t27 = ( t27a * 6270 - t20a * 15137 + (1 << 13)) >> 14;
1578 t21a = (-(t26 * 15137 + t21 * 6270) + (1 << 13)) >> 14;
1579 t26a = ( t26 * 6270 - t21 * 15137 + (1 << 13)) >> 14;
/* final sqrt(1/2) butterflies on the middle terms */
1614 t20 = ((t27a - t20a) * 11585 + (1 << 13)) >> 14;
1615 t27 = ((t27a + t20a) * 11585 + (1 << 13)) >> 14;
1616 t21a = ((t26 - t21 ) * 11585 + (1 << 13)) >> 14;
1617 t26a = ((t26 + t21 ) * 11585 + (1 << 13)) >> 14;
1618 t22 = ((t25a - t22a) * 11585 + (1 << 13)) >> 14;
1619 t25 = ((t25a + t22a) * 11585 + (1 << 13)) >> 14;
1620 t23a = ((t24 - t23 ) * 11585 + (1 << 13)) >> 14;
1621 t24a = ((t24 + t23 ) * 11585 + (1 << 13)) >> 14;
/* mirror-symmetric output: out[k] and out[31-k] are sum/difference pairs */
1624 out[ 1] = t1 + t30a;
1626 out[ 3] = t3 + t28a;
1628 out[ 5] = t5a + t26a;
1629 out[ 6] = t6a + t25;
1630 out[ 7] = t7 + t24a;
1631 out[ 8] = t8 + t23a;
1632 out[ 9] = t9a + t22;
1633 out[10] = t10 + t21a;
1634 out[11] = t11a + t20;
1635 out[12] = t12a + t19a;
1636 out[13] = t13 + t18;
1637 out[14] = t14a + t17a;
1638 out[15] = t15 + t16;
1639 out[16] = t15 - t16;
1640 out[17] = t14a - t17a;
1641 out[18] = t13 - t18;
1642 out[19] = t12a - t19a;
1643 out[20] = t11a - t20;
1644 out[21] = t10 - t21a;
1645 out[22] = t9a - t22;
1646 out[23] = t8 - t23a;
1647 out[24] = t7 - t24a;
1648 out[25] = t6a - t25;
1649 out[26] = t5a - t26a;
1651 out[28] = t3 - t28a;
1653 out[30] = t1 - t30a;
/* 32x32 uses DCT only (shift 6, with DC-only fast path). */
1657 itxfm_wrapper(idct, idct, 32, 6, 1)
/* 1-D 4-point inverse Walsh-Hadamard transform (lossless mode): integer
 * lifting steps only, no multiplies, no rounding (bits == 0 in the wrapper).
 * NOTE(review): sampled listing — most of the lifting body (orig. 1663-1677,
 * 1679-1689) is missing from view; visible text kept byte-identical. */
1659 static av_always_inline void iwht4_1d(const dctcoef *in, ptrdiff_t stride,
1660 dctcoef *out, int pass)
1662 int t0, t1, t2, t3, t4;
1678 t4 = (t0 - t3) >> 1;
1690 itxfm_wrapper(iwht, iwht, 4, 0, 0)
1693 #undef itxfm_wrapper
/* Fills the inverse-transform dispatch table. init_itxfm wires all four
 * row/col DCT-ADST combinations per transform size; init_idct points all
 * four entries at one function (32x32 supports DCT only, and lossless mode
 * always uses the WHT). Note the deliberate cross-wiring: DCT_ADST maps to
 * iadst_idct_* and ADST_DCT to idct_iadst_* — the enum names the (row, col)
 * pair while the function names the (col-pass, row-pass) order.
 * NOTE(review): sampled listing — braces and blanks (orig. 1697, 1703,
 * 1709, 1715+) are missing from view. */
1696 static av_cold void vp9dsp_itxfm_init(VP9DSPContext *dsp)
1698 #define init_itxfm(tx, sz) \
1699 dsp->itxfm_add[tx][DCT_DCT] = idct_idct_##sz##_add_c; \
1700 dsp->itxfm_add[tx][DCT_ADST] = iadst_idct_##sz##_add_c; \
1701 dsp->itxfm_add[tx][ADST_DCT] = idct_iadst_##sz##_add_c; \
1702 dsp->itxfm_add[tx][ADST_ADST] = iadst_iadst_##sz##_add_c
1704 #define init_idct(tx, nm) \
1705 dsp->itxfm_add[tx][DCT_DCT] = \
1706 dsp->itxfm_add[tx][ADST_DCT] = \
1707 dsp->itxfm_add[tx][DCT_ADST] = \
1708 dsp->itxfm_add[tx][ADST_ADST] = nm##_add_c
1710 init_itxfm(TX_4X4, 4x4);
1711 init_itxfm(TX_8X8, 8x8);
1712 init_itxfm(TX_16X16, 16x16);
1713 init_idct(TX_32X32, idct_idct_32x32);
1714 init_idct(4 /* lossless */, iwht_iwht_4x4);
1720 static av_always_inline void loop_filter(pixel *dst, int E, int I, int H,
1721 ptrdiff_t stridea, ptrdiff_t strideb,
1724 int i, F = 1 << (BIT_DEPTH - 8);
1726 E <<= (BIT_DEPTH - 8);
1727 I <<= (BIT_DEPTH - 8);
1728 H <<= (BIT_DEPTH - 8);
1729 for (i = 0; i < 8; i++, dst += stridea) {
1731 int p3 = dst[strideb * -4], p2 = dst[strideb * -3];
1732 int p1 = dst[strideb * -2], p0 = dst[strideb * -1];
1733 int q0 = dst[strideb * +0], q1 = dst[strideb * +1];
1734 int q2 = dst[strideb * +2], q3 = dst[strideb * +3];
1736 int fm = FFABS(p3 - p2) <= I && FFABS(p2 - p1) <= I &&
1737 FFABS(p1 - p0) <= I && FFABS(q1 - q0) <= I &&
1738 FFABS(q2 - q1) <= I && FFABS(q3 - q2) <= I &&
1739 FFABS(p0 - q0) * 2 + (FFABS(p1 - q1) >> 1) <= E;
1740 int flat8out, flat8in;
1746 p7 = dst[strideb * -8];
1747 p6 = dst[strideb * -7];
1748 p5 = dst[strideb * -6];
1749 p4 = dst[strideb * -5];
1750 q4 = dst[strideb * +4];
1751 q5 = dst[strideb * +5];
1752 q6 = dst[strideb * +6];
1753 q7 = dst[strideb * +7];
1755 flat8out = FFABS(p7 - p0) <= F && FFABS(p6 - p0) <= F &&
1756 FFABS(p5 - p0) <= F && FFABS(p4 - p0) <= F &&
1757 FFABS(q4 - q0) <= F && FFABS(q5 - q0) <= F &&
1758 FFABS(q6 - q0) <= F && FFABS(q7 - q0) <= F;
1762 flat8in = FFABS(p3 - p0) <= F && FFABS(p2 - p0) <= F &&
1763 FFABS(p1 - p0) <= F && FFABS(q1 - q0) <= F &&
1764 FFABS(q2 - q0) <= F && FFABS(q3 - q0) <= F;
1766 if (wd >= 16 && flat8out && flat8in) {
1767 dst[strideb * -7] = (p7 + p7 + p7 + p7 + p7 + p7 + p7 + p6 * 2 +
1768 p5 + p4 + p3 + p2 + p1 + p0 + q0 + 8) >> 4;
1769 dst[strideb * -6] = (p7 + p7 + p7 + p7 + p7 + p7 + p6 + p5 * 2 +
1770 p4 + p3 + p2 + p1 + p0 + q0 + q1 + 8) >> 4;
1771 dst[strideb * -5] = (p7 + p7 + p7 + p7 + p7 + p6 + p5 + p4 * 2 +
1772 p3 + p2 + p1 + p0 + q0 + q1 + q2 + 8) >> 4;
1773 dst[strideb * -4] = (p7 + p7 + p7 + p7 + p6 + p5 + p4 + p3 * 2 +
1774 p2 + p1 + p0 + q0 + q1 + q2 + q3 + 8) >> 4;
1775 dst[strideb * -3] = (p7 + p7 + p7 + p6 + p5 + p4 + p3 + p2 * 2 +
1776 p1 + p0 + q0 + q1 + q2 + q3 + q4 + 8) >> 4;
1777 dst[strideb * -2] = (p7 + p7 + p6 + p5 + p4 + p3 + p2 + p1 * 2 +
1778 p0 + q0 + q1 + q2 + q3 + q4 + q5 + 8) >> 4;
1779 dst[strideb * -1] = (p7 + p6 + p5 + p4 + p3 + p2 + p1 + p0 * 2 +
1780 q0 + q1 + q2 + q3 + q4 + q5 + q6 + 8) >> 4;
1781 dst[strideb * +0] = (p6 + p5 + p4 + p3 + p2 + p1 + p0 + q0 * 2 +
1782 q1 + q2 + q3 + q4 + q5 + q6 + q7 + 8) >> 4;
1783 dst[strideb * +1] = (p5 + p4 + p3 + p2 + p1 + p0 + q0 + q1 * 2 +
1784 q2 + q3 + q4 + q5 + q6 + q7 + q7 + 8) >> 4;
1785 dst[strideb * +2] = (p4 + p3 + p2 + p1 + p0 + q0 + q1 + q2 * 2 +
1786 q3 + q4 + q5 + q6 + q7 + q7 + q7 + 8) >> 4;
1787 dst[strideb * +3] = (p3 + p2 + p1 + p0 + q0 + q1 + q2 + q3 * 2 +
1788 q4 + q5 + q6 + q7 + q7 + q7 + q7 + 8) >> 4;
1789 dst[strideb * +4] = (p2 + p1 + p0 + q0 + q1 + q2 + q3 + q4 * 2 +
1790 q5 + q6 + q7 + q7 + q7 + q7 + q7 + 8) >> 4;
1791 dst[strideb * +5] = (p1 + p0 + q0 + q1 + q2 + q3 + q4 + q5 * 2 +
1792 q6 + q7 + q7 + q7 + q7 + q7 + q7 + 8) >> 4;
1793 dst[strideb * +6] = (p0 + q0 + q1 + q2 + q3 + q4 + q5 + q6 * 2 +
1794 q7 + q7 + q7 + q7 + q7 + q7 + q7 + 8) >> 4;
1795 } else if (wd >= 8 && flat8in) {
1796 dst[strideb * -3] = (p3 + p3 + p3 + 2 * p2 + p1 + p0 + q0 + 4) >> 3;
1797 dst[strideb * -2] = (p3 + p3 + p2 + 2 * p1 + p0 + q0 + q1 + 4) >> 3;
1798 dst[strideb * -1] = (p3 + p2 + p1 + 2 * p0 + q0 + q1 + q2 + 4) >> 3;
1799 dst[strideb * +0] = (p2 + p1 + p0 + 2 * q0 + q1 + q2 + q3 + 4) >> 3;
1800 dst[strideb * +1] = (p1 + p0 + q0 + 2 * q1 + q2 + q3 + q3 + 4) >> 3;
1801 dst[strideb * +2] = (p0 + q0 + q1 + 2 * q2 + q3 + q3 + q3 + 4) >> 3;
1803 int hev = FFABS(p1 - p0) > H || FFABS(q1 - q0) > H;
1806 int f = av_clip_intp2(p1 - q1, BIT_DEPTH - 1), f1, f2;
1807 f = av_clip_intp2(3 * (q0 - p0) + f, BIT_DEPTH - 1);
1809 f1 = FFMIN(f + 4, (1 << (BIT_DEPTH - 1)) - 1) >> 3;
1810 f2 = FFMIN(f + 3, (1 << (BIT_DEPTH - 1)) - 1) >> 3;
1812 dst[strideb * -1] = av_clip_pixel(p0 + f2);
1813 dst[strideb * +0] = av_clip_pixel(q0 - f1);
1815 int f = av_clip_intp2(3 * (q0 - p0), BIT_DEPTH - 1), f1, f2;
1817 f1 = FFMIN(f + 4, (1 << (BIT_DEPTH - 1)) - 1) >> 3;
1818 f2 = FFMIN(f + 3, (1 << (BIT_DEPTH - 1)) - 1) >> 3;
1820 dst[strideb * -1] = av_clip_pixel(p0 + f2);
1821 dst[strideb * +0] = av_clip_pixel(q0 - f1);
1824 dst[strideb * -2] = av_clip_pixel(p1 + f);
1825 dst[strideb * +1] = av_clip_pixel(q1 - f);
/* Loop-filter entry-point generators. lf_8_fn emits one 8-pixel-edge
 * function per direction/width, mapping (stridea, strideb) = (stride, 1)
 * for horizontal edges and (1, stride) for vertical ones. lf_16_fn emits
 * the 16-pixel variant as two 8-pixel calls; lf_mix_fn packs two widths'
 * thresholds into one E/I/H word (low byte first half, high byte second).
 * NOTE(review): sampled listing — braces, the stride parameter lines and the
 * macro instantiation lines (orig. 1833, 1835, 1839-1840, 1844-1851,
 * 1854, 1856, 1859-1861, 1863-1865, 1868, 1870, 1873-1874, 1878+) are
 * missing from view; visible text kept byte-identical. */
1831 #define lf_8_fn(dir, wd, stridea, strideb) \
1832 static void loop_filter_##dir##_##wd##_8_c(uint8_t *_dst, \
1834 int E, int I, int H) \
1836 pixel *dst = (pixel *) _dst; \
1837 stride /= sizeof(pixel); \
1838 loop_filter(dst, E, I, H, stridea, strideb, wd); \
1841 #define lf_8_fns(wd) \
1842 lf_8_fn(h, wd, stride, 1) \
1843 lf_8_fn(v, wd, 1, stride)
1852 #define lf_16_fn(dir, stridea) \
1853 static void loop_filter_##dir##_16_16_c(uint8_t *dst, \
1855 int E, int I, int H) \
1857 loop_filter_##dir##_16_8_c(dst, stride, E, I, H); \
1858 loop_filter_##dir##_16_8_c(dst + 8 * stridea, stride, E, I, H); \
1862 lf_16_fn(v, sizeof(pixel))
1866 #define lf_mix_fn(dir, wd1, wd2, stridea) \
1867 static void loop_filter_##dir##_##wd1##wd2##_16_c(uint8_t *dst, \
1869 int E, int I, int H) \
1871 loop_filter_##dir##_##wd1##_8_c(dst, stride, E & 0xff, I & 0xff, H & 0xff); \
1872 loop_filter_##dir##_##wd2##_8_c(dst + 8 * stridea, stride, E >> 8, I >> 8, H >> 8); \
1875 #define lf_mix_fns(wd1, wd2) \
1876 lf_mix_fn(h, wd1, wd2, stride) \
1877 lf_mix_fn(v, wd1, wd2, sizeof(pixel))
/* Wires the generated loop-filter functions into the VP9DSPContext:
 * loop_filter_8[width-idx][h/v], the 16-wide pair, and the mix2 table
 * indexed by the two packed widths (4/8) and direction.
 * NOTE(review): sampled listing — braces/blank lines (orig. 1888, 1895,
 * 1898, 1907) are missing from view. */
1887 static av_cold void vp9dsp_loopfilter_init(VP9DSPContext *dsp)
1889 dsp->loop_filter_8[0][0] = loop_filter_h_4_8_c;
1890 dsp->loop_filter_8[0][1] = loop_filter_v_4_8_c;
1891 dsp->loop_filter_8[1][0] = loop_filter_h_8_8_c;
1892 dsp->loop_filter_8[1][1] = loop_filter_v_8_8_c;
1893 dsp->loop_filter_8[2][0] = loop_filter_h_16_8_c;
1894 dsp->loop_filter_8[2][1] = loop_filter_v_16_8_c;
1896 dsp->loop_filter_16[0] = loop_filter_h_16_16_c;
1897 dsp->loop_filter_16[1] = loop_filter_v_16_16_c;
1899 dsp->loop_filter_mix2[0][0][0] = loop_filter_h_44_16_c;
1900 dsp->loop_filter_mix2[0][0][1] = loop_filter_v_44_16_c;
1901 dsp->loop_filter_mix2[0][1][0] = loop_filter_h_48_16_c;
1902 dsp->loop_filter_mix2[0][1][1] = loop_filter_v_48_16_c;
1903 dsp->loop_filter_mix2[1][0][0] = loop_filter_h_84_16_c;
1904 dsp->loop_filter_mix2[1][0][1] = loop_filter_v_84_16_c;
1905 dsp->loop_filter_mix2[1][1][0] = loop_filter_h_88_16_c;
1906 dsp->loop_filter_mix2[1][1][1] = loop_filter_v_88_16_c;
/* Whole-pixel copy: one memcpy of w pixels per row (row loop and brace
 * lines, orig. 1911-1913 and 1915+, not visible in this chunk). */
1909 static av_always_inline void copy_c(uint8_t *dst, ptrdiff_t dst_stride,
1910 const uint8_t *src, ptrdiff_t src_stride,
1914 memcpy(dst, src, w * sizeof(pixel));
/* Whole-pixel averaging (bi-prediction): rounds dst and src together four
 * pixels at a time via the packed rnd_avg_pixel4 helper; w is presumably a
 * multiple of 4 for all VP9 block sizes.
 * NOTE(review): sampled listing — braces, the w/h parameter line and the
 * row loop (orig. 1923-1924, 1927, 1930-1932, 1935+) are missing from view. */
1921 static av_always_inline void avg_c(uint8_t *_dst, ptrdiff_t dst_stride,
1922 const uint8_t *_src, ptrdiff_t src_stride,
1925 pixel *dst = (pixel *) _dst;
1926 const pixel *src = (const pixel *) _src;
1928 dst_stride /= sizeof(pixel);
1929 src_stride /= sizeof(pixel);
1933 for (x = 0; x < w; x += 4)
1934 AV_WN4PA(&dst[x], rnd_avg_pixel4(AV_RN4PA(&dst[x]), AV_RN4P(&src[x])));
/* fpel_fn generates the fixed-size full-pel entry points (copy64_c,
 * avg32_c, ...) matching the mc function-pointer signature; mx/my are
 * ignored at full-pel. copy_avg_fn bundles both per size (its body,
 * orig. 1950+, is not visible in this chunk).
 * NOTE(review): sampled listing — brace lines (orig. 1945, 1947-1948)
 * missing from view; visible text kept byte-identical. */
1941 #define fpel_fn(type, sz) \
1942 static void type##sz##_c(uint8_t *dst, ptrdiff_t dst_stride, \
1943 const uint8_t *src, ptrdiff_t src_stride, \
1944 int h, int mx, int my) \
1946 type##_c(dst, dst_stride, src, src_stride, sz, h); \
1949 #define copy_avg_fn(sz) \
/* VP9 sub-pixel interpolation filters: 3 filter families x 16 fractional
 * positions x 8 taps, each row summing to 128 (normalized by >> 7 in
 * FILTER_8TAP). Position 0 is the identity filter; positions k and 16-k are
 * mirror images of each other.
 * NOTE(review): sampled listing — the closing "}" / "};" lines
 * (orig. 2014-2015) are missing from view; visible text kept byte-identical. */
1962 static const int16_t vp9_subpel_filters[3][16][8] = {
1963 [FILTER_8TAP_REGULAR] = {
1964 { 0, 0, 0, 128, 0, 0, 0, 0 },
1965 { 0, 1, -5, 126, 8, -3, 1, 0 },
1966 { -1, 3, -10, 122, 18, -6, 2, 0 },
1967 { -1, 4, -13, 118, 27, -9, 3, -1 },
1968 { -1, 4, -16, 112, 37, -11, 4, -1 },
1969 { -1, 5, -18, 105, 48, -14, 4, -1 },
1970 { -1, 5, -19, 97, 58, -16, 5, -1 },
1971 { -1, 6, -19, 88, 68, -18, 5, -1 },
1972 { -1, 6, -19, 78, 78, -19, 6, -1 },
1973 { -1, 5, -18, 68, 88, -19, 6, -1 },
1974 { -1, 5, -16, 58, 97, -19, 5, -1 },
1975 { -1, 4, -14, 48, 105, -18, 5, -1 },
1976 { -1, 4, -11, 37, 112, -16, 4, -1 },
1977 { -1, 3, -9, 27, 118, -13, 4, -1 },
1978 { 0, 2, -6, 18, 122, -10, 3, -1 },
1979 { 0, 1, -3, 8, 126, -5, 1, 0 },
1980 }, [FILTER_8TAP_SHARP] = {
1981 { 0, 0, 0, 128, 0, 0, 0, 0 },
1982 { -1, 3, -7, 127, 8, -3, 1, 0 },
1983 { -2, 5, -13, 125, 17, -6, 3, -1 },
1984 { -3, 7, -17, 121, 27, -10, 5, -2 },
1985 { -4, 9, -20, 115, 37, -13, 6, -2 },
1986 { -4, 10, -23, 108, 48, -16, 8, -3 },
1987 { -4, 10, -24, 100, 59, -19, 9, -3 },
1988 { -4, 11, -24, 90, 70, -21, 10, -4 },
1989 { -4, 11, -23, 80, 80, -23, 11, -4 },
1990 { -4, 10, -21, 70, 90, -24, 11, -4 },
1991 { -3, 9, -19, 59, 100, -24, 10, -4 },
1992 { -3, 8, -16, 48, 108, -23, 10, -4 },
1993 { -2, 6, -13, 37, 115, -20, 9, -4 },
1994 { -2, 5, -10, 27, 121, -17, 7, -3 },
1995 { -1, 3, -6, 17, 125, -13, 5, -2 },
1996 { 0, 1, -3, 8, 127, -7, 3, -1 },
1997 }, [FILTER_8TAP_SMOOTH] = {
1998 { 0, 0, 0, 128, 0, 0, 0, 0 },
1999 { -3, -1, 32, 64, 38, 1, -3, 0 },
2000 { -2, -2, 29, 63, 41, 2, -3, 0 },
2001 { -2, -2, 26, 63, 43, 4, -4, 0 },
2002 { -2, -3, 24, 62, 46, 5, -4, 0 },
2003 { -2, -3, 21, 60, 49, 7, -4, 0 },
2004 { -1, -4, 18, 59, 51, 9, -4, 0 },
2005 { -1, -4, 16, 57, 53, 12, -4, -1 },
2006 { -1, -4, 14, 55, 55, 14, -4, -1 },
2007 { -1, -4, 12, 53, 57, 16, -4, -1 },
2008 { 0, -4, 9, 51, 59, 18, -4, -1 },
2009 { 0, -4, 7, 49, 60, 21, -3, -2 },
2010 { 0, -4, 5, 46, 62, 24, -3, -2 },
2011 { 0, -4, 4, 43, 63, 26, -2, -2 },
2012 { 0, -3, 2, 41, 63, 29, -2, -2 },
2013 { 0, -3, 1, 38, 64, 32, -1, -3 },
/* 8-tap FIR convolution at position x with tap spacing `stride`
 * (1 = horizontal, row stride = vertical); +64 >> 7 renormalizes the
 * 128-sum filter with rounding, then clips to the pixel range. */
2017 #define FILTER_8TAP(src, x, F, stride) \
2018 av_clip_pixel((F[0] * src[x + -3 * stride] + \
2019 F[1] * src[x + -2 * stride] + \
2020 F[2] * src[x + -1 * stride] + \
2021 F[3] * src[x + +0 * stride] + \
2022 F[4] * src[x + +1 * stride] + \
2023 F[5] * src[x + +2 * stride] + \
2024 F[6] * src[x + +3 * stride] + \
2025 F[7] * src[x + +4 * stride] + 64) >> 7)
/* One-dimensional 8-tap filtering over a w x h block; ds is the tap
 * spacing (1 for horizontal, src_stride for vertical). avg == 1 rounds the
 * filtered value together with the existing dst (bi-prediction). The
 * filter_8tap_1d_fn macro then stamps out the four put/avg x h/v variants.
 * NOTE(review): sampled listing — braces, the avg/else structure and row
 * advance (orig. 2031, 2034, 2037-2039, 2041, 2043, 2045-2051, 2056, 2058)
 * are missing from view; visible text kept byte-identical. */
2027 static av_always_inline void do_8tap_1d_c(uint8_t *_dst, ptrdiff_t dst_stride,
2028 const uint8_t *_src, ptrdiff_t src_stride,
2029 int w, int h, ptrdiff_t ds,
2030 const int16_t *filter, int avg)
2032 pixel *dst = (pixel *) _dst;
2033 const pixel *src = (const pixel *) _src;
2035 dst_stride /= sizeof(pixel);
2036 src_stride /= sizeof(pixel);
2040 for (x = 0; x < w; x++)
2042 dst[x] = (dst[x] + FILTER_8TAP(src, x, filter, ds) + 1) >> 1;
2044 dst[x] = FILTER_8TAP(src, x, filter, ds);
2052 #define filter_8tap_1d_fn(opn, opa, dir, ds) \
2053 static av_noinline void opn##_8tap_1d_##dir##_c(uint8_t *dst, ptrdiff_t dst_stride, \
2054 const uint8_t *src, ptrdiff_t src_stride, \
2055 int w, int h, const int16_t *filter) \
2057 do_8tap_1d_c(dst, dst_stride, src, src_stride, w, h, ds, filter, opa); \
2060 filter_8tap_1d_fn(put, 0, v, src_stride / sizeof(pixel))
2061 filter_8tap_1d_fn(put, 0, h, 1)
2062 filter_8tap_1d_fn(avg, 1, v, src_stride / sizeof(pixel))
2063 filter_8tap_1d_fn(avg, 1, h, 1)
2065 #undef filter_8tap_1d_fn
/* Two-dimensional 8-tap filtering: horizontal pass into a fixed 64-wide
 * intermediate buffer (h + 7 rows to cover the vertical taps; src is backed
 * up 3 rows first), then vertical pass from tmp into dst. tmp row stride is
 * hard-coded to 64 — the maximum VP9 block width.
 * NOTE(review): sampled listing — braces, loop headers and row advances
 * (orig. 2071-2072, 2076, 2080-2082, 2085-2089, 2091-2093, 2095, 2097,
 * 2099-2105, 2111, 2113) are missing from view; text kept byte-identical. */
2067 static av_always_inline void do_8tap_2d_c(uint8_t *_dst, ptrdiff_t dst_stride,
2068 const uint8_t *_src, ptrdiff_t src_stride,
2069 int w, int h, const int16_t *filterx,
2070 const int16_t *filtery, int avg)
2073 pixel tmp[64 * 71], *tmp_ptr = tmp;
2074 pixel *dst = (pixel *) _dst;
2075 const pixel *src = (const pixel *) _src;
2077 dst_stride /= sizeof(pixel);
2078 src_stride /= sizeof(pixel);
2079 src -= src_stride * 3;
2083 for (x = 0; x < w; x++)
2084 tmp_ptr[x] = FILTER_8TAP(src, x, filterx, 1);
2090 tmp_ptr = tmp + 64 * 3;
2094 for (x = 0; x < w; x++)
2096 dst[x] = (dst[x] + FILTER_8TAP(tmp_ptr, x, filtery, 64) + 1) >> 1;
2098 dst[x] = FILTER_8TAP(tmp_ptr, x, filtery, 64);
2106 #define filter_8tap_2d_fn(opn, opa) \
2107 static av_noinline void opn##_8tap_2d_hv_c(uint8_t *dst, ptrdiff_t dst_stride, \
2108 const uint8_t *src, ptrdiff_t src_stride, \
2109 int w, int h, const int16_t *filterx, \
2110 const int16_t *filtery) \
2112 do_8tap_2d_c(dst, dst_stride, src, src_stride, w, h, filterx, filtery, opa); \
2115 filter_8tap_2d_fn(put, 0)
2116 filter_8tap_2d_fn(avg, 1)
2118 #undef filter_8tap_2d_fn
/* Final 8-tap entry-point generators matching the mc table signature:
 * filter_fn_1d binds one direction (dir_m selects mx or my as the filter
 * index), filter_fn_2d binds both. `avg` doubles as the put/avg name prefix.
 * NOTE(review): sampled listing — brace lines (orig. 2124, 2127-2128,
 * 2133, 2137) are missing from view; visible text kept byte-identical. */
2120 #define filter_fn_1d(sz, dir, dir_m, type, type_idx, avg) \
2121 static void avg##_8tap_##type##_##sz##dir##_c(uint8_t *dst, ptrdiff_t dst_stride, \
2122 const uint8_t *src, ptrdiff_t src_stride, \
2123 int h, int mx, int my) \
2125 avg##_8tap_1d_##dir##_c(dst, dst_stride, src, src_stride, sz, h, \
2126 vp9_subpel_filters[type_idx][dir_m]); \
2129 #define filter_fn_2d(sz, type, type_idx, avg) \
2130 static void avg##_8tap_##type##_##sz##hv_c(uint8_t *dst, ptrdiff_t dst_stride, \
2131 const uint8_t *src, ptrdiff_t src_stride, \
2132 int h, int mx, int my) \
2134 avg##_8tap_2d_hv_c(dst, dst_stride, src, src_stride, sz, h, \
2135 vp9_subpel_filters[type_idx][mx], \
2136 vp9_subpel_filters[type_idx][my]); \
/* Bilinear tap: linear blend between src[x] and its neighbor at `stride`,
 * weighted by the 4-bit fraction mxy (rounded, >> 4). do_bilin_1d_c applies
 * it over a w x h block with optional dst averaging, mirroring do_8tap_1d_c;
 * bilin_1d_fn stamps out the put/avg x h/v variants.
 * NOTE(review): sampled listing — braces, the avg/else structure and the
 * #undef (orig. 2145, 2148, 2151-2153, 2155, 2157, 2159-2165, 2170, 2172,
 * 2179) are missing from view; visible text kept byte-identical. */
2139 #define FILTER_BILIN(src, x, mxy, stride) \
2140 (src[x] + ((mxy * (src[x + stride] - src[x]) + 8) >> 4))
2142 static av_always_inline void do_bilin_1d_c(uint8_t *_dst, ptrdiff_t dst_stride,
2143 const uint8_t *_src, ptrdiff_t src_stride,
2144 int w, int h, ptrdiff_t ds, int mxy, int avg)
2146 pixel *dst = (pixel *) _dst;
2147 const pixel *src = (const pixel *) _src;
2149 dst_stride /= sizeof(pixel);
2150 src_stride /= sizeof(pixel);
2154 for (x = 0; x < w; x++)
2156 dst[x] = (dst[x] + FILTER_BILIN(src, x, mxy, ds) + 1) >> 1;
2158 dst[x] = FILTER_BILIN(src, x, mxy, ds);
2166 #define bilin_1d_fn(opn, opa, dir, ds) \
2167 static av_noinline void opn##_bilin_1d_##dir##_c(uint8_t *dst, ptrdiff_t dst_stride, \
2168 const uint8_t *src, ptrdiff_t src_stride, \
2169 int w, int h, int mxy) \
2171 do_bilin_1d_c(dst, dst_stride, src, src_stride, w, h, ds, mxy, opa); \
2174 bilin_1d_fn(put, 0, v, src_stride / sizeof(pixel))
2175 bilin_1d_fn(put, 0, h, 1)
2176 bilin_1d_fn(avg, 1, v, src_stride / sizeof(pixel))
2177 bilin_1d_fn(avg, 1, h, 1)
2181 static av_always_inline void do_bilin_2d_c(uint8_t *_dst, ptrdiff_t dst_stride,
2182 const uint8_t *_src, ptrdiff_t src_stride,
2183 int w, int h, int mx, int my, int avg)
2185 pixel tmp[64 * 65], *tmp_ptr = tmp;
2187 pixel *dst = (pixel *) _dst;
2188 const pixel *src = (const pixel *) _src;
2190 dst_stride /= sizeof(pixel);
2191 src_stride /= sizeof(pixel);
2195 for (x = 0; x < w; x++)
2196 tmp_ptr[x] = FILTER_BILIN(src, x, mx, 1);
2206 for (x = 0; x < w; x++)
2208 dst[x] = (dst[x] + FILTER_BILIN(tmp_ptr, x, my, 64) + 1) >> 1;
2210 dst[x] = FILTER_BILIN(tmp_ptr, x, my, 64);
// Instantiates the put/avg 2D (hv) bilinear wrappers around do_bilin_2d_c.
2218 #define bilin_2d_fn(opn, opa) \
2219 static void opn##_bilin_2d_hv_c(uint8_t *dst, ptrdiff_t dst_stride, \
2220 const uint8_t *src, ptrdiff_t src_stride, \
2221 int w, int h, int mx, int my) \
2223 do_bilin_2d_c(dst, dst_stride, src, src_stride, w, h, mx, my, opa); \
// Per-block-size bilinear wrappers: bind the width "sz" and the relevant
// motion component (mx for h, my for v) into a fixed-signature MC function.
2231 #define bilinf_fn_1d(sz, dir, dir_m, avg) \
2232 static void avg##_bilin_##sz##dir##_c(uint8_t *dst, ptrdiff_t dst_stride, \
2233 const uint8_t *src, ptrdiff_t src_stride, \
2234 int h, int mx, int my) \
2236 avg##_bilin_1d_##dir##_c(dst, dst_stride, src, src_stride, sz, h, dir_m); \
// Same, for the combined horizontal + vertical (hv) case.
2239 #define bilinf_fn_2d(sz, avg) \
2240 static void avg##_bilin_##sz##hv_c(uint8_t *dst, ptrdiff_t dst_stride, \
2241 const uint8_t *src, ptrdiff_t src_stride, \
2242 int h, int mx, int my) \
2244 avg##_bilin_2d_hv_c(dst, dst_stride, src, src_stride, sz, h, mx, my); \
// For one block size, instantiate all 12 subpel variants: h/v/hv for each
// of the three 8-tap filter types, plus h/v/hv bilinear.
2247 #define filter_fn(sz, avg) \
2248 filter_fn_1d(sz, h, mx, regular, FILTER_8TAP_REGULAR, avg) \
2249 filter_fn_1d(sz, v, my, regular, FILTER_8TAP_REGULAR, avg) \
2250 filter_fn_2d(sz, regular, FILTER_8TAP_REGULAR, avg) \
2251 filter_fn_1d(sz, h, mx, smooth, FILTER_8TAP_SMOOTH, avg) \
2252 filter_fn_1d(sz, v, my, smooth, FILTER_8TAP_SMOOTH, avg) \
2253 filter_fn_2d(sz, smooth, FILTER_8TAP_SMOOTH, avg) \
2254 filter_fn_1d(sz, h, mx, sharp, FILTER_8TAP_SHARP, avg) \
2255 filter_fn_1d(sz, v, my, sharp, FILTER_8TAP_SHARP, avg) \
2256 filter_fn_2d(sz, sharp, FILTER_8TAP_SHARP, avg) \
2257 bilinf_fn_1d(sz, h, mx, avg) \
2258 bilinf_fn_1d(sz, v, my, avg) \
2259 bilinf_fn_2d(sz, avg)
// Instantiate the full variant set for each VP9 block width (64..16 visible
// here; 8 and 4 are elided in this view). Expanded once for put and once
// for avg, then the helper macros are undefined.
2261 #define filter_fn_set(avg) \
2262 filter_fn(64, avg) \
2263 filter_fn(32, avg) \
2264 filter_fn(16, avg) \
2272 #undef filter_fn_set
// Fills the dsp->mc[] function-pointer table for unscaled motion
// compensation. Indexing: mc[size_idx][filter][put/avg][have_h][have_v],
// where size_idx 0..4 maps to block widths 64/32/16/8/4.
2278 static av_cold void vp9dsp_mc_init(VP9DSPContext *dsp)
// Full-pel case ([0][0]): the filter type is irrelevant, so all four
// filter slots point at the same copy/avg function.
2280 #define init_fpel(idx1, idx2, sz, type) \
2281 dsp->mc[idx1][FILTER_8TAP_SMOOTH ][idx2][0][0] = type##sz##_c; \
2282 dsp->mc[idx1][FILTER_8TAP_REGULAR][idx2][0][0] = type##sz##_c; \
2283 dsp->mc[idx1][FILTER_8TAP_SHARP ][idx2][0][0] = type##sz##_c; \
2284 dsp->mc[idx1][FILTER_BILINEAR ][idx2][0][0] = type##sz##_c
// idx2 = 0 is put (copy), idx2 = 1 is avg.
2286 #define init_copy_avg(idx, sz) \
2287 init_fpel(idx, 0, sz, copy); \
2288 init_fpel(idx, 1, sz, avg)
2290 init_copy_avg(0, 64);
2291 init_copy_avg(1, 32);
2292 init_copy_avg(2, 16);
2293 init_copy_avg(3, 8);
2294 init_copy_avg(4, 4);
2296 #undef init_copy_avg
// Subpel case: one entry per filter type, built from the name-pasted
// functions generated by the filter_fn/bilinf macros above.
2299 #define init_subpel1(idx1, idx2, idxh, idxv, sz, dir, type) \
2300 dsp->mc[idx1][FILTER_8TAP_SMOOTH ][idx2][idxh][idxv] = type##_8tap_smooth_##sz##dir##_c; \
2301 dsp->mc[idx1][FILTER_8TAP_REGULAR][idx2][idxh][idxv] = type##_8tap_regular_##sz##dir##_c; \
2302 dsp->mc[idx1][FILTER_8TAP_SHARP ][idx2][idxh][idxv] = type##_8tap_sharp_##sz##dir##_c; \
2303 dsp->mc[idx1][FILTER_BILINEAR ][idx2][idxh][idxv] = type##_bilin_##sz##dir##_c
// Expand one (have_h, have_v, dir) combination across all five sizes.
2305 #define init_subpel2(idx, idxh, idxv, dir, type) \
2306 init_subpel1(0, idx, idxh, idxv, 64, dir, type); \
2307 init_subpel1(1, idx, idxh, idxv, 32, dir, type); \
2308 init_subpel1(2, idx, idxh, idxv, 16, dir, type); \
2309 init_subpel1(3, idx, idxh, idxv, 8, dir, type); \
2310 init_subpel1(4, idx, idxh, idxv, 4, dir, type)
// hv ([1][1]), v-only ([0][1]) and h-only ([1][0]) variants.
2312 #define init_subpel3(idx, type) \
2313 init_subpel2(idx, 1, 1, hv, type); \
2314 init_subpel2(idx, 0, 1, v, type); \
2315 init_subpel2(idx, 1, 0, h, type)
2317 init_subpel3(0, put);
2318 init_subpel3(1, avg);
// 8-tap motion compensation with scaling (reference frame has a different
// resolution): dx/dy are the per-output-pixel source step (in 1/16th-pel
// units) and mx/my the initial subpel phase. Horizontal pass writes into a
// 64-wide tmp buffer, vertical pass reads it back. NOTE(review): the
// per-pixel phase-stepping loops and brace structure are elided here.
2325 static av_always_inline void do_scaled_8tap_c(uint8_t *_dst, ptrdiff_t dst_stride,
2326 const uint8_t *_src, ptrdiff_t src_stride,
2327 int w, int h, int mx, int my,
2328 int dx, int dy, int avg,
2329 const int16_t (*filters)[8])
// Number of source rows the vertical pass will touch: the last row's
// integer position plus 8 rows of 8-tap filter history.
2331 int tmp_h = (((h - 1) * dy + my) >> 4) + 8;
// 64 * 135: max 64-wide block, up to 2x vertical scale plus tap margin.
2332 pixel tmp[64 * 135], *tmp_ptr = tmp;
2333 pixel *dst = (pixel *) _dst;
2334 const pixel *src = (const pixel *) _src;
// Strides arrive in bytes; convert to pixel units for >8-bit depths.
2336 dst_stride /= sizeof(pixel);
2337 src_stride /= sizeof(pixel);
// Back up 3 rows so the 8-tap filter has its left/top history.
2338 src -= src_stride * 3;
2341 int imx = mx, ioff = 0;
2343 for (x = 0; x < w; x++) {
2344 tmp_ptr[x] = FILTER_8TAP(src, ioff, filters[imx], 1);
// Skip the 3 history rows before starting the vertical pass.
2354 tmp_ptr = tmp + 64 * 3;
2357 const int16_t *filter = filters[my];
2359 for (x = 0; x < w; x++)
2361 dst[x] = (dst[x] + FILTER_8TAP(tmp_ptr, x, filter, 64) + 1) >> 1;
2363 dst[x] = FILTER_8TAP(tmp_ptr, x, filter, 64);
// Advance by the integer part of the accumulated vertical position.
2367 tmp_ptr += (my >> 4) * 64;
// put/avg entry points for scaled 8-tap MC; the filter-coefficient table
// is passed through so one body serves all three 8-tap filter types.
2373 #define scaled_filter_8tap_fn(opn, opa) \
2374 static av_noinline void opn##_scaled_8tap_c(uint8_t *dst, ptrdiff_t dst_stride, \
2375 const uint8_t *src, ptrdiff_t src_stride, \
2376 int w, int h, int mx, int my, int dx, int dy, \
2377 const int16_t (*filters)[8]) \
2379 do_scaled_8tap_c(dst, dst_stride, src, src_stride, w, h, mx, my, dx, dy, \
2383 scaled_filter_8tap_fn(put, 0)
2384 scaled_filter_8tap_fn(avg, 1)
2386 #undef scaled_filter_8tap_fn
// Per-size, per-filter-type scaled MC wrapper: binds the block width "sz"
// and selects the coefficient table via type_idx.
2390 #define scaled_filter_fn(sz, type, type_idx, avg) \
2391 static void avg##_scaled_##type##_##sz##_c(uint8_t *dst, ptrdiff_t dst_stride, \
2392 const uint8_t *src, ptrdiff_t src_stride, \
2393 int h, int mx, int my, int dx, int dy) \
2395 avg##_scaled_8tap_c(dst, dst_stride, src, src_stride, sz, h, mx, my, dx, dy, \
2396 vp9_subpel_filters[type_idx]); \
// Bilinear motion compensation with scaling; same two-pass structure as
// do_scaled_8tap_c but with 2-tap filters, so only 2 rows of history are
// needed (tmp_h adds 2 instead of 8 and no 3-row backup is required).
// NOTE(review): phase-stepping loops and brace structure are elided here.
2399 static av_always_inline void do_scaled_bilin_c(uint8_t *_dst, ptrdiff_t dst_stride,
2400 const uint8_t *_src, ptrdiff_t src_stride,
2401 int w, int h, int mx, int my,
2402 int dx, int dy, int avg)
// 64 * 129: max 64-wide block, up to 2x vertical scale plus one tap row.
2404 pixel tmp[64 * 129], *tmp_ptr = tmp;
2405 int tmp_h = (((h - 1) * dy + my) >> 4) + 2;
2406 pixel *dst = (pixel *) _dst;
2407 const pixel *src = (const pixel *) _src;
// Strides arrive in bytes; convert to pixel units for >8-bit depths.
2409 dst_stride /= sizeof(pixel);
2410 src_stride /= sizeof(pixel);
2413 int imx = mx, ioff = 0;
2415 for (x = 0; x < w; x++) {
2416 tmp_ptr[x] = FILTER_BILIN(src, ioff, imx, 1);
// Second pass: vertical bilinear filter over tmp (row pitch fixed at 64).
2430 for (x = 0; x < w; x++)
2432 dst[x] = (dst[x] + FILTER_BILIN(tmp_ptr, x, my, 64) + 1) >> 1;
2434 dst[x] = FILTER_BILIN(tmp_ptr, x, my, 64);
// Advance by the integer part of the accumulated vertical position.
2438 tmp_ptr += (my >> 4) * 64;
// put/avg entry points for scaled bilinear MC.
2444 #define scaled_bilin_fn(opn, opa) \
2445 static av_noinline void opn##_scaled_bilin_c(uint8_t *dst, ptrdiff_t dst_stride, \
2446 const uint8_t *src, ptrdiff_t src_stride, \
2447 int w, int h, int mx, int my, int dx, int dy) \
2449 do_scaled_bilin_c(dst, dst_stride, src, src_stride, w, h, mx, my, dx, dy, opa); \
2452 scaled_bilin_fn(put, 0)
2453 scaled_bilin_fn(avg, 1)
2455 #undef scaled_bilin_fn
// Per-size scaled bilinear wrapper (binds the block width "sz").
2459 #define scaled_bilinf_fn(sz, avg) \
2460 static void avg##_scaled_bilin_##sz##_c(uint8_t *dst, ptrdiff_t dst_stride, \
2461 const uint8_t *src, ptrdiff_t src_stride, \
2462 int h, int mx, int my, int dx, int dy) \
2464 avg##_scaled_bilin_c(dst, dst_stride, src, src_stride, sz, h, mx, my, dx, dy); \
// One block size: the three 8-tap filter types plus bilinear.
2467 #define scaled_filter_fns(sz, avg) \
2468 scaled_filter_fn(sz, regular, FILTER_8TAP_REGULAR, avg) \
2469 scaled_filter_fn(sz, smooth, FILTER_8TAP_SMOOTH, avg) \
2470 scaled_filter_fn(sz, sharp, FILTER_8TAP_SHARP, avg) \
2471 scaled_bilinf_fn(sz, avg)
// All five VP9 block widths, expanded once for put and once for avg.
2473 #define scaled_filter_fn_set(avg) \
2474 scaled_filter_fns(64, avg) \
2475 scaled_filter_fns(32, avg) \
2476 scaled_filter_fns(16, avg) \
2477 scaled_filter_fns(8, avg) \
2478 scaled_filter_fns(4, avg)
2480 scaled_filter_fn_set(put)
2481 scaled_filter_fn_set(avg)
2483 #undef scaled_filter_fns
2484 #undef scaled_filter_fn_set
2485 #undef scaled_filter_fn
2486 #undef scaled_bilinf_fn
// Fills the dsp->smc[] function-pointer table for scaled motion
// compensation. Indexing: smc[size_idx][filter][put/avg], size_idx 0..4
// mapping to block widths 64/32/16/8/4.
2488 static av_cold void vp9dsp_scaled_mc_init(VP9DSPContext *dsp)
2490 #define init_scaled(idx1, idx2, sz, type) \
2491 dsp->smc[idx1][FILTER_8TAP_SMOOTH ][idx2] = type##_scaled_smooth_##sz##_c; \
2492 dsp->smc[idx1][FILTER_8TAP_REGULAR][idx2] = type##_scaled_regular_##sz##_c; \
2493 dsp->smc[idx1][FILTER_8TAP_SHARP ][idx2] = type##_scaled_sharp_##sz##_c; \
2494 dsp->smc[idx1][FILTER_BILINEAR ][idx2] = type##_scaled_bilin_##sz##_c
// idx2 = 0 is put, idx2 = 1 is avg.
2496 #define init_scaled_put_avg(idx, sz) \
2497 init_scaled(idx, 0, sz, put); \
2498 init_scaled(idx, 1, sz, avg)
2500 init_scaled_put_avg(0, 64);
2501 init_scaled_put_avg(1, 32);
2502 init_scaled_put_avg(2, 16);
2503 init_scaled_put_avg(3, 8);
2504 init_scaled_put_avg(4, 4);
2506 #undef init_scaled_put_avg
// Public entry point for this bit-depth instantiation (FUNC() pastes the
// bit depth into the symbol name): initializes all DSP function tables —
// intra prediction, inverse transforms, loop filter, and both unscaled
// and scaled motion compensation.
2510 av_cold void FUNC(ff_vp9dsp_init)(VP9DSPContext *dsp)
2512 vp9dsp_intrapred_init(dsp);
2513 vp9dsp_itxfm_init(dsp);
2514 vp9dsp_loopfilter_init(dsp);
2515 vp9dsp_mc_init(dsp);
2516 vp9dsp_scaled_mc_init(dsp);