/*
 * Copyright (c) 2015 Ronald S. Bultje <rsbultje@gmail.com>
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with FFmpeg; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
 */

#include <math.h>
#include <string.h>

#include "checkasm.h"
#include "libavcodec/vp9data.h"
#include "libavcodec/vp9dsp.h"
#include "libavutil/common.h"
#include "libavutil/internal.h"
#include "libavutil/intreadwrite.h"
#include "libavutil/mathematics.h"

static const uint32_t pixel_mask[3] = { 0xffffffff, 0x03ff03ff, 0x0fff0fff };
#define SIZEOF_PIXEL ((bit_depth + 7) / 8)
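// pixel_mask[] holds one 32-bit word of per-sample maxima: index 0 is 8 bpp
// (four 0xff bytes), 1 is 10 bpp (two 0x03ff samples) and 2 is 12 bpp (two
// 0x0fff samples); (bit_depth - 8) >> 1 maps 8/10/12 to index 0/1/2.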

#define randomize_buffers() \
    do { \
        uint32_t mask = pixel_mask[(bit_depth - 8) >> 1]; \
        int k; \
        for (k = -4; k < SIZEOF_PIXEL * FFMAX(8, size); k += 4) { \
            uint32_t r = rnd() & mask; \
            AV_WN32A(a + k, r); \
        } \
        for (k = 0; k < size * SIZEOF_PIXEL; k += 4) { \
            uint32_t r = rnd() & mask; \
            AV_WN32A(l + k, r); \
        } \
    } while (0)

static void check_ipred(void)
{
    LOCAL_ALIGNED_32(uint8_t, a_buf, [64 * 2]);
    uint8_t *a = &a_buf[32 * 2];
    LOCAL_ALIGNED_32(uint8_t, l, [32 * 2]);
    LOCAL_ALIGNED_32(uint8_t, dst0, [32 * 32 * 2]);
    LOCAL_ALIGNED_32(uint8_t, dst1, [32 * 32 * 2]);
    VP9DSPContext dsp;
    int tx, mode, bit_depth;
    declare_func_emms(AV_CPU_FLAG_MMX | AV_CPU_FLAG_MMXEXT, void, uint8_t *dst, ptrdiff_t stride,
                      const uint8_t *left, const uint8_t *top);
    static const char *const mode_names[N_INTRA_PRED_MODES] = {
        [VERT_PRED] = "vert",
        [HOR_PRED] = "hor",
        [DC_PRED] = "dc",
        [DIAG_DOWN_LEFT_PRED] = "diag_downleft",
        [DIAG_DOWN_RIGHT_PRED] = "diag_downright",
        [VERT_RIGHT_PRED] = "vert_right",
        [HOR_DOWN_PRED] = "hor_down",
        [VERT_LEFT_PRED] = "vert_left",
        [HOR_UP_PRED] = "hor_up",
        [TM_VP8_PRED] = "tm",
        [LEFT_DC_PRED] = "dc_left",
        [TOP_DC_PRED] = "dc_top",
        [DC_128_PRED] = "dc_128",
        [DC_127_PRED] = "dc_127",
        [DC_129_PRED] = "dc_129",
    };

    for (bit_depth = 8; bit_depth <= 12; bit_depth += 2) {
        ff_vp9dsp_init(&dsp, bit_depth, 0);
        for (tx = 0; tx < 4; tx++) {
            int size = 4 << tx;

            for (mode = 0; mode < N_INTRA_PRED_MODES; mode++) {
                if (check_func(dsp.intra_pred[tx][mode], "vp9_%s_%dx%d_%dbpp",
                               mode_names[mode], size, size, bit_depth)) {
                    randomize_buffers();
                    call_ref(dst0, size * SIZEOF_PIXEL, l, a);
                    call_new(dst1, size * SIZEOF_PIXEL, l, a);
                    if (memcmp(dst0, dst1, size * size * SIZEOF_PIXEL))
                        fail();
                    bench_new(dst1, size * SIZEOF_PIXEL, l, a);
                }
            }
        }
    }
    report("ipred");
}
#undef randomize_buffers

#define randomize_buffers() \
    do { \
        uint32_t mask = pixel_mask[(bit_depth - 8) >> 1]; \
        for (y = 0; y < sz; y++) { \
            for (x = 0; x < sz * SIZEOF_PIXEL; x += 4) { \
                uint32_t r = rnd() & mask; \
                AV_WN32A(dst + y * sz * SIZEOF_PIXEL + x, r); \
                AV_WN32A(src + y * sz * SIZEOF_PIXEL + x, rnd() & mask); \
            } \
            for (x = 0; x < sz; x++) { \
                if (bit_depth == 8) { \
                    coef[y * sz + x] = src[y * sz + x] - dst[y * sz + x]; \
                } else { \
                    ((int32_t *) coef)[y * sz + x] = \
                        ((uint16_t *) src)[y * sz + x] - \
                        ((uint16_t *) dst)[y * sz + x]; \
                } \
            } \
        } \
    } while (0)
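// coef holds the residual src - dst: one int16_t per coefficient at 8 bpp,
// the same buffer reinterpreted as int32_t at 10/12 bpp (which is why all
// coefficient buffers are declared int16_t[32 * 32 * 2]).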

// wht function copied from libvpx
static void fwht_1d(double *out, const double *in, int sz)
{
    double t0 = in[0] + in[1];
    double t3 = in[3] - in[2];
    double t4 = trunc((t0 - t3) * 0.5);
    double t1 = t4 - in[1];
    double t2 = t4 - in[2];

    out[0] = t0 - t2;
    out[1] = t2;
    out[2] = t3 + t1;
    out[3] = t1;
}
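
// reference DCT-II: out[k] = sum_n in[n] * cos(M_PI * (2n + 1) * k / (2 * sz)),
// with the DC term scaled by 1/sqrt(2); block-size normalization is applied
// later in ftx_2d().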
static void fdct_1d(double *out, const double *in, int sz)
{
    int k, n;

    for (k = 0; k < sz; k++) {
        out[k] = 0.0;
        for (n = 0; n < sz; n++)
            out[k] += in[n] * cos(M_PI * (2 * n + 1) * k / (sz * 2.0));
    }
    out[0] *= M_SQRT1_2;
}

// see "Towards jointly optimal spatial prediction and adaptive transform in
// video/image coding", by J. Han, A. Saxena, and K. Rose
// IEEE Proc. ICASSP, pp. 726-729, Mar. 2010.
static void fadst4_1d(double *out, const double *in, int sz)
{
    int k, n;

    for (k = 0; k < sz; k++) {
        out[k] = 0.0;
        for (n = 0; n < sz; n++)
            out[k] += in[n] * sin(M_PI * (n + 1) * (2 * k + 1) / (sz * 2.0 + 1.0));
    }
}

// see "A Butterfly Structured Design of The Hybrid Transform Coding Scheme",
// by Jingning Han, Yaowu Xu, and Debargha Mukherjee
// http://static.googleusercontent.com/media/research.google.com/en//pubs/archive/41418.pdf
static void fadst_1d(double *out, const double *in, int sz)
{
    int k, n;

    for (k = 0; k < sz; k++) {
        out[k] = 0.0;
        for (n = 0; n < sz; n++)
            out[k] += in[n] * sin(M_PI * (2 * n + 1) * (2 * k + 1) / (sz * 4.0));
    }
}
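
// ftx_2d() implements the separable 2D forward transform: the chosen 1D
// transform is applied to each row (scaled and transposed into a temporary),
// then to each column. The scaling factors bring the float output into the
// coefficient range that the fixed-point inverse transforms under test
// expect for each block size.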
typedef void (*ftx1d_fn)(double *out, const double *in, int sz);
static void ftx_2d(double *out, const double *in, enum TxfmMode tx,
                   enum TxfmType txtp, int sz)
{
    static const double scaling_factors[5][4] = {
        { 4.0, 16.0 * M_SQRT1_2 / 3.0, 16.0 * M_SQRT1_2 / 3.0, 32.0 / 9.0 },
        { 2.0, 2.0, 2.0, 2.0 },
        { 1.0, 1.0, 1.0, 1.0 },
        { 0.25 },
        { 4.0 },
    };
    static const ftx1d_fn ftx1d_tbl[5][4][2] = {
        {
            { fdct_1d, fdct_1d },
            { fadst4_1d, fdct_1d },
            { fdct_1d, fadst4_1d },
            { fadst4_1d, fadst4_1d },
        }, {
            { fdct_1d, fdct_1d },
            { fadst_1d, fdct_1d },
            { fdct_1d, fadst_1d },
            { fadst_1d, fadst_1d },
        }, {
            { fdct_1d, fdct_1d },
            { fadst_1d, fdct_1d },
            { fdct_1d, fadst_1d },
            { fadst_1d, fadst_1d },
        }, {
            { fdct_1d, fdct_1d },
        }, {
            { fwht_1d, fwht_1d },
        },
    };
    double scaling_factor = scaling_factors[tx][txtp];
    double temp[1024];
    int i, j;

    for (i = 0; i < sz; ++i) {
        double temp_out[32];

        ftx1d_tbl[tx][txtp][0](temp_out, &in[i * sz], sz);
        // scale and transpose
        for (j = 0; j < sz; ++j)
            temp[j * sz + i] = temp_out[j] * scaling_factor;
    }

    for (i = 0; i < sz; i++)
        ftx1d_tbl[tx][txtp][1](&out[i * sz], &temp[i * sz], sz);
}

static void ftx(int16_t *buf, enum TxfmMode tx,
                enum TxfmType txtp, int sz, int bit_depth)
{
    double ind[1024], outd[1024];
    int n;

    emms_c();
    for (n = 0; n < sz * sz; n++) {
        if (bit_depth == 8)
            ind[n] = buf[n];
        else
            ind[n] = ((int32_t *) buf)[n];
    }
    ftx_2d(outd, ind, tx, txtp, sz);
    for (n = 0; n < sz * sz; n++) {
        if (bit_depth == 8)
            buf[n] = lrint(outd[n]);
        else
            ((int32_t *) buf)[n] = lrint(outd[n]);
    }
}

static int copy_subcoefs(int16_t *out, const int16_t *in, enum TxfmMode tx,
                         enum TxfmType txtp, int sz, int sub, int bit_depth)
{
    // copy the topleft coefficients such that the return value (being the
    // coefficient scantable index for the eob token) guarantees that only
    // the topleft $sub out of $sz (where $sz >= $sub) coefficients in both
    // dimensions are non-zero. This leads to branching to specific optimized
    // simd versions (e.g. dc-only) so that we get full asm coverage in this
    // test
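    //
    // e.g. a 4x4 DCT_DCT block with sub == 1 keeps only the DC coefficient
    // and returns eob == 1, which steers SIMD implementations into their
    // dc-only shortcut.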

    const int16_t *scan = vp9_scans[tx][txtp];
    int n, eob;

    for (n = 0; n < sz * sz; n++) {
        int rc = scan[n], rcx = rc % sz, rcy = rc / sz;

        // find eob for this sub-idct
        if (rcx >= sub || rcy >= sub)
            break;

        // copy coef
        if (bit_depth == 8) {
            out[rc] = in[rc];
        } else {
            AV_COPY32(&out[rc * 2], &in[rc * 2]);
        }
    }

    eob = n;

    for (; n < sz * sz; n++) {
        int rc = scan[n];

        // zero
        if (bit_depth == 8) {
            out[rc] = 0;
        } else {
            AV_ZERO32(&out[rc * 2]);
        }
    }

    return eob;
}

static int iszero(const int16_t *c, int sz)
{
    int n;

    for (n = 0; n < sz / sizeof(int16_t); n += 2)
        if (AV_RN32A(&c[n]))
            return 0;

    return 1;
}

#define SIZEOF_COEF (2 * ((bit_depth + 7) / 8))
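
// The itxfm_add functions must zero the coefficient buffer once they have
// consumed it, so besides comparing the pixel output, check_itxfm() verifies
// through iszero() that both subcoef buffers come back fully cleared.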
static void check_itxfm(void)
{
    LOCAL_ALIGNED_32(uint8_t, src, [32 * 32 * 2]);
    LOCAL_ALIGNED_32(uint8_t, dst, [32 * 32 * 2]);
    LOCAL_ALIGNED_32(uint8_t, dst0, [32 * 32 * 2]);
    LOCAL_ALIGNED_32(uint8_t, dst1, [32 * 32 * 2]);
    LOCAL_ALIGNED_32(int16_t, coef, [32 * 32 * 2]);
    LOCAL_ALIGNED_32(int16_t, subcoef0, [32 * 32 * 2]);
    LOCAL_ALIGNED_32(int16_t, subcoef1, [32 * 32 * 2]);
    declare_func_emms(AV_CPU_FLAG_MMX | AV_CPU_FLAG_MMXEXT, void, uint8_t *dst, ptrdiff_t stride, int16_t *block, int eob);
    VP9DSPContext dsp;
    int y, x, tx, txtp, bit_depth, sub;
    static const char *const txtp_types[N_TXFM_TYPES] = {
        [DCT_DCT] = "dct_dct", [DCT_ADST] = "adst_dct",
        [ADST_DCT] = "dct_adst", [ADST_ADST] = "adst_adst"
    };

    for (bit_depth = 8; bit_depth <= 12; bit_depth += 2) {
        ff_vp9dsp_init(&dsp, bit_depth, 0);

        for (tx = TX_4X4; tx <= N_TXFM_SIZES /* 4 = lossless */; tx++) {
            int sz = 4 << (tx & 3);
            int n_txtps = tx < TX_32X32 ? N_TXFM_TYPES : 1;

            for (txtp = 0; txtp < n_txtps; txtp++) {
                if (check_func(dsp.itxfm_add[tx][txtp], "vp9_inv_%s_%dx%d_add_%d",
                               tx == 4 ? "wht_wht" : txtp_types[txtp], sz, sz,
                               bit_depth)) {
                    randomize_buffers();
                    ftx(coef, tx, txtp, sz, bit_depth);

                    for (sub = (txtp == 0) ? 1 : 2; sub <= sz; sub <<= 1) {
                        int eob;

                        if (sub < sz) {
                            eob = copy_subcoefs(subcoef0, coef, tx, txtp,
                                                sz, sub, bit_depth);
                        } else {
                            eob = sz * sz;
                            memcpy(subcoef0, coef, sz * sz * SIZEOF_COEF);
                        }

                        memcpy(dst0, dst, sz * sz * SIZEOF_PIXEL);
                        memcpy(dst1, dst, sz * sz * SIZEOF_PIXEL);
                        memcpy(subcoef1, subcoef0, sz * sz * SIZEOF_COEF);
                        call_ref(dst0, sz * SIZEOF_PIXEL, subcoef0, eob);
                        call_new(dst1, sz * SIZEOF_PIXEL, subcoef1, eob);
                        if (memcmp(dst0, dst1, sz * sz * SIZEOF_PIXEL) ||
                            !iszero(subcoef0, sz * sz * SIZEOF_COEF) ||
                            !iszero(subcoef1, sz * sz * SIZEOF_COEF))
                            fail();
                    }
                    bench_new(dst, sz * SIZEOF_PIXEL, coef, sz * sz);
                }
            }
        }
    }
    report("itxfm");
}
#undef randomize_buffers

#define setpx(a,b,c) \
    do { \
        if (SIZEOF_PIXEL == 1) { \
            buf0[(a) + (b) * jstride] = av_clip_uint8(c); \
        } else { \
            ((uint16_t *)buf0)[(a) + (b) * jstride] = av_clip_uintp2(c, bit_depth); \
        } \
    } while (0)

// c can be an assignment and must not be put under ()
#define setdx(a,b,c,d) setpx(a,b,c-(d)+(rnd()%((d)*2+1)))
#define setsx(a,b,c,d) setdx(a,b,c,(d) << (bit_depth - 8))
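// setdx() writes c perturbed by a random amount in [-d, d]; setsx() scales
// that spread to the current bit depth, so the generated edges sit just
// inside or outside the filter thresholds at 10/12 bpp as well.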

static void randomize_loopfilter_buffers(int bidx, int lineoff, int str,
                                         int bit_depth, int dir, const int *E,
                                         const int *F, const int *H, const int *I,
                                         uint8_t *buf0, uint8_t *buf1)
{
    uint32_t mask = (1 << bit_depth) - 1;
    int off = dir ? lineoff : lineoff * 16;
    int istride = dir ? 1 : 16;
    int jstride = dir ? str : 1;
    int i, j;
    for (i = 0; i < 2; i++) /* flat16 */ {
        int idx = off + i * istride, p0, q0;
        setpx(idx, 0, q0 = rnd() & mask);
        setsx(idx, -1, p0 = q0, E[bidx] >> 2);
        for (j = 1; j < 8; j++) {
            setsx(idx, -1 - j, p0, F[bidx]);
            setsx(idx, j, q0, F[bidx]);
        }
    }
    for (i = 2; i < 4; i++) /* flat8 */ {
        int idx = off + i * istride, p0, q0;
        setpx(idx, 0, q0 = rnd() & mask);
        setsx(idx, -1, p0 = q0, E[bidx] >> 2);
        for (j = 1; j < 4; j++) {
            setsx(idx, -1 - j, p0, F[bidx]);
            setsx(idx, j, q0, F[bidx]);
        }
        for (j = 4; j < 8; j++) {
            setpx(idx, -1 - j, rnd() & mask);
            setpx(idx, j, rnd() & mask);
        }
    }
    for (i = 4; i < 6; i++) /* regular */ {
        int idx = off + i * istride, p2, p1, p0, q0, q1, q2;
        setpx(idx, 0, q0 = rnd() & mask);
        setsx(idx, 1, q1 = q0, I[bidx]);
        setsx(idx, 2, q2 = q1, I[bidx]);
        setsx(idx, 3, q2, I[bidx]);
        setsx(idx, -1, p0 = q0, E[bidx] >> 2);
        setsx(idx, -2, p1 = p0, I[bidx]);
        setsx(idx, -3, p2 = p1, I[bidx]);
        setsx(idx, -4, p2, I[bidx]);
        for (j = 4; j < 8; j++) {
            setpx(idx, -1 - j, rnd() & mask);
            setpx(idx, j, rnd() & mask);
        }
    }
    for (i = 6; i < 8; i++) /* off */ {
        int idx = off + i * istride;
        for (j = 0; j < 8; j++) {
            setpx(idx, -1 - j, rnd() & mask);
            setpx(idx, j, rnd() & mask);
        }
    }
}
#define randomize_buffers(bidx, lineoff, str) \
        randomize_loopfilter_buffers(bidx, lineoff, str, bit_depth, dir, \
                                     E, F, H, I, buf0, buf1)
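
// base0/base1 hold a 16x16-pixel area plus 32 bytes of headroom; buf0/buf1
// point midoff_aligned bytes into that area so the filters can access
// samples on both sides of the edge, while comparisons cover the whole
// buffer starting at buf - midoff.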
static void check_loopfilter(void)
{
    LOCAL_ALIGNED_32(uint8_t, base0, [32 + 16 * 16 * 2]);
    LOCAL_ALIGNED_32(uint8_t, base1, [32 + 16 * 16 * 2]);
    VP9DSPContext dsp;
    int dir, wd, wd2, bit_depth;
    static const char *const dir_name[2] = { "h", "v" };
    static const int E[2] = { 20, 28 }, I[2] = { 10, 16 };
    static const int H[2] = { 7, 11 }, F[2] = { 1, 1 };
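    // E, I and H are the three VP9 loop filter strength values: E limits the
    // total edge delta, I limits the step between neighboring samples, and H
    // is the high-edge-variance threshold; two values each so the two edges
    // of the mix2 calls get distinct strengths. F is the spread used above
    // when synthesizing flat lines.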
    declare_func(void, uint8_t *dst, ptrdiff_t stride, int E, int I, int H);

    for (bit_depth = 8; bit_depth <= 12; bit_depth += 2) {
        ff_vp9dsp_init(&dsp, bit_depth, 0);

        for (dir = 0; dir < 2; dir++) {
            int midoff = (dir ? 8 * 8 : 8) * SIZEOF_PIXEL;
            int midoff_aligned = (dir ? 8 * 8 : 16) * SIZEOF_PIXEL;
            uint8_t *buf0 = base0 + midoff_aligned;
            uint8_t *buf1 = base1 + midoff_aligned;

            for (wd = 0; wd < 3; wd++) {
                // 4/8/16wd_8px loopfilter
                if (check_func(dsp.loop_filter_8[wd][dir],
                               "vp9_loop_filter_%s_%d_8_%dbpp",
                               dir_name[dir], 4 << wd, bit_depth)) {
                    randomize_buffers(0, 0, 8);
                    memcpy(buf1 - midoff, buf0 - midoff,
                           16 * 8 * SIZEOF_PIXEL);
                    call_ref(buf0, 16 * SIZEOF_PIXEL >> dir, E[0], I[0], H[0]);
                    call_new(buf1, 16 * SIZEOF_PIXEL >> dir, E[0], I[0], H[0]);
                    if (memcmp(buf0 - midoff, buf1 - midoff, 16 * 8 * SIZEOF_PIXEL))
                        fail();
                    bench_new(buf1, 16 * SIZEOF_PIXEL >> dir, E[0], I[0], H[0]);
                }
            }

            midoff = (dir ? 16 * 8 : 8) * SIZEOF_PIXEL;
            midoff_aligned = (dir ? 16 * 8 : 16) * SIZEOF_PIXEL;

            buf0 = base0 + midoff_aligned;
            buf1 = base1 + midoff_aligned;

            // 16wd_16px loopfilter
            if (check_func(dsp.loop_filter_16[dir],
                           "vp9_loop_filter_%s_16_16_%dbpp",
                           dir_name[dir], bit_depth)) {
                randomize_buffers(0, 0, 16);
                randomize_buffers(0, 8, 16);
                memcpy(buf1 - midoff, buf0 - midoff, 16 * 16 * SIZEOF_PIXEL);
                call_ref(buf0, 16 * SIZEOF_PIXEL, E[0], I[0], H[0]);
                call_new(buf1, 16 * SIZEOF_PIXEL, E[0], I[0], H[0]);
                if (memcmp(buf0 - midoff, buf1 - midoff, 16 * 16 * SIZEOF_PIXEL))
                    fail();
                bench_new(buf1, 16 * SIZEOF_PIXEL, E[0], I[0], H[0]);
            }

            for (wd = 0; wd < 2; wd++) {
                for (wd2 = 0; wd2 < 2; wd2++) {
                    // mix2 loopfilter
                    if (check_func(dsp.loop_filter_mix2[wd][wd2][dir],
                                   "vp9_loop_filter_mix2_%s_%d%d_16_%dbpp",
                                   dir_name[dir], 4 << wd, 4 << wd2, bit_depth)) {
                        randomize_buffers(0, 0, 16);
                        randomize_buffers(1, 8, 16);
                        memcpy(buf1 - midoff, buf0 - midoff, 16 * 16 * SIZEOF_PIXEL);
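                        // The mix2 functions filter two adjacent 8px edges in
                        // a single call; each threshold argument carries both
                        // edges packed into one int, low byte first, e.g.
                        // M(E) = (28 << 8) | 20 here.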
#define M(a) (((a)[1] << 8) | (a)[0])
                        call_ref(buf0, 16 * SIZEOF_PIXEL, M(E), M(I), M(H));
                        call_new(buf1, 16 * SIZEOF_PIXEL, M(E), M(I), M(H));
                        if (memcmp(buf0 - midoff, buf1 - midoff, 16 * 16 * SIZEOF_PIXEL))
                            fail();
                        bench_new(buf1, 16 * SIZEOF_PIXEL, M(E), M(I), M(H));
#undef M
                    }
                }
            }
        }
    }
    report("loopfilter");
}
#undef randomize_buffers

#define DST_BUF_SIZE (size * size * SIZEOF_PIXEL)
#define SRC_BUF_STRIDE 72
#define SRC_BUF_SIZE ((size + 7) * SRC_BUF_STRIDE * SIZEOF_PIXEL)
#define src (buf + 3 * SIZEOF_PIXEL * (SRC_BUF_STRIDE + 1))
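// src points 3 rows and 3 columns into buf: the 8-tap filters read up to 3
// samples before and 4 samples after each output position, so the margin
// around the block must hold valid data. SRC_BUF_STRIDE is 72, i.e. 64 plus
// 8 margin pixels.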

#define randomize_buffers() \
    do { \
        uint32_t mask = pixel_mask[(bit_depth - 8) >> 1]; \
        int k; \
        for (k = 0; k < SRC_BUF_SIZE; k += 4) { \
            uint32_t r = rnd() & mask; \
            AV_WN32A(buf + k, r); \
        } \
        for (k = 0; k < DST_BUF_SIZE; k += 4) { \
            uint32_t r = rnd() & mask; \
            AV_WN32A(dst0 + k, r); \
            AV_WN32A(dst1 + k, r); \
        } \
    } while (0)

static void check_mc(void)
{
    LOCAL_ALIGNED_32(uint8_t, buf, [72 * 72 * 2]);
    LOCAL_ALIGNED_32(uint8_t, dst0, [64 * 64 * 2]);
    LOCAL_ALIGNED_32(uint8_t, dst1, [64 * 64 * 2]);
    VP9DSPContext dsp;
    int op, hsize, bit_depth, filter, dx, dy;
    declare_func_emms(AV_CPU_FLAG_MMX | AV_CPU_FLAG_MMXEXT, void, uint8_t *dst, ptrdiff_t dst_stride,
                      const uint8_t *ref, ptrdiff_t ref_stride,
                      int h, int mx, int my);
    static const char *const filter_names[4] = {
        "8tap_smooth", "8tap_regular", "8tap_sharp", "bilin"
    };
    static const char *const subpel_names[2][2] = { { "", "h" }, { "v", "hv" } };
    static const char *const op_names[2] = { "put", "avg" };
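
    // The tested names look like vp9_put_8tap_smooth_16hv_8bpp for subpel
    // variants and vp9_avg32_10bpp for full-pel copies and averages.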
    char str[256];

    for (op = 0; op < 2; op++) {
        for (bit_depth = 8; bit_depth <= 12; bit_depth += 2) {
            ff_vp9dsp_init(&dsp, bit_depth, 0);
            for (hsize = 0; hsize < 5; hsize++) {
                int size = 64 >> hsize;

                for (filter = 0; filter < 4; filter++) {
                    for (dx = 0; dx < 2; dx++) {
                        for (dy = 0; dy < 2; dy++) {
                            if (dx || dy) {
                                snprintf(str, sizeof(str),
                                         "%s_%s_%d%s", op_names[op],
                                         filter_names[filter], size,
                                         subpel_names[dy][dx]);
                            } else {
                                snprintf(str, sizeof(str),
                                         "%s%d", op_names[op], size);
                            }
                            if (check_func(dsp.mc[hsize][filter][op][dx][dy],
                                           "vp9_%s_%dbpp", str, bit_depth)) {
                                int mx = dx ? 1 + (rnd() % 14) : 0;
                                int my = dy ? 1 + (rnd() % 14) : 0;
                                randomize_buffers();
                                call_ref(dst0, size * SIZEOF_PIXEL,
                                         src, SRC_BUF_STRIDE * SIZEOF_PIXEL,
                                         size, mx, my);
                                call_new(dst1, size * SIZEOF_PIXEL,
                                         src, SRC_BUF_STRIDE * SIZEOF_PIXEL,
                                         size, mx, my);
                                if (memcmp(dst0, dst1, DST_BUF_SIZE))
                                    fail();

                                // the SIMD implementations of the subpel
                                // functions are shared between the 8tap
                                // filters, so benchmark only one of them
                                if (filter >= 1 && filter <= 2) continue;
                                // the 10 and 12 bpp bilin functions are
                                // identical, so benchmark only one of them
                                if (bit_depth == 12 && filter == 3) continue;

                                bench_new(dst1, size * SIZEOF_PIXEL,
                                          src, SRC_BUF_STRIDE * SIZEOF_PIXEL,
                                          size, mx, my);
                            }
                        }
                    }
                }
            }
        }
    }
    report("mc");
}

void checkasm_check_vp9dsp(void)
{
    check_ipred();
    check_itxfm();
    check_loopfilter();
    check_mc();
}