/*
 * Copyright (c) 2015 Henrik Gramner
 * Copyright (c) 2021 Josh Dekker
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with FFmpeg; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
 */
#include <string.h>

#include "checkasm.h"

#include "libavcodec/hevcdsp.h"

#include "libavutil/common.h"
#include "libavutil/internal.h"
#include "libavutil/intreadwrite.h"
/* Masks limiting random pixel data to the legal sample range for each
 * supported bit depth (8..12); indexed by bit_depth - 8. */
static const uint32_t pixel_mask[] = { 0xffffffff, 0x01ff01ff, 0x03ff03ff, 0x07ff07ff, 0x0fff0fff };
/* Masks for the 16-bit intermediate (bi-prediction reference) samples. */
static const uint32_t pixel_mask16[] = { 0x00ff00ff, 0x01ff01ff, 0x03ff03ff, 0x07ff07ff, 0x0fff0fff };
/* Block widths matching the HEVCDSPContext put_hevc_* table index; slot 0 is unused. */
static const int sizes[] = { -1, 4, 6, 8, 12, 16, 24, 32, 48, 64 };
/* Parameter sets exercised for weighted prediction; each list is -1 terminated. */
static const int weights[] = { 0, 128, 255, -1 };
static const int denoms[] = { 0, 7, 12, -1 };
static const int offsets[] = { 0, 255, -1 };

/* Bytes per pixel at the bit depth currently under test. */
#define SIZEOF_PIXEL ((bit_depth + 7) / 8)
/* Large enough for a MAX_PB_SIZE block of 16-bit samples plus 2*4 rows of
 * top/bottom margin read by the interpolation filters. */
#define BUF_SIZE (2 * MAX_PB_SIZE * (2 * 4 + MAX_PB_SIZE))
/* Fill both source buffers with identical random pixels clamped to the
 * current bit depth, and both destination buffers with identical random
 * garbage so that any stray or missing write by the tested implementation
 * shows up in the comparison. */
#define randomize_buffers()                          \
    do {                                             \
        uint32_t mask = pixel_mask[bit_depth - 8];   \
        int k;                                       \
        for (k = 0; k < BUF_SIZE; k += 4) {          \
            uint32_t r = rnd() & mask;               \
            AV_WN32A(buf0 + k, r);                   \
            AV_WN32A(buf1 + k, r);                   \
            r = rnd();                               \
            AV_WN32A(dst0 + k, r);                   \
            AV_WN32A(dst1 + k, r);                   \
        }                                            \
    } while (0)
/* As randomize_buffers(), but additionally fill the 16-bit bi-prediction
 * reference buffers ref0/ref1 with identical random samples. */
#define randomize_buffers_ref()                      \
    do {                                             \
        uint32_t mask = pixel_mask16[bit_depth - 8]; \
        int k;                                       \
        randomize_buffers();                         \
        for (k = 0; k < BUF_SIZE; k += 2) {          \
            uint32_t r = rnd() & mask;               \
            AV_WN32A(ref0 + k, r);                   \
            AV_WN32A(ref1 + k, r);                   \
        }                                            \
    } while (0)
/* hevc qpel functions read data from negative src pointer offsets, so point
 * the src aliases into the buffer with 2*4 rows of headroom above them. */
#define src0 (buf0 + 2 * 4 * MAX_PB_SIZE)
#define src1 (buf1 + 2 * 4 * MAX_PB_SIZE)
68 void checkasm_check_hevc_qpel(void)
70 LOCAL_ALIGNED_32(uint8_t, buf0, [BUF_SIZE]);
71 LOCAL_ALIGNED_32(uint8_t, buf1, [BUF_SIZE]);
72 LOCAL_ALIGNED_32(uint8_t, dst0, [BUF_SIZE]);
73 LOCAL_ALIGNED_32(uint8_t, dst1, [BUF_SIZE]);
76 int size, bit_depth, i, j, row;
77 declare_func_emms(AV_CPU_FLAG_MMX | AV_CPU_FLAG_MMXEXT, void, int16_t *dst, uint8_t *src, ptrdiff_t srcstride,
78 int height, intptr_t mx, intptr_t my, int width);
80 for (bit_depth = 8; bit_depth <= 12; bit_depth++) {
81 ff_hevc_dsp_init(&h, bit_depth);
83 for (i = 0; i < 2; i++) {
84 for (j = 0; j < 2; j++) {
85 for (size = 1; size < 10; size++) {
87 switch ((j << 1) | i) {
88 case 0: type = "pel_pixels"; break; // 0 0
89 case 1: type = "qpel_h"; break; // 0 1
90 case 2: type = "qpel_v"; break; // 1 0
91 case 3: type = "qpel_hv"; break; // 1 1
94 if (check_func(h.put_hevc_qpel[size][j][i], "put_hevc_%s%d_%d", type, sizes[size], bit_depth)) {
95 int16_t *dstw0 = (int16_t *) dst0, *dstw1 = (int16_t *) dst1;
97 call_ref(dstw0, src0, sizes[size] * SIZEOF_PIXEL, sizes[size], i, j, sizes[size]);
98 call_new(dstw1, src1, sizes[size] * SIZEOF_PIXEL, sizes[size], i, j, sizes[size]);
99 for (row = 0; row < size[sizes]; row++) {
100 if (memcmp(dstw0 + row * MAX_PB_SIZE, dstw1 + row * MAX_PB_SIZE, sizes[size] * SIZEOF_PIXEL))
103 bench_new(dstw1, src1, sizes[size] * SIZEOF_PIXEL, sizes[size], i, j, sizes[size]);
112 void checkasm_check_hevc_qpel_uni(void)
114 LOCAL_ALIGNED_32(uint8_t, buf0, [BUF_SIZE]);
115 LOCAL_ALIGNED_32(uint8_t, buf1, [BUF_SIZE]);
116 LOCAL_ALIGNED_32(uint8_t, dst0, [BUF_SIZE]);
117 LOCAL_ALIGNED_32(uint8_t, dst1, [BUF_SIZE]);
120 int size, bit_depth, i, j;
121 declare_func_emms(AV_CPU_FLAG_MMX | AV_CPU_FLAG_MMXEXT, void, uint8_t *dst, ptrdiff_t dststride, uint8_t *src, ptrdiff_t srcstride,
122 int height, intptr_t mx, intptr_t my, int width);
124 for (bit_depth = 8; bit_depth <= 12; bit_depth++) {
125 ff_hevc_dsp_init(&h, bit_depth);
127 for (i = 0; i < 2; i++) {
128 for (j = 0; j < 2; j++) {
129 for (size = 1; size < 10; size++) {
131 switch ((j << 1) | i) {
132 case 0: type = "pel_uni_pixels"; break; // 0 0
133 case 1: type = "qpel_uni_h"; break; // 0 1
134 case 2: type = "qpel_uni_v"; break; // 1 0
135 case 3: type = "qpel_uni_hv"; break; // 1 1
138 if (check_func(h.put_hevc_qpel_uni[size][j][i], "put_hevc_%s%d_%d", type, sizes[size], bit_depth)) {
140 call_ref(dst0, sizes[size] * SIZEOF_PIXEL, src0, sizes[size] * SIZEOF_PIXEL, sizes[size], i, j, sizes[size]);
141 call_new(dst1, sizes[size] * SIZEOF_PIXEL, src1, sizes[size] * SIZEOF_PIXEL, sizes[size], i, j, sizes[size]);
142 if (memcmp(dst0, dst1, sizes[size] * sizes[size] * SIZEOF_PIXEL))
144 bench_new(dst1, sizes[size] * SIZEOF_PIXEL, src1, sizes[size] * SIZEOF_PIXEL, sizes[size], i, j, sizes[size]);
153 void checkasm_check_hevc_qpel_uni_w(void)
155 LOCAL_ALIGNED_32(uint8_t, buf0, [BUF_SIZE]);
156 LOCAL_ALIGNED_32(uint8_t, buf1, [BUF_SIZE]);
157 LOCAL_ALIGNED_32(uint8_t, dst0, [BUF_SIZE]);
158 LOCAL_ALIGNED_32(uint8_t, dst1, [BUF_SIZE]);
161 int size, bit_depth, i, j;
162 const int *denom, *wx, *ox;
163 declare_func_emms(AV_CPU_FLAG_MMX | AV_CPU_FLAG_MMXEXT, void, uint8_t *dst, ptrdiff_t dststride, uint8_t *src, ptrdiff_t srcstride,
164 int height, int denom, int wx, int ox, intptr_t mx, intptr_t my, int width);
166 for (bit_depth = 8; bit_depth <= 12; bit_depth++) {
167 ff_hevc_dsp_init(&h, bit_depth);
169 for (i = 0; i < 2; i++) {
170 for (j = 0; j < 2; j++) {
171 for (size = 1; size < 10; size++) {
173 switch ((j << 1) | i) {
174 case 0: type = "pel_uni_w_pixels"; break; // 0 0
175 case 1: type = "qpel_uni_w_h"; break; // 0 1
176 case 2: type = "qpel_uni_w_v"; break; // 1 0
177 case 3: type = "qpel_uni_w_hv"; break; // 1 1
180 if (check_func(h.put_hevc_qpel_uni_w[size][j][i], "put_hevc_%s%d_%d", type, sizes[size], bit_depth)) {
181 for (denom = denoms; *denom >= 0; denom++) {
182 for (wx = weights; *wx >= 0; wx++) {
183 for (ox = offsets; *ox >= 0; ox++) {
185 call_ref(dst0, sizes[size] * SIZEOF_PIXEL, src0, sizes[size] * SIZEOF_PIXEL, sizes[size], *denom, *wx, *ox, i, j, sizes[size]);
186 call_new(dst1, sizes[size] * SIZEOF_PIXEL, src1, sizes[size] * SIZEOF_PIXEL, sizes[size], *denom, *wx, *ox, i, j, sizes[size]);
187 if (memcmp(dst0, dst1, sizes[size] * sizes[size] * SIZEOF_PIXEL))
189 bench_new(dst1, sizes[size] * SIZEOF_PIXEL, src1, sizes[size] * SIZEOF_PIXEL, sizes[size], *denom, *wx, *ox, i, j, sizes[size]);
198 report("qpel_uni_w");
201 void checkasm_check_hevc_qpel_bi(void)
203 LOCAL_ALIGNED_32(uint8_t, buf0, [BUF_SIZE]);
204 LOCAL_ALIGNED_32(uint8_t, buf1, [BUF_SIZE]);
205 LOCAL_ALIGNED_32(uint8_t, dst0, [BUF_SIZE]);
206 LOCAL_ALIGNED_32(uint8_t, dst1, [BUF_SIZE]);
207 LOCAL_ALIGNED_32(int16_t, ref0, [BUF_SIZE]);
208 LOCAL_ALIGNED_32(int16_t, ref1, [BUF_SIZE]);
211 int size, bit_depth, i, j;
212 declare_func_emms(AV_CPU_FLAG_MMX | AV_CPU_FLAG_MMXEXT, void, uint8_t *dst, ptrdiff_t dststride, uint8_t *src, ptrdiff_t srcstride,
214 int height, intptr_t mx, intptr_t my, int width);
216 for (bit_depth = 8; bit_depth <= 12; bit_depth++) {
217 ff_hevc_dsp_init(&h, bit_depth);
219 for (i = 0; i < 2; i++) {
220 for (j = 0; j < 2; j++) {
221 for (size = 1; size < 10; size++) {
223 switch ((j << 1) | i) {
224 case 0: type = "pel_bi_pixels"; break; // 0 0
225 case 1: type = "qpel_bi_h"; break; // 0 1
226 case 2: type = "qpel_bi_v"; break; // 1 0
227 case 3: type = "qpel_bi_hv"; break; // 1 1
230 if (check_func(h.put_hevc_qpel_bi[size][j][i], "put_hevc_%s%d_%d", type, sizes[size], bit_depth)) {
231 randomize_buffers_ref();
232 call_ref(dst0, sizes[size] * SIZEOF_PIXEL, src0, sizes[size] * SIZEOF_PIXEL, ref0, sizes[size], i, j, sizes[size]);
233 call_new(dst1, sizes[size] * SIZEOF_PIXEL, src1, sizes[size] * SIZEOF_PIXEL, ref1, sizes[size], i, j, sizes[size]);
234 if (memcmp(dst0, dst1, sizes[size] * sizes[size] * SIZEOF_PIXEL))
236 bench_new(dst1, sizes[size] * SIZEOF_PIXEL, src1, sizes[size] * SIZEOF_PIXEL, ref1, sizes[size], i, j, sizes[size]);
245 void checkasm_check_hevc_qpel_bi_w(void)
247 LOCAL_ALIGNED_32(uint8_t, buf0, [BUF_SIZE]);
248 LOCAL_ALIGNED_32(uint8_t, buf1, [BUF_SIZE]);
249 LOCAL_ALIGNED_32(uint8_t, dst0, [BUF_SIZE]);
250 LOCAL_ALIGNED_32(uint8_t, dst1, [BUF_SIZE]);
251 LOCAL_ALIGNED_32(int16_t, ref0, [BUF_SIZE]);
252 LOCAL_ALIGNED_32(int16_t, ref1, [BUF_SIZE]);
255 int size, bit_depth, i, j;
256 const int *denom, *wx, *ox;
257 declare_func_emms(AV_CPU_FLAG_MMX | AV_CPU_FLAG_MMXEXT, void, uint8_t *dst, ptrdiff_t dststride, uint8_t *src, ptrdiff_t srcstride,
259 int height, int denom, int wx0, int wx1,
260 int ox0, int ox1, intptr_t mx, intptr_t my, int width);
262 for (bit_depth = 8; bit_depth <= 12; bit_depth++) {
263 ff_hevc_dsp_init(&h, bit_depth);
265 for (i = 0; i < 2; i++) {
266 for (j = 0; j < 2; j++) {
267 for (size = 1; size < 10; size++) {
269 switch ((j << 1) | i) {
270 case 0: type = "pel_bi_w_pixels"; break; // 0 0
271 case 1: type = "qpel_bi_w_h"; break; // 0 1
272 case 2: type = "qpel_bi_w_v"; break; // 1 0
273 case 3: type = "qpel_bi_w_hv"; break; // 1 1
276 if (check_func(h.put_hevc_qpel_bi_w[size][j][i], "put_hevc_%s%d_%d", type, sizes[size], bit_depth)) {
277 for (denom = denoms; *denom >= 0; denom++) {
278 for (wx = weights; *wx >= 0; wx++) {
279 for (ox = offsets; *ox >= 0; ox++) {
280 randomize_buffers_ref();
281 call_ref(dst0, sizes[size] * SIZEOF_PIXEL, src0, sizes[size] * SIZEOF_PIXEL, ref0, sizes[size], *denom, *wx, *wx, *ox, *ox, i, j, sizes[size]);
282 call_new(dst1, sizes[size] * SIZEOF_PIXEL, src1, sizes[size] * SIZEOF_PIXEL, ref1, sizes[size], *denom, *wx, *wx, *ox, *ox, i, j, sizes[size]);
283 if (memcmp(dst0, dst1, sizes[size] * sizes[size] * SIZEOF_PIXEL))
285 bench_new(dst1, sizes[size] * SIZEOF_PIXEL, src1, sizes[size] * SIZEOF_PIXEL, ref1, sizes[size], *denom, *wx, *wx, *ox, *ox, i, j, sizes[size]);
297 void checkasm_check_hevc_epel(void)
299 LOCAL_ALIGNED_32(uint8_t, buf0, [BUF_SIZE]);
300 LOCAL_ALIGNED_32(uint8_t, buf1, [BUF_SIZE]);
301 LOCAL_ALIGNED_32(uint8_t, dst0, [BUF_SIZE]);
302 LOCAL_ALIGNED_32(uint8_t, dst1, [BUF_SIZE]);
305 int size, bit_depth, i, j, row;
306 declare_func_emms(AV_CPU_FLAG_MMX | AV_CPU_FLAG_MMXEXT, void, int16_t *dst, uint8_t *src, ptrdiff_t srcstride,
307 int height, intptr_t mx, intptr_t my, int width);
309 for (bit_depth = 8; bit_depth <= 12; bit_depth++) {
310 ff_hevc_dsp_init(&h, bit_depth);
312 for (i = 0; i < 2; i++) {
313 for (j = 0; j < 2; j++) {
314 for (size = 1; size < 10; size++) {
316 switch ((j << 1) | i) {
317 case 0: type = "pel_pixels"; break; // 0 0
318 case 1: type = "epel_h"; break; // 0 1
319 case 2: type = "epel_v"; break; // 1 0
320 case 3: type = "epel_hv"; break; // 1 1
323 if (check_func(h.put_hevc_epel[size][j][i], "put_hevc_%s%d_%d", type, sizes[size], bit_depth)) {
324 int16_t *dstw0 = (int16_t *) dst0, *dstw1 = (int16_t *) dst1;
326 call_ref(dstw0, src0, sizes[size] * SIZEOF_PIXEL, sizes[size], i, j, sizes[size]);
327 call_new(dstw1, src1, sizes[size] * SIZEOF_PIXEL, sizes[size], i, j, sizes[size]);
328 for (row = 0; row < size[sizes]; row++) {
329 if (memcmp(dstw0 + row * MAX_PB_SIZE, dstw1 + row * MAX_PB_SIZE, sizes[size] * SIZEOF_PIXEL))
332 bench_new(dstw1, src1, sizes[size] * SIZEOF_PIXEL, sizes[size], i, j, sizes[size]);
341 void checkasm_check_hevc_epel_uni(void)
343 LOCAL_ALIGNED_32(uint8_t, buf0, [BUF_SIZE]);
344 LOCAL_ALIGNED_32(uint8_t, buf1, [BUF_SIZE]);
345 LOCAL_ALIGNED_32(uint8_t, dst0, [BUF_SIZE]);
346 LOCAL_ALIGNED_32(uint8_t, dst1, [BUF_SIZE]);
349 int size, bit_depth, i, j;
350 declare_func_emms(AV_CPU_FLAG_MMX | AV_CPU_FLAG_MMXEXT, void, uint8_t *dst, ptrdiff_t dststride, uint8_t *src, ptrdiff_t srcstride,
351 int height, intptr_t mx, intptr_t my, int width);
353 for (bit_depth = 8; bit_depth <= 12; bit_depth++) {
354 ff_hevc_dsp_init(&h, bit_depth);
356 for (i = 0; i < 2; i++) {
357 for (j = 0; j < 2; j++) {
358 for (size = 1; size < 10; size++) {
360 switch ((j << 1) | i) {
361 case 0: type = "pel_uni_pixels"; break; // 0 0
362 case 1: type = "epel_uni_h"; break; // 0 1
363 case 2: type = "epel_uni_v"; break; // 1 0
364 case 3: type = "epel_uni_hv"; break; // 1 1
367 if (check_func(h.put_hevc_epel_uni[size][j][i], "put_hevc_%s%d_%d", type, sizes[size], bit_depth)) {
369 call_ref(dst0, sizes[size] * SIZEOF_PIXEL, src0, sizes[size] * SIZEOF_PIXEL, sizes[size], i, j, sizes[size]);
370 call_new(dst1, sizes[size] * SIZEOF_PIXEL, src1, sizes[size] * SIZEOF_PIXEL, sizes[size], i, j, sizes[size]);
371 if (memcmp(dst0, dst1, sizes[size] * sizes[size] * SIZEOF_PIXEL))
373 bench_new(dst1, sizes[size] * SIZEOF_PIXEL, src1, sizes[size] * SIZEOF_PIXEL, sizes[size], i, j, sizes[size]);
382 void checkasm_check_hevc_epel_uni_w(void)
384 LOCAL_ALIGNED_32(uint8_t, buf0, [BUF_SIZE]);
385 LOCAL_ALIGNED_32(uint8_t, buf1, [BUF_SIZE]);
386 LOCAL_ALIGNED_32(uint8_t, dst0, [BUF_SIZE]);
387 LOCAL_ALIGNED_32(uint8_t, dst1, [BUF_SIZE]);
390 int size, bit_depth, i, j;
391 const int *denom, *wx, *ox;
392 declare_func_emms(AV_CPU_FLAG_MMX | AV_CPU_FLAG_MMXEXT, void, uint8_t *dst, ptrdiff_t dststride, uint8_t *src, ptrdiff_t srcstride,
393 int height, int denom, int wx, int ox, intptr_t mx, intptr_t my, int width);
395 for (bit_depth = 8; bit_depth <= 12; bit_depth++) {
396 ff_hevc_dsp_init(&h, bit_depth);
398 for (i = 0; i < 2; i++) {
399 for (j = 0; j < 2; j++) {
400 for (size = 1; size < 10; size++) {
402 switch ((j << 1) | i) {
403 case 0: type = "pel_uni_w_pixels"; break; // 0 0
404 case 1: type = "epel_uni_w_h"; break; // 0 1
405 case 2: type = "epel_uni_w_v"; break; // 1 0
406 case 3: type = "epel_uni_w_hv"; break; // 1 1
409 if (check_func(h.put_hevc_epel_uni_w[size][j][i], "put_hevc_%s%d_%d", type, sizes[size], bit_depth)) {
410 for (denom = denoms; *denom >= 0; denom++) {
411 for (wx = weights; *wx >= 0; wx++) {
412 for (ox = offsets; *ox >= 0; ox++) {
414 call_ref(dst0, sizes[size] * SIZEOF_PIXEL, src0, sizes[size] * SIZEOF_PIXEL, sizes[size], *denom, *wx, *ox, i, j, sizes[size]);
415 call_new(dst1, sizes[size] * SIZEOF_PIXEL, src1, sizes[size] * SIZEOF_PIXEL, sizes[size], *denom, *wx, *ox, i, j, sizes[size]);
416 if (memcmp(dst0, dst1, sizes[size] * sizes[size] * SIZEOF_PIXEL))
418 bench_new(dst1, sizes[size] * SIZEOF_PIXEL, src1, sizes[size] * SIZEOF_PIXEL, sizes[size], *denom, *wx, *ox, i, j, sizes[size]);
427 report("epel_uni_w");
430 void checkasm_check_hevc_epel_bi(void)
432 LOCAL_ALIGNED_32(uint8_t, buf0, [BUF_SIZE]);
433 LOCAL_ALIGNED_32(uint8_t, buf1, [BUF_SIZE]);
434 LOCAL_ALIGNED_32(uint8_t, dst0, [BUF_SIZE]);
435 LOCAL_ALIGNED_32(uint8_t, dst1, [BUF_SIZE]);
436 LOCAL_ALIGNED_32(int16_t, ref0, [BUF_SIZE]);
437 LOCAL_ALIGNED_32(int16_t, ref1, [BUF_SIZE]);
440 int size, bit_depth, i, j;
441 declare_func_emms(AV_CPU_FLAG_MMX | AV_CPU_FLAG_MMXEXT, void, uint8_t *dst, ptrdiff_t dststride, uint8_t *src, ptrdiff_t srcstride,
443 int height, intptr_t mx, intptr_t my, int width);
445 for (bit_depth = 8; bit_depth <= 12; bit_depth++) {
446 ff_hevc_dsp_init(&h, bit_depth);
448 for (i = 0; i < 2; i++) {
449 for (j = 0; j < 2; j++) {
450 for (size = 1; size < 10; size++) {
452 switch ((j << 1) | i) {
453 case 0: type = "pel_bi_pixels"; break; // 0 0
454 case 1: type = "epel_bi_h"; break; // 0 1
455 case 2: type = "epel_bi_v"; break; // 1 0
456 case 3: type = "epel_bi_hv"; break; // 1 1
459 if (check_func(h.put_hevc_epel_bi[size][j][i], "put_hevc_%s%d_%d", type, sizes[size], bit_depth)) {
460 randomize_buffers_ref();
461 call_ref(dst0, sizes[size] * SIZEOF_PIXEL, src0, sizes[size] * SIZEOF_PIXEL, ref0, sizes[size], i, j, sizes[size]);
462 call_new(dst1, sizes[size] * SIZEOF_PIXEL, src1, sizes[size] * SIZEOF_PIXEL, ref1, sizes[size], i, j, sizes[size]);
463 if (memcmp(dst0, dst1, sizes[size] * sizes[size] * SIZEOF_PIXEL))
465 bench_new(dst1, sizes[size] * SIZEOF_PIXEL, src1, sizes[size] * SIZEOF_PIXEL, ref1, sizes[size], i, j, sizes[size]);
474 void checkasm_check_hevc_epel_bi_w(void)
476 LOCAL_ALIGNED_32(uint8_t, buf0, [BUF_SIZE]);
477 LOCAL_ALIGNED_32(uint8_t, buf1, [BUF_SIZE]);
478 LOCAL_ALIGNED_32(uint8_t, dst0, [BUF_SIZE]);
479 LOCAL_ALIGNED_32(uint8_t, dst1, [BUF_SIZE]);
480 LOCAL_ALIGNED_32(int16_t, ref0, [BUF_SIZE]);
481 LOCAL_ALIGNED_32(int16_t, ref1, [BUF_SIZE]);
484 int size, bit_depth, i, j;
485 const int *denom, *wx, *ox;
486 declare_func_emms(AV_CPU_FLAG_MMX | AV_CPU_FLAG_MMXEXT, void, uint8_t *dst, ptrdiff_t dststride, uint8_t *src, ptrdiff_t srcstride,
488 int height, int denom, int wx0, int wx1,
489 int ox0, int ox1, intptr_t mx, intptr_t my, int width);
491 for (bit_depth = 8; bit_depth <= 12; bit_depth++) {
492 ff_hevc_dsp_init(&h, bit_depth);
494 for (i = 0; i < 2; i++) {
495 for (j = 0; j < 2; j++) {
496 for (size = 1; size < 10; size++) {
498 switch ((j << 1) | i) {
499 case 0: type = "pel_bi_w_pixels"; break; // 0 0
500 case 1: type = "epel_bi_w_h"; break; // 0 1
501 case 2: type = "epel_bi_w_v"; break; // 1 0
502 case 3: type = "epel_bi_w_hv"; break; // 1 1
505 if (check_func(h.put_hevc_epel_bi_w[size][j][i], "put_hevc_%s%d_%d", type, sizes[size], bit_depth)) {
506 for (denom = denoms; *denom >= 0; denom++) {
507 for (wx = weights; *wx >= 0; wx++) {
508 for (ox = offsets; *ox >= 0; ox++) {
509 randomize_buffers_ref();
510 call_ref(dst0, sizes[size] * SIZEOF_PIXEL, src0, sizes[size] * SIZEOF_PIXEL, ref0, sizes[size], *denom, *wx, *wx, *ox, *ox, i, j, sizes[size]);
511 call_new(dst1, sizes[size] * SIZEOF_PIXEL, src1, sizes[size] * SIZEOF_PIXEL, ref1, sizes[size], *denom, *wx, *wx, *ox, *ox, i, j, sizes[size]);
512 if (memcmp(dst0, dst1, sizes[size] * sizes[size] * SIZEOF_PIXEL))
514 bench_new(dst1, sizes[size] * SIZEOF_PIXEL, src1, sizes[size] * SIZEOF_PIXEL, ref1, sizes[size], *denom, *wx, *wx, *ox, *ox, i, j, sizes[size]);