/*
 * Copyright (c) 2000, 2001 Fabrice Bellard
 * Copyright (c) 2002-2004 Michael Niedermayer <michaelni@gmx.at>
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include "libavutil/attributes.h"
#include "libavutil/internal.h"
#include "libavutil/mem_internal.h"

#include "copy_block.h"
#include "simple_idct.h"
#include "me_cmp.h"
#include "mpegvideo.h"
#include "config.h"

/* (i - 256) * (i - 256) */
const uint32_t ff_square_tab[512] = {
    65536, 65025, 64516, 64009, 63504, 63001, 62500, 62001, 61504, 61009, 60516, 60025, 59536, 59049, 58564, 58081,
    57600, 57121, 56644, 56169, 55696, 55225, 54756, 54289, 53824, 53361, 52900, 52441, 51984, 51529, 51076, 50625,
    50176, 49729, 49284, 48841, 48400, 47961, 47524, 47089, 46656, 46225, 45796, 45369, 44944, 44521, 44100, 43681,
    43264, 42849, 42436, 42025, 41616, 41209, 40804, 40401, 40000, 39601, 39204, 38809, 38416, 38025, 37636, 37249,
    36864, 36481, 36100, 35721, 35344, 34969, 34596, 34225, 33856, 33489, 33124, 32761, 32400, 32041, 31684, 31329,
    30976, 30625, 30276, 29929, 29584, 29241, 28900, 28561, 28224, 27889, 27556, 27225, 26896, 26569, 26244, 25921,
    25600, 25281, 24964, 24649, 24336, 24025, 23716, 23409, 23104, 22801, 22500, 22201, 21904, 21609, 21316, 21025,
    20736, 20449, 20164, 19881, 19600, 19321, 19044, 18769, 18496, 18225, 17956, 17689, 17424, 17161, 16900, 16641,
    16384, 16129, 15876, 15625, 15376, 15129, 14884, 14641, 14400, 14161, 13924, 13689, 13456, 13225, 12996, 12769,
    12544, 12321, 12100, 11881, 11664, 11449, 11236, 11025, 10816, 10609, 10404, 10201, 10000, 9801, 9604, 9409,
    9216, 9025, 8836, 8649, 8464, 8281, 8100, 7921, 7744, 7569, 7396, 7225, 7056, 6889, 6724, 6561,
    6400, 6241, 6084, 5929, 5776, 5625, 5476, 5329, 5184, 5041, 4900, 4761, 4624, 4489, 4356, 4225,
    4096, 3969, 3844, 3721, 3600, 3481, 3364, 3249, 3136, 3025, 2916, 2809, 2704, 2601, 2500, 2401,
    2304, 2209, 2116, 2025, 1936, 1849, 1764, 1681, 1600, 1521, 1444, 1369, 1296, 1225, 1156, 1089,
    1024, 961, 900, 841, 784, 729, 676, 625, 576, 529, 484, 441, 400, 361, 324, 289,
    256, 225, 196, 169, 144, 121, 100, 81, 64, 49, 36, 25, 16, 9, 4, 1,
    0, 1, 4, 9, 16, 25, 36, 49, 64, 81, 100, 121, 144, 169, 196, 225,
    256, 289, 324, 361, 400, 441, 484, 529, 576, 625, 676, 729, 784, 841, 900, 961,
    1024, 1089, 1156, 1225, 1296, 1369, 1444, 1521, 1600, 1681, 1764, 1849, 1936, 2025, 2116, 2209,
    2304, 2401, 2500, 2601, 2704, 2809, 2916, 3025, 3136, 3249, 3364, 3481, 3600, 3721, 3844, 3969,
    4096, 4225, 4356, 4489, 4624, 4761, 4900, 5041, 5184, 5329, 5476, 5625, 5776, 5929, 6084, 6241,
    6400, 6561, 6724, 6889, 7056, 7225, 7396, 7569, 7744, 7921, 8100, 8281, 8464, 8649, 8836, 9025,
    9216, 9409, 9604, 9801, 10000, 10201, 10404, 10609, 10816, 11025, 11236, 11449, 11664, 11881, 12100, 12321,
    12544, 12769, 12996, 13225, 13456, 13689, 13924, 14161, 14400, 14641, 14884, 15129, 15376, 15625, 15876, 16129,
    16384, 16641, 16900, 17161, 17424, 17689, 17956, 18225, 18496, 18769, 19044, 19321, 19600, 19881, 20164, 20449,
    20736, 21025, 21316, 21609, 21904, 22201, 22500, 22801, 23104, 23409, 23716, 24025, 24336, 24649, 24964, 25281,
    25600, 25921, 26244, 26569, 26896, 27225, 27556, 27889, 28224, 28561, 28900, 29241, 29584, 29929, 30276, 30625,
    30976, 31329, 31684, 32041, 32400, 32761, 33124, 33489, 33856, 34225, 34596, 34969, 35344, 35721, 36100, 36481,
    36864, 37249, 37636, 38025, 38416, 38809, 39204, 39601, 40000, 40401, 40804, 41209, 41616, 42025, 42436, 42849,
    43264, 43681, 44100, 44521, 44944, 45369, 45796, 46225, 46656, 47089, 47524, 47961, 48400, 48841, 49284, 49729,
    50176, 50625, 51076, 51529, 51984, 52441, 52900, 53361, 53824, 54289, 54756, 55225, 55696, 56169, 56644, 57121,
    57600, 58081, 58564, 59049, 59536, 60025, 60516, 61009, 61504, 62001, 62500, 63001, 63504, 64009, 64516, 65025,
};

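/*
 * Illustrative note (not in the original source): callers point into the
 * middle of this table, so a signed byte difference can be used as a direct
 * index. A minimal sketch, assuming 8-bit pixels:
 *
 *     const uint32_t *sq = ff_square_tab + 256;
 *     int d = pix1[0] - pix2[0];   // d is in [-255, 255]
 *     uint32_t d2 = sq[d];         // == (uint32_t)(d * d)
 *
 * The biased lookup trades a per-pixel multiply for a table load, which
 * historically could be cheaper on the targets this C fallback served.
 */
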
static int sse4_c(MpegEncContext *v, uint8_t *pix1, uint8_t *pix2,
                  ptrdiff_t stride, int h)
{
    int s = 0, i;
    const uint32_t *sq = ff_square_tab + 256;

    for (i = 0; i < h; i++) {
        s += sq[pix1[0] - pix2[0]];
        s += sq[pix1[1] - pix2[1]];
        s += sq[pix1[2] - pix2[2]];
        s += sq[pix1[3] - pix2[3]];
        pix1 += stride;
        pix2 += stride;
    }

    return s;
}

static int sse8_c(MpegEncContext *v, uint8_t *pix1, uint8_t *pix2,
                  ptrdiff_t stride, int h)
{
    int s = 0, i;
    const uint32_t *sq = ff_square_tab + 256;

    for (i = 0; i < h; i++) {
        s += sq[pix1[0] - pix2[0]];
        s += sq[pix1[1] - pix2[1]];
        s += sq[pix1[2] - pix2[2]];
        s += sq[pix1[3] - pix2[3]];
        s += sq[pix1[4] - pix2[4]];
        s += sq[pix1[5] - pix2[5]];
        s += sq[pix1[6] - pix2[6]];
        s += sq[pix1[7] - pix2[7]];
        pix1 += stride;
        pix2 += stride;
    }

    return s;
}

static int sse16_c(MpegEncContext *v, uint8_t *pix1, uint8_t *pix2,
                   ptrdiff_t stride, int h)
{
    int s = 0, i;
    const uint32_t *sq = ff_square_tab + 256;

    for (i = 0; i < h; i++) {
        s += sq[pix1[0] - pix2[0]];
        s += sq[pix1[1] - pix2[1]];
        s += sq[pix1[2] - pix2[2]];
        s += sq[pix1[3] - pix2[3]];
        s += sq[pix1[4] - pix2[4]];
        s += sq[pix1[5] - pix2[5]];
        s += sq[pix1[6] - pix2[6]];
        s += sq[pix1[7] - pix2[7]];
        s += sq[pix1[8] - pix2[8]];
        s += sq[pix1[9] - pix2[9]];
        s += sq[pix1[10] - pix2[10]];
        s += sq[pix1[11] - pix2[11]];
        s += sq[pix1[12] - pix2[12]];
        s += sq[pix1[13] - pix2[13]];
        s += sq[pix1[14] - pix2[14]];
        s += sq[pix1[15] - pix2[15]];
        pix1 += stride;
        pix2 += stride;
    }

    return s;
}

static int sum_abs_dctelem_c(int16_t *block)
{
    int sum = 0, i;

    for (i = 0; i < 64; i++)
        sum += FFABS(block[i]);

    return sum;
}

#define avg2(a, b) (((a) + (b) + 1) >> 1)
#define avg4(a, b, c, d) (((a) + (b) + (c) + (d) + 2) >> 2)
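
/*
 * Illustrative note: both averages round half up, matching the rounding of
 * the half-pel interpolation these comparators are scored against. For
 * example:
 *
 *     avg2(1, 2)       == (1 + 2 + 1) >> 1         == 2
 *     avg4(1, 2, 3, 4) == (1 + 2 + 3 + 4 + 2) >> 2 == 3
 */
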
static inline int pix_abs16_c(MpegEncContext *v, uint8_t *pix1, uint8_t *pix2,
                              ptrdiff_t stride, int h)
{
    int s = 0, i;

    for (i = 0; i < h; i++) {
        s += abs(pix1[0] - pix2[0]);
        s += abs(pix1[1] - pix2[1]);
        s += abs(pix1[2] - pix2[2]);
        s += abs(pix1[3] - pix2[3]);
        s += abs(pix1[4] - pix2[4]);
        s += abs(pix1[5] - pix2[5]);
        s += abs(pix1[6] - pix2[6]);
        s += abs(pix1[7] - pix2[7]);
        s += abs(pix1[8] - pix2[8]);
        s += abs(pix1[9] - pix2[9]);
        s += abs(pix1[10] - pix2[10]);
        s += abs(pix1[11] - pix2[11]);
        s += abs(pix1[12] - pix2[12]);
        s += abs(pix1[13] - pix2[13]);
        s += abs(pix1[14] - pix2[14]);
        s += abs(pix1[15] - pix2[15]);
        pix1 += stride;
        pix2 += stride;
    }

    return s;
}

static inline int pix_median_abs16_c(MpegEncContext *v, uint8_t *pix1, uint8_t *pix2,
                                     ptrdiff_t stride, int h)
{
    int s = 0, i, j;

#define V(x) (pix1[x] - pix2[x])

    s    += abs(V(0));
    s    += abs(V(1) - V(0));
    s    += abs(V(2) - V(1));
    s    += abs(V(3) - V(2));
    s    += abs(V(4) - V(3));
    s    += abs(V(5) - V(4));
    s    += abs(V(6) - V(5));
    s    += abs(V(7) - V(6));
    s    += abs(V(8) - V(7));
    s    += abs(V(9) - V(8));
    s    += abs(V(10) - V(9));
    s    += abs(V(11) - V(10));
    s    += abs(V(12) - V(11));
    s    += abs(V(13) - V(12));
    s    += abs(V(14) - V(13));
    s    += abs(V(15) - V(14));

    pix1 += stride;
    pix2 += stride;

    for (i = 1; i < h; i++) {
        s += abs(V(0) - V(-stride));
        for (j = 1; j < 16; j++)
            s += abs(V(j) - mid_pred(V(j-stride), V(j-1), V(j-stride) + V(j-1) - V(j-stride-1)));
        pix1 += stride;
        pix2 += stride;
    }
#undef V
    return s;
}

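/*
 * Illustrative note: mid_pred(l, t, l + t - tl) above is the classic median
 * predictor applied to the residual plane: left neighbour, top neighbour,
 * and the planar gradient estimate. A worked example, assuming residuals
 *
 *     tl = 1   t = 5
 *      l = 4   cur = ?
 *
 * gives mid_pred(5, 4, 5 + 4 - 1) = median(5, 4, 8) = 5, so the row loop
 * adds abs(cur - 5) for that position.
 */
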
static int pix_abs16_x2_c(MpegEncContext *v, uint8_t *pix1, uint8_t *pix2,
                          ptrdiff_t stride, int h)
{
    int s = 0, i;

    for (i = 0; i < h; i++) {
        s += abs(pix1[0] - avg2(pix2[0], pix2[1]));
        s += abs(pix1[1] - avg2(pix2[1], pix2[2]));
        s += abs(pix1[2] - avg2(pix2[2], pix2[3]));
        s += abs(pix1[3] - avg2(pix2[3], pix2[4]));
        s += abs(pix1[4] - avg2(pix2[4], pix2[5]));
        s += abs(pix1[5] - avg2(pix2[5], pix2[6]));
        s += abs(pix1[6] - avg2(pix2[6], pix2[7]));
        s += abs(pix1[7] - avg2(pix2[7], pix2[8]));
        s += abs(pix1[8] - avg2(pix2[8], pix2[9]));
        s += abs(pix1[9] - avg2(pix2[9], pix2[10]));
        s += abs(pix1[10] - avg2(pix2[10], pix2[11]));
        s += abs(pix1[11] - avg2(pix2[11], pix2[12]));
        s += abs(pix1[12] - avg2(pix2[12], pix2[13]));
        s += abs(pix1[13] - avg2(pix2[13], pix2[14]));
        s += abs(pix1[14] - avg2(pix2[14], pix2[15]));
        s += abs(pix1[15] - avg2(pix2[15], pix2[16]));
        pix1 += stride;
        pix2 += stride;
    }

    return s;
}

static int pix_abs16_y2_c(MpegEncContext *v, uint8_t *pix1, uint8_t *pix2,
                          ptrdiff_t stride, int h)
{
    int s = 0, i;
    uint8_t *pix3 = pix2 + stride;

    for (i = 0; i < h; i++) {
        s += abs(pix1[0] - avg2(pix2[0], pix3[0]));
        s += abs(pix1[1] - avg2(pix2[1], pix3[1]));
        s += abs(pix1[2] - avg2(pix2[2], pix3[2]));
        s += abs(pix1[3] - avg2(pix2[3], pix3[3]));
        s += abs(pix1[4] - avg2(pix2[4], pix3[4]));
        s += abs(pix1[5] - avg2(pix2[5], pix3[5]));
        s += abs(pix1[6] - avg2(pix2[6], pix3[6]));
        s += abs(pix1[7] - avg2(pix2[7], pix3[7]));
        s += abs(pix1[8] - avg2(pix2[8], pix3[8]));
        s += abs(pix1[9] - avg2(pix2[9], pix3[9]));
        s += abs(pix1[10] - avg2(pix2[10], pix3[10]));
        s += abs(pix1[11] - avg2(pix2[11], pix3[11]));
        s += abs(pix1[12] - avg2(pix2[12], pix3[12]));
        s += abs(pix1[13] - avg2(pix2[13], pix3[13]));
        s += abs(pix1[14] - avg2(pix2[14], pix3[14]));
        s += abs(pix1[15] - avg2(pix2[15], pix3[15]));
        pix1 += stride;
        pix2 += stride;
        pix3 += stride;
    }

    return s;
}

static int pix_abs16_xy2_c(MpegEncContext *v, uint8_t *pix1, uint8_t *pix2,
                           ptrdiff_t stride, int h)
{
    int s = 0, i;
    uint8_t *pix3 = pix2 + stride;

    for (i = 0; i < h; i++) {
        s += abs(pix1[0] - avg4(pix2[0], pix2[1], pix3[0], pix3[1]));
        s += abs(pix1[1] - avg4(pix2[1], pix2[2], pix3[1], pix3[2]));
        s += abs(pix1[2] - avg4(pix2[2], pix2[3], pix3[2], pix3[3]));
        s += abs(pix1[3] - avg4(pix2[3], pix2[4], pix3[3], pix3[4]));
        s += abs(pix1[4] - avg4(pix2[4], pix2[5], pix3[4], pix3[5]));
        s += abs(pix1[5] - avg4(pix2[5], pix2[6], pix3[5], pix3[6]));
        s += abs(pix1[6] - avg4(pix2[6], pix2[7], pix3[6], pix3[7]));
        s += abs(pix1[7] - avg4(pix2[7], pix2[8], pix3[7], pix3[8]));
        s += abs(pix1[8] - avg4(pix2[8], pix2[9], pix3[8], pix3[9]));
        s += abs(pix1[9] - avg4(pix2[9], pix2[10], pix3[9], pix3[10]));
        s += abs(pix1[10] - avg4(pix2[10], pix2[11], pix3[10], pix3[11]));
        s += abs(pix1[11] - avg4(pix2[11], pix2[12], pix3[11], pix3[12]));
        s += abs(pix1[12] - avg4(pix2[12], pix2[13], pix3[12], pix3[13]));
        s += abs(pix1[13] - avg4(pix2[13], pix2[14], pix3[13], pix3[14]));
        s += abs(pix1[14] - avg4(pix2[14], pix2[15], pix3[14], pix3[15]));
        s += abs(pix1[15] - avg4(pix2[15], pix2[16], pix3[15], pix3[16]));
        pix1 += stride;
        pix2 += stride;
        pix3 += stride;
    }

    return s;
}

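/*
 * Illustrative note: the _x2/_y2/_xy2 variants score a candidate against the
 * half-pel interpolated reference (horizontal, vertical and diagonal
 * respectively), so the motion search can rate sub-pel positions without
 * materializing the interpolated block first.
 */
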
static inline int pix_abs8_c(MpegEncContext *v, uint8_t *pix1, uint8_t *pix2,
                             ptrdiff_t stride, int h)
{
    int s = 0, i;

    for (i = 0; i < h; i++) {
        s += abs(pix1[0] - pix2[0]);
        s += abs(pix1[1] - pix2[1]);
        s += abs(pix1[2] - pix2[2]);
        s += abs(pix1[3] - pix2[3]);
        s += abs(pix1[4] - pix2[4]);
        s += abs(pix1[5] - pix2[5]);
        s += abs(pix1[6] - pix2[6]);
        s += abs(pix1[7] - pix2[7]);
        pix1 += stride;
        pix2 += stride;
    }

    return s;
}

static inline int pix_median_abs8_c(MpegEncContext *v, uint8_t *pix1, uint8_t *pix2,
                                    ptrdiff_t stride, int h)
{
    int s = 0, i, j;

#define V(x) (pix1[x] - pix2[x])

    s    += abs(V(0));
    s    += abs(V(1) - V(0));
    s    += abs(V(2) - V(1));
    s    += abs(V(3) - V(2));
    s    += abs(V(4) - V(3));
    s    += abs(V(5) - V(4));
    s    += abs(V(6) - V(5));
    s    += abs(V(7) - V(6));

    pix1 += stride;
    pix2 += stride;

    for (i = 1; i < h; i++) {
        s += abs(V(0) - V(-stride));
        for (j = 1; j < 8; j++)
            s += abs(V(j) - mid_pred(V(j-stride), V(j-1), V(j-stride) + V(j-1) - V(j-stride-1)));
        pix1 += stride;
        pix2 += stride;
    }
#undef V
    return s;
}

static int pix_abs8_x2_c(MpegEncContext *v, uint8_t *pix1, uint8_t *pix2,
                         ptrdiff_t stride, int h)
{
    int s = 0, i;

    for (i = 0; i < h; i++) {
        s += abs(pix1[0] - avg2(pix2[0], pix2[1]));
        s += abs(pix1[1] - avg2(pix2[1], pix2[2]));
        s += abs(pix1[2] - avg2(pix2[2], pix2[3]));
        s += abs(pix1[3] - avg2(pix2[3], pix2[4]));
        s += abs(pix1[4] - avg2(pix2[4], pix2[5]));
        s += abs(pix1[5] - avg2(pix2[5], pix2[6]));
        s += abs(pix1[6] - avg2(pix2[6], pix2[7]));
        s += abs(pix1[7] - avg2(pix2[7], pix2[8]));
        pix1 += stride;
        pix2 += stride;
    }

    return s;
}

static int pix_abs8_y2_c(MpegEncContext *v, uint8_t *pix1, uint8_t *pix2,
                         ptrdiff_t stride, int h)
{
    int s = 0, i;
    uint8_t *pix3 = pix2 + stride;

    for (i = 0; i < h; i++) {
        s += abs(pix1[0] - avg2(pix2[0], pix3[0]));
        s += abs(pix1[1] - avg2(pix2[1], pix3[1]));
        s += abs(pix1[2] - avg2(pix2[2], pix3[2]));
        s += abs(pix1[3] - avg2(pix2[3], pix3[3]));
        s += abs(pix1[4] - avg2(pix2[4], pix3[4]));
        s += abs(pix1[5] - avg2(pix2[5], pix3[5]));
        s += abs(pix1[6] - avg2(pix2[6], pix3[6]));
        s += abs(pix1[7] - avg2(pix2[7], pix3[7]));
        pix1 += stride;
        pix2 += stride;
        pix3 += stride;
    }

    return s;
}

static int pix_abs8_xy2_c(MpegEncContext *v, uint8_t *pix1, uint8_t *pix2,
                          ptrdiff_t stride, int h)
{
    int s = 0, i;
    uint8_t *pix3 = pix2 + stride;

    for (i = 0; i < h; i++) {
        s += abs(pix1[0] - avg4(pix2[0], pix2[1], pix3[0], pix3[1]));
        s += abs(pix1[1] - avg4(pix2[1], pix2[2], pix3[1], pix3[2]));
        s += abs(pix1[2] - avg4(pix2[2], pix2[3], pix3[2], pix3[3]));
        s += abs(pix1[3] - avg4(pix2[3], pix2[4], pix3[3], pix3[4]));
        s += abs(pix1[4] - avg4(pix2[4], pix2[5], pix3[4], pix3[5]));
        s += abs(pix1[5] - avg4(pix2[5], pix2[6], pix3[5], pix3[6]));
        s += abs(pix1[6] - avg4(pix2[6], pix2[7], pix3[6], pix3[7]));
        s += abs(pix1[7] - avg4(pix2[7], pix2[8], pix3[7], pix3[8]));
        pix1 += stride;
        pix2 += stride;
        pix3 += stride;
    }

    return s;
}

static int nsse16_c(MpegEncContext *c, uint8_t *s1, uint8_t *s2,
                    ptrdiff_t stride, int h)
{
    int score1 = 0, score2 = 0, x, y;

    for (y = 0; y < h; y++) {
        for (x = 0; x < 16; x++)
            score1 += (s1[x] - s2[x]) * (s1[x] - s2[x]);
        if (y + 1 < h) {
            for (x = 0; x < 15; x++)
                score2 += FFABS(s1[x] - s1[x + stride] -
                                s1[x + 1] + s1[x + stride + 1]) -
                          FFABS(s2[x] - s2[x + stride] -
                                s2[x + 1] + s2[x + stride + 1]);
        }
        s1 += stride;
        s2 += stride;
    }

    if (c)
        return score1 + FFABS(score2) * c->avctx->nsse_weight;
    else
        return score1 + FFABS(score2) * 8;
}

static int nsse8_c(MpegEncContext *c, uint8_t *s1, uint8_t *s2,
                   ptrdiff_t stride, int h)
{
    int score1 = 0, score2 = 0, x, y;

    for (y = 0; y < h; y++) {
        for (x = 0; x < 8; x++)
            score1 += (s1[x] - s2[x]) * (s1[x] - s2[x]);
        if (y + 1 < h) {
            for (x = 0; x < 7; x++)
                score2 += FFABS(s1[x] - s1[x + stride] -
                                s1[x + 1] + s1[x + stride + 1]) -
                          FFABS(s2[x] - s2[x + stride] -
                                s2[x + 1] + s2[x + stride + 1]);
        }
        s1 += stride;
        s2 += stride;
    }

    if (c)
        return score1 + FFABS(score2) * c->avctx->nsse_weight;
    else
        return score1 + FFABS(score2) * 8;
}

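/*
 * Illustrative note: "noise preserving" SSE. score1 is plain SSE; score2
 * compares the 2x2 gradient energy of source and reconstruction, so pure
 * blurring (which SSE alone rewards) is penalized. Roughly:
 *
 *     nsse = sse + nsse_weight * |grad_energy(s1) - grad_energy(s2)|
 *
 * with nsse_weight taken from the codec context, or 8 when no context is
 * passed.
 */
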
static int zero_cmp(MpegEncContext *s, uint8_t *a, uint8_t *b,
                    ptrdiff_t stride, int h)
{
    return 0;
}

void ff_set_cmp(MECmpContext *c, me_cmp_func *cmp, int type)
{
    int i;

    memset(cmp, 0, sizeof(void *) * 6);

    for (i = 0; i < 6; i++) {
        switch (type & 0xFF) {
        case FF_CMP_SAD:        cmp[i] = c->sad[i];            break;
        case FF_CMP_MEDIAN_SAD: cmp[i] = c->median_sad[i];     break;
        case FF_CMP_SATD:       cmp[i] = c->hadamard8_diff[i]; break;
        case FF_CMP_SSE:        cmp[i] = c->sse[i];            break;
        case FF_CMP_DCT:        cmp[i] = c->dct_sad[i];        break;
        case FF_CMP_DCT264:     cmp[i] = c->dct264_sad[i];     break;
        case FF_CMP_DCTMAX:     cmp[i] = c->dct_max[i];        break;
        case FF_CMP_PSNR:       cmp[i] = c->quant_psnr[i];     break;
        case FF_CMP_BIT:        cmp[i] = c->bit[i];            break;
        case FF_CMP_RD:         cmp[i] = c->rd[i];             break;
        case FF_CMP_VSAD:       cmp[i] = c->vsad[i];           break;
        case FF_CMP_VSSE:       cmp[i] = c->vsse[i];           break;
        case FF_CMP_ZERO:       cmp[i] = zero_cmp;             break;
        case FF_CMP_NSSE:       cmp[i] = c->nsse[i];           break;
        case FF_CMP_W53:        cmp[i] = c->w53[i];            break;
        case FF_CMP_W97:        cmp[i] = c->w97[i];            break;
        default:
            av_log(NULL, AV_LOG_ERROR,
                   "internal error in cmp function selection\n");
        }
    }
}

#define BUTTERFLY2(o1, o2, i1, i2)              \
    o1 = (i1) + (i2);                           \
    o2 = (i1) - (i2);

#define BUTTERFLY1(x, y)                        \
    {                                           \
        int a, b;                               \
        a = x;                                  \
        b = y;                                  \
        x = a + b;                              \
        y = a - b;                              \
    }

#define BUTTERFLYA(x, y) (FFABS((x) + (y)) + FFABS((x) - (y)))

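/*
 * Illustrative note: BUTTERFLY2/BUTTERFLY1 are one stage of an in-place
 * Hadamard transform; BUTTERFLYA folds the final stage into the absolute
 * sum. A 2-point sketch of the identity being exploited:
 *
 *     int x = 3, y = 5;
 *     BUTTERFLY1(x, y);                 // x = 8, y = -2
 *     int satd = FFABS(x) + FFABS(y);   // 10 == BUTTERFLYA(3, 5)
 */
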
static int hadamard8_diff8x8_c(MpegEncContext *s, uint8_t *dst,
                               uint8_t *src, ptrdiff_t stride, int h)
{
    int i, temp[64], sum = 0;

    av_assert2(h == 8);

    for (i = 0; i < 8; i++) {
        // FIXME: try pointer walks
        BUTTERFLY2(temp[8 * i + 0], temp[8 * i + 1],
                   src[stride * i + 0] - dst[stride * i + 0],
                   src[stride * i + 1] - dst[stride * i + 1]);
        BUTTERFLY2(temp[8 * i + 2], temp[8 * i + 3],
                   src[stride * i + 2] - dst[stride * i + 2],
                   src[stride * i + 3] - dst[stride * i + 3]);
        BUTTERFLY2(temp[8 * i + 4], temp[8 * i + 5],
                   src[stride * i + 4] - dst[stride * i + 4],
                   src[stride * i + 5] - dst[stride * i + 5]);
        BUTTERFLY2(temp[8 * i + 6], temp[8 * i + 7],
                   src[stride * i + 6] - dst[stride * i + 6],
                   src[stride * i + 7] - dst[stride * i + 7]);

        BUTTERFLY1(temp[8 * i + 0], temp[8 * i + 2]);
        BUTTERFLY1(temp[8 * i + 1], temp[8 * i + 3]);
        BUTTERFLY1(temp[8 * i + 4], temp[8 * i + 6]);
        BUTTERFLY1(temp[8 * i + 5], temp[8 * i + 7]);

        BUTTERFLY1(temp[8 * i + 0], temp[8 * i + 4]);
        BUTTERFLY1(temp[8 * i + 1], temp[8 * i + 5]);
        BUTTERFLY1(temp[8 * i + 2], temp[8 * i + 6]);
        BUTTERFLY1(temp[8 * i + 3], temp[8 * i + 7]);
    }

    for (i = 0; i < 8; i++) {
        BUTTERFLY1(temp[8 * 0 + i], temp[8 * 1 + i]);
        BUTTERFLY1(temp[8 * 2 + i], temp[8 * 3 + i]);
        BUTTERFLY1(temp[8 * 4 + i], temp[8 * 5 + i]);
        BUTTERFLY1(temp[8 * 6 + i], temp[8 * 7 + i]);

        BUTTERFLY1(temp[8 * 0 + i], temp[8 * 2 + i]);
        BUTTERFLY1(temp[8 * 1 + i], temp[8 * 3 + i]);
        BUTTERFLY1(temp[8 * 4 + i], temp[8 * 6 + i]);
        BUTTERFLY1(temp[8 * 5 + i], temp[8 * 7 + i]);

        sum += BUTTERFLYA(temp[8 * 0 + i], temp[8 * 4 + i]) +
               BUTTERFLYA(temp[8 * 1 + i], temp[8 * 5 + i]) +
               BUTTERFLYA(temp[8 * 2 + i], temp[8 * 6 + i]) +
               BUTTERFLYA(temp[8 * 3 + i], temp[8 * 7 + i]);
    }

    return sum;
}

static int hadamard8_intra8x8_c(MpegEncContext *s, uint8_t *src,
                                uint8_t *dummy, ptrdiff_t stride, int h)
{
    int i, temp[64], sum = 0;

    av_assert2(h == 8);

    for (i = 0; i < 8; i++) {
        // FIXME: try pointer walks
        BUTTERFLY2(temp[8 * i + 0], temp[8 * i + 1],
                   src[stride * i + 0], src[stride * i + 1]);
        BUTTERFLY2(temp[8 * i + 2], temp[8 * i + 3],
                   src[stride * i + 2], src[stride * i + 3]);
        BUTTERFLY2(temp[8 * i + 4], temp[8 * i + 5],
                   src[stride * i + 4], src[stride * i + 5]);
        BUTTERFLY2(temp[8 * i + 6], temp[8 * i + 7],
                   src[stride * i + 6], src[stride * i + 7]);

        BUTTERFLY1(temp[8 * i + 0], temp[8 * i + 2]);
        BUTTERFLY1(temp[8 * i + 1], temp[8 * i + 3]);
        BUTTERFLY1(temp[8 * i + 4], temp[8 * i + 6]);
        BUTTERFLY1(temp[8 * i + 5], temp[8 * i + 7]);

        BUTTERFLY1(temp[8 * i + 0], temp[8 * i + 4]);
        BUTTERFLY1(temp[8 * i + 1], temp[8 * i + 5]);
        BUTTERFLY1(temp[8 * i + 2], temp[8 * i + 6]);
        BUTTERFLY1(temp[8 * i + 3], temp[8 * i + 7]);
    }

    for (i = 0; i < 8; i++) {
        BUTTERFLY1(temp[8 * 0 + i], temp[8 * 1 + i]);
        BUTTERFLY1(temp[8 * 2 + i], temp[8 * 3 + i]);
        BUTTERFLY1(temp[8 * 4 + i], temp[8 * 5 + i]);
        BUTTERFLY1(temp[8 * 6 + i], temp[8 * 7 + i]);

        BUTTERFLY1(temp[8 * 0 + i], temp[8 * 2 + i]);
        BUTTERFLY1(temp[8 * 1 + i], temp[8 * 3 + i]);
        BUTTERFLY1(temp[8 * 4 + i], temp[8 * 6 + i]);
        BUTTERFLY1(temp[8 * 5 + i], temp[8 * 7 + i]);

        sum +=
            BUTTERFLYA(temp[8 * 0 + i], temp[8 * 4 + i])
            + BUTTERFLYA(temp[8 * 1 + i], temp[8 * 5 + i])
            + BUTTERFLYA(temp[8 * 2 + i], temp[8 * 6 + i])
            + BUTTERFLYA(temp[8 * 3 + i], temp[8 * 7 + i]);
    }

    sum -= FFABS(temp[8 * 0] + temp[8 * 4]); // -mean

    return sum;
}

static int dct_sad8x8_c(MpegEncContext *s, uint8_t *src1,
                        uint8_t *src2, ptrdiff_t stride, int h)
{
    LOCAL_ALIGNED_16(int16_t, temp, [64]);

    av_assert2(h == 8);

    s->pdsp.diff_pixels_unaligned(temp, src1, src2, stride);
    s->fdsp.fdct(temp);
    return s->mecc.sum_abs_dctelem(temp);
}

#if CONFIG_GPL
#define DCT8_1D                                         \
    {                                                   \
        const int s07 = SRC(0) + SRC(7);                \
        const int s16 = SRC(1) + SRC(6);                \
        const int s25 = SRC(2) + SRC(5);                \
        const int s34 = SRC(3) + SRC(4);                \
        const int a0  = s07 + s34;                      \
        const int a1  = s16 + s25;                      \
        const int a2  = s07 - s34;                      \
        const int a3  = s16 - s25;                      \
        const int d07 = SRC(0) - SRC(7);                \
        const int d16 = SRC(1) - SRC(6);                \
        const int d25 = SRC(2) - SRC(5);                \
        const int d34 = SRC(3) - SRC(4);                \
        const int a4  = d16 + d25 + (d07 + (d07 >> 1)); \
        const int a5  = d07 - d34 - (d25 + (d25 >> 1)); \
        const int a6  = d07 + d34 - (d16 + (d16 >> 1)); \
        const int a7  = d16 - d25 + (d34 + (d34 >> 1)); \
        DST(0, a0 + a1);                                \
        DST(1, a4 + (a7 >> 2));                         \
        DST(2, a2 + (a3 >> 1));                         \
        DST(3, a5 + (a6 >> 2));                         \
        DST(4, a0 - a1);                                \
        DST(5, a6 - (a5 >> 2));                         \
        DST(6, (a2 >> 1) - a3);                         \
        DST(7, (a4 >> 2) - a7);                         \
    }

static int dct264_sad8x8_c(MpegEncContext *s, uint8_t *src1,
                           uint8_t *src2, ptrdiff_t stride, int h)
{
    int16_t dct[8][8];
    int i, sum = 0;

    s->pdsp.diff_pixels_unaligned(dct[0], src1, src2, stride);

#define SRC(x) dct[i][x]
#define DST(x, v) dct[i][x] = v
    for (i = 0; i < 8; i++)
        DCT8_1D
#undef SRC
#undef DST

#define SRC(x) dct[x][i]
#define DST(x, v) sum += FFABS(v)
    for (i = 0; i < 8; i++)
        DCT8_1D
#undef SRC
#undef DST
    return sum;
}
#endif

static int dct_max8x8_c(MpegEncContext *s, uint8_t *src1,
                        uint8_t *src2, ptrdiff_t stride, int h)
{
    LOCAL_ALIGNED_16(int16_t, temp, [64]);
    int sum = 0, i;

    av_assert2(h == 8);

    s->pdsp.diff_pixels_unaligned(temp, src1, src2, stride);
    s->fdsp.fdct(temp);

    for (i = 0; i < 64; i++)
        sum = FFMAX(sum, FFABS(temp[i]));

    return sum;
}

static int quant_psnr8x8_c(MpegEncContext *s, uint8_t *src1,
                           uint8_t *src2, ptrdiff_t stride, int h)
{
    LOCAL_ALIGNED_16(int16_t, temp, [64 * 2]);
    int16_t *const bak = temp + 64;
    int sum = 0, i;

    av_assert2(h == 8);
    s->mb_intra = 0;

    s->pdsp.diff_pixels_unaligned(temp, src1, src2, stride);

    memcpy(bak, temp, 64 * sizeof(int16_t));

    s->block_last_index[0 /* FIXME */] =
        s->fast_dct_quantize(s, temp, 0 /* FIXME */, s->qscale, &i);
    s->dct_unquantize_inter(s, temp, 0, s->qscale);
    ff_simple_idct_int16_8bit(temp); // FIXME

    for (i = 0; i < 64; i++)
        sum += (temp[i] - bak[i]) * (temp[i] - bak[i]);

    return sum;
}

static int rd8x8_c(MpegEncContext *s, uint8_t *src1, uint8_t *src2,
                   ptrdiff_t stride, int h)
{
    const uint8_t *scantable = s->intra_scantable.permutated;
    LOCAL_ALIGNED_16(int16_t, temp, [64]);
    LOCAL_ALIGNED_16(uint8_t, lsrc1, [64]);
    LOCAL_ALIGNED_16(uint8_t, lsrc2, [64]);
    int i, last, run, bits, level, distortion, start_i;
    const int esc_length = s->ac_esc_length;
    uint8_t *length, *last_length;

    av_assert2(h == 8);

    copy_block8(lsrc1, src1, 8, stride, 8);
    copy_block8(lsrc2, src2, 8, stride, 8);

    s->pdsp.diff_pixels(temp, lsrc1, lsrc2, 8);

    s->block_last_index[0 /* FIXME */] =
        last                          =
        s->fast_dct_quantize(s, temp, 0 /* FIXME */, s->qscale, &i);

    bits = 0;

    if (s->mb_intra) {
        start_i     = 1;
        length      = s->intra_ac_vlc_length;
        last_length = s->intra_ac_vlc_last_length;
        bits       += s->luma_dc_vlc_length[temp[0] + 256]; // FIXME: chroma
    } else {
        start_i     = 0;
        length      = s->inter_ac_vlc_length;
        last_length = s->inter_ac_vlc_last_length;
    }

    if (last >= start_i) {
        run = 0;
        for (i = start_i; i < last; i++) {
            int j = scantable[i];
            level = temp[j];

            if (level) {
                level += 64;
                if ((level & (~127)) == 0)
                    bits += length[UNI_AC_ENC_INDEX(run, level)];
                else
                    bits += esc_length;
                run = 0;
            } else
                run++;
        }
        i = scantable[last];

        level = temp[i] + 64;

        av_assert2(level - 64);

        if ((level & (~127)) == 0) {
            bits += last_length[UNI_AC_ENC_INDEX(run, level)];
        } else
            bits += esc_length;
    }

    if (last >= 0) {
        if (s->mb_intra)
            s->dct_unquantize_intra(s, temp, 0, s->qscale);
        else
            s->dct_unquantize_inter(s, temp, 0, s->qscale);
    }

    s->idsp.idct_add(lsrc2, 8, temp);

    distortion = s->mecc.sse[1](NULL, lsrc2, lsrc1, 8, 8);

    return distortion + ((bits * s->qscale * s->qscale * 109 + 64) >> 7);
}

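/*
 * Illustrative note: the constant in the return mirrors the usual
 * MPEG-style lambda ~= 0.85 * qscale^2 (109 / 128 ~= 0.85), with the
 * "+ 64" and ">> 7" performing a rounded division by 128. The score is
 * therefore distortion + lambda * bits, evaluated in integer arithmetic.
 */
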
static int bit8x8_c(MpegEncContext *s, uint8_t *src1, uint8_t *src2,
                    ptrdiff_t stride, int h)
{
    const uint8_t *scantable = s->intra_scantable.permutated;
    LOCAL_ALIGNED_16(int16_t, temp, [64]);
    int i, last, run, bits, level, start_i;
    const int esc_length = s->ac_esc_length;
    uint8_t *length, *last_length;

    av_assert2(h == 8);

    s->pdsp.diff_pixels_unaligned(temp, src1, src2, stride);

    s->block_last_index[0 /* FIXME */] =
        last                          =
        s->fast_dct_quantize(s, temp, 0 /* FIXME */, s->qscale, &i);

    bits = 0;

    if (s->mb_intra) {
        start_i     = 1;
        length      = s->intra_ac_vlc_length;
        last_length = s->intra_ac_vlc_last_length;
        bits       += s->luma_dc_vlc_length[temp[0] + 256]; // FIXME: chroma
    } else {
        start_i     = 0;
        length      = s->inter_ac_vlc_length;
        last_length = s->inter_ac_vlc_last_length;
    }

    if (last >= start_i) {
        run = 0;
        for (i = start_i; i < last; i++) {
            int j = scantable[i];
            level = temp[j];

            if (level) {
                level += 64;
                if ((level & (~127)) == 0)
                    bits += length[UNI_AC_ENC_INDEX(run, level)];
                else
                    bits += esc_length;
                run = 0;
            } else
                run++;
        }
        i = scantable[last];

        level = temp[i] + 64;

        av_assert2(level - 64);

        if ((level & (~127)) == 0)
            bits += last_length[UNI_AC_ENC_INDEX(run, level)];
        else
            bits += esc_length;
    }

    return bits;
}

#define VSAD_INTRA(size)                                                \
static int vsad_intra ## size ## _c(MpegEncContext *c,                  \
                                    uint8_t *s, uint8_t *dummy,         \
                                    ptrdiff_t stride, int h)            \
{                                                                       \
    int score = 0, x, y;                                                \
                                                                        \
    for (y = 1; y < h; y++) {                                           \
        for (x = 0; x < size; x += 4) {                                 \
            score += FFABS(s[x] - s[x + stride]) +                      \
                     FFABS(s[x + 1] - s[x + stride + 1]) +              \
                     FFABS(s[x + 2] - s[x + 2 + stride]) +              \
                     FFABS(s[x + 3] - s[x + 3 + stride]);               \
        }                                                               \
        s += stride;                                                    \
    }                                                                   \
                                                                        \
    return score;                                                       \
}

VSAD_INTRA(8)
VSAD_INTRA(16)

#define VSAD(size)                                                            \
static int vsad ## size ## _c(MpegEncContext *c,                              \
                              uint8_t *s1, uint8_t *s2,                       \
                              ptrdiff_t stride, int h)                        \
{                                                                             \
    int score = 0, x, y;                                                      \
                                                                              \
    for (y = 1; y < h; y++) {                                                 \
        for (x = 0; x < size; x++)                                            \
            score += FFABS(s1[x] - s2[x] - s1[x + stride] + s2[x + stride]);  \
        s1 += stride;                                                         \
        s2 += stride;                                                         \
    }                                                                         \
                                                                              \
    return score;                                                             \
}

VSAD(8)
VSAD(16)

#define SQ(a) ((a) * (a))
#define VSSE_INTRA(size)                                                \
static int vsse_intra ## size ## _c(MpegEncContext *c,                  \
                                    uint8_t *s, uint8_t *dummy,         \
                                    ptrdiff_t stride, int h)            \
{                                                                       \
    int score = 0, x, y;                                                \
                                                                        \
    for (y = 1; y < h; y++) {                                           \
        for (x = 0; x < size; x += 4) {                                 \
            score += SQ(s[x] - s[x + stride]) +                         \
                     SQ(s[x + 1] - s[x + stride + 1]) +                 \
                     SQ(s[x + 2] - s[x + stride + 2]) +                 \
                     SQ(s[x + 3] - s[x + stride + 3]);                  \
        }                                                               \
        s += stride;                                                    \
    }                                                                   \
                                                                        \
    return score;                                                       \
}

VSSE_INTRA(8)
VSSE_INTRA(16)

#define VSSE(size)                                                            \
static int vsse ## size ## _c(MpegEncContext *c, uint8_t *s1, uint8_t *s2,    \
                              ptrdiff_t stride, int h)                        \
{                                                                             \
    int score = 0, x, y;                                                      \
                                                                              \
    for (y = 1; y < h; y++) {                                                 \
        for (x = 0; x < size; x++)                                            \
            score += SQ(s1[x] - s2[x] - s1[x + stride] + s2[x + stride]);     \
        s1 += stride;                                                         \
        s2 += stride;                                                         \
    }                                                                         \
                                                                              \
    return score;                                                             \
}

VSSE(8)
VSSE(16)

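/*
 * Illustrative note: the vsad/vsse family measures vertical activity: the
 * intra variants sum |s[x] - s[x + stride]| (or its square) down each
 * column, and the inter variants do the same on the residual s1 - s2.
 * Row-to-row differences spike on combed content, which makes these cheap
 * comparators plausible choices for interlace-related decisions.
 */
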
#define WRAPPER8_16_SQ(name8, name16)                                   \
static int name16(MpegEncContext *s, uint8_t *dst, uint8_t *src,        \
                  ptrdiff_t stride, int h)                              \
{                                                                       \
    int score = 0;                                                      \
                                                                        \
    score += name8(s, dst, src, stride, 8);                             \
    score += name8(s, dst + 8, src + 8, stride, 8);                     \
    if (h == 16) {                                                      \
        dst   += 8 * stride;                                            \
        src   += 8 * stride;                                            \
        score += name8(s, dst, src, stride, 8);                         \
        score += name8(s, dst + 8, src + 8, stride, 8);                 \
    }                                                                   \
    return score;                                                       \
}

WRAPPER8_16_SQ(hadamard8_diff8x8_c, hadamard8_diff16_c)
WRAPPER8_16_SQ(hadamard8_intra8x8_c, hadamard8_intra16_c)
WRAPPER8_16_SQ(dct_sad8x8_c, dct_sad16_c)
#if CONFIG_GPL
WRAPPER8_16_SQ(dct264_sad8x8_c, dct264_sad16_c)
#endif
WRAPPER8_16_SQ(dct_max8x8_c, dct_max16_c)
WRAPPER8_16_SQ(quant_psnr8x8_c, quant_psnr16_c)
WRAPPER8_16_SQ(rd8x8_c, rd16_c)
WRAPPER8_16_SQ(bit8x8_c, bit16_c)

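/*
 * Illustrative note: each WRAPPER8_16_SQ line above expands to a 16-wide
 * comparator built from 8x8 calls, conceptually:
 *
 *     hadamard8_diff16_c(s, dst, src, stride, 16)
 *         == hadamard8_diff8x8_c(s, dst,     src,     stride, 8)
 *          + hadamard8_diff8x8_c(s, dst + 8, src + 8, stride, 8)
 *          + (the two 8x8 blocks of the lower half, when h == 16)
 */
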
av_cold void ff_me_cmp_init(MECmpContext *c, AVCodecContext *avctx)
{
    c->sum_abs_dctelem = sum_abs_dctelem_c;

    /* TODO [0] 16 [1] 8 */
    c->pix_abs[0][0] = pix_abs16_c;
    c->pix_abs[0][1] = pix_abs16_x2_c;
    c->pix_abs[0][2] = pix_abs16_y2_c;
    c->pix_abs[0][3] = pix_abs16_xy2_c;
    c->pix_abs[1][0] = pix_abs8_c;
    c->pix_abs[1][1] = pix_abs8_x2_c;
    c->pix_abs[1][2] = pix_abs8_y2_c;
    c->pix_abs[1][3] = pix_abs8_xy2_c;

#define SET_CMP_FUNC(name)                      \
    c->name[0] = name ## 16_c;                  \
    c->name[1] = name ## 8x8_c;

    SET_CMP_FUNC(hadamard8_diff)
    c->hadamard8_diff[4] = hadamard8_intra16_c;
    c->hadamard8_diff[5] = hadamard8_intra8x8_c;
    SET_CMP_FUNC(dct_sad)
    SET_CMP_FUNC(dct_max)
#if CONFIG_GPL
    SET_CMP_FUNC(dct264_sad)
#endif
    c->sad[0] = pix_abs16_c;
    c->sad[1] = pix_abs8_c;
    c->sse[0] = sse16_c;
    c->sse[1] = sse8_c;
    c->sse[2] = sse4_c;
    SET_CMP_FUNC(quant_psnr)
    SET_CMP_FUNC(rd)
    SET_CMP_FUNC(bit)
    c->vsad[0] = vsad16_c;
    c->vsad[1] = vsad8_c;
    c->vsad[4] = vsad_intra16_c;
    c->vsad[5] = vsad_intra8_c;
    c->vsse[0] = vsse16_c;
    c->vsse[1] = vsse8_c;
    c->vsse[4] = vsse_intra16_c;
    c->vsse[5] = vsse_intra8_c;
    c->nsse[0] = nsse16_c;
    c->nsse[1] = nsse8_c;
#if CONFIG_SNOW_DECODER || CONFIG_SNOW_ENCODER
    ff_dsputil_init_dwt(c);
#endif

    if (ARCH_ALPHA)
        ff_me_cmp_init_alpha(c, avctx);
    if (ARCH_ARM)
        ff_me_cmp_init_arm(c, avctx);
    if (ARCH_PPC)
        ff_me_cmp_init_ppc(c, avctx);
    if (ARCH_X86)
        ff_me_cmp_init_x86(c, avctx);
    if (ARCH_MIPS)
        ff_me_cmp_init_mips(c, avctx);

    c->median_sad[0] = pix_median_abs16_c;
    c->median_sad[1] = pix_median_abs8_c;
}
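
/*
 * Usage sketch (illustrative, not part of the original file): an encoder
 * fills an MECmpContext once, then resolves a comparator table per
 * FF_CMP_* type. The functions and types below are this file's API; only
 * the surrounding scaffolding (avctx, s, cur, ref, stride) is assumed:
 *
 *     MECmpContext mecc;
 *     me_cmp_func sad[6];
 *     ff_me_cmp_init(&mecc, avctx);
 *     ff_set_cmp(&mecc, sad, FF_CMP_SAD);
 *     // sad[0] compares 16-wide blocks, sad[1] 8-wide blocks:
 *     int cost = sad[0](s, cur, ref, stride, 16);
 */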