3 * Copyright (c) 2000, 2001 Fabrice Bellard.
5 * This library is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU Lesser General Public
7 * License as published by the Free Software Foundation; either
8 * version 2 of the License, or (at your option) any later version.
10 * This library is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 * Lesser General Public License for more details.
15 * You should have received a copy of the GNU Lesser General Public
16 * License along with this library; if not, write to the Free Software
17 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
19 * gmc & q-pel & 32/64 bit based MC by Michael Niedermayer <michaelni@gmx.at>
23 #include "mpegvideo.h"
/* Clipping table: indexed below as cropTbl[MAX_NEG_CROP + x] (via
 * `cm = cropTbl + MAX_NEG_CROP`) to clamp values to the 0..255 pixel
 * range.  NOTE(review): initialization code is not in this chunk. */
UINT8 cropTbl[256 + 2 * MAX_NEG_CROP];
/* Square table: used below via `sq = squareTbl + 256`, i.e. sq[x] for
 * x in [-256,255]; presumably sq[x] == x*x — init code not in this
 * chunk, verify at the table's fill site. */
UINT32 squareTbl[512];
30 const UINT8 ff_zigzag_direct[64] = {
31 0, 1, 8, 16, 9, 2, 3, 10,
32 17, 24, 32, 25, 18, 11, 4, 5,
33 12, 19, 26, 33, 40, 48, 41, 34,
34 27, 20, 13, 6, 7, 14, 21, 28,
35 35, 42, 49, 56, 57, 50, 43, 36,
36 29, 22, 15, 23, 30, 37, 44, 51,
37 58, 59, 52, 45, 38, 31, 39, 46,
38 53, 60, 61, 54, 47, 55, 62, 63
/* not permutated inverse zigzag_direct + 1 for MMX quantizer */
/* NOTE(review): contents are filled in at init time elsewhere (not in
 * this chunk); __align8 presumably forces 8-byte alignment for the MMX
 * code — verify against the project's common header. */
UINT16 __align8 inv_zigzag_direct16[64];
44 const UINT8 ff_alternate_horizontal_scan[64] = {
45 0, 1, 2, 3, 8, 9, 16, 17,
46 10, 11, 4, 5, 6, 7, 15, 14,
47 13, 12, 19, 18, 24, 25, 32, 33,
48 26, 27, 20, 21, 22, 23, 28, 29,
49 30, 31, 34, 35, 40, 41, 48, 49,
50 42, 43, 36, 37, 38, 39, 44, 45,
51 46, 47, 50, 51, 56, 57, 58, 59,
52 52, 53, 54, 55, 60, 61, 62, 63,
55 const UINT8 ff_alternate_vertical_scan[64] = {
56 0, 8, 16, 24, 1, 9, 2, 10,
57 17, 25, 32, 40, 48, 56, 57, 49,
58 41, 33, 26, 18, 3, 11, 4, 12,
59 19, 27, 34, 42, 50, 58, 35, 43,
60 51, 59, 20, 28, 5, 13, 6, 14,
61 21, 29, 36, 44, 52, 60, 37, 45,
62 53, 61, 22, 30, 7, 15, 23, 31,
63 38, 46, 54, 62, 39, 47, 55, 63,
66 /* a*inverse[b]>>32 == a/b for all 0<=a<=65536 && 2<=b<=255 */
67 const UINT32 inverse[256]={
68 0, 4294967295U,2147483648U,1431655766, 1073741824, 858993460, 715827883, 613566757,
69 536870912, 477218589, 429496730, 390451573, 357913942, 330382100, 306783379, 286331154,
70 268435456, 252645136, 238609295, 226050911, 214748365, 204522253, 195225787, 186737709,
71 178956971, 171798692, 165191050, 159072863, 153391690, 148102321, 143165577, 138547333,
72 134217728, 130150525, 126322568, 122713352, 119304648, 116080198, 113025456, 110127367,
73 107374183, 104755300, 102261127, 99882961, 97612894, 95443718, 93368855, 91382283,
74 89478486, 87652394, 85899346, 84215046, 82595525, 81037119, 79536432, 78090315,
75 76695845, 75350304, 74051161, 72796056, 71582789, 70409300, 69273667, 68174085,
76 67108864, 66076420, 65075263, 64103990, 63161284, 62245903, 61356676, 60492498,
77 59652324, 58835169, 58040099, 57266231, 56512728, 55778797, 55063684, 54366675,
78 53687092, 53024288, 52377650, 51746594, 51130564, 50529028, 49941481, 49367441,
79 48806447, 48258060, 47721859, 47197443, 46684428, 46182445, 45691142, 45210183,
80 44739243, 44278014, 43826197, 43383509, 42949673, 42524429, 42107523, 41698712,
81 41297763, 40904451, 40518560, 40139882, 39768216, 39403370, 39045158, 38693400,
82 38347923, 38008561, 37675152, 37347542, 37025581, 36709123, 36398028, 36092163,
83 35791395, 35495598, 35204650, 34918434, 34636834, 34359739, 34087043, 33818641,
84 33554432, 33294321, 33038210, 32786010, 32537632, 32292988, 32051995, 31814573,
85 31580642, 31350127, 31122952, 30899046, 30678338, 30460761, 30246249, 30034737,
86 29826162, 29620465, 29417585, 29217465, 29020050, 28825284, 28633116, 28443493,
87 28256364, 28071682, 27889399, 27709467, 27531842, 27356480, 27183338, 27012373,
88 26843546, 26676816, 26512144, 26349493, 26188825, 26030105, 25873297, 25718368,
89 25565282, 25414008, 25264514, 25116768, 24970741, 24826401, 24683721, 24542671,
90 24403224, 24265352, 24129030, 23994231, 23860930, 23729102, 23598722, 23469767,
91 23342214, 23216040, 23091223, 22967740, 22845571, 22724695, 22605092, 22486740,
92 22369622, 22253717, 22139007, 22025474, 21913099, 21801865, 21691755, 21582751,
93 21474837, 21367997, 21262215, 21157475, 21053762, 20951060, 20849356, 20748635,
94 20648882, 20550083, 20452226, 20355296, 20259280, 20164166, 20069941, 19976593,
95 19884108, 19792477, 19701685, 19611723, 19522579, 19434242, 19346700, 19259944,
96 19173962, 19088744, 19004281, 18920561, 18837576, 18755316, 18673771, 18592933,
97 18512791, 18433337, 18354562, 18276457, 18199014, 18122225, 18046082, 17970575,
98 17895698, 17821442, 17747799, 17674763, 17602325, 17530479, 17459217, 17388532,
99 17318417, 17248865, 17179870, 17111424, 17043522, 16976156, 16909321, 16843010,
/* pix_sum_c(): scalar sum over a 16x16 pixel block at `pix` with row
 * stride `line_size` (per its name; see NOTE).
 * NOTE(review): this extract is truncated — the function's opening
 * brace, local declarations, the unrolled per-pixel accumulation inside
 * the j-loop, closing braces and the `return` are not visible here;
 * only the loop structure and the row advance survive. */
static int pix_sum_c(UINT8 * pix, int line_size)
    for (i = 0; i < 16; i++) {
        for (j = 0; j < 16; j += 8) {
        /* advance to the next row (16 pixels were consumed) */
        pix += line_size - 16;
/* pix_norm1_c(): sum of squares of the pixels of a 16x16 block (the
 * squared L2 norm), via the biased square lookup `sq`.
 * NOTE(review): truncated extract — the opening brace, locals, the
 * byte-0 `s += sq[x&0xff];` of each load, the `#else`/`#endif` of the
 * word-size split, inner pointer advances, closing braces and the
 * `return` are not visible here.
 * NOTE(review): the *(uint64_t*)/*(uint32_t*) loads are unaligned and
 * aliasing-unsafe — common for this era's code, flagged for modern
 * compilers. */
static int pix_norm1_c(UINT8 * pix, int line_size)
    UINT32 *sq = squareTbl + 256;
    for (i = 0; i < 16; i++) {
        for (j = 0; j < 16; j += 8) {
#if LONG_MAX > 2147483647
            /* 64-bit longs: load 8 pixels at once, square each byte */
            register uint64_t x=*(uint64_t*)pix;
            s += sq[(x>>8)&0xff];
            s += sq[(x>>16)&0xff];
            s += sq[(x>>24)&0xff];
            s += sq[(x>>32)&0xff];
            s += sq[(x>>40)&0xff];
            s += sq[(x>>48)&0xff];
            s += sq[(x>>56)&0xff];
            /* 32-bit longs: two 4-pixel loads (the #else marker is
             * missing from this extract) */
            register uint32_t x=*(uint32_t*)pix;
            s += sq[(x>>8)&0xff];
            s += sq[(x>>16)&0xff];
            s += sq[(x>>24)&0xff];
            x=*(uint32_t*)(pix+4);
            s += sq[(x>>8)&0xff];
            s += sq[(x>>16)&0xff];
            s += sq[(x>>24)&0xff];
        pix += line_size - 16;
/* sse8_c(): sum of squared errors between two 8-pixel-wide blocks.
 * pix1[k]-pix2[k] may be negative: `sq` is biased by +256 so negative
 * indices stay in range.  First argument `v` is unused in the visible
 * code — presumably a context pointer kept for a common comparison
 * function signature; verify against the dispatch table.
 * NOTE(review): truncated extract — locals, accumulator init, per-row
 * pointer advances, the closing brace and `return` are missing here. */
static int sse8_c(void *v, UINT8 * pix1, UINT8 * pix2, int line_size)
    UINT32 *sq = squareTbl + 256;
    for (i = 0; i < 8; i++) {
        s += sq[pix1[0] - pix2[0]];
        s += sq[pix1[1] - pix2[1]];
        s += sq[pix1[2] - pix2[2]];
        s += sq[pix1[3] - pix2[3]];
        s += sq[pix1[4] - pix2[4]];
        s += sq[pix1[5] - pix2[5]];
        s += sq[pix1[6] - pix2[6]];
        s += sq[pix1[7] - pix2[7]];
/* sse16_c(): sum of squared errors between two 16x16 blocks.  A word-
 * based path (8 pixels per uint64 load on 64-bit longs, or two uint32
 * loads otherwise) unpacks bytes by shifting; a plain byte-wise variant
 * is also visible below it.
 * NOTE(review): truncated extract — locals, the x/y load lines of the
 * 64-bit path, the preprocessor markers separating the variants, inner
 * pointer advances, closing braces and `return` are missing here.
 * NOTE(review): word loads are unaligned and aliasing-unsafe — flagged
 * for modern compilers. */
static int sse16_c(void *v, UINT8 * pix1, UINT8 * pix2, int line_size)
    UINT32 *sq = squareTbl + 256;
    for (i = 0; i < 16; i++) {
        for (j = 0; j < 16; j += 8) {
#if LONG_MAX > 2147483647
            /* 64-bit longs: compare 8 pixel pairs per load */
            s += sq[(x&0xff) - (y&0xff)];
            s += sq[((x>>8)&0xff) - ((y>>8)&0xff)];
            s += sq[((x>>16)&0xff) - ((y>>16)&0xff)];
            s += sq[((x>>24)&0xff) - ((y>>24)&0xff)];
            s += sq[((x>>32)&0xff) - ((y>>32)&0xff)];
            s += sq[((x>>40)&0xff) - ((y>>40)&0xff)];
            s += sq[((x>>48)&0xff) - ((y>>48)&0xff)];
            s += sq[((x>>56)&0xff) - ((y>>56)&0xff)];
            /* 32-bit longs: two 4-pixel compares per 8 pixels */
            s += sq[(x&0xff) - (y&0xff)];
            s += sq[((x>>8)&0xff) - ((y>>8)&0xff)];
            s += sq[((x>>16)&0xff) - ((y>>16)&0xff)];
            s += sq[((x>>24)&0xff) - ((y>>24)&0xff)];
            x=*(uint32_t*)(pix1+4);
            y=*(uint32_t*)(pix2+4);
            s += sq[(x&0xff) - (y&0xff)];
            s += sq[((x>>8)&0xff) - ((y>>8)&0xff)];
            s += sq[((x>>16)&0xff) - ((y>>16)&0xff)];
            s += sq[((x>>24)&0xff) - ((y>>24)&0xff)];
            /* byte-wise variant */
            s += sq[pix1[0] - pix2[0]];
            s += sq[pix1[1] - pix2[1]];
            s += sq[pix1[2] - pix2[2]];
            s += sq[pix1[3] - pix2[3]];
            s += sq[pix1[4] - pix2[4]];
            s += sq[pix1[5] - pix2[5]];
            s += sq[pix1[6] - pix2[6]];
            s += sq[pix1[7] - pix2[7]];
        pix1 += line_size - 16;
        pix2 += line_size - 16;
/* get_pixels_c(): copy an 8-wide row of 8-bit pixels into a DCTELEM
 * array, widening to DCT precision.
 * NOTE(review): truncated extract — the 8-row loop, the
 * `pixels`/`block` per-row advances, locals and closing braces are
 * missing; only one row's unrolled copy is visible. */
static void get_pixels_c(DCTELEM *restrict block, const UINT8 *pixels, int line_size)
    /* read the pixels */
    block[0] = pixels[0];
    block[1] = pixels[1];
    block[2] = pixels[2];
    block[3] = pixels[3];
    block[4] = pixels[4];
    block[5] = pixels[5];
    block[6] = pixels[6];
    block[7] = pixels[7];
/* diff_pixels_c(): store the per-pixel difference s1 - s2 of two 8-wide
 * rows into a DCTELEM array (residual computation before the DCT).
 * NOTE(review): truncated extract — the row loop, s1/s2/block advances,
 * locals and closing braces are missing; only one row's unrolled
 * subtraction is visible. */
static void diff_pixels_c(DCTELEM *restrict block, const UINT8 *s1,
                          const UINT8 *s2, int stride){
    /* read the pixels */
    block[0] = s1[0] - s2[0];
    block[1] = s1[1] - s2[1];
    block[2] = s1[2] - s2[2];
    block[3] = s1[3] - s2[3];
    block[4] = s1[4] - s2[4];
    block[5] = s1[5] - s2[5];
    block[6] = s1[6] - s2[6];
    block[7] = s1[7] - s2[7];
/* put_pixels_clamped_c(): write DCTELEM values out as 8-bit pixels,
 * clamping to 0..255 through the `cm` (cropTbl) lookup.
 * NOTE(review): truncated extract — the parameter list continuation,
 * the row loop, pointer advances and closing braces are missing; only
 * one row's unrolled store is visible. */
static void put_pixels_clamped_c(const DCTELEM *block, UINT8 *restrict pixels,
    UINT8 *cm = cropTbl + MAX_NEG_CROP;
    /* read the pixels */
    pixels[0] = cm[block[0]];
    pixels[1] = cm[block[1]];
    pixels[2] = cm[block[2]];
    pixels[3] = cm[block[3]];
    pixels[4] = cm[block[4]];
    pixels[5] = cm[block[5]];
    pixels[6] = cm[block[6]];
    pixels[7] = cm[block[7]];
/* add_pixels_clamped_c(): add DCTELEM residuals onto existing pixels,
 * clamping the result to 0..255 through the `cm` (cropTbl) lookup.
 * NOTE(review): truncated extract — the parameter list continuation,
 * the row loop, pointer advances and closing braces are missing; only
 * one row's unrolled add is visible. */
static void add_pixels_clamped_c(const DCTELEM *block, UINT8 *restrict pixels,
    UINT8 *cm = cropTbl + MAX_NEG_CROP;
    /* read the pixels */
    pixels[0] = cm[pixels[0] + block[0]];
    pixels[1] = cm[pixels[1] + block[1]];
    pixels[2] = cm[pixels[2] + block[2]];
    pixels[3] = cm[pixels[3] + block[3]];
    pixels[4] = cm[pixels[4] + block[4]];
    pixels[5] = cm[pixels[5] + block[5]];
    pixels[6] = cm[pixels[6] + block[6]];
    pixels[7] = cm[pixels[7] + block[7]];
/*
 * PIXOP2 (64-bit-long variant): generates the put/avg pixel-copy and
 * halfpel-average primitives operating on whole 64-bit words, using the
 * classic bit-twiddled pairwise averages
 *   rounded:   (a|b) - (((a^b) & 0xFEFE...) >> 1)
 *   truncated: (a&b) + (((a^b) & 0xFEFE...) >> 1)
 * and, for the 4-way xy2 averages, the l/h split of each byte into its
 * low 2 bits (l0/l1, plus a rounding constant) and high 6 bits (h0/h1).
 * NOTE(review): this extract is truncated — most generated functions
 * are missing loop heads, pointer advances and closing braces; also the
 * first function is spelled `_pixels` while the CALL_2X_PIXELS lines at
 * the bottom reference `_pixels_c` — the `_c` suffix (and more of that
 * line) was likely lost in extraction; verify against the original.
 * Comments are added above the #define only: inserting lines inside a
 * backslash-continued macro would change the code.
 */
#define PIXOP2(OPNAME, OP) \
static void OPNAME ## _pixels(uint8_t *block, const uint8_t *pixels, int line_size, int h)\
    OP(*((uint64_t*)block), LD64(pixels));\
static void OPNAME ## _no_rnd_pixels_x2_c(uint8_t *block, const uint8_t *pixels, int line_size, int h)\
    const uint64_t a= LD64(pixels  );\
    const uint64_t b= LD64(pixels+1);\
    OP(*((uint64_t*)block), (a&b) + (((a^b)&0xFEFEFEFEFEFEFEFEULL)>>1));\
static void OPNAME ## _pixels_x2_c(uint8_t *block, const uint8_t *pixels, int line_size, int h)\
    const uint64_t a= LD64(pixels  );\
    const uint64_t b= LD64(pixels+1);\
    OP(*((uint64_t*)block), (a|b) - (((a^b)&0xFEFEFEFEFEFEFEFEULL)>>1));\
static void OPNAME ## _no_rnd_pixels_y2_c(uint8_t *block, const uint8_t *pixels, int line_size, int h)\
    const uint64_t a= LD64(pixels          );\
    const uint64_t b= LD64(pixels+line_size);\
    OP(*((uint64_t*)block), (a&b) + (((a^b)&0xFEFEFEFEFEFEFEFEULL)>>1));\
static void OPNAME ## _pixels_y2_c(uint8_t *block, const uint8_t *pixels, int line_size, int h)\
    const uint64_t a= LD64(pixels          );\
    const uint64_t b= LD64(pixels+line_size);\
    OP(*((uint64_t*)block), (a|b) - (((a^b)&0xFEFEFEFEFEFEFEFEULL)>>1));\
static void OPNAME ## _pixels_xy2_c(uint8_t *block, const uint8_t *pixels, int line_size, int h)\
    const uint64_t a= LD64(pixels  );\
    const uint64_t b= LD64(pixels+1);\
    uint64_t l0=  (a&0x0303030303030303ULL)\
                + (b&0x0303030303030303ULL)\
                + 0x0202020202020202ULL;\
    uint64_t h0= ((a&0xFCFCFCFCFCFCFCFCULL)>>2)\
               + ((b&0xFCFCFCFCFCFCFCFCULL)>>2);\
    for(i=0; i<h; i+=2){\
        uint64_t a= LD64(pixels  );\
        uint64_t b= LD64(pixels+1);\
        l1=  (a&0x0303030303030303ULL)\
           + (b&0x0303030303030303ULL);\
        h1= ((a&0xFCFCFCFCFCFCFCFCULL)>>2)\
          + ((b&0xFCFCFCFCFCFCFCFCULL)>>2);\
        OP(*((uint64_t*)block), h0+h1+(((l0+l1)>>2)&0x0F0F0F0F0F0F0F0FULL));\
        l0=  (a&0x0303030303030303ULL)\
           + (b&0x0303030303030303ULL)\
           + 0x0202020202020202ULL;\
        h0= ((a&0xFCFCFCFCFCFCFCFCULL)>>2)\
          + ((b&0xFCFCFCFCFCFCFCFCULL)>>2);\
        OP(*((uint64_t*)block), h0+h1+(((l0+l1)>>2)&0x0F0F0F0F0F0F0F0FULL));\
static void OPNAME ## _no_rnd_pixels_xy2_c(uint8_t *block, const uint8_t *pixels, int line_size, int h)\
    const uint64_t a= LD64(pixels  );\
    const uint64_t b= LD64(pixels+1);\
    uint64_t l0=  (a&0x0303030303030303ULL)\
                + (b&0x0303030303030303ULL)\
                + 0x0101010101010101ULL;\
    uint64_t h0= ((a&0xFCFCFCFCFCFCFCFCULL)>>2)\
               + ((b&0xFCFCFCFCFCFCFCFCULL)>>2);\
    for(i=0; i<h; i+=2){\
        uint64_t a= LD64(pixels  );\
        uint64_t b= LD64(pixels+1);\
        l1=  (a&0x0303030303030303ULL)\
           + (b&0x0303030303030303ULL);\
        h1= ((a&0xFCFCFCFCFCFCFCFCULL)>>2)\
          + ((b&0xFCFCFCFCFCFCFCFCULL)>>2);\
        OP(*((uint64_t*)block), h0+h1+(((l0+l1)>>2)&0x0F0F0F0F0F0F0F0FULL));\
        l0=  (a&0x0303030303030303ULL)\
           + (b&0x0303030303030303ULL)\
           + 0x0101010101010101ULL;\
        h0= ((a&0xFCFCFCFCFCFCFCFCULL)>>2)\
          + ((b&0xFCFCFCFCFCFCFCFCULL)>>2);\
        OP(*((uint64_t*)block), h0+h1+(((l0+l1)>>2)&0x0F0F0F0F0F0F0F0FULL));\
CALL_2X_PIXELS(OPNAME ## _pixels16_c    , OPNAME ## _pixels_c    , 8)\
CALL_2X_PIXELS(OPNAME ## _pixels16_x2_c , OPNAME ## _pixels_x2_c , 8)\
CALL_2X_PIXELS(OPNAME ## _pixels16_y2_c , OPNAME ## _pixels_y2_c , 8)\
CALL_2X_PIXELS(OPNAME ## _pixels16_xy2_c, OPNAME ## _pixels_xy2_c, 8)\
CALL_2X_PIXELS(OPNAME ## _no_rnd_pixels16_x2_c , OPNAME ## _no_rnd_pixels_x2_c , 8)\
CALL_2X_PIXELS(OPNAME ## _no_rnd_pixels16_y2_c , OPNAME ## _no_rnd_pixels_y2_c , 8)\
CALL_2X_PIXELS(OPNAME ## _no_rnd_pixels16_xy2_c, OPNAME ## _no_rnd_pixels_xy2_c, 8)
/* 64-bit rounded average used as OP by the avg variants above */
#define op_avg(a, b) a = ( ((a)|(b)) - ((((a)^(b))&0xFEFEFEFEFEFEFEFEULL)>>1) )
#else // 64 bit variant
/*
 * PIXOP2 (32-bit variant): same primitives as the 64-bit branch above
 * but built from pairs of 32-bit word operations, plus the _l2/_l4
 * helpers that average 2 or 4 source rows (used by the qpel code).
 * Averaging identities: rounded (a|b) - (((a^b)&0xFEFEFEFE)>>1),
 * truncated (a&b) + (((a^b)&0xFEFEFEFE)>>1); the _l4/xy2 4-way average
 * splits each byte into low 2 bits (l0/l1 + rounding constant) and high
 * 6 bits (h0/h1).
 * NOTE(review): this extract is truncated — several bodies are missing
 * loop heads, the `+ (b&0x03030303UL) + <round>` continuation lines of
 * the l0/l1 sums, pointer advances and closing braces.  Comments are
 * added above the #define only: inserting lines inside a
 * backslash-continued macro would change the code.
 */
#define PIXOP2(OPNAME, OP) \
static void OPNAME ## _pixels8_c(uint8_t *block, const uint8_t *pixels, int line_size, int h){\
    OP(*((uint32_t*)(block  )), LD32(pixels  ));\
    OP(*((uint32_t*)(block+4)), LD32(pixels+4));\
static inline void OPNAME ## _no_rnd_pixels8_c(uint8_t *block, const uint8_t *pixels, int line_size, int h){\
    OPNAME ## _pixels8_c(block, pixels, line_size, h);\
static inline void OPNAME ## _no_rnd_pixels8_l2(uint8_t *dst, const uint8_t *src1, const uint8_t *src2, int dst_stride, \
                                                int src_stride1, int src_stride2, int h){\
        a= LD32(&src1[i*src_stride1  ]);\
        b= LD32(&src2[i*src_stride2  ]);\
        OP(*((uint32_t*)&dst[i*dst_stride  ]), (a&b) + (((a^b)&0xFEFEFEFEUL)>>1));\
        a= LD32(&src1[i*src_stride1+4]);\
        b= LD32(&src2[i*src_stride2+4]);\
        OP(*((uint32_t*)&dst[i*dst_stride+4]), (a&b) + (((a^b)&0xFEFEFEFEUL)>>1));\
static inline void OPNAME ## _pixels8_l2(uint8_t *dst, const uint8_t *src1, const uint8_t *src2, int dst_stride, \
                                         int src_stride1, int src_stride2, int h){\
        a= LD32(&src1[i*src_stride1  ]);\
        b= LD32(&src2[i*src_stride2  ]);\
        OP(*((uint32_t*)&dst[i*dst_stride  ]), (a|b) - (((a^b)&0xFEFEFEFEUL)>>1));\
        a= LD32(&src1[i*src_stride1+4]);\
        b= LD32(&src2[i*src_stride2+4]);\
        OP(*((uint32_t*)&dst[i*dst_stride+4]), (a|b) - (((a^b)&0xFEFEFEFEUL)>>1));\
static inline void OPNAME ## _pixels16_l2(uint8_t *dst, const uint8_t *src1, const uint8_t *src2, int dst_stride, \
                                          int src_stride1, int src_stride2, int h){\
    OPNAME ## _pixels8_l2(dst  , src1  , src2  , dst_stride, src_stride1, src_stride2, h);\
    OPNAME ## _pixels8_l2(dst+8, src1+8, src2+8, dst_stride, src_stride1, src_stride2, h);\
static inline void OPNAME ## _no_rnd_pixels16_l2(uint8_t *dst, const uint8_t *src1, const uint8_t *src2, int dst_stride, \
                                                 int src_stride1, int src_stride2, int h){\
    OPNAME ## _no_rnd_pixels8_l2(dst  , src1  , src2  , dst_stride, src_stride1, src_stride2, h);\
    OPNAME ## _no_rnd_pixels8_l2(dst+8, src1+8, src2+8, dst_stride, src_stride1, src_stride2, h);\
static inline void OPNAME ## _no_rnd_pixels8_x2_c(uint8_t *block, const uint8_t *pixels, int line_size, int h){\
    OPNAME ## _no_rnd_pixels8_l2(block, pixels, pixels+1, line_size, line_size, line_size, h);\
static inline void OPNAME ## _pixels8_x2_c(uint8_t *block, const uint8_t *pixels, int line_size, int h){\
    OPNAME ## _pixels8_l2(block, pixels, pixels+1, line_size, line_size, line_size, h);\
static inline void OPNAME ## _no_rnd_pixels8_y2_c(uint8_t *block, const uint8_t *pixels, int line_size, int h){\
    OPNAME ## _no_rnd_pixels8_l2(block, pixels, pixels+line_size, line_size, line_size, line_size, h);\
static inline void OPNAME ## _pixels8_y2_c(uint8_t *block, const uint8_t *pixels, int line_size, int h){\
    OPNAME ## _pixels8_l2(block, pixels, pixels+line_size, line_size, line_size, line_size, h);\
static inline void OPNAME ## _pixels8_l4(uint8_t *dst, const uint8_t *src1, uint8_t *src2, uint8_t *src3, uint8_t *src4,\
                 int dst_stride, int src_stride1, int src_stride2,int src_stride3,int src_stride4, int h){\
        uint32_t a, b, c, d, l0, l1, h0, h1;\
        a= LD32(&src1[i*src_stride1]);\
        b= LD32(&src2[i*src_stride2]);\
        c= LD32(&src3[i*src_stride3]);\
        d= LD32(&src4[i*src_stride4]);\
        l0=  (a&0x03030303UL)\
        h0= ((a&0xFCFCFCFCUL)>>2)\
          + ((b&0xFCFCFCFCUL)>>2);\
        l1=  (c&0x03030303UL)\
        h1= ((c&0xFCFCFCFCUL)>>2)\
          + ((d&0xFCFCFCFCUL)>>2);\
        OP(*((uint32_t*)&dst[i*dst_stride]), h0+h1+(((l0+l1)>>2)&0x0F0F0F0FUL));\
        a= LD32(&src1[i*src_stride1+4]);\
        b= LD32(&src2[i*src_stride2+4]);\
        c= LD32(&src3[i*src_stride3+4]);\
        d= LD32(&src4[i*src_stride4+4]);\
        l0=  (a&0x03030303UL)\
        h0= ((a&0xFCFCFCFCUL)>>2)\
          + ((b&0xFCFCFCFCUL)>>2);\
        l1=  (c&0x03030303UL)\
        h1= ((c&0xFCFCFCFCUL)>>2)\
          + ((d&0xFCFCFCFCUL)>>2);\
        OP(*((uint32_t*)&dst[i*dst_stride+4]), h0+h1+(((l0+l1)>>2)&0x0F0F0F0FUL));\
static inline void OPNAME ## _no_rnd_pixels8_l4(uint8_t *dst, const uint8_t *src1, uint8_t *src2, uint8_t *src3, uint8_t *src4,\
                 int dst_stride, int src_stride1, int src_stride2,int src_stride3,int src_stride4, int h){\
        uint32_t a, b, c, d, l0, l1, h0, h1;\
        a= LD32(&src1[i*src_stride1]);\
        b= LD32(&src2[i*src_stride2]);\
        c= LD32(&src3[i*src_stride3]);\
        d= LD32(&src4[i*src_stride4]);\
        l0=  (a&0x03030303UL)\
        h0= ((a&0xFCFCFCFCUL)>>2)\
          + ((b&0xFCFCFCFCUL)>>2);\
        l1=  (c&0x03030303UL)\
        h1= ((c&0xFCFCFCFCUL)>>2)\
          + ((d&0xFCFCFCFCUL)>>2);\
        OP(*((uint32_t*)&dst[i*dst_stride]), h0+h1+(((l0+l1)>>2)&0x0F0F0F0FUL));\
        a= LD32(&src1[i*src_stride1+4]);\
        b= LD32(&src2[i*src_stride2+4]);\
        c= LD32(&src3[i*src_stride3+4]);\
        d= LD32(&src4[i*src_stride4+4]);\
        l0=  (a&0x03030303UL)\
        h0= ((a&0xFCFCFCFCUL)>>2)\
          + ((b&0xFCFCFCFCUL)>>2);\
        l1=  (c&0x03030303UL)\
        h1= ((c&0xFCFCFCFCUL)>>2)\
          + ((d&0xFCFCFCFCUL)>>2);\
        OP(*((uint32_t*)&dst[i*dst_stride+4]), h0+h1+(((l0+l1)>>2)&0x0F0F0F0FUL));\
static inline void OPNAME ## _pixels16_l4(uint8_t *dst, const uint8_t *src1, uint8_t *src2, uint8_t *src3, uint8_t *src4,\
                 int dst_stride, int src_stride1, int src_stride2,int src_stride3,int src_stride4, int h){\
    OPNAME ## _pixels8_l4(dst  , src1  , src2  , src3  , src4  , dst_stride, src_stride1, src_stride2, src_stride3, src_stride4, h);\
    OPNAME ## _pixels8_l4(dst+8, src1+8, src2+8, src3+8, src4+8, dst_stride, src_stride1, src_stride2, src_stride3, src_stride4, h);\
static inline void OPNAME ## _no_rnd_pixels16_l4(uint8_t *dst, const uint8_t *src1, uint8_t *src2, uint8_t *src3, uint8_t *src4,\
                 int dst_stride, int src_stride1, int src_stride2,int src_stride3,int src_stride4, int h){\
    OPNAME ## _no_rnd_pixels8_l4(dst  , src1  , src2  , src3  , src4  , dst_stride, src_stride1, src_stride2, src_stride3, src_stride4, h);\
    OPNAME ## _no_rnd_pixels8_l4(dst+8, src1+8, src2+8, src3+8, src4+8, dst_stride, src_stride1, src_stride2, src_stride3, src_stride4, h);\
static inline void OPNAME ## _pixels8_xy2_c(uint8_t *block, const uint8_t *pixels, int line_size, int h)\
    const uint32_t a= LD32(pixels  );\
    const uint32_t b= LD32(pixels+1);\
    uint32_t l0=  (a&0x03030303UL)\
    uint32_t h0= ((a&0xFCFCFCFCUL)>>2)\
               + ((b&0xFCFCFCFCUL)>>2);\
    for(i=0; i<h; i+=2){\
        uint32_t a= LD32(pixels  );\
        uint32_t b= LD32(pixels+1);\
        l1=  (a&0x03030303UL)\
        h1= ((a&0xFCFCFCFCUL)>>2)\
          + ((b&0xFCFCFCFCUL)>>2);\
        OP(*((uint32_t*)block), h0+h1+(((l0+l1)>>2)&0x0F0F0F0FUL));\
        l0=  (a&0x03030303UL)\
        h0= ((a&0xFCFCFCFCUL)>>2)\
          + ((b&0xFCFCFCFCUL)>>2);\
        OP(*((uint32_t*)block), h0+h1+(((l0+l1)>>2)&0x0F0F0F0FUL));\
    pixels+=4-line_size*(h+1);\
    block +=4-line_size*h;\
static inline void OPNAME ## _no_rnd_pixels8_xy2_c(uint8_t *block, const uint8_t *pixels, int line_size, int h)\
    const uint32_t a= LD32(pixels  );\
    const uint32_t b= LD32(pixels+1);\
    uint32_t l0=  (a&0x03030303UL)\
    uint32_t h0= ((a&0xFCFCFCFCUL)>>2)\
               + ((b&0xFCFCFCFCUL)>>2);\
    for(i=0; i<h; i+=2){\
        uint32_t a= LD32(pixels  );\
        uint32_t b= LD32(pixels+1);\
        l1=  (a&0x03030303UL)\
        h1= ((a&0xFCFCFCFCUL)>>2)\
          + ((b&0xFCFCFCFCUL)>>2);\
        OP(*((uint32_t*)block), h0+h1+(((l0+l1)>>2)&0x0F0F0F0FUL));\
        l0=  (a&0x03030303UL)\
        h0= ((a&0xFCFCFCFCUL)>>2)\
          + ((b&0xFCFCFCFCUL)>>2);\
        OP(*((uint32_t*)block), h0+h1+(((l0+l1)>>2)&0x0F0F0F0FUL));\
    pixels+=4-line_size*(h+1);\
    block +=4-line_size*h;\
CALL_2X_PIXELS(OPNAME ## _pixels16_c    , OPNAME ## _pixels8_c    , 8)\
CALL_2X_PIXELS(OPNAME ## _pixels16_x2_c , OPNAME ## _pixels8_x2_c , 8)\
CALL_2X_PIXELS(OPNAME ## _pixels16_y2_c , OPNAME ## _pixels8_y2_c , 8)\
CALL_2X_PIXELS(OPNAME ## _pixels16_xy2_c, OPNAME ## _pixels8_xy2_c, 8)\
CALL_2X_PIXELS(OPNAME ## _no_rnd_pixels16_c     , OPNAME ## _pixels8_c         , 8)\
CALL_2X_PIXELS(OPNAME ## _no_rnd_pixels16_x2_c , OPNAME ## _no_rnd_pixels8_x2_c , 8)\
CALL_2X_PIXELS(OPNAME ## _no_rnd_pixels16_y2_c , OPNAME ## _no_rnd_pixels8_y2_c , 8)\
CALL_2X_PIXELS(OPNAME ## _no_rnd_pixels16_xy2_c, OPNAME ## _no_rnd_pixels8_xy2_c, 8)\
/* 32-bit rounded average / plain store, used as OP by PIXOP2 */
#define op_avg(a, b) a = ( ((a)|(b)) - ((((a)^(b))&0xFEFEFEFEUL)>>1) )
#define op_put(a, b) a = b
/* avg2()/avg4(): rounding averages of 2 and 4 pixel values, used by the
 * scalar motion-compensation code.
 * Fix: macro arguments are now fully parenthesized in the expansion so
 * an expression argument (e.g. `avg2(a, b ? c : d)`) can no longer bind
 * incorrectly against the '+' operators inside; the expansion is
 * otherwise identical for all existing callers. */
#define avg2(a,b) (((a)+(b)+1)>>1)
#define avg4(a,b,c,d) (((a)+(b)+(c)+(d)+2)>>2)
/* gmc1_c(): single-vector global motion compensation with 1/16-pel
 * accuracy — bilinear interpolation of an 8-wide block with weights
 * A..D derived from the fractional position (x16, y16); `rounder` is
 * added before the >>8 normalization (A+B+C+D == 256).
 * NOTE(review): truncated extract — the h-row loop, the dst/src line
 * advances and the closing braces are missing here. */
static void gmc1_c(UINT8 *dst, UINT8 *src, int stride, int h, int x16, int y16, int rounder)
    const int A=(16-x16)*(16-y16);
    const int B=(   x16)*(16-y16);
    const int C=(16-x16)*(   y16);
    const int D=(   x16)*(   y16);
    dst[0]= (A*src[0] + B*src[1] + C*src[stride+0] + D*src[stride+1] + rounder)>>8;
    dst[1]= (A*src[1] + B*src[2] + C*src[stride+1] + D*src[stride+2] + rounder)>>8;
    dst[2]= (A*src[2] + B*src[3] + C*src[stride+2] + D*src[stride+3] + rounder)>>8;
    dst[3]= (A*src[3] + B*src[4] + C*src[stride+3] + D*src[stride+4] + rounder)>>8;
    dst[4]= (A*src[4] + B*src[5] + C*src[stride+4] + D*src[stride+5] + rounder)>>8;
    dst[5]= (A*src[5] + B*src[6] + C*src[stride+5] + D*src[stride+6] + rounder)>>8;
    dst[6]= (A*src[6] + B*src[7] + C*src[stride+6] + D*src[stride+7] + rounder)>>8;
    dst[7]= (A*src[7] + B*src[8] + C*src[stride+7] + D*src[stride+8] + rounder)>>8;
/* gmc_c(): general (affine) global motion compensation.  The source
 * position of each destination pixel is driven by the (dxx,dxy,dyx,dyy)
 * increments at 1/(1<<shift)-pel precision (s == 1<<shift).  When the
 * integer position lies inside the picture a bilinear mix of 4 source
 * pixels is produced; positions past one edge degrade to a 1-D
 * interpolation, and past both edges to a plain clamped fetch through
 * clip().
 * NOTE(review): truncated extract — the y-loop head, the ox/oy
 * accumulation and frac_x/frac_y derivation, the `+ r) >> (shift*2)`
 * rounding tails of the interpolation expressions, and several closing
 * braces are missing from this extract; comments describe only the
 * visible structure. */
static void gmc_c(UINT8 *dst, UINT8 *src, int stride, int h, int ox, int oy,
                  int dxx, int dxy, int dyx, int dyy, int shift, int r, int width, int height)
    const int s= 1<<shift;
    for(x=0; x<8; x++){ //XXX FIXME optimize
        int src_x, src_y, frac_x, frac_y, index;
        /* unsigned compare folds the (0 <= v && v < limit) range test */
        if((unsigned)src_x < width){
            if((unsigned)src_y < height){
                index= src_x + src_y*stride;
                dst[y*stride + x]= ( (  src[index         ]*(s-frac_x)
                                      + src[index       +1]*   frac_x )*(s-frac_y)
                                   + (  src[index+stride  ]*(s-frac_x)
                                      + src[index+stride+1]*   frac_x )*   frac_y
                /* y outside: clamp the row, interpolate in x only */
                index= src_x + clip(src_y, 0, height)*stride;
                dst[y*stride + x]= ( (  src[index         ]*(s-frac_x)
                                      + src[index       +1]*   frac_x )*s
            if((unsigned)src_y < height){
                /* x outside: clamp the column, interpolate in y only */
                index= clip(src_x, 0, width) + src_y*stride;
                dst[y*stride + x]= ( (  src[index         ]*(s-frac_y)
                                      + src[index+stride  ]*   frac_y )*s
                /* both outside: clamped nearest fetch */
                index= clip(src_x, 0, width) + clip(src_y, 0, height)*stride;
                dst[y*stride + x]= src[index        ];
/* copy_block17(): copy h rows of a (nominally 17-wide, per the name)
 * block using unaligned 32-bit loads/stores.
 * NOTE(review): truncated extract — only 16 of the 17 bytes per row are
 * stored here; the 17th-byte store, the row loop and the dst/src stride
 * advances are missing. */
static inline void copy_block17(UINT8 *dst, UINT8 *src, int dstStride, int srcStride, int h)
    ST32(dst   , LD32(src   ));
    ST32(dst+4 , LD32(src+4 ));
    ST32(dst+8 , LD32(src+8 ));
    ST32(dst+12, LD32(src+12));
/* copy_block9(): copy h rows of a (nominally 9-wide, per the name)
 * block using unaligned 32-bit loads/stores.
 * NOTE(review): truncated extract — only 8 of the 9 bytes per row are
 * stored here; the 9th-byte store, the row loop and the dst/src stride
 * advances are missing. */
static inline void copy_block9(UINT8 *dst, UINT8 *src, int dstStride, int srcStride, int h)
    ST32(dst   , LD32(src   ));
    ST32(dst+4 , LD32(src+4 ));
/*
 * QPEL_MC: generates one rounding-mode's worth of MPEG-4 quarter-pel
 * motion-compensation functions.  The h/v lowpass kernels apply the
 * (20, -6, 3, -1)-weighted FIR visible below, with taps mirrored at the
 * block edges (note the repeated src[8]/src[16] terms near the ends);
 * `cm` (cropTbl + MAX_NEG_CROP) is set up for clipping, though its use
 * sites inside OP are not visible in this extract.  The qpelN_mcXY
 * entry points then combine lowpass passes, copy_block9 edge-extended
 * copies, and the pixelsN_l2/_l4 averages to realize each quarter-pel
 * position; the *_old_c variants are retained alternative formulations.
 * NOTE(review): this extract is truncated — loop heads, src/dst
 * advances, local buffer declarations (half/full/halfH/halfV/halfHV)
 * and closing braces are missing, and the macro itself continues past
 * the end of this chunk (qpel8_mc33_c is cut mid-body).  Comments are
 * added above the #define only: inserting lines inside a
 * backslash-continued macro would change the code.
 */
#define QPEL_MC(r, OPNAME, RND, OP) \
static void OPNAME ## mpeg4_qpel8_h_lowpass(UINT8 *dst, UINT8 *src, int dstStride, int srcStride, int h){\
    UINT8 *cm = cropTbl + MAX_NEG_CROP;\
    OP(dst[0], (src[0]+src[1])*20 - (src[0]+src[2])*6 + (src[1]+src[3])*3 - (src[2]+src[4]));\
    OP(dst[1], (src[1]+src[2])*20 - (src[0]+src[3])*6 + (src[0]+src[4])*3 - (src[1]+src[5]));\
    OP(dst[2], (src[2]+src[3])*20 - (src[1]+src[4])*6 + (src[0]+src[5])*3 - (src[0]+src[6]));\
    OP(dst[3], (src[3]+src[4])*20 - (src[2]+src[5])*6 + (src[1]+src[6])*3 - (src[0]+src[7]));\
    OP(dst[4], (src[4]+src[5])*20 - (src[3]+src[6])*6 + (src[2]+src[7])*3 - (src[1]+src[8]));\
    OP(dst[5], (src[5]+src[6])*20 - (src[4]+src[7])*6 + (src[3]+src[8])*3 - (src[2]+src[8]));\
    OP(dst[6], (src[6]+src[7])*20 - (src[5]+src[8])*6 + (src[4]+src[8])*3 - (src[3]+src[7]));\
    OP(dst[7], (src[7]+src[8])*20 - (src[6]+src[8])*6 + (src[5]+src[7])*3 - (src[4]+src[6]));\
static void OPNAME ## mpeg4_qpel8_v_lowpass(UINT8 *dst, UINT8 *src, int dstStride, int srcStride){\
    UINT8 *cm = cropTbl + MAX_NEG_CROP;\
    const int src0= src[0*srcStride];\
    const int src1= src[1*srcStride];\
    const int src2= src[2*srcStride];\
    const int src3= src[3*srcStride];\
    const int src4= src[4*srcStride];\
    const int src5= src[5*srcStride];\
    const int src6= src[6*srcStride];\
    const int src7= src[7*srcStride];\
    const int src8= src[8*srcStride];\
    OP(dst[0*dstStride], (src0+src1)*20 - (src0+src2)*6 + (src1+src3)*3 - (src2+src4));\
    OP(dst[1*dstStride], (src1+src2)*20 - (src0+src3)*6 + (src0+src4)*3 - (src1+src5));\
    OP(dst[2*dstStride], (src2+src3)*20 - (src1+src4)*6 + (src0+src5)*3 - (src0+src6));\
    OP(dst[3*dstStride], (src3+src4)*20 - (src2+src5)*6 + (src1+src6)*3 - (src0+src7));\
    OP(dst[4*dstStride], (src4+src5)*20 - (src3+src6)*6 + (src2+src7)*3 - (src1+src8));\
    OP(dst[5*dstStride], (src5+src6)*20 - (src4+src7)*6 + (src3+src8)*3 - (src2+src8));\
    OP(dst[6*dstStride], (src6+src7)*20 - (src5+src8)*6 + (src4+src8)*3 - (src3+src7));\
    OP(dst[7*dstStride], (src7+src8)*20 - (src6+src8)*6 + (src5+src7)*3 - (src4+src6));\
static void OPNAME ## mpeg4_qpel16_h_lowpass(UINT8 *dst, UINT8 *src, int dstStride, int srcStride, int h){\
    UINT8 *cm = cropTbl + MAX_NEG_CROP;\
    OP(dst[ 0], (src[ 0]+src[ 1])*20 - (src[ 0]+src[ 2])*6 + (src[ 1]+src[ 3])*3 - (src[ 2]+src[ 4]));\
    OP(dst[ 1], (src[ 1]+src[ 2])*20 - (src[ 0]+src[ 3])*6 + (src[ 0]+src[ 4])*3 - (src[ 1]+src[ 5]));\
    OP(dst[ 2], (src[ 2]+src[ 3])*20 - (src[ 1]+src[ 4])*6 + (src[ 0]+src[ 5])*3 - (src[ 0]+src[ 6]));\
    OP(dst[ 3], (src[ 3]+src[ 4])*20 - (src[ 2]+src[ 5])*6 + (src[ 1]+src[ 6])*3 - (src[ 0]+src[ 7]));\
    OP(dst[ 4], (src[ 4]+src[ 5])*20 - (src[ 3]+src[ 6])*6 + (src[ 2]+src[ 7])*3 - (src[ 1]+src[ 8]));\
    OP(dst[ 5], (src[ 5]+src[ 6])*20 - (src[ 4]+src[ 7])*6 + (src[ 3]+src[ 8])*3 - (src[ 2]+src[ 9]));\
    OP(dst[ 6], (src[ 6]+src[ 7])*20 - (src[ 5]+src[ 8])*6 + (src[ 4]+src[ 9])*3 - (src[ 3]+src[10]));\
    OP(dst[ 7], (src[ 7]+src[ 8])*20 - (src[ 6]+src[ 9])*6 + (src[ 5]+src[10])*3 - (src[ 4]+src[11]));\
    OP(dst[ 8], (src[ 8]+src[ 9])*20 - (src[ 7]+src[10])*6 + (src[ 6]+src[11])*3 - (src[ 5]+src[12]));\
    OP(dst[ 9], (src[ 9]+src[10])*20 - (src[ 8]+src[11])*6 + (src[ 7]+src[12])*3 - (src[ 6]+src[13]));\
    OP(dst[10], (src[10]+src[11])*20 - (src[ 9]+src[12])*6 + (src[ 8]+src[13])*3 - (src[ 7]+src[14]));\
    OP(dst[11], (src[11]+src[12])*20 - (src[10]+src[13])*6 + (src[ 9]+src[14])*3 - (src[ 8]+src[15]));\
    OP(dst[12], (src[12]+src[13])*20 - (src[11]+src[14])*6 + (src[10]+src[15])*3 - (src[ 9]+src[16]));\
    OP(dst[13], (src[13]+src[14])*20 - (src[12]+src[15])*6 + (src[11]+src[16])*3 - (src[10]+src[16]));\
    OP(dst[14], (src[14]+src[15])*20 - (src[13]+src[16])*6 + (src[12]+src[16])*3 - (src[11]+src[15]));\
    OP(dst[15], (src[15]+src[16])*20 - (src[14]+src[16])*6 + (src[13]+src[15])*3 - (src[12]+src[14]));\
static void OPNAME ## mpeg4_qpel16_v_lowpass(UINT8 *dst, UINT8 *src, int dstStride, int srcStride){\
    UINT8 *cm = cropTbl + MAX_NEG_CROP;\
    const int src0= src[0*srcStride];\
    const int src1= src[1*srcStride];\
    const int src2= src[2*srcStride];\
    const int src3= src[3*srcStride];\
    const int src4= src[4*srcStride];\
    const int src5= src[5*srcStride];\
    const int src6= src[6*srcStride];\
    const int src7= src[7*srcStride];\
    const int src8= src[8*srcStride];\
    const int src9= src[9*srcStride];\
    const int src10= src[10*srcStride];\
    const int src11= src[11*srcStride];\
    const int src12= src[12*srcStride];\
    const int src13= src[13*srcStride];\
    const int src14= src[14*srcStride];\
    const int src15= src[15*srcStride];\
    const int src16= src[16*srcStride];\
    OP(dst[ 0*dstStride], (src0 +src1 )*20 - (src0 +src2 )*6 + (src1 +src3 )*3 - (src2 +src4 ));\
    OP(dst[ 1*dstStride], (src1 +src2 )*20 - (src0 +src3 )*6 + (src0 +src4 )*3 - (src1 +src5 ));\
    OP(dst[ 2*dstStride], (src2 +src3 )*20 - (src1 +src4 )*6 + (src0 +src5 )*3 - (src0 +src6 ));\
    OP(dst[ 3*dstStride], (src3 +src4 )*20 - (src2 +src5 )*6 + (src1 +src6 )*3 - (src0 +src7 ));\
    OP(dst[ 4*dstStride], (src4 +src5 )*20 - (src3 +src6 )*6 + (src2 +src7 )*3 - (src1 +src8 ));\
    OP(dst[ 5*dstStride], (src5 +src6 )*20 - (src4 +src7 )*6 + (src3 +src8 )*3 - (src2 +src9 ));\
    OP(dst[ 6*dstStride], (src6 +src7 )*20 - (src5 +src8 )*6 + (src4 +src9 )*3 - (src3 +src10));\
    OP(dst[ 7*dstStride], (src7 +src8 )*20 - (src6 +src9 )*6 + (src5 +src10)*3 - (src4 +src11));\
    OP(dst[ 8*dstStride], (src8 +src9 )*20 - (src7 +src10)*6 + (src6 +src11)*3 - (src5 +src12));\
    OP(dst[ 9*dstStride], (src9 +src10)*20 - (src8 +src11)*6 + (src7 +src12)*3 - (src6 +src13));\
    OP(dst[10*dstStride], (src10+src11)*20 - (src9 +src12)*6 + (src8 +src13)*3 - (src7 +src14));\
    OP(dst[11*dstStride], (src11+src12)*20 - (src10+src13)*6 + (src9 +src14)*3 - (src8 +src15));\
    OP(dst[12*dstStride], (src12+src13)*20 - (src11+src14)*6 + (src10+src15)*3 - (src9 +src16));\
    OP(dst[13*dstStride], (src13+src14)*20 - (src12+src15)*6 + (src11+src16)*3 - (src10+src16));\
    OP(dst[14*dstStride], (src14+src15)*20 - (src13+src16)*6 + (src12+src16)*3 - (src11+src15));\
    OP(dst[15*dstStride], (src15+src16)*20 - (src14+src16)*6 + (src13+src15)*3 - (src12+src14));\
static void OPNAME ## qpel8_mc00_c (UINT8 *dst, UINT8 *src, int stride){\
    OPNAME ## pixels8_c(dst, src, stride, 8);\
static void OPNAME ## qpel8_mc10_c(UINT8 *dst, UINT8 *src, int stride){\
    put ## RND ## mpeg4_qpel8_h_lowpass(half, src, 8, stride, 8);\
    OPNAME ## pixels8_l2(dst, src, half, stride, stride, 8, 8);\
static void OPNAME ## qpel8_mc20_c(UINT8 *dst, UINT8 *src, int stride){\
    OPNAME ## mpeg4_qpel8_h_lowpass(dst, src, stride, stride, 8);\
static void OPNAME ## qpel8_mc30_c(UINT8 *dst, UINT8 *src, int stride){\
    put ## RND ## mpeg4_qpel8_h_lowpass(half, src, 8, stride, 8);\
    OPNAME ## pixels8_l2(dst, src+1, half, stride, stride, 8, 8);\
static void OPNAME ## qpel8_mc01_c(UINT8 *dst, UINT8 *src, int stride){\
    copy_block9(full, src, 16, stride, 9);\
    put ## RND ## mpeg4_qpel8_v_lowpass(half, full, 8, 16);\
    OPNAME ## pixels8_l2(dst, full, half, stride, 16, 8, 8);\
static void OPNAME ## qpel8_mc02_c(UINT8 *dst, UINT8 *src, int stride){\
    copy_block9(full, src, 16, stride, 9);\
    OPNAME ## mpeg4_qpel8_v_lowpass(dst, full, stride, 16);\
static void OPNAME ## qpel8_mc03_c(UINT8 *dst, UINT8 *src, int stride){\
    copy_block9(full, src, 16, stride, 9);\
    put ## RND ## mpeg4_qpel8_v_lowpass(half, full, 8, 16);\
    OPNAME ## pixels8_l2(dst, full+16, half, stride, 16, 8, 8);\
void ff_ ## OPNAME ## qpel8_mc11_old_c(UINT8 *dst, UINT8 *src, int stride){\
    copy_block9(full, src, 16, stride, 9);\
    put ## RND ## mpeg4_qpel8_h_lowpass(halfH, full, 8, 16, 9);\
    put ## RND ## mpeg4_qpel8_v_lowpass(halfV, full, 8, 16);\
    put ## RND ## mpeg4_qpel8_v_lowpass(halfHV, halfH, 8, 8);\
    OPNAME ## pixels8_l4(dst, full, halfH, halfV, halfHV, stride, 16, 8, 8, 8, 8);\
static void OPNAME ## qpel8_mc11_c(UINT8 *dst, UINT8 *src, int stride){\
    copy_block9(full, src, 16, stride, 9);\
    put ## RND ## mpeg4_qpel8_h_lowpass(halfH, full, 8, 16, 9);\
    put ## RND ## pixels8_l2(halfH, halfH, full, 8, 8, 16, 9);\
    put ## RND ## mpeg4_qpel8_v_lowpass(halfHV, halfH, 8, 8);\
    OPNAME ## pixels8_l2(dst, halfH, halfHV, stride, 8, 8, 8);\
void ff_ ## OPNAME ## qpel8_mc31_old_c(UINT8 *dst, UINT8 *src, int stride){\
    copy_block9(full, src, 16, stride, 9);\
    put ## RND ## mpeg4_qpel8_h_lowpass(halfH, full, 8, 16, 9);\
    put ## RND ## mpeg4_qpel8_v_lowpass(halfV, full+1, 8, 16);\
    put ## RND ## mpeg4_qpel8_v_lowpass(halfHV, halfH, 8, 8);\
    OPNAME ## pixels8_l4(dst, full+1, halfH, halfV, halfHV, stride, 16, 8, 8, 8, 8);\
static void OPNAME ## qpel8_mc31_c(UINT8 *dst, UINT8 *src, int stride){\
    copy_block9(full, src, 16, stride, 9);\
    put ## RND ## mpeg4_qpel8_h_lowpass(halfH, full, 8, 16, 9);\
    put ## RND ## pixels8_l2(halfH, halfH, full+1, 8, 8, 16, 9);\
    put ## RND ## mpeg4_qpel8_v_lowpass(halfHV, halfH, 8, 8);\
    OPNAME ## pixels8_l2(dst, halfH, halfHV, stride, 8, 8, 8);\
void ff_ ## OPNAME ## qpel8_mc13_old_c(UINT8 *dst, UINT8 *src, int stride){\
    copy_block9(full, src, 16, stride, 9);\
    put ## RND ## mpeg4_qpel8_h_lowpass(halfH, full, 8, 16, 9);\
    put ## RND ## mpeg4_qpel8_v_lowpass(halfV, full, 8, 16);\
    put ## RND ## mpeg4_qpel8_v_lowpass(halfHV, halfH, 8, 8);\
    OPNAME ## pixels8_l4(dst, full+16, halfH+8, halfV, halfHV, stride, 16, 8, 8, 8, 8);\
static void OPNAME ## qpel8_mc13_c(UINT8 *dst, UINT8 *src, int stride){\
    copy_block9(full, src, 16, stride, 9);\
    put ## RND ## mpeg4_qpel8_h_lowpass(halfH, full, 8, 16, 9);\
    put ## RND ## pixels8_l2(halfH, halfH, full, 8, 8, 16, 9);\
    put ## RND ## mpeg4_qpel8_v_lowpass(halfHV, halfH, 8, 8);\
    OPNAME ## pixels8_l2(dst, halfH+8, halfHV, stride, 8, 8, 8);\
void ff_ ## OPNAME ## qpel8_mc33_old_c(UINT8 *dst, UINT8 *src, int stride){\
    copy_block9(full, src, 16, stride, 9);\
    put ## RND ## mpeg4_qpel8_h_lowpass(halfH, full  , 8, 16, 9);\
    put ## RND ## mpeg4_qpel8_v_lowpass(halfV, full+1, 8, 16);\
    put ## RND ## mpeg4_qpel8_v_lowpass(halfHV, halfH, 8, 8);\
    OPNAME ## pixels8_l4(dst, full+17, halfH+8, halfV, halfHV, stride, 16, 8, 8, 8, 8);\
static void OPNAME ## qpel8_mc33_c(UINT8 *dst, UINT8 *src, int stride){\
    copy_block9(full, src, 16, stride, 9);\
    put ## RND ## mpeg4_qpel8_h_lowpass(halfH, full, 8, 16, 9);\
    put ## RND ## pixels8_l2(halfH, halfH, full+1, 8, 8, 16, 9);\
1083 put ## RND ## mpeg4_qpel8_v_lowpass(halfHV, halfH, 8, 8);\
1084 OPNAME ## pixels8_l2(dst, halfH+8, halfHV, stride, 8, 8, 8);\
1086 static void OPNAME ## qpel8_mc21_c(UINT8 *dst, UINT8 *src, int stride){\
1089 put ## RND ## mpeg4_qpel8_h_lowpass(halfH, src, 8, stride, 9);\
1090 put ## RND ## mpeg4_qpel8_v_lowpass(halfHV, halfH, 8, 8);\
1091 OPNAME ## pixels8_l2(dst, halfH, halfHV, stride, 8, 8, 8);\
1093 static void OPNAME ## qpel8_mc23_c(UINT8 *dst, UINT8 *src, int stride){\
1096 put ## RND ## mpeg4_qpel8_h_lowpass(halfH, src, 8, stride, 9);\
1097 put ## RND ## mpeg4_qpel8_v_lowpass(halfHV, halfH, 8, 8);\
1098 OPNAME ## pixels8_l2(dst, halfH+8, halfHV, stride, 8, 8, 8);\
1100 void ff_ ## OPNAME ## qpel8_mc12_old_c(UINT8 *dst, UINT8 *src, int stride){\
1105 copy_block9(full, src, 16, stride, 9);\
1106 put ## RND ## mpeg4_qpel8_h_lowpass(halfH, full, 8, 16, 9);\
1107 put ## RND ## mpeg4_qpel8_v_lowpass(halfV, full, 8, 16);\
1108 put ## RND ## mpeg4_qpel8_v_lowpass(halfHV, halfH, 8, 8);\
1109 OPNAME ## pixels8_l2(dst, halfV, halfHV, stride, 8, 8, 8);\
1111 static void OPNAME ## qpel8_mc12_c(UINT8 *dst, UINT8 *src, int stride){\
1114 copy_block9(full, src, 16, stride, 9);\
1115 put ## RND ## mpeg4_qpel8_h_lowpass(halfH, full, 8, 16, 9);\
1116 put ## RND ## pixels8_l2(halfH, halfH, full, 8, 8, 16, 9);\
1117 OPNAME ## mpeg4_qpel8_v_lowpass(dst, halfH, stride, 8);\
1119 void ff_ ## OPNAME ## qpel8_mc32_old_c(UINT8 *dst, UINT8 *src, int stride){\
1124 copy_block9(full, src, 16, stride, 9);\
1125 put ## RND ## mpeg4_qpel8_h_lowpass(halfH, full, 8, 16, 9);\
1126 put ## RND ## mpeg4_qpel8_v_lowpass(halfV, full+1, 8, 16);\
1127 put ## RND ## mpeg4_qpel8_v_lowpass(halfHV, halfH, 8, 8);\
1128 OPNAME ## pixels8_l2(dst, halfV, halfHV, stride, 8, 8, 8);\
1130 static void OPNAME ## qpel8_mc32_c(UINT8 *dst, UINT8 *src, int stride){\
1133 copy_block9(full, src, 16, stride, 9);\
1134 put ## RND ## mpeg4_qpel8_h_lowpass(halfH, full, 8, 16, 9);\
1135 put ## RND ## pixels8_l2(halfH, halfH, full+1, 8, 8, 16, 9);\
1136 OPNAME ## mpeg4_qpel8_v_lowpass(dst, halfH, stride, 8);\
1138 static void OPNAME ## qpel8_mc22_c(UINT8 *dst, UINT8 *src, int stride){\
1140 put ## RND ## mpeg4_qpel8_h_lowpass(halfH, src, 8, stride, 9);\
1141 OPNAME ## mpeg4_qpel8_v_lowpass(dst, halfH, stride, 8);\
1143 static void OPNAME ## qpel16_mc00_c (UINT8 *dst, UINT8 *src, int stride){\
1144 OPNAME ## pixels16_c(dst, src, stride, 16);\
1147 static void OPNAME ## qpel16_mc10_c(UINT8 *dst, UINT8 *src, int stride){\
1149 put ## RND ## mpeg4_qpel16_h_lowpass(half, src, 16, stride, 16);\
1150 OPNAME ## pixels16_l2(dst, src, half, stride, stride, 16, 16);\
1153 static void OPNAME ## qpel16_mc20_c(UINT8 *dst, UINT8 *src, int stride){\
1154 OPNAME ## mpeg4_qpel16_h_lowpass(dst, src, stride, stride, 16);\
1157 static void OPNAME ## qpel16_mc30_c(UINT8 *dst, UINT8 *src, int stride){\
1159 put ## RND ## mpeg4_qpel16_h_lowpass(half, src, 16, stride, 16);\
1160 OPNAME ## pixels16_l2(dst, src+1, half, stride, stride, 16, 16);\
1163 static void OPNAME ## qpel16_mc01_c(UINT8 *dst, UINT8 *src, int stride){\
1166 copy_block17(full, src, 24, stride, 17);\
1167 put ## RND ## mpeg4_qpel16_v_lowpass(half, full, 16, 24);\
1168 OPNAME ## pixels16_l2(dst, full, half, stride, 24, 16, 16);\
1171 static void OPNAME ## qpel16_mc02_c(UINT8 *dst, UINT8 *src, int stride){\
1173 copy_block17(full, src, 24, stride, 17);\
1174 OPNAME ## mpeg4_qpel16_v_lowpass(dst, full, stride, 24);\
1177 static void OPNAME ## qpel16_mc03_c(UINT8 *dst, UINT8 *src, int stride){\
1180 copy_block17(full, src, 24, stride, 17);\
1181 put ## RND ## mpeg4_qpel16_v_lowpass(half, full, 16, 24);\
1182 OPNAME ## pixels16_l2(dst, full+24, half, stride, 24, 16, 16);\
1184 void ff_ ## OPNAME ## qpel16_mc11_old_c(UINT8 *dst, UINT8 *src, int stride){\
1189 copy_block17(full, src, 24, stride, 17);\
1190 put ## RND ## mpeg4_qpel16_h_lowpass(halfH, full, 16, 24, 17);\
1191 put ## RND ## mpeg4_qpel16_v_lowpass(halfV, full, 16, 24);\
1192 put ## RND ## mpeg4_qpel16_v_lowpass(halfHV, halfH, 16, 16);\
1193 OPNAME ## pixels16_l4(dst, full, halfH, halfV, halfHV, stride, 24, 16, 16, 16, 16);\
1195 static void OPNAME ## qpel16_mc11_c(UINT8 *dst, UINT8 *src, int stride){\
1199 copy_block17(full, src, 24, stride, 17);\
1200 put ## RND ## mpeg4_qpel16_h_lowpass(halfH, full, 16, 24, 17);\
1201 put ## RND ## pixels16_l2(halfH, halfH, full, 16, 16, 24, 17);\
1202 put ## RND ## mpeg4_qpel16_v_lowpass(halfHV, halfH, 16, 16);\
1203 OPNAME ## pixels16_l2(dst, halfH, halfHV, stride, 16, 16, 16);\
1205 void ff_ ## OPNAME ## qpel16_mc31_old_c(UINT8 *dst, UINT8 *src, int stride){\
1210 copy_block17(full, src, 24, stride, 17);\
1211 put ## RND ## mpeg4_qpel16_h_lowpass(halfH, full, 16, 24, 17);\
1212 put ## RND ## mpeg4_qpel16_v_lowpass(halfV, full+1, 16, 24);\
1213 put ## RND ## mpeg4_qpel16_v_lowpass(halfHV, halfH, 16, 16);\
1214 OPNAME ## pixels16_l4(dst, full+1, halfH, halfV, halfHV, stride, 24, 16, 16, 16, 16);\
1216 static void OPNAME ## qpel16_mc31_c(UINT8 *dst, UINT8 *src, int stride){\
1220 copy_block17(full, src, 24, stride, 17);\
1221 put ## RND ## mpeg4_qpel16_h_lowpass(halfH, full, 16, 24, 17);\
1222 put ## RND ## pixels16_l2(halfH, halfH, full+1, 16, 16, 24, 17);\
1223 put ## RND ## mpeg4_qpel16_v_lowpass(halfHV, halfH, 16, 16);\
1224 OPNAME ## pixels16_l2(dst, halfH, halfHV, stride, 16, 16, 16);\
1226 void ff_ ## OPNAME ## qpel16_mc13_old_c(UINT8 *dst, UINT8 *src, int stride){\
1231 copy_block17(full, src, 24, stride, 17);\
1232 put ## RND ## mpeg4_qpel16_h_lowpass(halfH, full, 16, 24, 17);\
1233 put ## RND ## mpeg4_qpel16_v_lowpass(halfV, full, 16, 24);\
1234 put ## RND ## mpeg4_qpel16_v_lowpass(halfHV, halfH, 16, 16);\
1235 OPNAME ## pixels16_l4(dst, full+24, halfH+16, halfV, halfHV, stride, 24, 16, 16, 16, 16);\
1237 static void OPNAME ## qpel16_mc13_c(UINT8 *dst, UINT8 *src, int stride){\
1241 copy_block17(full, src, 24, stride, 17);\
1242 put ## RND ## mpeg4_qpel16_h_lowpass(halfH, full, 16, 24, 17);\
1243 put ## RND ## pixels16_l2(halfH, halfH, full, 16, 16, 24, 17);\
1244 put ## RND ## mpeg4_qpel16_v_lowpass(halfHV, halfH, 16, 16);\
1245 OPNAME ## pixels16_l2(dst, halfH+16, halfHV, stride, 16, 16, 16);\
1247 void ff_ ## OPNAME ## qpel16_mc33_old_c(UINT8 *dst, UINT8 *src, int stride){\
1252 copy_block17(full, src, 24, stride, 17);\
1253 put ## RND ## mpeg4_qpel16_h_lowpass(halfH, full , 16, 24, 17);\
1254 put ## RND ## mpeg4_qpel16_v_lowpass(halfV, full+1, 16, 24);\
1255 put ## RND ## mpeg4_qpel16_v_lowpass(halfHV, halfH, 16, 16);\
1256 OPNAME ## pixels16_l4(dst, full+25, halfH+16, halfV, halfHV, stride, 24, 16, 16, 16, 16);\
1258 static void OPNAME ## qpel16_mc33_c(UINT8 *dst, UINT8 *src, int stride){\
1262 copy_block17(full, src, 24, stride, 17);\
1263 put ## RND ## mpeg4_qpel16_h_lowpass(halfH, full, 16, 24, 17);\
1264 put ## RND ## pixels16_l2(halfH, halfH, full+1, 16, 16, 24, 17);\
1265 put ## RND ## mpeg4_qpel16_v_lowpass(halfHV, halfH, 16, 16);\
1266 OPNAME ## pixels16_l2(dst, halfH+16, halfHV, stride, 16, 16, 16);\
1268 static void OPNAME ## qpel16_mc21_c(UINT8 *dst, UINT8 *src, int stride){\
1271 put ## RND ## mpeg4_qpel16_h_lowpass(halfH, src, 16, stride, 17);\
1272 put ## RND ## mpeg4_qpel16_v_lowpass(halfHV, halfH, 16, 16);\
1273 OPNAME ## pixels16_l2(dst, halfH, halfHV, stride, 16, 16, 16);\
1275 static void OPNAME ## qpel16_mc23_c(UINT8 *dst, UINT8 *src, int stride){\
1278 put ## RND ## mpeg4_qpel16_h_lowpass(halfH, src, 16, stride, 17);\
1279 put ## RND ## mpeg4_qpel16_v_lowpass(halfHV, halfH, 16, 16);\
1280 OPNAME ## pixels16_l2(dst, halfH+16, halfHV, stride, 16, 16, 16);\
1282 void ff_ ## OPNAME ## qpel16_mc12_old_c(UINT8 *dst, UINT8 *src, int stride){\
1287 copy_block17(full, src, 24, stride, 17);\
1288 put ## RND ## mpeg4_qpel16_h_lowpass(halfH, full, 16, 24, 17);\
1289 put ## RND ## mpeg4_qpel16_v_lowpass(halfV, full, 16, 24);\
1290 put ## RND ## mpeg4_qpel16_v_lowpass(halfHV, halfH, 16, 16);\
1291 OPNAME ## pixels16_l2(dst, halfV, halfHV, stride, 16, 16, 16);\
1293 static void OPNAME ## qpel16_mc12_c(UINT8 *dst, UINT8 *src, int stride){\
1296 copy_block17(full, src, 24, stride, 17);\
1297 put ## RND ## mpeg4_qpel16_h_lowpass(halfH, full, 16, 24, 17);\
1298 put ## RND ## pixels16_l2(halfH, halfH, full, 16, 16, 24, 17);\
1299 OPNAME ## mpeg4_qpel16_v_lowpass(dst, halfH, stride, 16);\
1301 void ff_ ## OPNAME ## qpel16_mc32_old_c(UINT8 *dst, UINT8 *src, int stride){\
1306 copy_block17(full, src, 24, stride, 17);\
1307 put ## RND ## mpeg4_qpel16_h_lowpass(halfH, full, 16, 24, 17);\
1308 put ## RND ## mpeg4_qpel16_v_lowpass(halfV, full+1, 16, 24);\
1309 put ## RND ## mpeg4_qpel16_v_lowpass(halfHV, halfH, 16, 16);\
1310 OPNAME ## pixels16_l2(dst, halfV, halfHV, stride, 16, 16, 16);\
1312 static void OPNAME ## qpel16_mc32_c(UINT8 *dst, UINT8 *src, int stride){\
1315 copy_block17(full, src, 24, stride, 17);\
1316 put ## RND ## mpeg4_qpel16_h_lowpass(halfH, full, 16, 24, 17);\
1317 put ## RND ## pixels16_l2(halfH, halfH, full+1, 16, 16, 24, 17);\
1318 OPNAME ## mpeg4_qpel16_v_lowpass(dst, halfH, stride, 16);\
1320 static void OPNAME ## qpel16_mc22_c(UINT8 *dst, UINT8 *src, int stride){\
1322 put ## RND ## mpeg4_qpel16_h_lowpass(halfH, src, 16, stride, 17);\
1323 OPNAME ## mpeg4_qpel16_v_lowpass(dst, halfH, stride, 16);\
/* Final-stage pixel ops plugged into the QPEL_MC macro above.
 * cm[] is the clamping table (cropTbl + MAX_NEG_CROP); the filtered value b
 * is scaled down by 32, "+16" giving the rounded and "+15" the truncating
 * ("no_rnd") variant. op_avg additionally averages with the existing
 * destination pixel (with +1 rounding). */
1326 #define op_avg(a, b) a = (((a)+cm[((b) + 16)>>5]+1)>>1)
1327 #define op_avg_no_rnd(a, b) a = (((a)+cm[((b) + 15)>>5])>>1)
1328 #define op_put(a, b) a = cm[((b) + 16)>>5]
1329 #define op_put_no_rnd(a, b) a = cm[((b) + 15)>>5]
/* Instantiate the quarter-pel MC function families: put, put_no_rnd, avg. */
1331 QPEL_MC(0, put_       , _       , op_put)
1332 QPEL_MC(1, put_no_rnd_, _no_rnd_, op_put_no_rnd)
1333 QPEL_MC(0, avg_       , _       , op_avg)
1334 //QPEL_MC(1, avg_no_rnd , _       , op_avg)
1336 #undef op_avg_no_rnd
1338 #undef op_put_no_rnd
/* WMV2 (mspel) horizontal lowpass: each output pixel is the 4-tap filter
 * (-1, 9, 9, -1) of src[i-1..i+2], rounded (+8), scaled by >>4 and clamped
 * through the cropTbl lookup.
 * NOTE(review): the h-row loop header and the dst/src stride advances
 * appear elided in this chunk; only the per-row filter taps are visible. */
1340 static void wmv2_mspel8_h_lowpass(uint8_t *dst, uint8_t *src, int dstStride, int srcStride, int h){
1341 uint8_t *cm = cropTbl + MAX_NEG_CROP;
1345 dst[0]= cm[(9*(src[0] + src[1]) - (src[-1] + src[2]) + 8)>>4];
1346 dst[1]= cm[(9*(src[1] + src[2]) - (src[ 0] + src[3]) + 8)>>4];
1347 dst[2]= cm[(9*(src[2] + src[3]) - (src[ 1] + src[4]) + 8)>>4];
1348 dst[3]= cm[(9*(src[3] + src[4]) - (src[ 2] + src[5]) + 8)>>4];
1349 dst[4]= cm[(9*(src[4] + src[5]) - (src[ 3] + src[6]) + 8)>>4];
1350 dst[5]= cm[(9*(src[5] + src[6]) - (src[ 4] + src[7]) + 8)>>4];
1351 dst[6]= cm[(9*(src[6] + src[7]) - (src[ 5] + src[8]) + 8)>>4];
1352 dst[7]= cm[(9*(src[7] + src[8]) - (src[ 6] + src[9]) + 8)>>4];
/* WMV2 (mspel) vertical lowpass: same (-1, 9, 9, -1)/16 filter applied
 * along a column. Loads the 11 column samples src[-1..9] (at srcStride
 * spacing) once, then writes the 8 filtered outputs.
 * NOTE(review): the w-column loop header and the dst/src advances appear
 * elided in this chunk. */
1358 static void wmv2_mspel8_v_lowpass(uint8_t *dst, uint8_t *src, int dstStride, int srcStride, int w){
1359 uint8_t *cm = cropTbl + MAX_NEG_CROP;
1363 const int src_1= src[ -srcStride];
1364 const int src0 = src[0 ];
1365 const int src1 = src[ srcStride];
1366 const int src2 = src[2*srcStride];
1367 const int src3 = src[3*srcStride];
1368 const int src4 = src[4*srcStride];
1369 const int src5 = src[5*srcStride];
1370 const int src6 = src[6*srcStride];
1371 const int src7 = src[7*srcStride];
1372 const int src8 = src[8*srcStride];
1373 const int src9 = src[9*srcStride];
1374 dst[0*dstStride]= cm[(9*(src0 + src1) - (src_1 + src2) + 8)>>4];
1375 dst[1*dstStride]= cm[(9*(src1 + src2) - (src0 + src3) + 8)>>4];
1376 dst[2*dstStride]= cm[(9*(src2 + src3) - (src1 + src4) + 8)>>4];
1377 dst[3*dstStride]= cm[(9*(src3 + src4) - (src2 + src5) + 8)>>4];
1378 dst[4*dstStride]= cm[(9*(src4 + src5) - (src3 + src6) + 8)>>4];
1379 dst[5*dstStride]= cm[(9*(src5 + src6) - (src4 + src7) + 8)>>4];
1380 dst[6*dstStride]= cm[(9*(src6 + src7) - (src5 + src8) + 8)>>4];
1381 dst[7*dstStride]= cm[(9*(src7 + src8) - (src6 + src9) + 8)>>4];
/* WMV2 (mspel) 8x8 motion-compensation wrappers; the two-digit suffix
 * encodes the horizontal/vertical sub-pel phase.
 * NOTE(review): local temp-buffer declarations (half/halfH/halfV/halfHV)
 * and closing braces appear elided in this chunk. */
/* full-pel: plain copy */
1387 static void put_mspel8_mc00_c (uint8_t *dst, uint8_t *src, int stride){
1388 put_pixels8_c(dst, src, stride, 8);
/* blend of unfiltered source and h-filtered block (quarter-pel phase) */
1391 static void put_mspel8_mc10_c(uint8_t *dst, uint8_t *src, int stride){
1393 wmv2_mspel8_h_lowpass(half, src, 8, stride, 8);
1394 put_pixels8_l2(dst, src, half, stride, stride, 8, 8);
/* horizontal half-pel: filter straight into dst */
1397 static void put_mspel8_mc20_c(uint8_t *dst, uint8_t *src, int stride){
1398 wmv2_mspel8_h_lowpass(dst, src, stride, stride, 8);
/* blend of src+1 and the h-filtered block (other quarter-pel phase) */
1401 static void put_mspel8_mc30_c(uint8_t *dst, uint8_t *src, int stride){
1403 wmv2_mspel8_h_lowpass(half, src, 8, stride, 8);
1404 put_pixels8_l2(dst, src+1, half, stride, stride, 8, 8);
/* vertical half-pel */
1407 static void put_mspel8_mc02_c(uint8_t *dst, uint8_t *src, int stride){
1408 wmv2_mspel8_v_lowpass(dst, src, stride, stride, 8);
/* mixed phases: 11 h-filtered rows (starting one row above) feed the
 * vertical filter; result is averaged with a v-filtered block */
1411 static void put_mspel8_mc12_c(uint8_t *dst, uint8_t *src, int stride){
1415 wmv2_mspel8_h_lowpass(halfH, src-stride, 8, stride, 11);
1416 wmv2_mspel8_v_lowpass(halfV, src, 8, stride, 8);
1417 wmv2_mspel8_v_lowpass(halfHV, halfH+8, 8, 8, 8);
1418 put_pixels8_l2(dst, halfV, halfHV, stride, 8, 8, 8);
/* same, with the vertical reference shifted one pixel right */
1420 static void put_mspel8_mc32_c(uint8_t *dst, uint8_t *src, int stride){
1424 wmv2_mspel8_h_lowpass(halfH, src-stride, 8, stride, 11);
1425 wmv2_mspel8_v_lowpass(halfV, src+1, 8, stride, 8);
1426 wmv2_mspel8_v_lowpass(halfHV, halfH+8, 8, 8, 8);
1427 put_pixels8_l2(dst, halfV, halfHV, stride, 8, 8, 8);
/* centre position: horizontal then vertical filtering */
1429 static void put_mspel8_mc22_c(uint8_t *dst, uint8_t *src, int stride){
1431 wmv2_mspel8_h_lowpass(halfH, src-stride, 8, stride, 11);
1432 wmv2_mspel8_v_lowpass(dst, halfH+8, stride, 8, 8);
/* 16x16 SAD comparators for motion estimation. Each function accumulates
 * the absolute differences of one 16-pixel row into s; the 16-row loop,
 * the pointer advances and the `return s` are elided in this chunk.
 * avg2/avg4 are defined elsewhere in the file -- presumably rounding
 * averages of 2 resp. 4 pixels (verify against their definitions). */
/* reference taken at the full-pel position */
1436 static inline int pix_abs16x16_c(UINT8 *pix1, UINT8 *pix2, int line_size)
1442 s += abs(pix1[0] - pix2[0]);
1443 s += abs(pix1[1] - pix2[1]);
1444 s += abs(pix1[2] - pix2[2]);
1445 s += abs(pix1[3] - pix2[3]);
1446 s += abs(pix1[4] - pix2[4]);
1447 s += abs(pix1[5] - pix2[5]);
1448 s += abs(pix1[6] - pix2[6]);
1449 s += abs(pix1[7] - pix2[7]);
1450 s += abs(pix1[8] - pix2[8]);
1451 s += abs(pix1[9] - pix2[9]);
1452 s += abs(pix1[10] - pix2[10]);
1453 s += abs(pix1[11] - pix2[11]);
1454 s += abs(pix1[12] - pix2[12]);
1455 s += abs(pix1[13] - pix2[13]);
1456 s += abs(pix1[14] - pix2[14]);
1457 s += abs(pix1[15] - pix2[15]);
/* reference interpolated at the horizontal half-pel (avg of x and x+1) */
1464 static int pix_abs16x16_x2_c(UINT8 *pix1, UINT8 *pix2, int line_size)
1470 s += abs(pix1[0] - avg2(pix2[0], pix2[1]));
1471 s += abs(pix1[1] - avg2(pix2[1], pix2[2]));
1472 s += abs(pix1[2] - avg2(pix2[2], pix2[3]));
1473 s += abs(pix1[3] - avg2(pix2[3], pix2[4]));
1474 s += abs(pix1[4] - avg2(pix2[4], pix2[5]));
1475 s += abs(pix1[5] - avg2(pix2[5], pix2[6]));
1476 s += abs(pix1[6] - avg2(pix2[6], pix2[7]));
1477 s += abs(pix1[7] - avg2(pix2[7], pix2[8]));
1478 s += abs(pix1[8] - avg2(pix2[8], pix2[9]));
1479 s += abs(pix1[9] - avg2(pix2[9], pix2[10]));
1480 s += abs(pix1[10] - avg2(pix2[10], pix2[11]));
1481 s += abs(pix1[11] - avg2(pix2[11], pix2[12]));
1482 s += abs(pix1[12] - avg2(pix2[12], pix2[13]));
1483 s += abs(pix1[13] - avg2(pix2[13], pix2[14]));
1484 s += abs(pix1[14] - avg2(pix2[14], pix2[15]));
1485 s += abs(pix1[15] - avg2(pix2[15], pix2[16]));
/* reference interpolated at the vertical half-pel (avg of rows y, y+1) */
1492 static int pix_abs16x16_y2_c(UINT8 *pix1, UINT8 *pix2, int line_size)
1495 UINT8 *pix3 = pix2 + line_size;
1499 s += abs(pix1[0] - avg2(pix2[0], pix3[0]));
1500 s += abs(pix1[1] - avg2(pix2[1], pix3[1]));
1501 s += abs(pix1[2] - avg2(pix2[2], pix3[2]));
1502 s += abs(pix1[3] - avg2(pix2[3], pix3[3]));
1503 s += abs(pix1[4] - avg2(pix2[4], pix3[4]));
1504 s += abs(pix1[5] - avg2(pix2[5], pix3[5]));
1505 s += abs(pix1[6] - avg2(pix2[6], pix3[6]));
1506 s += abs(pix1[7] - avg2(pix2[7], pix3[7]));
1507 s += abs(pix1[8] - avg2(pix2[8], pix3[8]));
1508 s += abs(pix1[9] - avg2(pix2[9], pix3[9]));
1509 s += abs(pix1[10] - avg2(pix2[10], pix3[10]));
1510 s += abs(pix1[11] - avg2(pix2[11], pix3[11]));
1511 s += abs(pix1[12] - avg2(pix2[12], pix3[12]));
1512 s += abs(pix1[13] - avg2(pix2[13], pix3[13]));
1513 s += abs(pix1[14] - avg2(pix2[14], pix3[14]));
1514 s += abs(pix1[15] - avg2(pix2[15], pix3[15]));
/* reference interpolated at the diagonal half-pel (4-pixel average) */
1522 static int pix_abs16x16_xy2_c(UINT8 *pix1, UINT8 *pix2, int line_size)
1525 UINT8 *pix3 = pix2 + line_size;
1529 s += abs(pix1[0] - avg4(pix2[0], pix2[1], pix3[0], pix3[1]));
1530 s += abs(pix1[1] - avg4(pix2[1], pix2[2], pix3[1], pix3[2]));
1531 s += abs(pix1[2] - avg4(pix2[2], pix2[3], pix3[2], pix3[3]));
1532 s += abs(pix1[3] - avg4(pix2[3], pix2[4], pix3[3], pix3[4]));
1533 s += abs(pix1[4] - avg4(pix2[4], pix2[5], pix3[4], pix3[5]));
1534 s += abs(pix1[5] - avg4(pix2[5], pix2[6], pix3[5], pix3[6]));
1535 s += abs(pix1[6] - avg4(pix2[6], pix2[7], pix3[6], pix3[7]));
1536 s += abs(pix1[7] - avg4(pix2[7], pix2[8], pix3[7], pix3[8]));
1537 s += abs(pix1[8] - avg4(pix2[8], pix2[9], pix3[8], pix3[9]));
1538 s += abs(pix1[9] - avg4(pix2[9], pix2[10], pix3[9], pix3[10]));
1539 s += abs(pix1[10] - avg4(pix2[10], pix2[11], pix3[10], pix3[11]));
1540 s += abs(pix1[11] - avg4(pix2[11], pix2[12], pix3[11], pix3[12]));
1541 s += abs(pix1[12] - avg4(pix2[12], pix2[13], pix3[12], pix3[13]));
1542 s += abs(pix1[13] - avg4(pix2[13], pix2[14], pix3[13], pix3[14]));
1543 s += abs(pix1[14] - avg4(pix2[14], pix2[15], pix3[14], pix3[15]));
1544 s += abs(pix1[15] - avg4(pix2[15], pix2[16], pix3[15], pix3[16]));
/* 8x8 SAD comparators -- 8-pixel-row versions of the pix_abs16x16 family
 * above; full-pel / x half-pel / y half-pel / diagonal half-pel.
 * NOTE(review): the 8-row loops, pointer advances and `return s` are
 * elided in this chunk. */
1552 static inline int pix_abs8x8_c(UINT8 *pix1, UINT8 *pix2, int line_size)
1558 s += abs(pix1[0] - pix2[0]);
1559 s += abs(pix1[1] - pix2[1]);
1560 s += abs(pix1[2] - pix2[2]);
1561 s += abs(pix1[3] - pix2[3]);
1562 s += abs(pix1[4] - pix2[4]);
1563 s += abs(pix1[5] - pix2[5]);
1564 s += abs(pix1[6] - pix2[6]);
1565 s += abs(pix1[7] - pix2[7]);
/* horizontal half-pel reference */
1572 static int pix_abs8x8_x2_c(UINT8 *pix1, UINT8 *pix2, int line_size)
1578 s += abs(pix1[0] - avg2(pix2[0], pix2[1]));
1579 s += abs(pix1[1] - avg2(pix2[1], pix2[2]));
1580 s += abs(pix1[2] - avg2(pix2[2], pix2[3]));
1581 s += abs(pix1[3] - avg2(pix2[3], pix2[4]));
1582 s += abs(pix1[4] - avg2(pix2[4], pix2[5]));
1583 s += abs(pix1[5] - avg2(pix2[5], pix2[6]));
1584 s += abs(pix1[6] - avg2(pix2[6], pix2[7]));
1585 s += abs(pix1[7] - avg2(pix2[7], pix2[8]));
/* vertical half-pel reference */
1592 static int pix_abs8x8_y2_c(UINT8 *pix1, UINT8 *pix2, int line_size)
1595 UINT8 *pix3 = pix2 + line_size;
1599 s += abs(pix1[0] - avg2(pix2[0], pix3[0]));
1600 s += abs(pix1[1] - avg2(pix2[1], pix3[1]));
1601 s += abs(pix1[2] - avg2(pix2[2], pix3[2]));
1602 s += abs(pix1[3] - avg2(pix2[3], pix3[3]));
1603 s += abs(pix1[4] - avg2(pix2[4], pix3[4]));
1604 s += abs(pix1[5] - avg2(pix2[5], pix3[5]));
1605 s += abs(pix1[6] - avg2(pix2[6], pix3[6]));
1606 s += abs(pix1[7] - avg2(pix2[7], pix3[7]));
/* diagonal half-pel reference */
1614 static int pix_abs8x8_xy2_c(UINT8 *pix1, UINT8 *pix2, int line_size)
1617 UINT8 *pix3 = pix2 + line_size;
1621 s += abs(pix1[0] - avg4(pix2[0], pix2[1], pix3[0], pix3[1]));
1622 s += abs(pix1[1] - avg4(pix2[1], pix2[2], pix3[1], pix3[2]));
1623 s += abs(pix1[2] - avg4(pix2[2], pix2[3], pix3[2], pix3[3]));
1624 s += abs(pix1[3] - avg4(pix2[3], pix2[4], pix3[3], pix3[4]));
1625 s += abs(pix1[4] - avg4(pix2[4], pix2[5], pix3[4], pix3[5]));
1626 s += abs(pix1[5] - avg4(pix2[5], pix2[6], pix3[5], pix3[6]));
1627 s += abs(pix1[6] - avg4(pix2[6], pix2[7], pix3[6], pix3[7]));
1628 s += abs(pix1[7] - avg4(pix2[7], pix2[8], pix3[7], pix3[8]));
/* Thin adapters giving the pix_abs* SAD helpers the (ctx, a, b, stride)
 * signature used by the DSPContext sad[] comparison table; the context
 * pointer is unused. NOTE(review): closing braces appear elided here. */
1636 static int sad16x16_c(void *s, uint8_t *a, uint8_t *b, int stride){
1637 return pix_abs16x16_c(a,b,stride);
1640 static int sad8x8_c(void *s, uint8_t *a, uint8_t *b, int stride){
1641 return pix_abs8x8_c(a,b,stride);
/* Re-order DCT coefficients in place: for each scan position 0..last the
 * coefficient at scantable[i] ends up at permutation[scantable[i]].
 * NOTE(review): the temp[] declaration and the gather-into-temp body of
 * the first loop are elided in this chunk; the second loop scatters temp
 * back into the permuted positions. */
1644 void ff_block_permute(DCTELEM *block, UINT8 *permutation, const UINT8 *scantable, int last)
1650 //if(permutation[1]==1) return; //FIXME it's ok but not clean and might fail for some perms
1652 for(i=0; i<=last; i++){
1653 const int j= scantable[i];
1658 for(i=0; i<=last; i++){
1659 const int j= scantable[i];
1660 const int perm_j= permutation[j];
1661 block[perm_j]= temp[j];
/* Zero all six 64-coefficient DCT blocks of a macroblock in one memset. */
1665 static void clear_blocks_c(DCTELEM *blocks)
1667 memset(blocks, 0, sizeof(DCTELEM)*6*64);
/* add_bytes_c: dst[i] += src[i] for i in [0, w); main loop unrolled 8x.
 * NOTE(review): the for-header of the scalar tail loop is elided before
 * the final statement. */
1670 static void add_bytes_c(uint8_t *dst, uint8_t *src, int w){
1672 for(i=0; i+7<w; i+=8){
1673 dst[i+0] += src[i+0];
1674 dst[i+1] += src[i+1];
1675 dst[i+2] += src[i+2];
1676 dst[i+3] += src[i+3];
1677 dst[i+4] += src[i+4];
1678 dst[i+5] += src[i+5];
1679 dst[i+6] += src[i+6];
1680 dst[i+7] += src[i+7];
1683 dst[i+0] += src[i+0];
/* diff_bytes_c: dst[i] = src1[i] - src2[i]; same 8x unroll plus tail. */
1686 static void diff_bytes_c(uint8_t *dst, uint8_t *src1, uint8_t *src2, int w){
1688 for(i=0; i+7<w; i+=8){
1689 dst[i+0] = src1[i+0]-src2[i+0];
1690 dst[i+1] = src1[i+1]-src2[i+1];
1691 dst[i+2] = src1[i+2]-src2[i+2];
1692 dst[i+3] = src1[i+3]-src2[i+3];
1693 dst[i+4] = src1[i+4]-src2[i+4];
1694 dst[i+5] = src1[i+5]-src2[i+5];
1695 dst[i+6] = src1[i+6]-src2[i+6];
1696 dst[i+7] = src1[i+7]-src2[i+7];
1699 dst[i+0] = src1[i+0]-src2[i+0];
/* Butterfly helpers for the 8x8 Hadamard transforms below.
 * NOTE(review): the multi-line bodies of BUTTERFLY2/BUTTERFLY1 are elided
 * in this chunk (their definitions continue on missing lines);
 * BUTTERFLYA computes |x+y| + |x-y| for the final accumulation stage. */
1702 #define BUTTERFLY2(o1,o2,i1,i2) \
1706 #define BUTTERFLY1(x,y) \
1715 #define BUTTERFLYA(x,y) (ABS((x)+(y)) + ABS((x)-(y)))
/* SATD-style metric: 8x8 Hadamard-like butterfly transform of the
 * difference (src - dst), summed as absolute transformed coefficients.
 * Row butterflies first, then column butterflies, then |x+y|+|x-y|
 * accumulation per column.
 * NOTE(review): temp[]/sum declarations, the for(i...) loop headers and
 * the debug guard presumably enclosing the printf are elided here. */
1717 static int hadamard8_diff_c(/*MpegEncContext*/ void *s, uint8_t *dst, uint8_t *src, int stride){
1723 //FIXME try pointer walks
1724 BUTTERFLY2(temp[8*i+0], temp[8*i+1], src[stride*i+0]-dst[stride*i+0],src[stride*i+1]-dst[stride*i+1]);
1725 BUTTERFLY2(temp[8*i+2], temp[8*i+3], src[stride*i+2]-dst[stride*i+2],src[stride*i+3]-dst[stride*i+3]);
1726 BUTTERFLY2(temp[8*i+4], temp[8*i+5], src[stride*i+4]-dst[stride*i+4],src[stride*i+5]-dst[stride*i+5]);
1727 BUTTERFLY2(temp[8*i+6], temp[8*i+7], src[stride*i+6]-dst[stride*i+6],src[stride*i+7]-dst[stride*i+7]);
1729 BUTTERFLY1(temp[8*i+0], temp[8*i+2]);
1730 BUTTERFLY1(temp[8*i+1], temp[8*i+3]);
1731 BUTTERFLY1(temp[8*i+4], temp[8*i+6]);
1732 BUTTERFLY1(temp[8*i+5], temp[8*i+7]);
1734 BUTTERFLY1(temp[8*i+0], temp[8*i+4]);
1735 BUTTERFLY1(temp[8*i+1], temp[8*i+5]);
1736 BUTTERFLY1(temp[8*i+2], temp[8*i+6]);
1737 BUTTERFLY1(temp[8*i+3], temp[8*i+7]);
/* second pass: butterflies down each of the 8 columns */
1741 BUTTERFLY1(temp[8*0+i], temp[8*1+i]);
1742 BUTTERFLY1(temp[8*2+i], temp[8*3+i]);
1743 BUTTERFLY1(temp[8*4+i], temp[8*5+i]);
1744 BUTTERFLY1(temp[8*6+i], temp[8*7+i]);
1746 BUTTERFLY1(temp[8*0+i], temp[8*2+i]);
1747 BUTTERFLY1(temp[8*1+i], temp[8*3+i]);
1748 BUTTERFLY1(temp[8*4+i], temp[8*6+i]);
1749 BUTTERFLY1(temp[8*5+i], temp[8*7+i]);
/* final stage folds the last butterfly level into the abs-sum */
1752 BUTTERFLYA(temp[8*0+i], temp[8*4+i])
1753 +BUTTERFLYA(temp[8*1+i], temp[8*5+i])
1754 +BUTTERFLYA(temp[8*2+i], temp[8*6+i])
1755 +BUTTERFLYA(temp[8*3+i], temp[8*7+i]);
1761 printf("MAX:%d\n", maxi);
/* Same 8x8 butterfly transform as hadamard8_diff_c, but applied to
 * (src - mean) instead of a block difference -- i.e. measures the
 * transform energy of a single block with its DC bias removed.
 * NOTE(review): temp[]/sum declarations, loop headers and the return are
 * elided in this chunk. */
1767 static int hadamard8_abs_c(uint8_t *src, int stride, int mean){
1771 //FIXME OOOPS ignore 0 term instead of mean mess
1773 //FIXME try pointer walks
1774 BUTTERFLY2(temp[8*i+0], temp[8*i+1], src[stride*i+0]-mean,src[stride*i+1]-mean);
1775 BUTTERFLY2(temp[8*i+2], temp[8*i+3], src[stride*i+2]-mean,src[stride*i+3]-mean);
1776 BUTTERFLY2(temp[8*i+4], temp[8*i+5], src[stride*i+4]-mean,src[stride*i+5]-mean);
1777 BUTTERFLY2(temp[8*i+6], temp[8*i+7], src[stride*i+6]-mean,src[stride*i+7]-mean);
1779 BUTTERFLY1(temp[8*i+0], temp[8*i+2]);
1780 BUTTERFLY1(temp[8*i+1], temp[8*i+3]);
1781 BUTTERFLY1(temp[8*i+4], temp[8*i+6]);
1782 BUTTERFLY1(temp[8*i+5], temp[8*i+7]);
1784 BUTTERFLY1(temp[8*i+0], temp[8*i+4]);
1785 BUTTERFLY1(temp[8*i+1], temp[8*i+5]);
1786 BUTTERFLY1(temp[8*i+2], temp[8*i+6]);
1787 BUTTERFLY1(temp[8*i+3], temp[8*i+7]);
/* column butterflies */
1791 BUTTERFLY1(temp[8*0+i], temp[8*1+i]);
1792 BUTTERFLY1(temp[8*2+i], temp[8*3+i]);
1793 BUTTERFLY1(temp[8*4+i], temp[8*5+i]);
1794 BUTTERFLY1(temp[8*6+i], temp[8*7+i]);
1796 BUTTERFLY1(temp[8*0+i], temp[8*2+i]);
1797 BUTTERFLY1(temp[8*1+i], temp[8*3+i]);
1798 BUTTERFLY1(temp[8*4+i], temp[8*6+i]);
1799 BUTTERFLY1(temp[8*5+i], temp[8*7+i]);
/* final abs-sum stage */
1802 BUTTERFLYA(temp[8*0+i], temp[8*4+i])
1803 +BUTTERFLYA(temp[8*1+i], temp[8*5+i])
1804 +BUTTERFLYA(temp[8*2+i], temp[8*6+i])
1805 +BUTTERFLYA(temp[8*3+i], temp[8*7+i]);
/* DCT-domain SAD metric: builds the pixel difference into temp; the
 * forward DCT and the sum of absolute coefficients happen in lines that
 * are elided from this chunk. */
1811 static int dct_sad8x8_c(/*MpegEncContext*/ void *c, uint8_t *src1, uint8_t *src2, int stride){
1812 MpegEncContext * const s= (MpegEncContext *)c;
1816 s->dsp.diff_pixels(temp, src1, src2, stride);
/* forward declaration of the reference inverse DCT used by quant_psnr8x8_c */
1825 void simple_idct(INT16 *block); //FIXME
/* Quantization-error metric: DCT the difference block, keep a copy in
 * bak, quantize + dequantize + inverse-DCT it, then sum the squared
 * coefficient error against the copy.
 * NOTE(review): the fdct call, loop headers and the return are elided. */
1827 static int quant_psnr8x8_c(/*MpegEncContext*/ void *c, uint8_t *src1, uint8_t *src2, int stride){
1828 MpegEncContext * const s= (MpegEncContext *)c;
1829 DCTELEM temp[64], bak[64];
1834 s->dsp.diff_pixels(temp, src1, src2, stride);
1836 memcpy(bak, temp, 64*sizeof(DCTELEM));
1838 s->fast_dct_quantize(s, temp, 0/*FIXME*/, s->qscale, &i);
1839 s->dct_unquantize(s, temp, 0, s->qscale);
1840 simple_idct(temp); //FIXME
1843 sum+= (temp[i]-bak[i])*(temp[i]-bak[i]);
/* Rate-distortion score for coding the 8x8 difference block:
 * - rate: VLC bit length of each quantized (run,level) pair looked up in
 *   the intra/inter AC length tables (levels outside the table range use
 *   the escape length -- that branch is elided);
 * - distortion: SSE between the reconstruction (bak + idct of the
 *   dequantized coefficients) and src1.
 * Returns distortion + bits scaled by ~qscale^2 * 105/128.
 * NOTE(review): many interior lines (temp[] decl, loop headers,
 * run/level bookkeeping, escape handling, #if guards) are elided. */
1848 static int rd8x8_c(/*MpegEncContext*/ void *c, uint8_t *src1, uint8_t *src2, int stride){
1849 MpegEncContext * const s= (MpegEncContext *)c;
1850 const UINT8 *scantable= s->intra_scantable.permutated;
1852 uint8_t bak[stride*8];
1853 int i, last, run, bits, level, distoration, start_i;
1854 const int esc_length= s->ac_esc_length;
1856 uint8_t * last_length;
/* select intra vs inter AC VLC tables (the selecting condition is elided) */
1862 length     = s->intra_ac_vlc_length;
1863 last_length= s->intra_ac_vlc_last_length;
1866 length     = s->inter_ac_vlc_length;
1867 last_length= s->inter_ac_vlc_last_length;
/* back up the 8 reference rows so the reconstruction can be added onto them */
1871 ((uint32_t*)(bak + i*stride))[0]= ((uint32_t*)(src2 + i*stride))[0];
1872 ((uint32_t*)(bak + i*stride))[1]= ((uint32_t*)(src2 + i*stride))[1];
1875 s->dsp.diff_pixels(temp, src1, src2, stride);
1877 last= s->fast_dct_quantize(s, temp, 0/*FIXME*/, s->qscale, &i);
1882 for(i=start_i; i<last; i++){
1883 int j= scantable[i];
/* small levels: direct table lookup; larger ones fall through to escape */
1888 if((level&(~127)) == 0){
1889 bits+= length[UNI_AC_ENC_INDEX(run, level)];
/* the final coefficient uses the "last" length table */
1900 level= temp[i] + 64;
1901 if((level&(~127)) == 0){
1902 bits+= last_length[UNI_AC_ENC_INDEX(run, level)];
1906 s->dct_unquantize(s, temp, 0, s->qscale);
1909 s->idct_add(bak, stride, temp);
1911 distoration= s->dsp.sse[1](NULL, bak, src1, stride);
1913 return distoration + ((bits*s->qscale*s->qscale*105 + 64)>>7);
/* Bit-count-only metric: same VLC rate computation as rd8x8_c but
 * without the reconstruction/distortion part.
 * NOTE(review): temp[] decl, loop headers, run/level bookkeeping, escape
 * handling and the return are elided in this chunk. */
1916 static int bit8x8_c(/*MpegEncContext*/ void *c, uint8_t *src1, uint8_t *src2, int stride){
1917 MpegEncContext * const s= (MpegEncContext *)c;
1918 const UINT8 *scantable= s->intra_scantable.permutated;
1920 int i, last, run, bits, level, start_i;
1921 const int esc_length= s->ac_esc_length;
1923 uint8_t * last_length;
/* intra vs inter AC VLC tables (selecting condition elided) */
1929 length     = s->intra_ac_vlc_length;
1930 last_length= s->intra_ac_vlc_last_length;
1933 length     = s->inter_ac_vlc_length;
1934 last_length= s->inter_ac_vlc_last_length;
1937 s->dsp.diff_pixels(temp, src1, src2, stride);
1939 last= s->fast_dct_quantize(s, temp, 0/*FIXME*/, s->qscale, &i);
1944 for(i=start_i; i<last; i++){
1945 int j= scantable[i];
1950 if((level&(~127)) == 0){
1951 bits+= length[UNI_AC_ENC_INDEX(run, level)];
/* last coefficient uses the "last" length table */
1962 level= temp[i] + 64;
1963 if((level&(~127)) == 0){
1964 bits+= last_length[UNI_AC_ENC_INDEX(run, level)];
/* Generate 16x16 comparison functions from the 8x8 ones.
 * NOTE(review): the WARPER88_1616 macro definition is not visible in this
 * chunk -- presumably it sums the 8x8 metric over the four quadrants;
 * verify against the macro's definition. */
1973 WARPER88_1616(hadamard8_diff_c, hadamard8_diff16_c)
1974 WARPER88_1616(dct_sad8x8_c, dct_sad16x16_c)
1975 WARPER88_1616(quant_psnr8x8_c, quant_psnr16x16_c)
1976 WARPER88_1616(rd8x8_c, rd16x16_c)
1977 WARPER88_1616(bit8x8_c, bit16x16_c)
/* dsputil_init: fill the DSPContext function-pointer tables with the C
 * reference implementations, build the lookup tables (cropTbl clamp
 * table, squareTbl, inv_zigzag_direct16), then let the platform-specific
 * initialisers override entries.
 * NOTE(review): several lines appear elided in this chunk (the
 * if(init_done) guard, some loop bodies, #ifdef guards around the
 * per-architecture init calls, and closing braces). */
1979 void dsputil_init(DSPContext* c, unsigned mask)
1981 static int init_done = 0;
/* clamp table: identity for 0..255, saturated outside that range */
1985 for(i=0;i<256;i++) cropTbl[i + MAX_NEG_CROP] = i;
1986 for(i=0;i<MAX_NEG_CROP;i++) {
1988 cropTbl[i + MAX_NEG_CROP + 256] = 255;
/* squareTbl[i] = (i-256)^2 */
1991 for(i=0;i<512;i++) {
1992 squareTbl[i] = (i - 256) * (i - 256);
1995 for(i=0; i<64; i++) inv_zigzag_direct16[ff_zigzag_direct[i]]= i+1;
/* scalar block helpers */
2000 c->get_pixels = get_pixels_c;
2001 c->diff_pixels = diff_pixels_c;
2002 c->put_pixels_clamped = put_pixels_clamped_c;
2003 c->add_pixels_clamped = add_pixels_clamped_c;
2006 c->clear_blocks = clear_blocks_c;
2007 c->pix_sum = pix_sum_c;
2008 c->pix_norm1 = pix_norm1_c;
/* SAD comparators (plain and half-pel interpolated references) */
2012 /* TODO [0] 16 [1] 8 */
2013 c->pix_abs16x16 = pix_abs16x16_c;
2014 c->pix_abs16x16_x2 = pix_abs16x16_x2_c;
2015 c->pix_abs16x16_y2 = pix_abs16x16_y2_c;
2016 c->pix_abs16x16_xy2 = pix_abs16x16_xy2_c;
2017 c->pix_abs8x8 = pix_abs8x8_c;
2018 c->pix_abs8x8_x2 = pix_abs8x8_x2_c;
2019 c->pix_abs8x8_y2 = pix_abs8x8_y2_c;
2020 c->pix_abs8x8_xy2 = pix_abs8x8_xy2_c;
/* generator for the 4-entry half-pel put/avg pixel tables */
2022 #define dspfunc(PFX, IDX, NUM) \
2023 c->PFX ## _pixels_tab[IDX][0] = PFX ## _pixels ## NUM ## _c; \
2024 c->PFX ## _pixels_tab[IDX][1] = PFX ## _pixels ## NUM ## _x2_c; \
2025 c->PFX ## _pixels_tab[IDX][2] = PFX ## _pixels ## NUM ## _y2_c; \
2026 c->PFX ## _pixels_tab[IDX][3] = PFX ## _pixels ## NUM ## _xy2_c
2028 dspfunc(put, 0, 16);
2029 dspfunc(put_no_rnd, 0, 16);
/* NOTE(review): dspfunc(put, 1, 8) and dspfunc(avg, 1, 8) lines appear
 * elided around here */
2031 dspfunc(put_no_rnd, 1, 8);
2033 dspfunc(avg, 0, 16);
2034 dspfunc(avg_no_rnd, 0, 16);
2036 dspfunc(avg_no_rnd, 1, 8);
/* redefined generator for the 16-entry quarter-pel (mc00..mc33) tables
 * (the intervening #undef dspfunc appears elided) */
2039 #define dspfunc(PFX, IDX, NUM) \
2040 c->PFX ## _pixels_tab[IDX][ 0] = PFX ## NUM ## _mc00_c; \
2041 c->PFX ## _pixels_tab[IDX][ 1] = PFX ## NUM ## _mc10_c; \
2042 c->PFX ## _pixels_tab[IDX][ 2] = PFX ## NUM ## _mc20_c; \
2043 c->PFX ## _pixels_tab[IDX][ 3] = PFX ## NUM ## _mc30_c; \
2044 c->PFX ## _pixels_tab[IDX][ 4] = PFX ## NUM ## _mc01_c; \
2045 c->PFX ## _pixels_tab[IDX][ 5] = PFX ## NUM ## _mc11_c; \
2046 c->PFX ## _pixels_tab[IDX][ 6] = PFX ## NUM ## _mc21_c; \
2047 c->PFX ## _pixels_tab[IDX][ 7] = PFX ## NUM ## _mc31_c; \
2048 c->PFX ## _pixels_tab[IDX][ 8] = PFX ## NUM ## _mc02_c; \
2049 c->PFX ## _pixels_tab[IDX][ 9] = PFX ## NUM ## _mc12_c; \
2050 c->PFX ## _pixels_tab[IDX][10] = PFX ## NUM ## _mc22_c; \
2051 c->PFX ## _pixels_tab[IDX][11] = PFX ## NUM ## _mc32_c; \
2052 c->PFX ## _pixels_tab[IDX][12] = PFX ## NUM ## _mc03_c; \
2053 c->PFX ## _pixels_tab[IDX][13] = PFX ## NUM ## _mc13_c; \
2054 c->PFX ## _pixels_tab[IDX][14] = PFX ## NUM ## _mc23_c; \
2055 c->PFX ## _pixels_tab[IDX][15] = PFX ## NUM ## _mc33_c
2057 dspfunc(put_qpel, 0, 16);
2058 dspfunc(put_no_rnd_qpel, 0, 16);
2060 dspfunc(avg_qpel, 0, 16);
2061 /* dspfunc(avg_no_rnd_qpel, 0, 16); */
2063 dspfunc(put_qpel, 1, 8);
2064 dspfunc(put_no_rnd_qpel, 1, 8);
2066 dspfunc(avg_qpel, 1, 8);
2067 /* dspfunc(avg_no_rnd_qpel, 1, 8); */
/* WMV2 mspel motion-compensation table */
2070 c->put_mspel_pixels_tab[0]= put_mspel8_mc00_c;
2071 c->put_mspel_pixels_tab[1]= put_mspel8_mc10_c;
2072 c->put_mspel_pixels_tab[2]= put_mspel8_mc20_c;
2073 c->put_mspel_pixels_tab[3]= put_mspel8_mc30_c;
2074 c->put_mspel_pixels_tab[4]= put_mspel8_mc02_c;
2075 c->put_mspel_pixels_tab[5]= put_mspel8_mc12_c;
2076 c->put_mspel_pixels_tab[6]= put_mspel8_mc22_c;
2077 c->put_mspel_pixels_tab[7]= put_mspel8_mc32_c;
/* encoder comparison metrics */
2079 c->hadamard8_diff[0]= hadamard8_diff16_c;
2080 c->hadamard8_diff[1]= hadamard8_diff_c;
2081 c->hadamard8_abs = hadamard8_abs_c;
2083 c->dct_sad[0]= dct_sad16x16_c;
2084 c->dct_sad[1]= dct_sad8x8_c;
2086 c->sad[0]= sad16x16_c;
2087 c->sad[1]= sad8x8_c;
2089 c->quant_psnr[0]= quant_psnr16x16_c;
2090 c->quant_psnr[1]= quant_psnr8x8_c;
2092 c->rd[0]= rd16x16_c;
/* NOTE(review): the c->rd[1]= rd8x8_c assignment appears elided here */
2095 c->bit[0]= bit16x16_c;
2096 c->bit[1]= bit8x8_c;
2098 c->add_bytes= add_bytes_c;
2099 c->diff_bytes= diff_bytes_c;
/* platform-specific overrides -- presumably each guarded by an elided
 * #ifdef (HAVE_MMX etc.); verify against the full file */
2102 dsputil_init_mmx(c, mask);
2105 /* FIXME - AVCodec context should have flag for bitexact match */
2106 /* fprintf(stderr, "\n\n\nff_bit_exact %d\n\n\n\n", ff_bit_exact); */
2107 dsputil_set_bit_exact_mmx(c, mask);
2111 dsputil_init_armv4l(c, mask);
2114 dsputil_init_mlib(c, mask);
2117 dsputil_init_alpha(c, mask);
2120 dsputil_init_ppc(c, mask);
2123 dsputil_init_mmi(c, mask);
2127 /* remove any non-bit-exact operation (for testing purposes) */
2128 void avcodec_set_bit_exact(void)
2132 // FIXME - better set_bit_exact
2133 // dsputil_set_bit_exact_mmx();