3 * Copyright (c) 2000, 2001 Fabrice Bellard
4 * Copyright (c) 2002-2004 Michael Niedermayer <michaelni@gmx.at>
6 * gmc & q-pel & 32/64 bit based MC by Michael Niedermayer <michaelni@gmx.at>
8 * This file is part of FFmpeg.
10 * FFmpeg is free software; you can redistribute it and/or
11 * modify it under the terms of the GNU Lesser General Public
12 * License as published by the Free Software Foundation; either
13 * version 2.1 of the License, or (at your option) any later version.
15 * FFmpeg is distributed in the hope that it will be useful,
16 * but WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
18 * Lesser General Public License for more details.
20 * You should have received a copy of the GNU Lesser General Public
21 * License along with FFmpeg; if not, write to the Free Software
22 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
30 #include "bit_depth_template.c"
32 static inline void FUNC(copy_block2)(uint8_t *dst, const uint8_t *src, int dstStride, int srcStride, int h)
37 AV_WN2P(dst , AV_RN2P(src ));
/* Copy a 4-pixels-wide block from src to dst using one unaligned 4-pixel
 * load/store per row. NOTE(review): row loop and stride advances elided. */
43 static inline void FUNC(copy_block4)(uint8_t *dst, const uint8_t *src, int dstStride, int srcStride, int h)
48 AV_WN4P(dst , AV_RN4P(src ));
/* Copy an 8-pixels-wide block from src to dst as two 4-pixel loads/stores
 * per row; offsets are in bytes, hence the 4*sizeof(pixel) scaling.
 * NOTE(review): row loop and stride advances elided in this paste. */
54 static inline void FUNC(copy_block8)(uint8_t *dst, const uint8_t *src, int dstStride, int srcStride, int h)
59 AV_WN4P(dst , AV_RN4P(src ));
60 AV_WN4P(dst+4*sizeof(pixel), AV_RN4P(src+4*sizeof(pixel)));
/* Copy a 16-pixels-wide block from src to dst as four 4-pixel loads/stores
 * per row; byte offsets scaled by sizeof(pixel) for 8/16-bit templating.
 * NOTE(review): row loop and stride advances elided in this paste. */
66 static inline void FUNC(copy_block16)(uint8_t *dst, const uint8_t *src, int dstStride, int srcStride, int h)
71 AV_WN4P(dst , AV_RN4P(src ));
72 AV_WN4P(dst+ 4*sizeof(pixel), AV_RN4P(src+ 4*sizeof(pixel)));
73 AV_WN4P(dst+ 8*sizeof(pixel), AV_RN4P(src+ 8*sizeof(pixel)));
74 AV_WN4P(dst+12*sizeof(pixel), AV_RN4P(src+12*sizeof(pixel)));
80 /* draw the edges of width 'w' of an image of size width, height */
81 //FIXME check that this is ok for mpeg4 interlaced
/* Replicate the border pixels of a width x height image outward by w pixels
 * horizontally and h pixels vertically, for the sides selected by the
 * EDGE_TOP/EDGE_BOTTOM bits in 'sides'. p_wrap is the line stride in BYTES;
 * it is converted to a stride in pixels below. */
82 static void FUNCC(draw_edges)(uint8_t *p_buf, int p_wrap, int width, int height, int w, int h, int sides)
84 pixel *buf = (pixel*)p_buf;
/* convert byte stride to pixel stride (pixel is 1 or 2 bytes) */
85 int wrap = p_wrap / sizeof(pixel);
86 pixel *ptr, *last_line;
/* left and right edges: replicate the first/last pixel of each row.
 * NOTE(review): the ptr initialization and the >8-bit vs 8-bit branch
 * selection are elided in this paste — the memset variant below is only
 * valid for 1-byte pixels, the j-loop for 2-byte pixels. */
91 for(i=0;i<height;i++) {
94 for (j = 0; j < w; j++) {
96 ptr[j+width] = ptr[width-1];
99 memset(ptr - w, ptr[0], w);
100 memset(ptr + width, ptr[width-1], w);
/* top and bottom edges (including the corners, since the source rows
 * already contain the replicated left/right margins: width + 2*w pixels) */
105 /* top and bottom + corners */
107 last_line = buf + (height - 1) * wrap;
108 if (sides & EDGE_TOP)
109 for(i = 0; i < h; i++)
110 memcpy(buf - (i + 1) * wrap, buf, (width + w + w) * sizeof(pixel)); // top
111 if (sides & EDGE_BOTTOM)
112 for (i = 0; i < h; i++)
113 memcpy(last_line + (i + 1) * wrap, last_line, (width + w + w) * sizeof(pixel)); // bottom
117 * Copy a rectangular area of samples to a temporary buffer and replicate the border samples.
118 * @param buf destination buffer
119 * @param src source buffer
120 * @param linesize number of bytes between 2 vertically adjacent samples in both the source and destination buffers
121 * @param block_w width of block
122 * @param block_h height of block
123 * @param src_x x coordinate of the top left sample of the block in the source buffer
124 * @param src_y y coordinate of the top left sample of the block in the source buffer
125 * @param w width of the source buffer
126 * @param h height of the source buffer
/* Motion compensation with edge emulation: copies a block_w x block_h block
 * whose top-left is at (src_x, src_y) in a w x h source into buf, replicating
 * border samples for the parts that fall outside the source. See the
 * documentation comment above for parameter semantics. */
128 void FUNC(ff_emulated_edge_mc)(uint8_t *buf, const uint8_t *src, int linesize, int block_w, int block_h,
129 int src_x, int src_y, int w, int h){
131 int start_y, start_x, end_y, end_x;
/* clamp src_y/src_x when the block lies entirely outside the source, and
 * move src so the later stride arithmetic stays within the buffer.
 * NOTE(review): the if(src_y>=h){ / if(src_x>=w){ opening lines and the
 * clamping assignments are elided in this paste. */
134 src+= (h-1-src_y)*linesize;
136 }else if(src_y<=-block_h){
137 src+= (1-block_h-src_y)*linesize;
141 src+= (w-1-src_x)*sizeof(pixel);
143 }else if(src_x<=-block_w){
144 src+= (1-block_w-src_x)*sizeof(pixel);
/* [start_x,end_x) x [start_y,end_y) is the part of the block that overlaps
 * the real source data; everything else is synthesized by replication */
148 start_y= FFMAX(0, -src_y);
149 start_x= FFMAX(0, -src_x);
150 end_y= FFMIN(block_h, h-src_y);
151 end_x= FFMIN(block_w, w-src_x);
152 assert(start_y < end_y && block_h);
153 assert(start_x < end_x && block_w);
/* NOTE(review): upstream reassigns w = end_x - start_x (and advances buf by
 * start_y*linesize) in lines elided here; the memcpy length 'w' below relies
 * on that reassignment — confirm before treating it as the source width. */
156 src += start_y*linesize + start_x*sizeof(pixel);
157 buf += start_x*sizeof(pixel);
/* top rows: replicate the first available source row upward */
160 for(y=0; y<start_y; y++){
161 memcpy(buf, src, w*sizeof(pixel));
165 // copy existing part
167 memcpy(buf, src, w*sizeof(pixel));
/* bottom rows: replicate the last available source row downward */
174 for(; y<block_h; y++){
175 memcpy(buf, src, w*sizeof(pixel));
/* rewind buf to the block's top-left, then replicate left/right columns */
179 buf -= block_h * linesize + start_x*sizeof(pixel);
181 pixel *bufp = (pixel*)buf;
/* left columns: copy the first valid sample of each row */
183 for(x=0; x<start_x; x++){
184 bufp[x] = bufp[start_x];
/* right columns: copy the last valid sample of each row */
188 for(x=end_x; x<block_w; x++){
189 bufp[x] = bufp[end_x - 1];
/* Add one row of 8 IDCT residual coefficients (dctcoef) to 8 pixels.
 * line_size arrives in bytes; >> (sizeof(pixel)-1) converts it to a pixel
 * stride for 1- or 2-byte pixels. NOTE(review): the loop over the 8 rows and
 * the pixels/block pointer advances are elided in this paste. No clipping is
 * performed — callers presumably guarantee the sums stay in range. */
195 static void FUNCC(add_pixels8)(uint8_t *restrict p_pixels, DCTELEM *p_block, int line_size)
198 pixel *restrict pixels = (pixel *restrict)p_pixels;
199 dctcoef *block = (dctcoef*)p_block;
200 line_size >>= sizeof(pixel)-1;
203 pixels[0] += block[0];
204 pixels[1] += block[1];
205 pixels[2] += block[2];
206 pixels[3] += block[3];
207 pixels[4] += block[4];
208 pixels[5] += block[5];
209 pixels[6] += block[6];
210 pixels[7] += block[7];
/* Add one row of 4 IDCT residual coefficients to 4 pixels; 4x4 variant of
 * add_pixels8 above. line_size is converted from bytes to a pixel stride.
 * NOTE(review): the loop over the 4 rows and pointer advances are elided. */
216 static void FUNCC(add_pixels4)(uint8_t *restrict p_pixels, DCTELEM *p_block, int line_size)
219 pixel *restrict pixels = (pixel *restrict)p_pixels;
220 dctcoef *block = (dctcoef*)p_block;
221 line_size >>= sizeof(pixel)-1;
224 pixels[0] += block[0];
225 pixels[1] += block[1];
226 pixels[2] += block[2];
227 pixels[3] += block[3];
/* 64-bit-at-a-time pixel-op template (8-bit pixels only), generating
 * put/avg copy, half-pel x2/y2 and quarter-sample xy2 averaging functions.
 * The bit tricks: (a&b) + (((a^b)&0xFE..FE)>>1) is a per-byte rounded-down
 * average, (a|b) - (((a^b)&0xFE..FE)>>1) rounds up; the xy2 variants split
 * each byte into low 2 bits (l0/l1) and high 6 bits (h0/h1) so four samples
 * can be averaged per byte lane without cross-byte carries (0x02..02 adds
 * the rounding bias, 0x01..01 the no-rnd bias).
 * NOTE(review): this is the #if branch of a 64-bit/32-bit selection whose
 * condition line is elided; loop headers, pointer advances and closing
 * braces inside the macro are also elided in this paste. */
235 #define PIXOP2(OPNAME, OP) \
236 static void OPNAME ## _pixels(uint8_t *block, const uint8_t *pixels, int line_size, int h)\
240 OP(*((uint64_t*)block), AV_RN64(pixels));\
246 static void OPNAME ## _no_rnd_pixels_x2_c(uint8_t *block, const uint8_t *pixels, int line_size, int h)\
250 const uint64_t a= AV_RN64(pixels );\
251 const uint64_t b= AV_RN64(pixels+1);\
252 OP(*((uint64_t*)block), (a&b) + (((a^b)&0xFEFEFEFEFEFEFEFEULL)>>1));\
258 static void OPNAME ## _pixels_x2_c(uint8_t *block, const uint8_t *pixels, int line_size, int h)\
262 const uint64_t a= AV_RN64(pixels );\
263 const uint64_t b= AV_RN64(pixels+1);\
264 OP(*((uint64_t*)block), (a|b) - (((a^b)&0xFEFEFEFEFEFEFEFEULL)>>1));\
270 static void OPNAME ## _no_rnd_pixels_y2_c(uint8_t *block, const uint8_t *pixels, int line_size, int h)\
274 const uint64_t a= AV_RN64(pixels );\
275 const uint64_t b= AV_RN64(pixels+line_size);\
276 OP(*((uint64_t*)block), (a&b) + (((a^b)&0xFEFEFEFEFEFEFEFEULL)>>1));\
282 static void OPNAME ## _pixels_y2_c(uint8_t *block, const uint8_t *pixels, int line_size, int h)\
286 const uint64_t a= AV_RN64(pixels );\
287 const uint64_t b= AV_RN64(pixels+line_size);\
288 OP(*((uint64_t*)block), (a|b) - (((a^b)&0xFEFEFEFEFEFEFEFEULL)>>1));\
294 static void OPNAME ## _pixels_xy2_c(uint8_t *block, const uint8_t *pixels, int line_size, int h)\
297 const uint64_t a= AV_RN64(pixels );\
298 const uint64_t b= AV_RN64(pixels+1);\
299 uint64_t l0= (a&0x0303030303030303ULL)\
300 + (b&0x0303030303030303ULL)\
301 + 0x0202020202020202ULL;\
302 uint64_t h0= ((a&0xFCFCFCFCFCFCFCFCULL)>>2)\
303 + ((b&0xFCFCFCFCFCFCFCFCULL)>>2);\
307 for(i=0; i<h; i+=2){\
308 uint64_t a= AV_RN64(pixels );\
309 uint64_t b= AV_RN64(pixels+1);\
310 l1= (a&0x0303030303030303ULL)\
311 + (b&0x0303030303030303ULL);\
312 h1= ((a&0xFCFCFCFCFCFCFCFCULL)>>2)\
313 + ((b&0xFCFCFCFCFCFCFCFCULL)>>2);\
314 OP(*((uint64_t*)block), h0+h1+(((l0+l1)>>2)&0x0F0F0F0F0F0F0F0FULL));\
317 a= AV_RN64(pixels );\
318 b= AV_RN64(pixels+1);\
319 l0= (a&0x0303030303030303ULL)\
320 + (b&0x0303030303030303ULL)\
321 + 0x0202020202020202ULL;\
322 h0= ((a&0xFCFCFCFCFCFCFCFCULL)>>2)\
323 + ((b&0xFCFCFCFCFCFCFCFCULL)>>2);\
324 OP(*((uint64_t*)block), h0+h1+(((l0+l1)>>2)&0x0F0F0F0F0F0F0F0FULL));\
330 static void OPNAME ## _no_rnd_pixels_xy2_c(uint8_t *block, const uint8_t *pixels, int line_size, int h)\
333 const uint64_t a= AV_RN64(pixels );\
334 const uint64_t b= AV_RN64(pixels+1);\
335 uint64_t l0= (a&0x0303030303030303ULL)\
336 + (b&0x0303030303030303ULL)\
337 + 0x0101010101010101ULL;\
338 uint64_t h0= ((a&0xFCFCFCFCFCFCFCFCULL)>>2)\
339 + ((b&0xFCFCFCFCFCFCFCFCULL)>>2);\
343 for(i=0; i<h; i+=2){\
344 uint64_t a= AV_RN64(pixels );\
345 uint64_t b= AV_RN64(pixels+1);\
346 l1= (a&0x0303030303030303ULL)\
347 + (b&0x0303030303030303ULL);\
348 h1= ((a&0xFCFCFCFCFCFCFCFCULL)>>2)\
349 + ((b&0xFCFCFCFCFCFCFCFCULL)>>2);\
350 OP(*((uint64_t*)block), h0+h1+(((l0+l1)>>2)&0x0F0F0F0F0F0F0F0FULL));\
353 a= AV_RN64(pixels );\
354 b= AV_RN64(pixels+1);\
355 l0= (a&0x0303030303030303ULL)\
356 + (b&0x0303030303030303ULL)\
357 + 0x0101010101010101ULL;\
358 h0= ((a&0xFCFCFCFCFCFCFCFCULL)>>2)\
359 + ((b&0xFCFCFCFCFCFCFCFCULL)>>2);\
360 OP(*((uint64_t*)block), h0+h1+(((l0+l1)>>2)&0x0F0F0F0F0F0F0F0FULL));\
366 CALL_2X_PIXELS(OPNAME ## _pixels16_c , OPNAME ## _pixels_c , 8*sizeof(pixel))\
367 CALL_2X_PIXELS(OPNAME ## _pixels16_x2_c , OPNAME ## _pixels_x2_c , 8*sizeof(pixel))\
368 CALL_2X_PIXELS(OPNAME ## _pixels16_y2_c , OPNAME ## _pixels_y2_c , 8*sizeof(pixel))\
369 CALL_2X_PIXELS(OPNAME ## _pixels16_xy2_c, OPNAME ## _pixels_xy2_c, 8*sizeof(pixel))\
370 CALL_2X_PIXELS(OPNAME ## _no_rnd_pixels16_x2_c , OPNAME ## _no_rnd_pixels_x2_c , 8*sizeof(pixel))\
371 CALL_2X_PIXELS(OPNAME ## _no_rnd_pixels16_y2_c , OPNAME ## _no_rnd_pixels_y2_c , 8*sizeof(pixel))\
372 CALL_2X_PIXELS(OPNAME ## _no_rnd_pixels16_xy2_c, OPNAME ## _no_rnd_pixels_xy2_c, 8*sizeof(pixel))
/* rounded-up per-byte average used as the OP for the avg_ instantiation */
374 #define op_avg(a, b) a = ( ((a)|(b)) - ((((a)^(b))&0xFEFEFEFEFEFEFEFEULL)>>1) )
375 #else // 64 bit variant
/* Bit-depth-templated pixel-op generator (the #else branch of the 64-bit
 * selection above). For each OPNAME (put/avg) it emits 2/4/8/16-pixel-wide
 * copy, two-source average (_l2), four-source average (_l4), half-pel
 * x2/y2 and quarter-sample xy2 functions, using pixel2/pixel4 packed types
 * and the rnd_avg_pixel4 / no_rnd_avg_pixel4 helpers from the bit-depth
 * template header. The _l4/_xy2 bodies still operate on raw uint32_t with
 * 8-bit-per-byte masks — hence the FIXME HIGH BIT DEPTH markers.
 * NOTE(review): loop headers, pointer advances and many closing braces
 * inside this macro are elided in this paste; the visible lines are the
 * arithmetic payload only. */
377 #define PIXOP2(OPNAME, OP) \
378 static void FUNCC(OPNAME ## _pixels2)(uint8_t *block, const uint8_t *pixels, int line_size, int h){\
381 OP(*((pixel2*)(block )), AV_RN2P(pixels ));\
386 static void FUNCC(OPNAME ## _pixels4)(uint8_t *block, const uint8_t *pixels, int line_size, int h){\
389 OP(*((pixel4*)(block )), AV_RN4P(pixels ));\
394 static void FUNCC(OPNAME ## _pixels8)(uint8_t *block, const uint8_t *pixels, int line_size, int h){\
397 OP(*((pixel4*)(block )), AV_RN4P(pixels ));\
398 OP(*((pixel4*)(block+4*sizeof(pixel))), AV_RN4P(pixels+4*sizeof(pixel)));\
403 static inline void FUNCC(OPNAME ## _no_rnd_pixels8)(uint8_t *block, const uint8_t *pixels, int line_size, int h){\
404 FUNCC(OPNAME ## _pixels8)(block, pixels, line_size, h);\
407 static inline void FUNC(OPNAME ## _no_rnd_pixels8_l2)(uint8_t *dst, const uint8_t *src1, const uint8_t *src2, int dst_stride, \
408 int src_stride1, int src_stride2, int h){\
412 a= AV_RN4P(&src1[i*src_stride1 ]);\
413 b= AV_RN4P(&src2[i*src_stride2 ]);\
414 OP(*((pixel4*)&dst[i*dst_stride ]), no_rnd_avg_pixel4(a, b));\
415 a= AV_RN4P(&src1[i*src_stride1+4*sizeof(pixel)]);\
416 b= AV_RN4P(&src2[i*src_stride2+4*sizeof(pixel)]);\
417 OP(*((pixel4*)&dst[i*dst_stride+4*sizeof(pixel)]), no_rnd_avg_pixel4(a, b));\
421 static inline void FUNC(OPNAME ## _pixels8_l2)(uint8_t *dst, const uint8_t *src1, const uint8_t *src2, int dst_stride, \
422 int src_stride1, int src_stride2, int h){\
426 a= AV_RN4P(&src1[i*src_stride1 ]);\
427 b= AV_RN4P(&src2[i*src_stride2 ]);\
428 OP(*((pixel4*)&dst[i*dst_stride ]), rnd_avg_pixel4(a, b));\
429 a= AV_RN4P(&src1[i*src_stride1+4*sizeof(pixel)]);\
430 b= AV_RN4P(&src2[i*src_stride2+4*sizeof(pixel)]);\
431 OP(*((pixel4*)&dst[i*dst_stride+4*sizeof(pixel)]), rnd_avg_pixel4(a, b));\
435 static inline void FUNC(OPNAME ## _pixels4_l2)(uint8_t *dst, const uint8_t *src1, const uint8_t *src2, int dst_stride, \
436 int src_stride1, int src_stride2, int h){\
440 a= AV_RN4P(&src1[i*src_stride1 ]);\
441 b= AV_RN4P(&src2[i*src_stride2 ]);\
442 OP(*((pixel4*)&dst[i*dst_stride ]), rnd_avg_pixel4(a, b));\
446 static inline void FUNC(OPNAME ## _pixels2_l2)(uint8_t *dst, const uint8_t *src1, const uint8_t *src2, int dst_stride, \
447 int src_stride1, int src_stride2, int h){\
451 a= AV_RN2P(&src1[i*src_stride1 ]);\
452 b= AV_RN2P(&src2[i*src_stride2 ]);\
453 OP(*((pixel2*)&dst[i*dst_stride ]), rnd_avg_pixel4(a, b));\
457 static inline void FUNC(OPNAME ## _pixels16_l2)(uint8_t *dst, const uint8_t *src1, const uint8_t *src2, int dst_stride, \
458 int src_stride1, int src_stride2, int h){\
459 FUNC(OPNAME ## _pixels8_l2)(dst , src1 , src2 , dst_stride, src_stride1, src_stride2, h);\
460 FUNC(OPNAME ## _pixels8_l2)(dst+8*sizeof(pixel), src1+8*sizeof(pixel), src2+8*sizeof(pixel), dst_stride, src_stride1, src_stride2, h);\
463 static inline void FUNC(OPNAME ## _no_rnd_pixels16_l2)(uint8_t *dst, const uint8_t *src1, const uint8_t *src2, int dst_stride, \
464 int src_stride1, int src_stride2, int h){\
465 FUNC(OPNAME ## _no_rnd_pixels8_l2)(dst , src1 , src2 , dst_stride, src_stride1, src_stride2, h);\
466 FUNC(OPNAME ## _no_rnd_pixels8_l2)(dst+8*sizeof(pixel), src1+8*sizeof(pixel), src2+8*sizeof(pixel), dst_stride, src_stride1, src_stride2, h);\
469 static inline void FUNCC(OPNAME ## _no_rnd_pixels8_x2)(uint8_t *block, const uint8_t *pixels, int line_size, int h){\
470 FUNC(OPNAME ## _no_rnd_pixels8_l2)(block, pixels, pixels+sizeof(pixel), line_size, line_size, line_size, h);\
473 static inline void FUNCC(OPNAME ## _pixels8_x2)(uint8_t *block, const uint8_t *pixels, int line_size, int h){\
474 FUNC(OPNAME ## _pixels8_l2)(block, pixels, pixels+sizeof(pixel), line_size, line_size, line_size, h);\
477 static inline void FUNCC(OPNAME ## _no_rnd_pixels8_y2)(uint8_t *block, const uint8_t *pixels, int line_size, int h){\
478 FUNC(OPNAME ## _no_rnd_pixels8_l2)(block, pixels, pixels+line_size, line_size, line_size, line_size, h);\
481 static inline void FUNCC(OPNAME ## _pixels8_y2)(uint8_t *block, const uint8_t *pixels, int line_size, int h){\
482 FUNC(OPNAME ## _pixels8_l2)(block, pixels, pixels+line_size, line_size, line_size, line_size, h);\
485 static inline void FUNC(OPNAME ## _pixels8_l4)(uint8_t *dst, const uint8_t *src1, const uint8_t *src2, const uint8_t *src3, const uint8_t *src4,\
486 int dst_stride, int src_stride1, int src_stride2,int src_stride3,int src_stride4, int h){\
487 /* FIXME HIGH BIT DEPTH */\
490 uint32_t a, b, c, d, l0, l1, h0, h1;\
491 a= AV_RN32(&src1[i*src_stride1]);\
492 b= AV_RN32(&src2[i*src_stride2]);\
493 c= AV_RN32(&src3[i*src_stride3]);\
494 d= AV_RN32(&src4[i*src_stride4]);\
495 l0= (a&0x03030303UL)\
498 h0= ((a&0xFCFCFCFCUL)>>2)\
499 + ((b&0xFCFCFCFCUL)>>2);\
500 l1= (c&0x03030303UL)\
502 h1= ((c&0xFCFCFCFCUL)>>2)\
503 + ((d&0xFCFCFCFCUL)>>2);\
504 OP(*((uint32_t*)&dst[i*dst_stride]), h0+h1+(((l0+l1)>>2)&0x0F0F0F0FUL));\
505 a= AV_RN32(&src1[i*src_stride1+4]);\
506 b= AV_RN32(&src2[i*src_stride2+4]);\
507 c= AV_RN32(&src3[i*src_stride3+4]);\
508 d= AV_RN32(&src4[i*src_stride4+4]);\
509 l0= (a&0x03030303UL)\
512 h0= ((a&0xFCFCFCFCUL)>>2)\
513 + ((b&0xFCFCFCFCUL)>>2);\
514 l1= (c&0x03030303UL)\
516 h1= ((c&0xFCFCFCFCUL)>>2)\
517 + ((d&0xFCFCFCFCUL)>>2);\
518 OP(*((uint32_t*)&dst[i*dst_stride+4]), h0+h1+(((l0+l1)>>2)&0x0F0F0F0FUL));\
522 static inline void FUNCC(OPNAME ## _pixels4_x2)(uint8_t *block, const uint8_t *pixels, int line_size, int h){\
523 FUNC(OPNAME ## _pixels4_l2)(block, pixels, pixels+sizeof(pixel), line_size, line_size, line_size, h);\
526 static inline void FUNCC(OPNAME ## _pixels4_y2)(uint8_t *block, const uint8_t *pixels, int line_size, int h){\
527 FUNC(OPNAME ## _pixels4_l2)(block, pixels, pixels+line_size, line_size, line_size, line_size, h);\
530 static inline void FUNCC(OPNAME ## _pixels2_x2)(uint8_t *block, const uint8_t *pixels, int line_size, int h){\
531 FUNC(OPNAME ## _pixels2_l2)(block, pixels, pixels+sizeof(pixel), line_size, line_size, line_size, h);\
534 static inline void FUNCC(OPNAME ## _pixels2_y2)(uint8_t *block, const uint8_t *pixels, int line_size, int h){\
535 FUNC(OPNAME ## _pixels2_l2)(block, pixels, pixels+line_size, line_size, line_size, line_size, h);\
538 static inline void FUNC(OPNAME ## _no_rnd_pixels8_l4)(uint8_t *dst, const uint8_t *src1, const uint8_t *src2, const uint8_t *src3, const uint8_t *src4,\
539 int dst_stride, int src_stride1, int src_stride2,int src_stride3,int src_stride4, int h){\
540 /* FIXME HIGH BIT DEPTH*/\
543 uint32_t a, b, c, d, l0, l1, h0, h1;\
544 a= AV_RN32(&src1[i*src_stride1]);\
545 b= AV_RN32(&src2[i*src_stride2]);\
546 c= AV_RN32(&src3[i*src_stride3]);\
547 d= AV_RN32(&src4[i*src_stride4]);\
548 l0= (a&0x03030303UL)\
551 h0= ((a&0xFCFCFCFCUL)>>2)\
552 + ((b&0xFCFCFCFCUL)>>2);\
553 l1= (c&0x03030303UL)\
555 h1= ((c&0xFCFCFCFCUL)>>2)\
556 + ((d&0xFCFCFCFCUL)>>2);\
557 OP(*((uint32_t*)&dst[i*dst_stride]), h0+h1+(((l0+l1)>>2)&0x0F0F0F0FUL));\
558 a= AV_RN32(&src1[i*src_stride1+4]);\
559 b= AV_RN32(&src2[i*src_stride2+4]);\
560 c= AV_RN32(&src3[i*src_stride3+4]);\
561 d= AV_RN32(&src4[i*src_stride4+4]);\
562 l0= (a&0x03030303UL)\
565 h0= ((a&0xFCFCFCFCUL)>>2)\
566 + ((b&0xFCFCFCFCUL)>>2);\
567 l1= (c&0x03030303UL)\
569 h1= ((c&0xFCFCFCFCUL)>>2)\
570 + ((d&0xFCFCFCFCUL)>>2);\
571 OP(*((uint32_t*)&dst[i*dst_stride+4]), h0+h1+(((l0+l1)>>2)&0x0F0F0F0FUL));\
574 static inline void FUNC(OPNAME ## _pixels16_l4)(uint8_t *dst, const uint8_t *src1, const uint8_t *src2, const uint8_t *src3, const uint8_t *src4,\
575 int dst_stride, int src_stride1, int src_stride2,int src_stride3,int src_stride4, int h){\
576 FUNC(OPNAME ## _pixels8_l4)(dst , src1 , src2 , src3 , src4 , dst_stride, src_stride1, src_stride2, src_stride3, src_stride4, h);\
577 FUNC(OPNAME ## _pixels8_l4)(dst+8*sizeof(pixel), src1+8*sizeof(pixel), src2+8*sizeof(pixel), src3+8*sizeof(pixel), src4+8*sizeof(pixel), dst_stride, src_stride1, src_stride2, src_stride3, src_stride4, h);\
579 static inline void FUNC(OPNAME ## _no_rnd_pixels16_l4)(uint8_t *dst, const uint8_t *src1, const uint8_t *src2, const uint8_t *src3, const uint8_t *src4,\
580 int dst_stride, int src_stride1, int src_stride2,int src_stride3,int src_stride4, int h){\
581 FUNC(OPNAME ## _no_rnd_pixels8_l4)(dst , src1 , src2 , src3 , src4 , dst_stride, src_stride1, src_stride2, src_stride3, src_stride4, h);\
582 FUNC(OPNAME ## _no_rnd_pixels8_l4)(dst+8*sizeof(pixel), src1+8*sizeof(pixel), src2+8*sizeof(pixel), src3+8*sizeof(pixel), src4+8*sizeof(pixel), dst_stride, src_stride1, src_stride2, src_stride3, src_stride4, h);\
585 static inline void FUNCC(OPNAME ## _pixels2_xy2)(uint8_t *p_block, const uint8_t *p_pixels, int line_size, int h)\
587 int i, a0, b0, a1, b1;\
588 pixel *block = (pixel*)p_block;\
589 const pixel *pixels = (const pixel*)p_pixels;\
590 line_size >>= sizeof(pixel)-1;\
597 for(i=0; i<h; i+=2){\
603 block[0]= (a1+a0)>>2; /* FIXME non put */\
604 block[1]= (b1+b0)>>2;\
614 block[0]= (a1+a0)>>2;\
615 block[1]= (b1+b0)>>2;\
621 static inline void FUNCC(OPNAME ## _pixels4_xy2)(uint8_t *block, const uint8_t *pixels, int line_size, int h)\
623 /* FIXME HIGH BIT DEPTH */\
625 const uint32_t a= AV_RN32(pixels );\
626 const uint32_t b= AV_RN32(pixels+1);\
627 uint32_t l0= (a&0x03030303UL)\
630 uint32_t h0= ((a&0xFCFCFCFCUL)>>2)\
631 + ((b&0xFCFCFCFCUL)>>2);\
635 for(i=0; i<h; i+=2){\
636 uint32_t a= AV_RN32(pixels );\
637 uint32_t b= AV_RN32(pixels+1);\
638 l1= (a&0x03030303UL)\
640 h1= ((a&0xFCFCFCFCUL)>>2)\
641 + ((b&0xFCFCFCFCUL)>>2);\
642 OP(*((uint32_t*)block), h0+h1+(((l0+l1)>>2)&0x0F0F0F0FUL));\
645 a= AV_RN32(pixels );\
646 b= AV_RN32(pixels+1);\
647 l0= (a&0x03030303UL)\
650 h0= ((a&0xFCFCFCFCUL)>>2)\
651 + ((b&0xFCFCFCFCUL)>>2);\
652 OP(*((uint32_t*)block), h0+h1+(((l0+l1)>>2)&0x0F0F0F0FUL));\
658 static inline void FUNCC(OPNAME ## _pixels8_xy2)(uint8_t *block, const uint8_t *pixels, int line_size, int h)\
660 /* FIXME HIGH BIT DEPTH */\
664 const uint32_t a= AV_RN32(pixels );\
665 const uint32_t b= AV_RN32(pixels+1);\
666 uint32_t l0= (a&0x03030303UL)\
669 uint32_t h0= ((a&0xFCFCFCFCUL)>>2)\
670 + ((b&0xFCFCFCFCUL)>>2);\
674 for(i=0; i<h; i+=2){\
675 uint32_t a= AV_RN32(pixels );\
676 uint32_t b= AV_RN32(pixels+1);\
677 l1= (a&0x03030303UL)\
679 h1= ((a&0xFCFCFCFCUL)>>2)\
680 + ((b&0xFCFCFCFCUL)>>2);\
681 OP(*((uint32_t*)block), h0+h1+(((l0+l1)>>2)&0x0F0F0F0FUL));\
684 a= AV_RN32(pixels );\
685 b= AV_RN32(pixels+1);\
686 l0= (a&0x03030303UL)\
689 h0= ((a&0xFCFCFCFCUL)>>2)\
690 + ((b&0xFCFCFCFCUL)>>2);\
691 OP(*((uint32_t*)block), h0+h1+(((l0+l1)>>2)&0x0F0F0F0FUL));\
695 pixels+=4-line_size*(h+1);\
696 block +=4-line_size*h;\
700 static inline void FUNCC(OPNAME ## _no_rnd_pixels8_xy2)(uint8_t *block, const uint8_t *pixels, int line_size, int h)\
702 /* FIXME HIGH BIT DEPTH */\
706 const uint32_t a= AV_RN32(pixels );\
707 const uint32_t b= AV_RN32(pixels+1);\
708 uint32_t l0= (a&0x03030303UL)\
711 uint32_t h0= ((a&0xFCFCFCFCUL)>>2)\
712 + ((b&0xFCFCFCFCUL)>>2);\
716 for(i=0; i<h; i+=2){\
717 uint32_t a= AV_RN32(pixels );\
718 uint32_t b= AV_RN32(pixels+1);\
719 l1= (a&0x03030303UL)\
721 h1= ((a&0xFCFCFCFCUL)>>2)\
722 + ((b&0xFCFCFCFCUL)>>2);\
723 OP(*((uint32_t*)block), h0+h1+(((l0+l1)>>2)&0x0F0F0F0FUL));\
726 a= AV_RN32(pixels );\
727 b= AV_RN32(pixels+1);\
728 l0= (a&0x03030303UL)\
731 h0= ((a&0xFCFCFCFCUL)>>2)\
732 + ((b&0xFCFCFCFCUL)>>2);\
733 OP(*((uint32_t*)block), h0+h1+(((l0+l1)>>2)&0x0F0F0F0FUL));\
737 pixels+=4-line_size*(h+1);\
738 block +=4-line_size*h;\
742 CALL_2X_PIXELS(FUNCC(OPNAME ## _pixels16) , FUNCC(OPNAME ## _pixels8) , 8*sizeof(pixel))\
743 CALL_2X_PIXELS(FUNCC(OPNAME ## _pixels16_x2) , FUNCC(OPNAME ## _pixels8_x2) , 8*sizeof(pixel))\
744 CALL_2X_PIXELS(FUNCC(OPNAME ## _pixels16_y2) , FUNCC(OPNAME ## _pixels8_y2) , 8*sizeof(pixel))\
745 CALL_2X_PIXELS(FUNCC(OPNAME ## _pixels16_xy2), FUNCC(OPNAME ## _pixels8_xy2), 8*sizeof(pixel))\
746 av_unused CALL_2X_PIXELS(FUNCC(OPNAME ## _no_rnd_pixels16) , FUNCC(OPNAME ## _pixels8) , 8*sizeof(pixel))\
747 CALL_2X_PIXELS(FUNCC(OPNAME ## _no_rnd_pixels16_x2) , FUNCC(OPNAME ## _no_rnd_pixels8_x2) , 8*sizeof(pixel))\
748 CALL_2X_PIXELS(FUNCC(OPNAME ## _no_rnd_pixels16_y2) , FUNCC(OPNAME ## _no_rnd_pixels8_y2) , 8*sizeof(pixel))\
749 CALL_2X_PIXELS(FUNCC(OPNAME ## _no_rnd_pixels16_xy2), FUNCC(OPNAME ## _no_rnd_pixels8_xy2), 8*sizeof(pixel))\
/* OPs used to instantiate the template: avg_ blends with the existing
 * destination via a rounded average, put_ overwrites it */
751 #define op_avg(a, b) a = rnd_avg_pixel4(a, b)
753 #define op_put(a, b) a = b
/* For "put", rounding and no-rounding copies are identical, so alias them. */
760 #define put_no_rnd_pixels8_c put_pixels8_c
761 #define put_no_rnd_pixels16_c put_pixels16_c
/* Single-stride convenience wrappers around the generated three-stride
 * no-rnd _l2 averagers (same stride used for dst and both sources). */
763 static void FUNCC(put_no_rnd_pixels16_l2)(uint8_t *dst, const uint8_t *a, const uint8_t *b, int stride, int h){
764 FUNC(put_no_rnd_pixels16_l2)(dst, a, b, stride, stride, stride, h);
767 static void FUNCC(put_no_rnd_pixels8_l2)(uint8_t *dst, const uint8_t *a, const uint8_t *b, int stride, int h){
768 FUNC(put_no_rnd_pixels8_l2)(dst, a, b, stride, stride, stride, h);
/* H.264 chroma MC template: bilinear interpolation at eighth-sample
 * position (x,y), 0 <= x,y < 8. A/B/C/D are the standard bilinear weights
 * (they sum to 64; the >>6 with +32 rounding lives in the OP macros below).
 * When D==0 the 2-tap fast path is used: E is the one non-zero cross weight
 * and step selects vertical (stride) or horizontal (1) neighbours.
 * NOTE(review): the E definition, row loops, dst/src advances and closing
 * braces are elided in this paste. stride is converted from bytes to pixels
 * via >> (sizeof(pixel)-1). */
771 #define H264_CHROMA_MC(OPNAME, OP)\
772 static void FUNCC(OPNAME ## h264_chroma_mc2)(uint8_t *p_dst/*align 8*/, uint8_t *p_src/*align 1*/, int stride, int h, int x, int y){\
773 pixel *dst = (pixel*)p_dst;\
774 pixel *src = (pixel*)p_src;\
775 const int A=(8-x)*(8-y);\
776 const int B=( x)*(8-y);\
777 const int C=(8-x)*( y);\
778 const int D=( x)*( y);\
780 stride >>= sizeof(pixel)-1;\
782 assert(x<8 && y<8 && x>=0 && y>=0);\
786 OP(dst[0], (A*src[0] + B*src[1] + C*src[stride+0] + D*src[stride+1]));\
787 OP(dst[1], (A*src[1] + B*src[2] + C*src[stride+1] + D*src[stride+2]));\
793 const int step= C ? stride : 1;\
795 OP(dst[0], (A*src[0] + E*src[step+0]));\
796 OP(dst[1], (A*src[1] + E*src[step+1]));\
803 static void FUNCC(OPNAME ## h264_chroma_mc4)(uint8_t *p_dst/*align 8*/, uint8_t *p_src/*align 1*/, int stride, int h, int x, int y){\
804 pixel *dst = (pixel*)p_dst;\
805 pixel *src = (pixel*)p_src;\
806 const int A=(8-x)*(8-y);\
807 const int B=( x)*(8-y);\
808 const int C=(8-x)*( y);\
809 const int D=( x)*( y);\
811 stride >>= sizeof(pixel)-1;\
813 assert(x<8 && y<8 && x>=0 && y>=0);\
817 OP(dst[0], (A*src[0] + B*src[1] + C*src[stride+0] + D*src[stride+1]));\
818 OP(dst[1], (A*src[1] + B*src[2] + C*src[stride+1] + D*src[stride+2]));\
819 OP(dst[2], (A*src[2] + B*src[3] + C*src[stride+2] + D*src[stride+3]));\
820 OP(dst[3], (A*src[3] + B*src[4] + C*src[stride+3] + D*src[stride+4]));\
826 const int step= C ? stride : 1;\
828 OP(dst[0], (A*src[0] + E*src[step+0]));\
829 OP(dst[1], (A*src[1] + E*src[step+1]));\
830 OP(dst[2], (A*src[2] + E*src[step+2]));\
831 OP(dst[3], (A*src[3] + E*src[step+3]));\
838 static void FUNCC(OPNAME ## h264_chroma_mc8)(uint8_t *p_dst/*align 8*/, uint8_t *p_src/*align 1*/, int stride, int h, int x, int y){\
839 pixel *dst = (pixel*)p_dst;\
840 pixel *src = (pixel*)p_src;\
841 const int A=(8-x)*(8-y);\
842 const int B=( x)*(8-y);\
843 const int C=(8-x)*( y);\
844 const int D=( x)*( y);\
846 stride >>= sizeof(pixel)-1;\
848 assert(x<8 && y<8 && x>=0 && y>=0);\
852 OP(dst[0], (A*src[0] + B*src[1] + C*src[stride+0] + D*src[stride+1]));\
853 OP(dst[1], (A*src[1] + B*src[2] + C*src[stride+1] + D*src[stride+2]));\
854 OP(dst[2], (A*src[2] + B*src[3] + C*src[stride+2] + D*src[stride+3]));\
855 OP(dst[3], (A*src[3] + B*src[4] + C*src[stride+3] + D*src[stride+4]));\
856 OP(dst[4], (A*src[4] + B*src[5] + C*src[stride+4] + D*src[stride+5]));\
857 OP(dst[5], (A*src[5] + B*src[6] + C*src[stride+5] + D*src[stride+6]));\
858 OP(dst[6], (A*src[6] + B*src[7] + C*src[stride+6] + D*src[stride+7]));\
859 OP(dst[7], (A*src[7] + B*src[8] + C*src[stride+7] + D*src[stride+8]));\
865 const int step= C ? stride : 1;\
867 OP(dst[0], (A*src[0] + E*src[step+0]));\
868 OP(dst[1], (A*src[1] + E*src[step+1]));\
869 OP(dst[2], (A*src[2] + E*src[step+2]));\
870 OP(dst[3], (A*src[3] + E*src[step+3]));\
871 OP(dst[4], (A*src[4] + E*src[step+4]));\
872 OP(dst[5], (A*src[5] + E*src[step+5]));\
873 OP(dst[6], (A*src[6] + E*src[step+6]));\
874 OP(dst[7], (A*src[7] + E*src[step+7]));\
/* op_put: normalize the 6-bit-weighted sum with +32 rounding; op_avg:
 * additionally average with the existing destination sample (rounded up) */
881 #define op_avg(a, b) a = (((a)+(((b) + 32)>>6)+1)>>1)
882 #define op_put(a, b) a = (((b) + 32)>>6)
884 H264_CHROMA_MC(put_ , op_put)
885 H264_CHROMA_MC(avg_ , op_avg)
889 #define H264_LOWPASS(OPNAME, OP, OP2) \
890 static av_unused void FUNC(OPNAME ## h264_qpel2_h_lowpass)(uint8_t *p_dst, uint8_t *p_src, int dstStride, int srcStride){\
894 pixel *dst = (pixel*)p_dst;\
895 pixel *src = (pixel*)p_src;\
896 dstStride >>= sizeof(pixel)-1;\
897 srcStride >>= sizeof(pixel)-1;\
900 OP(dst[0], (src[0]+src[1])*20 - (src[-1]+src[2])*5 + (src[-2]+src[3]));\
901 OP(dst[1], (src[1]+src[2])*20 - (src[0 ]+src[3])*5 + (src[-1]+src[4]));\
907 static av_unused void FUNC(OPNAME ## h264_qpel2_v_lowpass)(uint8_t *p_dst, uint8_t *p_src, int dstStride, int srcStride){\
911 pixel *dst = (pixel*)p_dst;\
912 pixel *src = (pixel*)p_src;\
913 dstStride >>= sizeof(pixel)-1;\
914 srcStride >>= sizeof(pixel)-1;\
917 const int srcB= src[-2*srcStride];\
918 const int srcA= src[-1*srcStride];\
919 const int src0= src[0 *srcStride];\
920 const int src1= src[1 *srcStride];\
921 const int src2= src[2 *srcStride];\
922 const int src3= src[3 *srcStride];\
923 const int src4= src[4 *srcStride];\
924 OP(dst[0*dstStride], (src0+src1)*20 - (srcA+src2)*5 + (srcB+src3));\
925 OP(dst[1*dstStride], (src1+src2)*20 - (src0+src3)*5 + (srcA+src4));\
931 static av_unused void FUNC(OPNAME ## h264_qpel2_hv_lowpass)(uint8_t *p_dst, int16_t *tmp, uint8_t *p_src, int dstStride, int tmpStride, int srcStride){\
934 const int pad = (BIT_DEPTH > 9) ? (-10 * ((1<<BIT_DEPTH)-1)) : 0;\
937 pixel *dst = (pixel*)p_dst;\
938 pixel *src = (pixel*)p_src;\
939 dstStride >>= sizeof(pixel)-1;\
940 srcStride >>= sizeof(pixel)-1;\
942 for(i=0; i<h+5; i++)\
944 tmp[0]= (src[0]+src[1])*20 - (src[-1]+src[2])*5 + (src[-2]+src[3]) + pad;\
945 tmp[1]= (src[1]+src[2])*20 - (src[0 ]+src[3])*5 + (src[-1]+src[4]) + pad;\
949 tmp -= tmpStride*(h+5-2);\
952 const int tmpB= tmp[-2*tmpStride] - pad;\
953 const int tmpA= tmp[-1*tmpStride] - pad;\
954 const int tmp0= tmp[0 *tmpStride] - pad;\
955 const int tmp1= tmp[1 *tmpStride] - pad;\
956 const int tmp2= tmp[2 *tmpStride] - pad;\
957 const int tmp3= tmp[3 *tmpStride] - pad;\
958 const int tmp4= tmp[4 *tmpStride] - pad;\
959 OP2(dst[0*dstStride], (tmp0+tmp1)*20 - (tmpA+tmp2)*5 + (tmpB+tmp3));\
960 OP2(dst[1*dstStride], (tmp1+tmp2)*20 - (tmp0+tmp3)*5 + (tmpA+tmp4));\
965 static void FUNC(OPNAME ## h264_qpel4_h_lowpass)(uint8_t *p_dst, uint8_t *p_src, int dstStride, int srcStride){\
969 pixel *dst = (pixel*)p_dst;\
970 pixel *src = (pixel*)p_src;\
971 dstStride >>= sizeof(pixel)-1;\
972 srcStride >>= sizeof(pixel)-1;\
975 OP(dst[0], (src[0]+src[1])*20 - (src[-1]+src[2])*5 + (src[-2]+src[3]));\
976 OP(dst[1], (src[1]+src[2])*20 - (src[0 ]+src[3])*5 + (src[-1]+src[4]));\
977 OP(dst[2], (src[2]+src[3])*20 - (src[1 ]+src[4])*5 + (src[0 ]+src[5]));\
978 OP(dst[3], (src[3]+src[4])*20 - (src[2 ]+src[5])*5 + (src[1 ]+src[6]));\
984 static void FUNC(OPNAME ## h264_qpel4_v_lowpass)(uint8_t *p_dst, uint8_t *p_src, int dstStride, int srcStride){\
988 pixel *dst = (pixel*)p_dst;\
989 pixel *src = (pixel*)p_src;\
990 dstStride >>= sizeof(pixel)-1;\
991 srcStride >>= sizeof(pixel)-1;\
994 const int srcB= src[-2*srcStride];\
995 const int srcA= src[-1*srcStride];\
996 const int src0= src[0 *srcStride];\
997 const int src1= src[1 *srcStride];\
998 const int src2= src[2 *srcStride];\
999 const int src3= src[3 *srcStride];\
1000 const int src4= src[4 *srcStride];\
1001 const int src5= src[5 *srcStride];\
1002 const int src6= src[6 *srcStride];\
1003 OP(dst[0*dstStride], (src0+src1)*20 - (srcA+src2)*5 + (srcB+src3));\
1004 OP(dst[1*dstStride], (src1+src2)*20 - (src0+src3)*5 + (srcA+src4));\
1005 OP(dst[2*dstStride], (src2+src3)*20 - (src1+src4)*5 + (src0+src5));\
1006 OP(dst[3*dstStride], (src3+src4)*20 - (src2+src5)*5 + (src1+src6));\
1012 static void FUNC(OPNAME ## h264_qpel4_hv_lowpass)(uint8_t *p_dst, int16_t *tmp, uint8_t *p_src, int dstStride, int tmpStride, int srcStride){\
1015 const int pad = (BIT_DEPTH > 9) ? (-10 * ((1<<BIT_DEPTH)-1)) : 0;\
1018 pixel *dst = (pixel*)p_dst;\
1019 pixel *src = (pixel*)p_src;\
1020 dstStride >>= sizeof(pixel)-1;\
1021 srcStride >>= sizeof(pixel)-1;\
1022 src -= 2*srcStride;\
1023 for(i=0; i<h+5; i++)\
1025 tmp[0]= (src[0]+src[1])*20 - (src[-1]+src[2])*5 + (src[-2]+src[3]) + pad;\
1026 tmp[1]= (src[1]+src[2])*20 - (src[0 ]+src[3])*5 + (src[-1]+src[4]) + pad;\
1027 tmp[2]= (src[2]+src[3])*20 - (src[1 ]+src[4])*5 + (src[0 ]+src[5]) + pad;\
1028 tmp[3]= (src[3]+src[4])*20 - (src[2 ]+src[5])*5 + (src[1 ]+src[6]) + pad;\
1032 tmp -= tmpStride*(h+5-2);\
1035 const int tmpB= tmp[-2*tmpStride] - pad;\
1036 const int tmpA= tmp[-1*tmpStride] - pad;\
1037 const int tmp0= tmp[0 *tmpStride] - pad;\
1038 const int tmp1= tmp[1 *tmpStride] - pad;\
1039 const int tmp2= tmp[2 *tmpStride] - pad;\
1040 const int tmp3= tmp[3 *tmpStride] - pad;\
1041 const int tmp4= tmp[4 *tmpStride] - pad;\
1042 const int tmp5= tmp[5 *tmpStride] - pad;\
1043 const int tmp6= tmp[6 *tmpStride] - pad;\
1044 OP2(dst[0*dstStride], (tmp0+tmp1)*20 - (tmpA+tmp2)*5 + (tmpB+tmp3));\
1045 OP2(dst[1*dstStride], (tmp1+tmp2)*20 - (tmp0+tmp3)*5 + (tmpA+tmp4));\
1046 OP2(dst[2*dstStride], (tmp2+tmp3)*20 - (tmp1+tmp4)*5 + (tmp0+tmp5));\
1047 OP2(dst[3*dstStride], (tmp3+tmp4)*20 - (tmp2+tmp5)*5 + (tmp1+tmp6));\
1053 static void FUNC(OPNAME ## h264_qpel8_h_lowpass)(uint8_t *p_dst, uint8_t *p_src, int dstStride, int srcStride){\
1057 pixel *dst = (pixel*)p_dst;\
1058 pixel *src = (pixel*)p_src;\
1059 dstStride >>= sizeof(pixel)-1;\
1060 srcStride >>= sizeof(pixel)-1;\
1063 OP(dst[0], (src[0]+src[1])*20 - (src[-1]+src[2])*5 + (src[-2]+src[3 ]));\
1064 OP(dst[1], (src[1]+src[2])*20 - (src[0 ]+src[3])*5 + (src[-1]+src[4 ]));\
1065 OP(dst[2], (src[2]+src[3])*20 - (src[1 ]+src[4])*5 + (src[0 ]+src[5 ]));\
1066 OP(dst[3], (src[3]+src[4])*20 - (src[2 ]+src[5])*5 + (src[1 ]+src[6 ]));\
1067 OP(dst[4], (src[4]+src[5])*20 - (src[3 ]+src[6])*5 + (src[2 ]+src[7 ]));\
1068 OP(dst[5], (src[5]+src[6])*20 - (src[4 ]+src[7])*5 + (src[3 ]+src[8 ]));\
1069 OP(dst[6], (src[6]+src[7])*20 - (src[5 ]+src[8])*5 + (src[4 ]+src[9 ]));\
1070 OP(dst[7], (src[7]+src[8])*20 - (src[6 ]+src[9])*5 + (src[5 ]+src[10]));\
/* 8-tall vertical half-pel lowpass: same (1,-5,20,20,-5,1) kernel as the\
 * horizontal variant, applied along y.  Reads rows -2..+10 of the current\
 * column, produces 8 outputs per column via OP. */\
static void FUNC(OPNAME ## h264_qpel8_v_lowpass)(uint8_t *p_dst, uint8_t *p_src, int dstStride, int srcStride){\
    pixel *dst = (pixel*)p_dst;\
    pixel *src = (pixel*)p_src;\
    /* byte strides -> pixel strides */\
    dstStride >>= sizeof(pixel)-1;\
    srcStride >>= sizeof(pixel)-1;\
        /* load the whole column once: 2 rows above (srcB/srcA) through row 10 */\
        const int srcB= src[-2*srcStride];\
        const int srcA= src[-1*srcStride];\
        const int src0= src[0 *srcStride];\
        const int src1= src[1 *srcStride];\
        const int src2= src[2 *srcStride];\
        const int src3= src[3 *srcStride];\
        const int src4= src[4 *srcStride];\
        const int src5= src[5 *srcStride];\
        const int src6= src[6 *srcStride];\
        const int src7= src[7 *srcStride];\
        const int src8= src[8 *srcStride];\
        const int src9= src[9 *srcStride];\
        const int src10=src[10*srcStride];\
        OP(dst[0*dstStride], (src0+src1)*20 - (srcA+src2)*5 + (srcB+src3));\
        OP(dst[1*dstStride], (src1+src2)*20 - (src0+src3)*5 + (srcA+src4));\
        OP(dst[2*dstStride], (src2+src3)*20 - (src1+src4)*5 + (src0+src5));\
        OP(dst[3*dstStride], (src3+src4)*20 - (src2+src5)*5 + (src1+src6));\
        OP(dst[4*dstStride], (src4+src5)*20 - (src3+src6)*5 + (src2+src7));\
        OP(dst[5*dstStride], (src5+src6)*20 - (src4+src7)*5 + (src3+src8));\
        OP(dst[6*dstStride], (src6+src7)*20 - (src5+src8)*5 + (src4+src9));\
        OP(dst[7*dstStride], (src7+src8)*20 - (src6+src9)*5 + (src5+src10));\
/* 8x8 2-D half-pel lowpass (half-pel in both x and y): pass 1 runs the\
 * horizontal 6-tap filter over h+5 rows into the caller-provided int16_t\
 * tmp buffer; pass 2 runs the vertical 6-tap filter over tmp, finishing\
 * with OP2 (20-bit rounding, ((b)+512)>>10). */\
static void FUNC(OPNAME ## h264_qpel8_hv_lowpass)(uint8_t *p_dst, int16_t *tmp, uint8_t *p_src, int dstStride, int tmpStride, int srcStride){\
    /* Bias applied to pass-1 results at bit depths above 9 so the\
       intermediate range is re-centered for storage in int16_t; it is\
       subtracted again when tmp is read back in pass 2. */\
    const int pad = (BIT_DEPTH > 9) ? (-10 * ((1<<BIT_DEPTH)-1)) : 0;\
    pixel *dst = (pixel*)p_dst;\
    pixel *src = (pixel*)p_src;\
    /* byte strides -> pixel strides */\
    dstStride >>= sizeof(pixel)-1;\
    srcStride >>= sizeof(pixel)-1;\
    /* start two rows above the block so pass 2's vertical window is covered */\
    src -= 2*srcStride;\
    for(i=0; i<h+5; i++)\
        tmp[0]= (src[0]+src[1])*20 - (src[-1]+src[2])*5 + (src[-2]+src[3 ]) + pad;\
        tmp[1]= (src[1]+src[2])*20 - (src[0 ]+src[3])*5 + (src[-1]+src[4 ]) + pad;\
        tmp[2]= (src[2]+src[3])*20 - (src[1 ]+src[4])*5 + (src[0 ]+src[5 ]) + pad;\
        tmp[3]= (src[3]+src[4])*20 - (src[2 ]+src[5])*5 + (src[1 ]+src[6 ]) + pad;\
        tmp[4]= (src[4]+src[5])*20 - (src[3 ]+src[6])*5 + (src[2 ]+src[7 ]) + pad;\
        tmp[5]= (src[5]+src[6])*20 - (src[4 ]+src[7])*5 + (src[3 ]+src[8 ]) + pad;\
        tmp[6]= (src[6]+src[7])*20 - (src[5 ]+src[8])*5 + (src[4 ]+src[9 ]) + pad;\
        tmp[7]= (src[7]+src[8])*20 - (src[6 ]+src[9])*5 + (src[5 ]+src[10]) + pad;\
    /* rewind tmp to two rows above the first output row for pass 2 */\
    tmp -= tmpStride*(h+5-2);\
        /* vertical pass over the biased intermediates; remove pad on load */\
        const int tmpB= tmp[-2*tmpStride] - pad;\
        const int tmpA= tmp[-1*tmpStride] - pad;\
        const int tmp0= tmp[0 *tmpStride] - pad;\
        const int tmp1= tmp[1 *tmpStride] - pad;\
        const int tmp2= tmp[2 *tmpStride] - pad;\
        const int tmp3= tmp[3 *tmpStride] - pad;\
        const int tmp4= tmp[4 *tmpStride] - pad;\
        const int tmp5= tmp[5 *tmpStride] - pad;\
        const int tmp6= tmp[6 *tmpStride] - pad;\
        const int tmp7= tmp[7 *tmpStride] - pad;\
        const int tmp8= tmp[8 *tmpStride] - pad;\
        const int tmp9= tmp[9 *tmpStride] - pad;\
        const int tmp10=tmp[10*tmpStride] - pad;\
        OP2(dst[0*dstStride], (tmp0+tmp1)*20 - (tmpA+tmp2)*5 + (tmpB+tmp3));\
        OP2(dst[1*dstStride], (tmp1+tmp2)*20 - (tmp0+tmp3)*5 + (tmpA+tmp4));\
        OP2(dst[2*dstStride], (tmp2+tmp3)*20 - (tmp1+tmp4)*5 + (tmp0+tmp5));\
        OP2(dst[3*dstStride], (tmp3+tmp4)*20 - (tmp2+tmp5)*5 + (tmp1+tmp6));\
        OP2(dst[4*dstStride], (tmp4+tmp5)*20 - (tmp3+tmp6)*5 + (tmp2+tmp7));\
        OP2(dst[5*dstStride], (tmp5+tmp6)*20 - (tmp4+tmp7)*5 + (tmp3+tmp8));\
        OP2(dst[6*dstStride], (tmp6+tmp7)*20 - (tmp5+tmp8)*5 + (tmp4+tmp9));\
        OP2(dst[7*dstStride], (tmp7+tmp8)*20 - (tmp6+tmp9)*5 + (tmp5+tmp10));\
/* 16x16 vertical half-pel lowpass, composed of four 8x8 calls:\
 * top-left, top-right, then advance 8 rows for bottom-left/right.\
 * Pixel offsets are scaled by sizeof(pixel); row offsets use the byte\
 * strides the 8x8 routine itself converts. */\
static void FUNC(OPNAME ## h264_qpel16_v_lowpass)(uint8_t *dst, uint8_t *src, int dstStride, int srcStride){\
    FUNC(OPNAME ## h264_qpel8_v_lowpass)(dst                , src                , dstStride, srcStride);\
    FUNC(OPNAME ## h264_qpel8_v_lowpass)(dst+8*sizeof(pixel), src+8*sizeof(pixel), dstStride, srcStride);\
    src += 8*srcStride;\
    dst += 8*dstStride;\
    FUNC(OPNAME ## h264_qpel8_v_lowpass)(dst                , src                , dstStride, srcStride);\
    FUNC(OPNAME ## h264_qpel8_v_lowpass)(dst+8*sizeof(pixel), src+8*sizeof(pixel), dstStride, srcStride);\
/* 16x16 horizontal half-pel lowpass: four 8x8 quadrant calls, same\
 * layout as the vertical 16x16 variant above. */\
static void FUNC(OPNAME ## h264_qpel16_h_lowpass)(uint8_t *dst, uint8_t *src, int dstStride, int srcStride){\
    FUNC(OPNAME ## h264_qpel8_h_lowpass)(dst                , src                , dstStride, srcStride);\
    FUNC(OPNAME ## h264_qpel8_h_lowpass)(dst+8*sizeof(pixel), src+8*sizeof(pixel), dstStride, srcStride);\
    src += 8*srcStride;\
    dst += 8*dstStride;\
    FUNC(OPNAME ## h264_qpel8_h_lowpass)(dst                , src                , dstStride, srcStride);\
    FUNC(OPNAME ## h264_qpel8_h_lowpass)(dst+8*sizeof(pixel), src+8*sizeof(pixel), dstStride, srcStride);\
/* 16x16 2-D half-pel lowpass: four 8x8 quadrant calls sharing one tmp\
 * buffer; the right-hand quadrants use tmp+8 columns within the same\
 * tmpStride rows. */\
static void FUNC(OPNAME ## h264_qpel16_hv_lowpass)(uint8_t *dst, int16_t *tmp, uint8_t *src, int dstStride, int tmpStride, int srcStride){\
    FUNC(OPNAME ## h264_qpel8_hv_lowpass)(dst                , tmp  , src                , dstStride, tmpStride, srcStride);\
    FUNC(OPNAME ## h264_qpel8_hv_lowpass)(dst+8*sizeof(pixel), tmp+8, src+8*sizeof(pixel), dstStride, tmpStride, srcStride);\
    src += 8*srcStride;\
    dst += 8*dstStride;\
    FUNC(OPNAME ## h264_qpel8_hv_lowpass)(dst                , tmp  , src                , dstStride, tmpStride, srcStride);\
    FUNC(OPNAME ## h264_qpel8_hv_lowpass)(dst+8*sizeof(pixel), tmp+8, src+8*sizeof(pixel), dstStride, tmpStride, srcStride);\
/* H264_MC(OPNAME, SIZE): emits the 16 quarter-pel motion-compensation\
 * entry points mcXY for one block size, where X is the horizontal\
 * quarter-pel phase (0..3) and Y the vertical one.  Quarter-pel\
 * positions are built by averaging (pixels_l2) two of: the source, the\
 * horizontally filtered half-pel plane, the vertically filtered one,\
 * and the 2-D (hv) half-pel plane. */\
#define H264_MC(OPNAME, SIZE) \
/* mc00: integer-pel position — plain copy (put_) or average (avg_) */\
static av_unused void FUNCC(OPNAME ## h264_qpel ## SIZE ## _mc00)(uint8_t *dst, uint8_t *src, int stride){\
    FUNCC(OPNAME ## pixels ## SIZE)(dst, src, stride, SIZE);\
\
/* mc10: x=1/4 — average of src and the horizontal half-pel plane */\
static void FUNCC(OPNAME ## h264_qpel ## SIZE ## _mc10)(uint8_t *dst, uint8_t *src, int stride){\
    uint8_t half[SIZE*SIZE*sizeof(pixel)];\
    FUNC(put_h264_qpel ## SIZE ## _h_lowpass)(half, src, SIZE*sizeof(pixel), stride);\
    FUNC(OPNAME ## pixels ## SIZE ## _l2)(dst, src, half, stride, stride, SIZE*sizeof(pixel), SIZE);\
\
/* mc20: x=1/2 — horizontal half-pel plane written directly */\
static void FUNCC(OPNAME ## h264_qpel ## SIZE ## _mc20)(uint8_t *dst, uint8_t *src, int stride){\
    FUNC(OPNAME ## h264_qpel ## SIZE ## _h_lowpass)(dst, src, stride, stride);\
\
/* mc30: x=3/4 — average of the half-pel plane and src shifted one pixel right */\
static void FUNCC(OPNAME ## h264_qpel ## SIZE ## _mc30)(uint8_t *dst, uint8_t *src, int stride){\
    uint8_t half[SIZE*SIZE*sizeof(pixel)];\
    FUNC(put_h264_qpel ## SIZE ## _h_lowpass)(half, src, SIZE*sizeof(pixel), stride);\
    FUNC(OPNAME ## pixels ## SIZE ## _l2)(dst, src+sizeof(pixel), half, stride, stride, SIZE*sizeof(pixel), SIZE);\
/* mc01: y=1/4 — average of src and the vertical half-pel plane.\
 * copy_block grabs SIZE+5 rows starting two rows above the block so the\
 * vertical filter's taps are in a contiguous buffer; full_mid points at\
 * the row aligned with the block origin (2 rows into full). */\
static void FUNCC(OPNAME ## h264_qpel ## SIZE ## _mc01)(uint8_t *dst, uint8_t *src, int stride){\
    uint8_t full[SIZE*(SIZE+5)*sizeof(pixel)];\
    uint8_t * const full_mid= full + SIZE*2*sizeof(pixel);\
    uint8_t half[SIZE*SIZE*sizeof(pixel)];\
    FUNC(copy_block ## SIZE )(full, src - stride*2, SIZE*sizeof(pixel), stride, SIZE + 5);\
    FUNC(put_h264_qpel ## SIZE ## _v_lowpass)(half, full_mid, SIZE*sizeof(pixel), SIZE*sizeof(pixel));\
    FUNC(OPNAME ## pixels ## SIZE ## _l2)(dst, full_mid, half, stride, SIZE*sizeof(pixel), SIZE*sizeof(pixel), SIZE);\
\
/* mc02: y=1/2 — vertical half-pel plane written directly */\
static void FUNCC(OPNAME ## h264_qpel ## SIZE ## _mc02)(uint8_t *dst, uint8_t *src, int stride){\
    uint8_t full[SIZE*(SIZE+5)*sizeof(pixel)];\
    uint8_t * const full_mid= full + SIZE*2*sizeof(pixel);\
    FUNC(copy_block ## SIZE )(full, src - stride*2, SIZE*sizeof(pixel), stride, SIZE + 5);\
    FUNC(OPNAME ## h264_qpel ## SIZE ## _v_lowpass)(dst, full_mid, stride, SIZE*sizeof(pixel));\
\
/* mc03: y=3/4 — average of the vertical half-pel plane and the source\
   row below (full_mid advanced by one row) */\
static void FUNCC(OPNAME ## h264_qpel ## SIZE ## _mc03)(uint8_t *dst, uint8_t *src, int stride){\
    uint8_t full[SIZE*(SIZE+5)*sizeof(pixel)];\
    uint8_t * const full_mid= full + SIZE*2*sizeof(pixel);\
    uint8_t half[SIZE*SIZE*sizeof(pixel)];\
    FUNC(copy_block ## SIZE )(full, src - stride*2, SIZE*sizeof(pixel), stride, SIZE + 5);\
    FUNC(put_h264_qpel ## SIZE ## _v_lowpass)(half, full_mid, SIZE*sizeof(pixel), SIZE*sizeof(pixel));\
    FUNC(OPNAME ## pixels ## SIZE ## _l2)(dst, full_mid+SIZE*sizeof(pixel), half, stride, SIZE*sizeof(pixel), SIZE*sizeof(pixel), SIZE);\
/* mc11/mc31/mc13/mc33: the four "diagonal" quarter-pel positions —\
 * average of a horizontal half-pel plane (taken from row 0 or row 1)\
 * and a vertical half-pel plane (taken from column 0 or column 1). */\
/* mc11: halfH from row 0, halfV from column 0 */\
static void FUNCC(OPNAME ## h264_qpel ## SIZE ## _mc11)(uint8_t *dst, uint8_t *src, int stride){\
    uint8_t full[SIZE*(SIZE+5)*sizeof(pixel)];\
    uint8_t * const full_mid= full + SIZE*2*sizeof(pixel);\
    uint8_t halfH[SIZE*SIZE*sizeof(pixel)];\
    uint8_t halfV[SIZE*SIZE*sizeof(pixel)];\
    FUNC(put_h264_qpel ## SIZE ## _h_lowpass)(halfH, src, SIZE*sizeof(pixel), stride);\
    FUNC(copy_block ## SIZE )(full, src - stride*2, SIZE*sizeof(pixel), stride, SIZE + 5);\
    FUNC(put_h264_qpel ## SIZE ## _v_lowpass)(halfV, full_mid, SIZE*sizeof(pixel), SIZE*sizeof(pixel));\
    FUNC(OPNAME ## pixels ## SIZE ## _l2)(dst, halfH, halfV, stride, SIZE*sizeof(pixel), SIZE*sizeof(pixel), SIZE);\
\
/* mc31: halfH from row 0, halfV from column +1 */\
static void FUNCC(OPNAME ## h264_qpel ## SIZE ## _mc31)(uint8_t *dst, uint8_t *src, int stride){\
    uint8_t full[SIZE*(SIZE+5)*sizeof(pixel)];\
    uint8_t * const full_mid= full + SIZE*2*sizeof(pixel);\
    uint8_t halfH[SIZE*SIZE*sizeof(pixel)];\
    uint8_t halfV[SIZE*SIZE*sizeof(pixel)];\
    FUNC(put_h264_qpel ## SIZE ## _h_lowpass)(halfH, src, SIZE*sizeof(pixel), stride);\
    FUNC(copy_block ## SIZE )(full, src - stride*2 + sizeof(pixel), SIZE*sizeof(pixel), stride, SIZE + 5);\
    FUNC(put_h264_qpel ## SIZE ## _v_lowpass)(halfV, full_mid, SIZE*sizeof(pixel), SIZE*sizeof(pixel));\
    FUNC(OPNAME ## pixels ## SIZE ## _l2)(dst, halfH, halfV, stride, SIZE*sizeof(pixel), SIZE*sizeof(pixel), SIZE);\
\
/* mc13: halfH from row +1, halfV from column 0 */\
static void FUNCC(OPNAME ## h264_qpel ## SIZE ## _mc13)(uint8_t *dst, uint8_t *src, int stride){\
    uint8_t full[SIZE*(SIZE+5)*sizeof(pixel)];\
    uint8_t * const full_mid= full + SIZE*2*sizeof(pixel);\
    uint8_t halfH[SIZE*SIZE*sizeof(pixel)];\
    uint8_t halfV[SIZE*SIZE*sizeof(pixel)];\
    FUNC(put_h264_qpel ## SIZE ## _h_lowpass)(halfH, src + stride, SIZE*sizeof(pixel), stride);\
    FUNC(copy_block ## SIZE )(full, src - stride*2, SIZE*sizeof(pixel), stride, SIZE + 5);\
    FUNC(put_h264_qpel ## SIZE ## _v_lowpass)(halfV, full_mid, SIZE*sizeof(pixel), SIZE*sizeof(pixel));\
    FUNC(OPNAME ## pixels ## SIZE ## _l2)(dst, halfH, halfV, stride, SIZE*sizeof(pixel), SIZE*sizeof(pixel), SIZE);\
\
/* mc33: halfH from row +1, halfV from column +1 */\
static void FUNCC(OPNAME ## h264_qpel ## SIZE ## _mc33)(uint8_t *dst, uint8_t *src, int stride){\
    uint8_t full[SIZE*(SIZE+5)*sizeof(pixel)];\
    uint8_t * const full_mid= full + SIZE*2*sizeof(pixel);\
    uint8_t halfH[SIZE*SIZE*sizeof(pixel)];\
    uint8_t halfV[SIZE*SIZE*sizeof(pixel)];\
    FUNC(put_h264_qpel ## SIZE ## _h_lowpass)(halfH, src + stride, SIZE*sizeof(pixel), stride);\
    FUNC(copy_block ## SIZE )(full, src - stride*2 + sizeof(pixel), SIZE*sizeof(pixel), stride, SIZE + 5);\
    FUNC(put_h264_qpel ## SIZE ## _v_lowpass)(halfV, full_mid, SIZE*sizeof(pixel), SIZE*sizeof(pixel));\
    FUNC(OPNAME ## pixels ## SIZE ## _l2)(dst, halfH, halfV, stride, SIZE*sizeof(pixel), SIZE*sizeof(pixel), SIZE);\
/* mc22: x=1/2, y=1/2 — the 2-D half-pel plane written directly.\
 * NOTE(review): tmp is int16_t but its element count is scaled by\
 * sizeof(pixel); presumably this over-allocates on purpose to give\
 * headroom for high bit depths — confirm against hv_lowpass usage. */\
static void FUNCC(OPNAME ## h264_qpel ## SIZE ## _mc22)(uint8_t *dst, uint8_t *src, int stride){\
    int16_t tmp[SIZE*(SIZE+5)*sizeof(pixel)];\
    FUNC(OPNAME ## h264_qpel ## SIZE ## _hv_lowpass)(dst, tmp, src, stride, SIZE*sizeof(pixel), stride);\
\
/* mc21: average of the horizontal half-pel plane (row 0) and the 2-D plane */\
static void FUNCC(OPNAME ## h264_qpel ## SIZE ## _mc21)(uint8_t *dst, uint8_t *src, int stride){\
    int16_t tmp[SIZE*(SIZE+5)*sizeof(pixel)];\
    uint8_t halfH[SIZE*SIZE*sizeof(pixel)];\
    uint8_t halfHV[SIZE*SIZE*sizeof(pixel)];\
    FUNC(put_h264_qpel ## SIZE ## _h_lowpass)(halfH, src, SIZE*sizeof(pixel), stride);\
    FUNC(put_h264_qpel ## SIZE ## _hv_lowpass)(halfHV, tmp, src, SIZE*sizeof(pixel), SIZE*sizeof(pixel), stride);\
    FUNC(OPNAME ## pixels ## SIZE ## _l2)(dst, halfH, halfHV, stride, SIZE*sizeof(pixel), SIZE*sizeof(pixel), SIZE);\
\
/* mc23: as mc21 but the horizontal plane is taken from row +1 */\
static void FUNCC(OPNAME ## h264_qpel ## SIZE ## _mc23)(uint8_t *dst, uint8_t *src, int stride){\
    int16_t tmp[SIZE*(SIZE+5)*sizeof(pixel)];\
    uint8_t halfH[SIZE*SIZE*sizeof(pixel)];\
    uint8_t halfHV[SIZE*SIZE*sizeof(pixel)];\
    FUNC(put_h264_qpel ## SIZE ## _h_lowpass)(halfH, src + stride, SIZE*sizeof(pixel), stride);\
    FUNC(put_h264_qpel ## SIZE ## _hv_lowpass)(halfHV, tmp, src, SIZE*sizeof(pixel), SIZE*sizeof(pixel), stride);\
    FUNC(OPNAME ## pixels ## SIZE ## _l2)(dst, halfH, halfHV, stride, SIZE*sizeof(pixel), SIZE*sizeof(pixel), SIZE);\
\
/* mc12: average of the vertical half-pel plane (column 0) and the 2-D plane */\
static void FUNCC(OPNAME ## h264_qpel ## SIZE ## _mc12)(uint8_t *dst, uint8_t *src, int stride){\
    uint8_t full[SIZE*(SIZE+5)*sizeof(pixel)];\
    uint8_t * const full_mid= full + SIZE*2*sizeof(pixel);\
    int16_t tmp[SIZE*(SIZE+5)*sizeof(pixel)];\
    uint8_t halfV[SIZE*SIZE*sizeof(pixel)];\
    uint8_t halfHV[SIZE*SIZE*sizeof(pixel)];\
    FUNC(copy_block ## SIZE )(full, src - stride*2, SIZE*sizeof(pixel), stride, SIZE + 5);\
    FUNC(put_h264_qpel ## SIZE ## _v_lowpass)(halfV, full_mid, SIZE*sizeof(pixel), SIZE*sizeof(pixel));\
    FUNC(put_h264_qpel ## SIZE ## _hv_lowpass)(halfHV, tmp, src, SIZE*sizeof(pixel), SIZE*sizeof(pixel), stride);\
    FUNC(OPNAME ## pixels ## SIZE ## _l2)(dst, halfV, halfHV, stride, SIZE*sizeof(pixel), SIZE*sizeof(pixel), SIZE);\
\
/* mc32: as mc12 but the vertical plane is taken from column +1 */\
static void FUNCC(OPNAME ## h264_qpel ## SIZE ## _mc32)(uint8_t *dst, uint8_t *src, int stride){\
    uint8_t full[SIZE*(SIZE+5)*sizeof(pixel)];\
    uint8_t * const full_mid= full + SIZE*2*sizeof(pixel);\
    int16_t tmp[SIZE*(SIZE+5)*sizeof(pixel)];\
    uint8_t halfV[SIZE*SIZE*sizeof(pixel)];\
    uint8_t halfHV[SIZE*SIZE*sizeof(pixel)];\
    FUNC(copy_block ## SIZE )(full, src - stride*2 + sizeof(pixel), SIZE*sizeof(pixel), stride, SIZE + 5);\
    FUNC(put_h264_qpel ## SIZE ## _v_lowpass)(halfV, full_mid, SIZE*sizeof(pixel), SIZE*sizeof(pixel));\
    FUNC(put_h264_qpel ## SIZE ## _hv_lowpass)(halfHV, tmp, src, SIZE*sizeof(pixel), SIZE*sizeof(pixel), stride);\
    FUNC(OPNAME ## pixels ## SIZE ## _l2)(dst, halfV, halfHV, stride, SIZE*sizeof(pixel), SIZE*sizeof(pixel), SIZE);\
// Rounding/store primitives plugged into the lowpass templates above:
//   op_put/op_avg   finish a single 6-tap pass:  CLIP(((b)+16)>>5)
//   op2_put/op2_avg finish the 2-D (hv) pass:    CLIP(((b)+512)>>10)
// The *_avg forms additionally average with the existing dst value
// (rounding up via the +1 before >>1).
#define op_avg(a, b) a = (((a)+CLIP(((b) + 16)>>5)+1)>>1)
//#define op_avg2(a, b) a = (((a)*w1+cm[((b) + 16)>>5]*w2 + o + 64)>>7)
#define op_put(a, b) a = CLIP(((b) + 16)>>5)
#define op2_avg(a, b) a = (((a)+CLIP(((b) + 512)>>10)+1)>>1)
#define op2_put(a, b) a = CLIP(((b) + 512)>>10)
// Instantiate the put_* and avg_* families of all lowpass filters.
H264_LOWPASS(put_ , op_put, op2_put)
H264_LOWPASS(avg_ , op_avg, op2_avg)
// Per-bit-depth aliases: the integer-pel (mc00) qpel entry points are
// just fixed-size copies/averages, so map them onto the shared
// ff_{put,avg}_pixels wrappers instead of emitting duplicate code.
# define put_h264_qpel8_mc00_8_c  ff_put_pixels8x8_8_c
# define avg_h264_qpel8_mc00_8_c  ff_avg_pixels8x8_8_c
# define put_h264_qpel16_mc00_8_c ff_put_pixels16x16_8_c
# define avg_h264_qpel16_mc00_8_c ff_avg_pixels16x16_8_c
#elif BIT_DEPTH == 9
# define put_h264_qpel8_mc00_9_c  ff_put_pixels8x8_9_c
# define avg_h264_qpel8_mc00_9_c  ff_avg_pixels8x8_9_c
# define put_h264_qpel16_mc00_9_c ff_put_pixels16x16_9_c
# define avg_h264_qpel16_mc00_9_c ff_avg_pixels16x16_9_c
#elif BIT_DEPTH == 10
# define put_h264_qpel8_mc00_10_c  ff_put_pixels8x8_10_c
# define avg_h264_qpel8_mc00_10_c  ff_avg_pixels8x8_10_c
# define put_h264_qpel16_mc00_10_c ff_put_pixels16x16_10_c
# define avg_h264_qpel16_mc00_10_c ff_avg_pixels16x16_10_c
/* Fixed-size entry points: forward WxH copy/average to the generic
 * variable-height pixels routines with h fixed to the block size. */
void FUNCC(ff_put_pixels8x8)(uint8_t *dst, uint8_t *src, int stride) {
    FUNCC(put_pixels8)(dst, src, stride, 8);
void FUNCC(ff_avg_pixels8x8)(uint8_t *dst, uint8_t *src, int stride) {
    FUNCC(avg_pixels8)(dst, src, stride, 8);
void FUNCC(ff_put_pixels16x16)(uint8_t *dst, uint8_t *src, int stride) {
    FUNCC(put_pixels16)(dst, src, stride, 16);
void FUNCC(ff_avg_pixels16x16)(uint8_t *dst, uint8_t *src, int stride) {
    FUNCC(avg_pixels16)(dst, src, stride, 16);
/* Zero one 64-entry block of coefficients; dctcoef is the
 * bit-depth-dependent coefficient type from bit_depth_template.c. */
static void FUNCC(clear_block)(DCTELEM *block)
    memset(block, 0, sizeof(dctcoef)*64);
1386 * memset(blocks, 0, sizeof(DCTELEM)*6*64)
/* Zero six consecutive 64-entry coefficient blocks in one call. */
static void FUNCC(clear_blocks)(DCTELEM *blocks)
    memset(blocks, 0, sizeof(dctcoef)*6*64);