3 * Copyright (c) 2000, 2001 Fabrice Bellard
4 * Copyright (c) 2002-2004 Michael Niedermayer <michaelni@gmx.at>
6 * gmc & q-pel & 32/64 bit based MC by Michael Niedermayer <michaelni@gmx.at>
8 * This file is part of FFmpeg.
10 * FFmpeg is free software; you can redistribute it and/or
11 * modify it under the terms of the GNU Lesser General Public
12 * License as published by the Free Software Foundation; either
13 * version 2.1 of the License, or (at your option) any later version.
15 * FFmpeg is distributed in the hope that it will be useful,
16 * but WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
18 * Lesser General Public License for more details.
20 * You should have received a copy of the GNU Lesser General Public
21 * License along with FFmpeg; if not, write to the Free Software
22 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
30 #include "bit_depth_template.c"
/*
 * copy_block2(): copy a block 2 pixels wide and h rows high from src to dst.
 * Strides are in bytes; AV_WN2P/AV_RN2P perform unaligned-safe reads/writes
 * of two 'pixel'-sized samples (bit-depth dependent, see bit_depth_template.c).
 * NOTE(review): the row loop and braces of this function are missing from
 * this paste (embedded line numbers jump 32 -> 37); only the per-row copy
 * statement survives — treat as read-only.
 */
32 static inline void FUNC(copy_block2)(uint8_t *dst, const uint8_t *src, int dstStride, int srcStride, int h)
37 AV_WN2P(dst , AV_RN2P(src ));
/*
 * copy_block4(): copy a block 4 pixels wide and h rows high from src to dst
 * using one unaligned-safe 4-pixel load/store per row.
 * NOTE(review): loop header/braces elided in this paste — treat as read-only.
 */
43 static inline void FUNC(copy_block4)(uint8_t *dst, const uint8_t *src, int dstStride, int srcStride, int h)
48 AV_WN4P(dst , AV_RN4P(src ));
/*
 * copy_block8(): copy a block 8 pixels wide and h rows high from src to dst,
 * two 4-pixel unaligned-safe transfers per row (offsets are in bytes, hence
 * the 4*sizeof(pixel) for the second half).
 * NOTE(review): loop header/braces elided in this paste — treat as read-only.
 */
54 static inline void FUNC(copy_block8)(uint8_t *dst, const uint8_t *src, int dstStride, int srcStride, int h)
59 AV_WN4P(dst , AV_RN4P(src ));
60 AV_WN4P(dst+4*sizeof(pixel), AV_RN4P(src+4*sizeof(pixel)));
/*
 * copy_block16(): copy a block 16 pixels wide and h rows high from src to
 * dst, four 4-pixel unaligned-safe transfers per row.
 * NOTE(review): loop header/braces elided in this paste — treat as read-only.
 */
66 static inline void FUNC(copy_block16)(uint8_t *dst, const uint8_t *src, int dstStride, int srcStride, int h)
71 AV_WN4P(dst , AV_RN4P(src ));
72 AV_WN4P(dst+ 4*sizeof(pixel), AV_RN4P(src+ 4*sizeof(pixel)));
73 AV_WN4P(dst+ 8*sizeof(pixel), AV_RN4P(src+ 8*sizeof(pixel)));
74 AV_WN4P(dst+12*sizeof(pixel), AV_RN4P(src+12*sizeof(pixel)));
80 /* draw the edges of width 'w' of an image of size width, height */
81 //FIXME check that this is ok for mpeg4 interlaced
/*
 * draw_edges(): replicate the picture's border samples outward by w samples
 * horizontally and h samples vertically (needed for motion vectors that
 * point outside the picture).  p_wrap is the line stride in bytes; 'sides'
 * selects which edges to extend (EDGE_TOP / EDGE_BOTTOM bits tested below).
 * NOTE(review): several body lines (braces, 'ptr' initialisation, the
 * BIT_DEPTH #if that chooses between the per-sample loop and the memset
 * path) are missing from this paste — do not edit logic here.
 */
82 static void FUNCC(draw_edges)(uint8_t *p_buf, int p_wrap, int width, int height, int w, int h, int sides)
84 pixel *buf = (pixel*)p_buf;
85 int wrap = p_wrap / sizeof(pixel); /* byte stride -> pixel stride */
86 pixel *ptr, *last_line;
/* left/right edges: replicate the first/last sample of each line */
91 for(i=0;i<height;i++) {
94 for (j = 0; j < w; j++) {
96 ptr[j+width] = ptr[width-1];
/* memset-based variant — only valid when sizeof(pixel)==1; presumably the
 * elided #if selects it for 8-bit builds (confirm against full source) */
99 memset(ptr - w, ptr[0], w);
100 memset(ptr + width, ptr[width-1], w);
105 /* top and bottom + corners */
107 last_line = buf + (height - 1) * wrap;
/* whole first/last line (already widened by w on each side) is copied
 * outward h times, which also fills the corners */
108 if (sides & EDGE_TOP)
109 for(i = 0; i < h; i++)
110 memcpy(buf - (i + 1) * wrap, buf, (width + w + w) * sizeof(pixel)); // top
111 if (sides & EDGE_BOTTOM)
112 for (i = 0; i < h; i++)
113 memcpy(last_line + (i + 1) * wrap, last_line, (width + w + w) * sizeof(pixel)); // bottom
/*
 * NOTE(review): the doxygen block below lost its opening/closing markers in
 * this paste, and the function body is missing its braces, y/x declarations
 * and several loop lines — treat the code as read-only.
 */
117 * Copy a rectangular area of samples to a temporary buffer and replicate the border samples.
118 * @param buf destination buffer
119 * @param src source buffer
120 * @param linesize number of bytes between 2 vertically adjacent samples in both the source and destination buffers
121 * @param block_w width of block
122 * @param block_h height of block
123 * @param src_x x coordinate of the top left sample of the block in the source buffer
124 * @param src_y y coordinate of the top left sample of the block in the source buffer
125 * @param w width of the source buffer
126 * @param h height of the source buffer
128 void FUNC(ff_emulated_edge_mc)(uint8_t *buf, const uint8_t *src, int linesize, int block_w, int block_h,
129 int src_x, int src_y, int w, int h){
131 int start_y, start_x, end_y, end_x;
/* if the requested block lies entirely outside the picture, clamp src so
 * that only edge samples are read (vertical, then horizontal) */
137 src-= src_y*linesize;
138 src+= (h-1)*linesize;
140 }else if(src_y<=-block_h){
141 src-= src_y*linesize;
142 src+= (1-block_h)*linesize;
146 src+= (w-1-src_x)*sizeof(pixel);
148 }else if(src_x<=-block_w){
149 src+= (1-block_w-src_x)*sizeof(pixel);
/* portion of the block that actually overlaps the picture */
153 start_y= FFMAX(0, -src_y);
154 start_x= FFMAX(0, -src_x);
155 end_y= FFMIN(block_h, h-src_y);
156 end_x= FFMIN(block_w, w-src_x);
157 av_assert2(start_y < end_y && block_h);
158 av_assert2(start_x < end_x && block_w);
161 src += start_y*linesize + start_x*sizeof(pixel);
162 buf += start_x*sizeof(pixel);
/* rows above the picture: replicate the first available row */
165 for(y=0; y<start_y; y++){
166 memcpy(buf, src, w*sizeof(pixel));
170 // copy existing part
172 memcpy(buf, src, w*sizeof(pixel));
/* rows below the picture: replicate the last available row */
179 for(; y<block_h; y++){
180 memcpy(buf, src, w*sizeof(pixel));
184 buf -= block_h * linesize + start_x*sizeof(pixel);
/* per-row horizontal replication of the left/right edge columns */
186 pixel *bufp = (pixel*)buf;
188 for(x=0; x<start_x; x++){
189 bufp[x] = bufp[start_x];
193 for(x=end_x; x<block_w; x++){
194 bufp[x] = bufp[end_x - 1];
/*
 * DCTELEM_FUNCS(dctcoef, suffix): template generating, for one DCT
 * coefficient type, the basic block <-> pixel transfer primitives:
 *   get_pixels##suffix  — read an 8-wide block of pixels into DCT coeffs
 *   add_pixels8##suffix / add_pixels4##suffix — add decoded coefficients
 *                         back onto the picture (line_size converted from
 *                         bytes to pixels before use)
 *   clear_block##suffix / clear_blocks##suffix — zero 1 or 6 blocks of 64
 *                         coefficients.
 * Instantiated at the bottom for 16-bit (DCTELEM, _16) and 32-bit
 * (dctcoef, _32) coefficients.
 * NOTE(review): many macro continuation lines (loop headers, closing
 * braces, #undef/instantiation context) are missing from this paste; no
 * comments are inserted inside the macro because every surviving line is
 * backslash-continued.  Treat as read-only.
 */
200 #define DCTELEM_FUNCS(dctcoef, suffix) \
201 static void FUNCC(get_pixels ## suffix)(DCTELEM *av_restrict _block, \
202 const uint8_t *_pixels, \
205 const pixel *pixels = (const pixel *) _pixels; \
206 dctcoef *av_restrict block = (dctcoef *) _block; \
209 /* read the pixels */ \
211 block[0] = pixels[0]; \
212 block[1] = pixels[1]; \
213 block[2] = pixels[2]; \
214 block[3] = pixels[3]; \
215 block[4] = pixels[4]; \
216 block[5] = pixels[5]; \
217 block[6] = pixels[6]; \
218 block[7] = pixels[7]; \
219 pixels += line_size / sizeof(pixel); \
224 static void FUNCC(add_pixels8 ## suffix)(uint8_t *av_restrict _pixels, \
229 pixel *av_restrict pixels = (pixel *av_restrict)_pixels; \
230 dctcoef *block = (dctcoef*)_block; \
231 line_size /= sizeof(pixel); \
234 pixels[0] += block[0]; \
235 pixels[1] += block[1]; \
236 pixels[2] += block[2]; \
237 pixels[3] += block[3]; \
238 pixels[4] += block[4]; \
239 pixels[5] += block[5]; \
240 pixels[6] += block[6]; \
241 pixels[7] += block[7]; \
242 pixels += line_size; \
247 static void FUNCC(add_pixels4 ## suffix)(uint8_t *av_restrict _pixels, \
252 pixel *av_restrict pixels = (pixel *av_restrict)_pixels; \
253 dctcoef *block = (dctcoef*)_block; \
254 line_size /= sizeof(pixel); \
257 pixels[0] += block[0]; \
258 pixels[1] += block[1]; \
259 pixels[2] += block[2]; \
260 pixels[3] += block[3]; \
261 pixels += line_size; \
266 static void FUNCC(clear_block ## suffix)(DCTELEM *block) \
268 memset(block, 0, sizeof(dctcoef)*64); \
272 * memset(blocks, 0, sizeof(DCTELEM)*6*64) \
274 static void FUNCC(clear_blocks ## suffix)(DCTELEM *blocks) \
276 memset(blocks, 0, sizeof(dctcoef)*6*64); \
279 DCTELEM_FUNCS(DCTELEM, _16)
281 DCTELEM_FUNCS(dctcoef, _32)
/*
 * PIXOP2(OPNAME, OP): template generating the whole family of pixel-copy
 * and half-pel interpolation primitives for one operation:
 *   _pixels2/4/8/16        — straight copies (OP applied per 2/4-pixel word)
 *   _pixels*_l2 / _l4      — average of 2 or 4 source lines (rnd/no_rnd)
 *   _pixels*_x2/_y2/_xy2   — half-pel horizontal / vertical / diagonal
 *                            interpolation, built on the _l2 helpers or
 *                            open-coded for the _xy2 case
 * OP is op_put (plain store) or op_avg (rounded average with destination);
 * the CALL_2X_PIXELS lines at the end synthesize the 16-wide variants from
 * the 8-wide ones.
 * The 0x03030303/0xFCFCFCFC arithmetic is SWAR averaging of four 8-bit
 * samples packed in a uint32_t (low 2 bits summed separately from the high
 * 6 bits to avoid cross-byte carries) — hence the "FIXME HIGH BIT DEPTH"
 * notes: those paths assume 8-bit pixels.
 * NOTE(review): a large number of macro continuation lines (loop headers,
 * closing braces, pixels/block pointer advances) are missing from this
 * paste; no comments are inserted inside the macro because the surviving
 * lines are backslash-continued.  Treat as read-only.
 */
284 #define PIXOP2(OPNAME, OP) \
285 static void FUNCC(OPNAME ## _pixels2)(uint8_t *block, const uint8_t *pixels, int line_size, int h){\
288 OP(*((pixel2*)(block )), AV_RN2P(pixels ));\
293 static void FUNCC(OPNAME ## _pixels4)(uint8_t *block, const uint8_t *pixels, int line_size, int h){\
296 OP(*((pixel4*)(block )), AV_RN4P(pixels ));\
301 static void FUNCC(OPNAME ## _pixels8)(uint8_t *block, const uint8_t *pixels, int line_size, int h){\
304 OP(*((pixel4*)(block )), AV_RN4P(pixels ));\
305 OP(*((pixel4*)(block+4*sizeof(pixel))), AV_RN4P(pixels+4*sizeof(pixel)));\
310 static inline void FUNCC(OPNAME ## _no_rnd_pixels8)(uint8_t *block, const uint8_t *pixels, int line_size, int h){\
311 FUNCC(OPNAME ## _pixels8)(block, pixels, line_size, h);\
314 static inline void FUNC(OPNAME ## _no_rnd_pixels8_l2)(uint8_t *dst, const uint8_t *src1, const uint8_t *src2, int dst_stride, \
315 int src_stride1, int src_stride2, int h){\
319 a= AV_RN4P(&src1[i*src_stride1 ]);\
320 b= AV_RN4P(&src2[i*src_stride2 ]);\
321 OP(*((pixel4*)&dst[i*dst_stride ]), no_rnd_avg_pixel4(a, b));\
322 a= AV_RN4P(&src1[i*src_stride1+4*sizeof(pixel)]);\
323 b= AV_RN4P(&src2[i*src_stride2+4*sizeof(pixel)]);\
324 OP(*((pixel4*)&dst[i*dst_stride+4*sizeof(pixel)]), no_rnd_avg_pixel4(a, b));\
328 static inline void FUNC(OPNAME ## _pixels8_l2)(uint8_t *dst, const uint8_t *src1, const uint8_t *src2, int dst_stride, \
329 int src_stride1, int src_stride2, int h){\
333 a= AV_RN4P(&src1[i*src_stride1 ]);\
334 b= AV_RN4P(&src2[i*src_stride2 ]);\
335 OP(*((pixel4*)&dst[i*dst_stride ]), rnd_avg_pixel4(a, b));\
336 a= AV_RN4P(&src1[i*src_stride1+4*sizeof(pixel)]);\
337 b= AV_RN4P(&src2[i*src_stride2+4*sizeof(pixel)]);\
338 OP(*((pixel4*)&dst[i*dst_stride+4*sizeof(pixel)]), rnd_avg_pixel4(a, b));\
342 static inline void FUNC(OPNAME ## _pixels4_l2)(uint8_t *dst, const uint8_t *src1, const uint8_t *src2, int dst_stride, \
343 int src_stride1, int src_stride2, int h){\
347 a= AV_RN4P(&src1[i*src_stride1 ]);\
348 b= AV_RN4P(&src2[i*src_stride2 ]);\
349 OP(*((pixel4*)&dst[i*dst_stride ]), rnd_avg_pixel4(a, b));\
353 static inline void FUNC(OPNAME ## _pixels2_l2)(uint8_t *dst, const uint8_t *src1, const uint8_t *src2, int dst_stride, \
354 int src_stride1, int src_stride2, int h){\
358 a= AV_RN2P(&src1[i*src_stride1 ]);\
359 b= AV_RN2P(&src2[i*src_stride2 ]);\
360 OP(*((pixel2*)&dst[i*dst_stride ]), rnd_avg_pixel4(a, b));\
364 static inline void FUNC(OPNAME ## _pixels16_l2)(uint8_t *dst, const uint8_t *src1, const uint8_t *src2, int dst_stride, \
365 int src_stride1, int src_stride2, int h){\
366 FUNC(OPNAME ## _pixels8_l2)(dst , src1 , src2 , dst_stride, src_stride1, src_stride2, h);\
367 FUNC(OPNAME ## _pixels8_l2)(dst+8*sizeof(pixel), src1+8*sizeof(pixel), src2+8*sizeof(pixel), dst_stride, src_stride1, src_stride2, h);\
370 static inline void FUNC(OPNAME ## _no_rnd_pixels16_l2)(uint8_t *dst, const uint8_t *src1, const uint8_t *src2, int dst_stride, \
371 int src_stride1, int src_stride2, int h){\
372 FUNC(OPNAME ## _no_rnd_pixels8_l2)(dst , src1 , src2 , dst_stride, src_stride1, src_stride2, h);\
373 FUNC(OPNAME ## _no_rnd_pixels8_l2)(dst+8*sizeof(pixel), src1+8*sizeof(pixel), src2+8*sizeof(pixel), dst_stride, src_stride1, src_stride2, h);\
376 static inline void FUNCC(OPNAME ## _no_rnd_pixels8_x2)(uint8_t *block, const uint8_t *pixels, int line_size, int h){\
377 FUNC(OPNAME ## _no_rnd_pixels8_l2)(block, pixels, pixels+sizeof(pixel), line_size, line_size, line_size, h);\
380 static inline void FUNCC(OPNAME ## _pixels8_x2)(uint8_t *block, const uint8_t *pixels, int line_size, int h){\
381 FUNC(OPNAME ## _pixels8_l2)(block, pixels, pixels+sizeof(pixel), line_size, line_size, line_size, h);\
384 static inline void FUNCC(OPNAME ## _no_rnd_pixels8_y2)(uint8_t *block, const uint8_t *pixels, int line_size, int h){\
385 FUNC(OPNAME ## _no_rnd_pixels8_l2)(block, pixels, pixels+line_size, line_size, line_size, line_size, h);\
388 static inline void FUNCC(OPNAME ## _pixels8_y2)(uint8_t *block, const uint8_t *pixels, int line_size, int h){\
389 FUNC(OPNAME ## _pixels8_l2)(block, pixels, pixels+line_size, line_size, line_size, line_size, h);\
392 static inline void FUNC(OPNAME ## _pixels8_l4)(uint8_t *dst, const uint8_t *src1, const uint8_t *src2, const uint8_t *src3, const uint8_t *src4,\
393 int dst_stride, int src_stride1, int src_stride2,int src_stride3,int src_stride4, int h){\
394 /* FIXME HIGH BIT DEPTH */\
397 uint32_t a, b, c, d, l0, l1, h0, h1;\
398 a= AV_RN32(&src1[i*src_stride1]);\
399 b= AV_RN32(&src2[i*src_stride2]);\
400 c= AV_RN32(&src3[i*src_stride3]);\
401 d= AV_RN32(&src4[i*src_stride4]);\
402 l0= (a&0x03030303UL)\
405 h0= ((a&0xFCFCFCFCUL)>>2)\
406 + ((b&0xFCFCFCFCUL)>>2);\
407 l1= (c&0x03030303UL)\
409 h1= ((c&0xFCFCFCFCUL)>>2)\
410 + ((d&0xFCFCFCFCUL)>>2);\
411 OP(*((uint32_t*)&dst[i*dst_stride]), h0+h1+(((l0+l1)>>2)&0x0F0F0F0FUL));\
412 a= AV_RN32(&src1[i*src_stride1+4]);\
413 b= AV_RN32(&src2[i*src_stride2+4]);\
414 c= AV_RN32(&src3[i*src_stride3+4]);\
415 d= AV_RN32(&src4[i*src_stride4+4]);\
416 l0= (a&0x03030303UL)\
419 h0= ((a&0xFCFCFCFCUL)>>2)\
420 + ((b&0xFCFCFCFCUL)>>2);\
421 l1= (c&0x03030303UL)\
423 h1= ((c&0xFCFCFCFCUL)>>2)\
424 + ((d&0xFCFCFCFCUL)>>2);\
425 OP(*((uint32_t*)&dst[i*dst_stride+4]), h0+h1+(((l0+l1)>>2)&0x0F0F0F0FUL));\
429 static inline void FUNCC(OPNAME ## _pixels4_x2)(uint8_t *block, const uint8_t *pixels, int line_size, int h){\
430 FUNC(OPNAME ## _pixels4_l2)(block, pixels, pixels+sizeof(pixel), line_size, line_size, line_size, h);\
433 static inline void FUNCC(OPNAME ## _pixels4_y2)(uint8_t *block, const uint8_t *pixels, int line_size, int h){\
434 FUNC(OPNAME ## _pixels4_l2)(block, pixels, pixels+line_size, line_size, line_size, line_size, h);\
437 static inline void FUNCC(OPNAME ## _pixels2_x2)(uint8_t *block, const uint8_t *pixels, int line_size, int h){\
438 FUNC(OPNAME ## _pixels2_l2)(block, pixels, pixels+sizeof(pixel), line_size, line_size, line_size, h);\
441 static inline void FUNCC(OPNAME ## _pixels2_y2)(uint8_t *block, const uint8_t *pixels, int line_size, int h){\
442 FUNC(OPNAME ## _pixels2_l2)(block, pixels, pixels+line_size, line_size, line_size, line_size, h);\
445 static inline void FUNC(OPNAME ## _no_rnd_pixels8_l4)(uint8_t *dst, const uint8_t *src1, const uint8_t *src2, const uint8_t *src3, const uint8_t *src4,\
446 int dst_stride, int src_stride1, int src_stride2,int src_stride3,int src_stride4, int h){\
447 /* FIXME HIGH BIT DEPTH*/\
450 uint32_t a, b, c, d, l0, l1, h0, h1;\
451 a= AV_RN32(&src1[i*src_stride1]);\
452 b= AV_RN32(&src2[i*src_stride2]);\
453 c= AV_RN32(&src3[i*src_stride3]);\
454 d= AV_RN32(&src4[i*src_stride4]);\
455 l0= (a&0x03030303UL)\
458 h0= ((a&0xFCFCFCFCUL)>>2)\
459 + ((b&0xFCFCFCFCUL)>>2);\
460 l1= (c&0x03030303UL)\
462 h1= ((c&0xFCFCFCFCUL)>>2)\
463 + ((d&0xFCFCFCFCUL)>>2);\
464 OP(*((uint32_t*)&dst[i*dst_stride]), h0+h1+(((l0+l1)>>2)&0x0F0F0F0FUL));\
465 a= AV_RN32(&src1[i*src_stride1+4]);\
466 b= AV_RN32(&src2[i*src_stride2+4]);\
467 c= AV_RN32(&src3[i*src_stride3+4]);\
468 d= AV_RN32(&src4[i*src_stride4+4]);\
469 l0= (a&0x03030303UL)\
472 h0= ((a&0xFCFCFCFCUL)>>2)\
473 + ((b&0xFCFCFCFCUL)>>2);\
474 l1= (c&0x03030303UL)\
476 h1= ((c&0xFCFCFCFCUL)>>2)\
477 + ((d&0xFCFCFCFCUL)>>2);\
478 OP(*((uint32_t*)&dst[i*dst_stride+4]), h0+h1+(((l0+l1)>>2)&0x0F0F0F0FUL));\
481 static inline void FUNC(OPNAME ## _pixels16_l4)(uint8_t *dst, const uint8_t *src1, const uint8_t *src2, const uint8_t *src3, const uint8_t *src4,\
482 int dst_stride, int src_stride1, int src_stride2,int src_stride3,int src_stride4, int h){\
483 FUNC(OPNAME ## _pixels8_l4)(dst , src1 , src2 , src3 , src4 , dst_stride, src_stride1, src_stride2, src_stride3, src_stride4, h);\
484 FUNC(OPNAME ## _pixels8_l4)(dst+8*sizeof(pixel), src1+8*sizeof(pixel), src2+8*sizeof(pixel), src3+8*sizeof(pixel), src4+8*sizeof(pixel), dst_stride, src_stride1, src_stride2, src_stride3, src_stride4, h);\
486 static inline void FUNC(OPNAME ## _no_rnd_pixels16_l4)(uint8_t *dst, const uint8_t *src1, const uint8_t *src2, const uint8_t *src3, const uint8_t *src4,\
487 int dst_stride, int src_stride1, int src_stride2,int src_stride3,int src_stride4, int h){\
488 FUNC(OPNAME ## _no_rnd_pixels8_l4)(dst , src1 , src2 , src3 , src4 , dst_stride, src_stride1, src_stride2, src_stride3, src_stride4, h);\
489 FUNC(OPNAME ## _no_rnd_pixels8_l4)(dst+8*sizeof(pixel), src1+8*sizeof(pixel), src2+8*sizeof(pixel), src3+8*sizeof(pixel), src4+8*sizeof(pixel), dst_stride, src_stride1, src_stride2, src_stride3, src_stride4, h);\
492 static inline void FUNCC(OPNAME ## _pixels2_xy2)(uint8_t *p_block, const uint8_t *p_pixels, int line_size, int h)\
494 int i, a0, b0, a1, b1;\
495 pixel *block = (pixel*)p_block;\
496 const pixel *pixels = (const pixel*)p_pixels;\
497 line_size >>= sizeof(pixel)-1;\
504 for(i=0; i<h; i+=2){\
510 block[0]= (a1+a0)>>2; /* FIXME non put */\
511 block[1]= (b1+b0)>>2;\
521 block[0]= (a1+a0)>>2;\
522 block[1]= (b1+b0)>>2;\
528 static inline void FUNCC(OPNAME ## _pixels4_xy2)(uint8_t *block, const uint8_t *pixels, int line_size, int h)\
530 /* FIXME HIGH BIT DEPTH */\
532 const uint32_t a= AV_RN32(pixels );\
533 const uint32_t b= AV_RN32(pixels+1);\
534 uint32_t l0= (a&0x03030303UL)\
537 uint32_t h0= ((a&0xFCFCFCFCUL)>>2)\
538 + ((b&0xFCFCFCFCUL)>>2);\
542 for(i=0; i<h; i+=2){\
543 uint32_t a= AV_RN32(pixels );\
544 uint32_t b= AV_RN32(pixels+1);\
545 l1= (a&0x03030303UL)\
547 h1= ((a&0xFCFCFCFCUL)>>2)\
548 + ((b&0xFCFCFCFCUL)>>2);\
549 OP(*((uint32_t*)block), h0+h1+(((l0+l1)>>2)&0x0F0F0F0FUL));\
552 a= AV_RN32(pixels );\
553 b= AV_RN32(pixels+1);\
554 l0= (a&0x03030303UL)\
557 h0= ((a&0xFCFCFCFCUL)>>2)\
558 + ((b&0xFCFCFCFCUL)>>2);\
559 OP(*((uint32_t*)block), h0+h1+(((l0+l1)>>2)&0x0F0F0F0FUL));\
565 static inline void FUNCC(OPNAME ## _pixels8_xy2)(uint8_t *block, const uint8_t *pixels, int line_size, int h)\
567 /* FIXME HIGH BIT DEPTH */\
571 const uint32_t a= AV_RN32(pixels );\
572 const uint32_t b= AV_RN32(pixels+1);\
573 uint32_t l0= (a&0x03030303UL)\
576 uint32_t h0= ((a&0xFCFCFCFCUL)>>2)\
577 + ((b&0xFCFCFCFCUL)>>2);\
581 for(i=0; i<h; i+=2){\
582 uint32_t a= AV_RN32(pixels );\
583 uint32_t b= AV_RN32(pixels+1);\
584 l1= (a&0x03030303UL)\
586 h1= ((a&0xFCFCFCFCUL)>>2)\
587 + ((b&0xFCFCFCFCUL)>>2);\
588 OP(*((uint32_t*)block), h0+h1+(((l0+l1)>>2)&0x0F0F0F0FUL));\
591 a= AV_RN32(pixels );\
592 b= AV_RN32(pixels+1);\
593 l0= (a&0x03030303UL)\
596 h0= ((a&0xFCFCFCFCUL)>>2)\
597 + ((b&0xFCFCFCFCUL)>>2);\
598 OP(*((uint32_t*)block), h0+h1+(((l0+l1)>>2)&0x0F0F0F0FUL));\
602 pixels+=4-line_size*(h+1);\
603 block +=4-line_size*h;\
607 static inline void FUNCC(OPNAME ## _no_rnd_pixels8_xy2)(uint8_t *block, const uint8_t *pixels, int line_size, int h)\
609 /* FIXME HIGH BIT DEPTH */\
613 const uint32_t a= AV_RN32(pixels );\
614 const uint32_t b= AV_RN32(pixels+1);\
615 uint32_t l0= (a&0x03030303UL)\
618 uint32_t h0= ((a&0xFCFCFCFCUL)>>2)\
619 + ((b&0xFCFCFCFCUL)>>2);\
623 for(i=0; i<h; i+=2){\
624 uint32_t a= AV_RN32(pixels );\
625 uint32_t b= AV_RN32(pixels+1);\
626 l1= (a&0x03030303UL)\
628 h1= ((a&0xFCFCFCFCUL)>>2)\
629 + ((b&0xFCFCFCFCUL)>>2);\
630 OP(*((uint32_t*)block), h0+h1+(((l0+l1)>>2)&0x0F0F0F0FUL));\
633 a= AV_RN32(pixels );\
634 b= AV_RN32(pixels+1);\
635 l0= (a&0x03030303UL)\
638 h0= ((a&0xFCFCFCFCUL)>>2)\
639 + ((b&0xFCFCFCFCUL)>>2);\
640 OP(*((uint32_t*)block), h0+h1+(((l0+l1)>>2)&0x0F0F0F0FUL));\
644 pixels+=4-line_size*(h+1);\
645 block +=4-line_size*h;\
649 CALL_2X_PIXELS(FUNCC(OPNAME ## _pixels16) , FUNCC(OPNAME ## _pixels8) , 8*sizeof(pixel))\
650 CALL_2X_PIXELS(FUNCC(OPNAME ## _pixels16_x2) , FUNCC(OPNAME ## _pixels8_x2) , 8*sizeof(pixel))\
651 CALL_2X_PIXELS(FUNCC(OPNAME ## _pixels16_y2) , FUNCC(OPNAME ## _pixels8_y2) , 8*sizeof(pixel))\
652 CALL_2X_PIXELS(FUNCC(OPNAME ## _pixels16_xy2), FUNCC(OPNAME ## _pixels8_xy2), 8*sizeof(pixel))\
653 av_unused CALL_2X_PIXELS(FUNCC(OPNAME ## _no_rnd_pixels16) , FUNCC(OPNAME ## _pixels8) , 8*sizeof(pixel))\
654 CALL_2X_PIXELS(FUNCC(OPNAME ## _no_rnd_pixels16_x2) , FUNCC(OPNAME ## _no_rnd_pixels8_x2) , 8*sizeof(pixel))\
655 CALL_2X_PIXELS(FUNCC(OPNAME ## _no_rnd_pixels16_y2) , FUNCC(OPNAME ## _no_rnd_pixels8_y2) , 8*sizeof(pixel))\
656 CALL_2X_PIXELS(FUNCC(OPNAME ## _no_rnd_pixels16_xy2), FUNCC(OPNAME ## _no_rnd_pixels8_xy2), 8*sizeof(pixel))\
/*
 * OP definitions used to instantiate PIXOP2: op_avg rounds-averages the new
 * value with the existing destination, op_put overwrites it.  (The PIXOP2
 * instantiations and matching #undefs are among the lines elided from this
 * paste.)
 */
658 #define op_avg(a, b) a = rnd_avg_pixel4(a, b)
659 #define op_put(a, b) a = b
/* for put, rounding and no-rounding copies are identical */
666 #define put_no_rnd_pixels8_c put_pixels8_c
667 #define put_no_rnd_pixels16_c put_pixels16_c
/* convenience wrappers: same stride for dst and both sources.
 * NOTE(review): closing braces elided in this paste — treat as read-only. */
669 static void FUNCC(put_no_rnd_pixels16_l2)(uint8_t *dst, const uint8_t *a, const uint8_t *b, int stride, int h){
670 FUNC(put_no_rnd_pixels16_l2)(dst, a, b, stride, stride, stride, h);
673 static void FUNCC(put_no_rnd_pixels8_l2)(uint8_t *dst, const uint8_t *a, const uint8_t *b, int stride, int h){
674 FUNC(put_no_rnd_pixels8_l2)(dst, a, b, stride, stride, stride, h);
/*
 * H264_CHROMA_MC(OPNAME, OP): template generating the H.264 chroma motion
 * compensation functions for block widths 2, 4 and 8.  Each sample is the
 * bilinear blend A*s[0] + B*s[1] + C*s[stride] + D*s[stride+1] with
 * eighth-pel weights derived from the fractional position (x, y) in [0,8);
 * OP performs the >>6 normalisation (+32 rounding) and either stores
 * (op_put) or averages into the destination (op_avg).  When x or y is 0
 * the 2-tap path with E = B+C and step = (C ? stride : 1) is used, which
 * the elided branch presumably selects — confirm against the full source.
 * stride is converted from bytes to pixels via '>> (sizeof(pixel)-1)'.
 * Instantiated below as put_/avg_ h264_chroma_mc{2,4,8}.
 * NOTE(review): many continuation lines (loop headers, branch structure,
 * closing braces) are missing from this paste; no comments are inserted
 * inside the macro.  Treat as read-only.
 */
677 #define H264_CHROMA_MC(OPNAME, OP)\
678 static void FUNCC(OPNAME ## h264_chroma_mc2)(uint8_t *p_dst/*align 8*/, uint8_t *p_src/*align 1*/, int stride, int h, int x, int y){\
679 pixel *dst = (pixel*)p_dst;\
680 pixel *src = (pixel*)p_src;\
681 const int A=(8-x)*(8-y);\
682 const int B=( x)*(8-y);\
683 const int C=(8-x)*( y);\
684 const int D=( x)*( y);\
686 stride >>= sizeof(pixel)-1;\
688 av_assert2(x<8 && y<8 && x>=0 && y>=0);\
692 OP(dst[0], (A*src[0] + B*src[1] + C*src[stride+0] + D*src[stride+1]));\
693 OP(dst[1], (A*src[1] + B*src[2] + C*src[stride+1] + D*src[stride+2]));\
699 const int step= C ? stride : 1;\
701 OP(dst[0], (A*src[0] + E*src[step+0]));\
702 OP(dst[1], (A*src[1] + E*src[step+1]));\
709 static void FUNCC(OPNAME ## h264_chroma_mc4)(uint8_t *p_dst/*align 8*/, uint8_t *p_src/*align 1*/, int stride, int h, int x, int y){\
710 pixel *dst = (pixel*)p_dst;\
711 pixel *src = (pixel*)p_src;\
712 const int A=(8-x)*(8-y);\
713 const int B=( x)*(8-y);\
714 const int C=(8-x)*( y);\
715 const int D=( x)*( y);\
717 stride >>= sizeof(pixel)-1;\
719 av_assert2(x<8 && y<8 && x>=0 && y>=0);\
723 OP(dst[0], (A*src[0] + B*src[1] + C*src[stride+0] + D*src[stride+1]));\
724 OP(dst[1], (A*src[1] + B*src[2] + C*src[stride+1] + D*src[stride+2]));\
725 OP(dst[2], (A*src[2] + B*src[3] + C*src[stride+2] + D*src[stride+3]));\
726 OP(dst[3], (A*src[3] + B*src[4] + C*src[stride+3] + D*src[stride+4]));\
732 const int step= C ? stride : 1;\
734 OP(dst[0], (A*src[0] + E*src[step+0]));\
735 OP(dst[1], (A*src[1] + E*src[step+1]));\
736 OP(dst[2], (A*src[2] + E*src[step+2]));\
737 OP(dst[3], (A*src[3] + E*src[step+3]));\
744 static void FUNCC(OPNAME ## h264_chroma_mc8)(uint8_t *p_dst/*align 8*/, uint8_t *p_src/*align 1*/, int stride, int h, int x, int y){\
745 pixel *dst = (pixel*)p_dst;\
746 pixel *src = (pixel*)p_src;\
747 const int A=(8-x)*(8-y);\
748 const int B=( x)*(8-y);\
749 const int C=(8-x)*( y);\
750 const int D=( x)*( y);\
752 stride >>= sizeof(pixel)-1;\
754 av_assert2(x<8 && y<8 && x>=0 && y>=0);\
758 OP(dst[0], (A*src[0] + B*src[1] + C*src[stride+0] + D*src[stride+1]));\
759 OP(dst[1], (A*src[1] + B*src[2] + C*src[stride+1] + D*src[stride+2]));\
760 OP(dst[2], (A*src[2] + B*src[3] + C*src[stride+2] + D*src[stride+3]));\
761 OP(dst[3], (A*src[3] + B*src[4] + C*src[stride+3] + D*src[stride+4]));\
762 OP(dst[4], (A*src[4] + B*src[5] + C*src[stride+4] + D*src[stride+5]));\
763 OP(dst[5], (A*src[5] + B*src[6] + C*src[stride+5] + D*src[stride+6]));\
764 OP(dst[6], (A*src[6] + B*src[7] + C*src[stride+6] + D*src[stride+7]));\
765 OP(dst[7], (A*src[7] + B*src[8] + C*src[stride+7] + D*src[stride+8]));\
771 const int step= C ? stride : 1;\
773 OP(dst[0], (A*src[0] + E*src[step+0]));\
774 OP(dst[1], (A*src[1] + E*src[step+1]));\
775 OP(dst[2], (A*src[2] + E*src[step+2]));\
776 OP(dst[3], (A*src[3] + E*src[step+3]));\
777 OP(dst[4], (A*src[4] + E*src[step+4]));\
778 OP(dst[5], (A*src[5] + E*src[step+5]));\
779 OP(dst[6], (A*src[6] + E*src[step+6]));\
780 OP(dst[7], (A*src[7] + E*src[step+7]));\
787 #define op_avg(a, b) a = (((a)+(((b) + 32)>>6)+1)>>1)
788 #define op_put(a, b) a = (((b) + 32)>>6)
790 H264_CHROMA_MC(put_ , op_put)
791 H264_CHROMA_MC(avg_ , op_avg)
/*
 * H264_LOWPASS(OPNAME, OP, OP2): template generating the H.264 quarter-pel
 * luma interpolation kernels for block widths 2, 4, 8 (the 16-wide ones are
 * composed from the 8-wide ones at the end).  Each kernel applies the H.264
 * 6-tap filter (1, -5, 20, 20, -5, 1), visible here as
 * (s0+s1)*20 - (sA+s2)*5 + (sB+s3), in one of three directions:
 *   _h_lowpass  — horizontally
 *   _v_lowpass  — vertically
 *   _hv_lowpass — horizontally into a pixeltmp buffer, then vertically over
 *                 it; OP2 does the final normalisation of the wider
 *                 intermediate.  'pad' biases the intermediates for the
 *                 10-bit case so they stay in range (subtracted back before
 *                 the second pass).
 * dstStride/srcStride arrive in bytes and are converted to pixel units via
 * '>> (sizeof(pixel)-1)'.
 * NOTE(review): loop headers, clipping, and closing braces of this macro
 * are missing from this paste (embedded line numbers show gaps); no
 * comments are inserted inside the macro because the surviving lines are
 * backslash-continued.  Treat as read-only.
 */
795 #define H264_LOWPASS(OPNAME, OP, OP2) \
796 static av_unused void FUNC(OPNAME ## h264_qpel2_h_lowpass)(uint8_t *p_dst, uint8_t *p_src, int dstStride, int srcStride){\
800 pixel *dst = (pixel*)p_dst;\
801 pixel *src = (pixel*)p_src;\
802 dstStride >>= sizeof(pixel)-1;\
803 srcStride >>= sizeof(pixel)-1;\
806 OP(dst[0], (src[0]+src[1])*20 - (src[-1]+src[2])*5 + (src[-2]+src[3]));\
807 OP(dst[1], (src[1]+src[2])*20 - (src[0 ]+src[3])*5 + (src[-1]+src[4]));\
813 static av_unused void FUNC(OPNAME ## h264_qpel2_v_lowpass)(uint8_t *p_dst, uint8_t *p_src, int dstStride, int srcStride){\
817 pixel *dst = (pixel*)p_dst;\
818 pixel *src = (pixel*)p_src;\
819 dstStride >>= sizeof(pixel)-1;\
820 srcStride >>= sizeof(pixel)-1;\
823 const int srcB= src[-2*srcStride];\
824 const int srcA= src[-1*srcStride];\
825 const int src0= src[0 *srcStride];\
826 const int src1= src[1 *srcStride];\
827 const int src2= src[2 *srcStride];\
828 const int src3= src[3 *srcStride];\
829 const int src4= src[4 *srcStride];\
830 OP(dst[0*dstStride], (src0+src1)*20 - (srcA+src2)*5 + (srcB+src3));\
831 OP(dst[1*dstStride], (src1+src2)*20 - (src0+src3)*5 + (srcA+src4));\
837 static av_unused void FUNC(OPNAME ## h264_qpel2_hv_lowpass)(uint8_t *p_dst, pixeltmp *tmp, uint8_t *p_src, int dstStride, int tmpStride, int srcStride){\
840 const int pad = (BIT_DEPTH == 10) ? (-10 * ((1<<BIT_DEPTH)-1)) : 0;\
843 pixel *dst = (pixel*)p_dst;\
844 pixel *src = (pixel*)p_src;\
845 dstStride >>= sizeof(pixel)-1;\
846 srcStride >>= sizeof(pixel)-1;\
848 for(i=0; i<h+5; i++)\
850 tmp[0]= (src[0]+src[1])*20 - (src[-1]+src[2])*5 + (src[-2]+src[3]) + pad;\
851 tmp[1]= (src[1]+src[2])*20 - (src[0 ]+src[3])*5 + (src[-1]+src[4]) + pad;\
855 tmp -= tmpStride*(h+5-2);\
858 const int tmpB= tmp[-2*tmpStride] - pad;\
859 const int tmpA= tmp[-1*tmpStride] - pad;\
860 const int tmp0= tmp[0 *tmpStride] - pad;\
861 const int tmp1= tmp[1 *tmpStride] - pad;\
862 const int tmp2= tmp[2 *tmpStride] - pad;\
863 const int tmp3= tmp[3 *tmpStride] - pad;\
864 const int tmp4= tmp[4 *tmpStride] - pad;\
865 OP2(dst[0*dstStride], (tmp0+tmp1)*20 - (tmpA+tmp2)*5 + (tmpB+tmp3));\
866 OP2(dst[1*dstStride], (tmp1+tmp2)*20 - (tmp0+tmp3)*5 + (tmpA+tmp4));\
871 static void FUNC(OPNAME ## h264_qpel4_h_lowpass)(uint8_t *p_dst, uint8_t *p_src, int dstStride, int srcStride){\
875 pixel *dst = (pixel*)p_dst;\
876 pixel *src = (pixel*)p_src;\
877 dstStride >>= sizeof(pixel)-1;\
878 srcStride >>= sizeof(pixel)-1;\
881 OP(dst[0], (src[0]+src[1])*20 - (src[-1]+src[2])*5 + (src[-2]+src[3]));\
882 OP(dst[1], (src[1]+src[2])*20 - (src[0 ]+src[3])*5 + (src[-1]+src[4]));\
883 OP(dst[2], (src[2]+src[3])*20 - (src[1 ]+src[4])*5 + (src[0 ]+src[5]));\
884 OP(dst[3], (src[3]+src[4])*20 - (src[2 ]+src[5])*5 + (src[1 ]+src[6]));\
890 static void FUNC(OPNAME ## h264_qpel4_v_lowpass)(uint8_t *p_dst, uint8_t *p_src, int dstStride, int srcStride){\
894 pixel *dst = (pixel*)p_dst;\
895 pixel *src = (pixel*)p_src;\
896 dstStride >>= sizeof(pixel)-1;\
897 srcStride >>= sizeof(pixel)-1;\
900 const int srcB= src[-2*srcStride];\
901 const int srcA= src[-1*srcStride];\
902 const int src0= src[0 *srcStride];\
903 const int src1= src[1 *srcStride];\
904 const int src2= src[2 *srcStride];\
905 const int src3= src[3 *srcStride];\
906 const int src4= src[4 *srcStride];\
907 const int src5= src[5 *srcStride];\
908 const int src6= src[6 *srcStride];\
909 OP(dst[0*dstStride], (src0+src1)*20 - (srcA+src2)*5 + (srcB+src3));\
910 OP(dst[1*dstStride], (src1+src2)*20 - (src0+src3)*5 + (srcA+src4));\
911 OP(dst[2*dstStride], (src2+src3)*20 - (src1+src4)*5 + (src0+src5));\
912 OP(dst[3*dstStride], (src3+src4)*20 - (src2+src5)*5 + (src1+src6));\
918 static void FUNC(OPNAME ## h264_qpel4_hv_lowpass)(uint8_t *p_dst, pixeltmp *tmp, uint8_t *p_src, int dstStride, int tmpStride, int srcStride){\
921 const int pad = (BIT_DEPTH == 10) ? (-10 * ((1<<BIT_DEPTH)-1)) : 0;\
924 pixel *dst = (pixel*)p_dst;\
925 pixel *src = (pixel*)p_src;\
926 dstStride >>= sizeof(pixel)-1;\
927 srcStride >>= sizeof(pixel)-1;\
929 for(i=0; i<h+5; i++)\
931 tmp[0]= (src[0]+src[1])*20 - (src[-1]+src[2])*5 + (src[-2]+src[3]) + pad;\
932 tmp[1]= (src[1]+src[2])*20 - (src[0 ]+src[3])*5 + (src[-1]+src[4]) + pad;\
933 tmp[2]= (src[2]+src[3])*20 - (src[1 ]+src[4])*5 + (src[0 ]+src[5]) + pad;\
934 tmp[3]= (src[3]+src[4])*20 - (src[2 ]+src[5])*5 + (src[1 ]+src[6]) + pad;\
938 tmp -= tmpStride*(h+5-2);\
941 const int tmpB= tmp[-2*tmpStride] - pad;\
942 const int tmpA= tmp[-1*tmpStride] - pad;\
943 const int tmp0= tmp[0 *tmpStride] - pad;\
944 const int tmp1= tmp[1 *tmpStride] - pad;\
945 const int tmp2= tmp[2 *tmpStride] - pad;\
946 const int tmp3= tmp[3 *tmpStride] - pad;\
947 const int tmp4= tmp[4 *tmpStride] - pad;\
948 const int tmp5= tmp[5 *tmpStride] - pad;\
949 const int tmp6= tmp[6 *tmpStride] - pad;\
950 OP2(dst[0*dstStride], (tmp0+tmp1)*20 - (tmpA+tmp2)*5 + (tmpB+tmp3));\
951 OP2(dst[1*dstStride], (tmp1+tmp2)*20 - (tmp0+tmp3)*5 + (tmpA+tmp4));\
952 OP2(dst[2*dstStride], (tmp2+tmp3)*20 - (tmp1+tmp4)*5 + (tmp0+tmp5));\
953 OP2(dst[3*dstStride], (tmp3+tmp4)*20 - (tmp2+tmp5)*5 + (tmp1+tmp6));\
959 static void FUNC(OPNAME ## h264_qpel8_h_lowpass)(uint8_t *p_dst, uint8_t *p_src, int dstStride, int srcStride){\
963 pixel *dst = (pixel*)p_dst;\
964 pixel *src = (pixel*)p_src;\
965 dstStride >>= sizeof(pixel)-1;\
966 srcStride >>= sizeof(pixel)-1;\
969 OP(dst[0], (src[0]+src[1])*20 - (src[-1]+src[2])*5 + (src[-2]+src[3 ]));\
970 OP(dst[1], (src[1]+src[2])*20 - (src[0 ]+src[3])*5 + (src[-1]+src[4 ]));\
971 OP(dst[2], (src[2]+src[3])*20 - (src[1 ]+src[4])*5 + (src[0 ]+src[5 ]));\
972 OP(dst[3], (src[3]+src[4])*20 - (src[2 ]+src[5])*5 + (src[1 ]+src[6 ]));\
973 OP(dst[4], (src[4]+src[5])*20 - (src[3 ]+src[6])*5 + (src[2 ]+src[7 ]));\
974 OP(dst[5], (src[5]+src[6])*20 - (src[4 ]+src[7])*5 + (src[3 ]+src[8 ]));\
975 OP(dst[6], (src[6]+src[7])*20 - (src[5 ]+src[8])*5 + (src[4 ]+src[9 ]));\
976 OP(dst[7], (src[7]+src[8])*20 - (src[6 ]+src[9])*5 + (src[5 ]+src[10]));\
982 static void FUNC(OPNAME ## h264_qpel8_v_lowpass)(uint8_t *p_dst, uint8_t *p_src, int dstStride, int srcStride){\
986 pixel *dst = (pixel*)p_dst;\
987 pixel *src = (pixel*)p_src;\
988 dstStride >>= sizeof(pixel)-1;\
989 srcStride >>= sizeof(pixel)-1;\
992 const int srcB= src[-2*srcStride];\
993 const int srcA= src[-1*srcStride];\
994 const int src0= src[0 *srcStride];\
995 const int src1= src[1 *srcStride];\
996 const int src2= src[2 *srcStride];\
997 const int src3= src[3 *srcStride];\
998 const int src4= src[4 *srcStride];\
999 const int src5= src[5 *srcStride];\
1000 const int src6= src[6 *srcStride];\
1001 const int src7= src[7 *srcStride];\
1002 const int src8= src[8 *srcStride];\
1003 const int src9= src[9 *srcStride];\
1004 const int src10=src[10*srcStride];\
1005 OP(dst[0*dstStride], (src0+src1)*20 - (srcA+src2)*5 + (srcB+src3));\
1006 OP(dst[1*dstStride], (src1+src2)*20 - (src0+src3)*5 + (srcA+src4));\
1007 OP(dst[2*dstStride], (src2+src3)*20 - (src1+src4)*5 + (src0+src5));\
1008 OP(dst[3*dstStride], (src3+src4)*20 - (src2+src5)*5 + (src1+src6));\
1009 OP(dst[4*dstStride], (src4+src5)*20 - (src3+src6)*5 + (src2+src7));\
1010 OP(dst[5*dstStride], (src5+src6)*20 - (src4+src7)*5 + (src3+src8));\
1011 OP(dst[6*dstStride], (src6+src7)*20 - (src5+src8)*5 + (src4+src9));\
1012 OP(dst[7*dstStride], (src7+src8)*20 - (src6+src9)*5 + (src5+src10));\
1018 static void FUNC(OPNAME ## h264_qpel8_hv_lowpass)(uint8_t *p_dst, pixeltmp *tmp, uint8_t *p_src, int dstStride, int tmpStride, int srcStride){\
1021 const int pad = (BIT_DEPTH == 10) ? (-10 * ((1<<BIT_DEPTH)-1)) : 0;\
1024 pixel *dst = (pixel*)p_dst;\
1025 pixel *src = (pixel*)p_src;\
1026 dstStride >>= sizeof(pixel)-1;\
1027 srcStride >>= sizeof(pixel)-1;\
1028 src -= 2*srcStride;\
1029 for(i=0; i<h+5; i++)\
1031 tmp[0]= (src[0]+src[1])*20 - (src[-1]+src[2])*5 + (src[-2]+src[3 ]) + pad;\
1032 tmp[1]= (src[1]+src[2])*20 - (src[0 ]+src[3])*5 + (src[-1]+src[4 ]) + pad;\
1033 tmp[2]= (src[2]+src[3])*20 - (src[1 ]+src[4])*5 + (src[0 ]+src[5 ]) + pad;\
1034 tmp[3]= (src[3]+src[4])*20 - (src[2 ]+src[5])*5 + (src[1 ]+src[6 ]) + pad;\
1035 tmp[4]= (src[4]+src[5])*20 - (src[3 ]+src[6])*5 + (src[2 ]+src[7 ]) + pad;\
1036 tmp[5]= (src[5]+src[6])*20 - (src[4 ]+src[7])*5 + (src[3 ]+src[8 ]) + pad;\
1037 tmp[6]= (src[6]+src[7])*20 - (src[5 ]+src[8])*5 + (src[4 ]+src[9 ]) + pad;\
1038 tmp[7]= (src[7]+src[8])*20 - (src[6 ]+src[9])*5 + (src[5 ]+src[10]) + pad;\
1042 tmp -= tmpStride*(h+5-2);\
1045 const int tmpB= tmp[-2*tmpStride] - pad;\
1046 const int tmpA= tmp[-1*tmpStride] - pad;\
1047 const int tmp0= tmp[0 *tmpStride] - pad;\
1048 const int tmp1= tmp[1 *tmpStride] - pad;\
1049 const int tmp2= tmp[2 *tmpStride] - pad;\
1050 const int tmp3= tmp[3 *tmpStride] - pad;\
1051 const int tmp4= tmp[4 *tmpStride] - pad;\
1052 const int tmp5= tmp[5 *tmpStride] - pad;\
1053 const int tmp6= tmp[6 *tmpStride] - pad;\
1054 const int tmp7= tmp[7 *tmpStride] - pad;\
1055 const int tmp8= tmp[8 *tmpStride] - pad;\
1056 const int tmp9= tmp[9 *tmpStride] - pad;\
1057 const int tmp10=tmp[10*tmpStride] - pad;\
1058 OP2(dst[0*dstStride], (tmp0+tmp1)*20 - (tmpA+tmp2)*5 + (tmpB+tmp3));\
1059 OP2(dst[1*dstStride], (tmp1+tmp2)*20 - (tmp0+tmp3)*5 + (tmpA+tmp4));\
1060 OP2(dst[2*dstStride], (tmp2+tmp3)*20 - (tmp1+tmp4)*5 + (tmp0+tmp5));\
1061 OP2(dst[3*dstStride], (tmp3+tmp4)*20 - (tmp2+tmp5)*5 + (tmp1+tmp6));\
1062 OP2(dst[4*dstStride], (tmp4+tmp5)*20 - (tmp3+tmp6)*5 + (tmp2+tmp7));\
1063 OP2(dst[5*dstStride], (tmp5+tmp6)*20 - (tmp4+tmp7)*5 + (tmp3+tmp8));\
1064 OP2(dst[6*dstStride], (tmp6+tmp7)*20 - (tmp5+tmp8)*5 + (tmp4+tmp9));\
1065 OP2(dst[7*dstStride], (tmp7+tmp8)*20 - (tmp6+tmp9)*5 + (tmp5+tmp10));\
1071 static void FUNC(OPNAME ## h264_qpel16_v_lowpass)(uint8_t *dst, uint8_t *src, int dstStride, int srcStride){\
1072 FUNC(OPNAME ## h264_qpel8_v_lowpass)(dst , src , dstStride, srcStride);\
1073 FUNC(OPNAME ## h264_qpel8_v_lowpass)(dst+8*sizeof(pixel), src+8*sizeof(pixel), dstStride, srcStride);\
1074 src += 8*srcStride;\
1075 dst += 8*dstStride;\
1076 FUNC(OPNAME ## h264_qpel8_v_lowpass)(dst , src , dstStride, srcStride);\
1077 FUNC(OPNAME ## h264_qpel8_v_lowpass)(dst+8*sizeof(pixel), src+8*sizeof(pixel), dstStride, srcStride);\
1080 static void FUNC(OPNAME ## h264_qpel16_h_lowpass)(uint8_t *dst, uint8_t *src, int dstStride, int srcStride){\
1081 FUNC(OPNAME ## h264_qpel8_h_lowpass)(dst , src , dstStride, srcStride);\
1082 FUNC(OPNAME ## h264_qpel8_h_lowpass)(dst+8*sizeof(pixel), src+8*sizeof(pixel), dstStride, srcStride);\
1083 src += 8*srcStride;\
1084 dst += 8*dstStride;\
1085 FUNC(OPNAME ## h264_qpel8_h_lowpass)(dst , src , dstStride, srcStride);\
1086 FUNC(OPNAME ## h264_qpel8_h_lowpass)(dst+8*sizeof(pixel), src+8*sizeof(pixel), dstStride, srcStride);\
1089 static void FUNC(OPNAME ## h264_qpel16_hv_lowpass)(uint8_t *dst, pixeltmp *tmp, uint8_t *src, int dstStride, int tmpStride, int srcStride){\
1090 FUNC(OPNAME ## h264_qpel8_hv_lowpass)(dst , tmp , src , dstStride, tmpStride, srcStride);\
1091 FUNC(OPNAME ## h264_qpel8_hv_lowpass)(dst+8*sizeof(pixel), tmp+8, src+8*sizeof(pixel), dstStride, tmpStride, srcStride);\
1092 src += 8*srcStride;\
1093 dst += 8*dstStride;\
1094 FUNC(OPNAME ## h264_qpel8_hv_lowpass)(dst , tmp , src , dstStride, tmpStride, srcStride);\
1095 FUNC(OPNAME ## h264_qpel8_hv_lowpass)(dst+8*sizeof(pixel), tmp+8, src+8*sizeof(pixel), dstStride, tmpStride, srcStride);\
/* H264_MC: expands to the 16 quarter-pel motion-compensation functions    */\
/* _mcXY for one block SIZE, where X/Y are the horizontal/vertical         */\
/* quarter-sample offsets (0..3). Half-pel planes come from the 6-tap      */\
/* lowpass filters above; quarter-pel positions average two planes via     */\
/* the pixels##SIZE##_l2 helpers.                                          */\
1098 #define H264_MC(OPNAME, SIZE) \
/* mc00 = (0,0): integer-pel position, plain SIZExSIZE block operation.    */\
1099 static av_unused void FUNCC(OPNAME ## h264_qpel ## SIZE ## _mc00)(uint8_t *dst, uint8_t *src, int stride){\
1100 FUNCC(OPNAME ## pixels ## SIZE)(dst, src, stride, SIZE);\
/* mc10 = (1/4,0): average of integer-pel src and the horizontal half-pel. */\
1103 static void FUNCC(OPNAME ## h264_qpel ## SIZE ## _mc10)(uint8_t *dst, uint8_t *src, int stride){\
1104 uint8_t half[SIZE*SIZE*sizeof(pixel)];\
1105 FUNC(put_h264_qpel ## SIZE ## _h_lowpass)(half, src, SIZE*sizeof(pixel), stride);\
1106 FUNC(OPNAME ## pixels ## SIZE ## _l2)(dst, src, half, stride, stride, SIZE*sizeof(pixel), SIZE);\
/* mc20 = (1/2,0): horizontal half-pel, filtered directly into dst.        */\
1109 static void FUNCC(OPNAME ## h264_qpel ## SIZE ## _mc20)(uint8_t *dst, uint8_t *src, int stride){\
1110 FUNC(OPNAME ## h264_qpel ## SIZE ## _h_lowpass)(dst, src, stride, stride);\
/* mc30 = (3/4,0): average of the pixel one to the right and the           */\
/* horizontal half-pel.                                                    */\
1113 static void FUNCC(OPNAME ## h264_qpel ## SIZE ## _mc30)(uint8_t *dst, uint8_t *src, int stride){\
1114 uint8_t half[SIZE*SIZE*sizeof(pixel)];\
1115 FUNC(put_h264_qpel ## SIZE ## _h_lowpass)(half, src, SIZE*sizeof(pixel), stride);\
1116 FUNC(OPNAME ## pixels ## SIZE ## _l2)(dst, src+sizeof(pixel), half, stride, stride, SIZE*sizeof(pixel), SIZE);\
/* mc01 = (0,1/4): src is first copied into 'full' starting 2 rows above,  */\
/* SIZE+5 rows total, so the vertical 6-tap filter has its extra taps;     */\
/* result is the average of integer-pel and vertical half-pel.             */\
1119 static void FUNCC(OPNAME ## h264_qpel ## SIZE ## _mc01)(uint8_t *dst, uint8_t *src, int stride){\
1120 uint8_t full[SIZE*(SIZE+5)*sizeof(pixel)];\
1121 uint8_t * const full_mid= full + SIZE*2*sizeof(pixel);\
1122 uint8_t half[SIZE*SIZE*sizeof(pixel)];\
1123 FUNC(copy_block ## SIZE )(full, src - stride*2, SIZE*sizeof(pixel), stride, SIZE + 5);\
1124 FUNC(put_h264_qpel ## SIZE ## _v_lowpass)(half, full_mid, SIZE*sizeof(pixel), SIZE*sizeof(pixel));\
1125 FUNC(OPNAME ## pixels ## SIZE ## _l2)(dst, full_mid, half, stride, SIZE*sizeof(pixel), SIZE*sizeof(pixel), SIZE);\
/* mc02 = (0,1/2): vertical half-pel via v_lowpass on the extended copy.   */\
1128 static void FUNCC(OPNAME ## h264_qpel ## SIZE ## _mc02)(uint8_t *dst, uint8_t *src, int stride){\
1129 uint8_t full[SIZE*(SIZE+5)*sizeof(pixel)];\
1130 uint8_t * const full_mid= full + SIZE*2*sizeof(pixel);\
1131 FUNC(copy_block ## SIZE )(full, src - stride*2, SIZE*sizeof(pixel), stride, SIZE + 5);\
1132 FUNC(OPNAME ## h264_qpel ## SIZE ## _v_lowpass)(dst, full_mid, stride, SIZE*sizeof(pixel));\
/* mc03 = (0,3/4): like mc01 but averaged with the row below               */\
/* (full_mid + one row).                                                   */\
1135 static void FUNCC(OPNAME ## h264_qpel ## SIZE ## _mc03)(uint8_t *dst, uint8_t *src, int stride){\
1136 uint8_t full[SIZE*(SIZE+5)*sizeof(pixel)];\
1137 uint8_t * const full_mid= full + SIZE*2*sizeof(pixel);\
1138 uint8_t half[SIZE*SIZE*sizeof(pixel)];\
1139 FUNC(copy_block ## SIZE )(full, src - stride*2, SIZE*sizeof(pixel), stride, SIZE + 5);\
1140 FUNC(put_h264_qpel ## SIZE ## _v_lowpass)(half, full_mid, SIZE*sizeof(pixel), SIZE*sizeof(pixel));\
1141 FUNC(OPNAME ## pixels ## SIZE ## _l2)(dst, full_mid+SIZE*sizeof(pixel), half, stride, SIZE*sizeof(pixel), SIZE*sizeof(pixel), SIZE);\
/* mc11 = (1/4,1/4): average of the horizontal and vertical half-pel       */\
/* planes.                                                                 */\
1144 static void FUNCC(OPNAME ## h264_qpel ## SIZE ## _mc11)(uint8_t *dst, uint8_t *src, int stride){\
1145 uint8_t full[SIZE*(SIZE+5)*sizeof(pixel)];\
1146 uint8_t * const full_mid= full + SIZE*2*sizeof(pixel);\
1147 uint8_t halfH[SIZE*SIZE*sizeof(pixel)];\
1148 uint8_t halfV[SIZE*SIZE*sizeof(pixel)];\
1149 FUNC(put_h264_qpel ## SIZE ## _h_lowpass)(halfH, src, SIZE*sizeof(pixel), stride);\
1150 FUNC(copy_block ## SIZE )(full, src - stride*2, SIZE*sizeof(pixel), stride, SIZE + 5);\
1151 FUNC(put_h264_qpel ## SIZE ## _v_lowpass)(halfV, full_mid, SIZE*sizeof(pixel), SIZE*sizeof(pixel));\
1152 FUNC(OPNAME ## pixels ## SIZE ## _l2)(dst, halfH, halfV, stride, SIZE*sizeof(pixel), SIZE*sizeof(pixel), SIZE);\
/* mc31 = (3/4,1/4): as mc11, but the vertical filter input is shifted one */\
/* pixel to the right.                                                     */\
1155 static void FUNCC(OPNAME ## h264_qpel ## SIZE ## _mc31)(uint8_t *dst, uint8_t *src, int stride){\
1156 uint8_t full[SIZE*(SIZE+5)*sizeof(pixel)];\
1157 uint8_t * const full_mid= full + SIZE*2*sizeof(pixel);\
1158 uint8_t halfH[SIZE*SIZE*sizeof(pixel)];\
1159 uint8_t halfV[SIZE*SIZE*sizeof(pixel)];\
1160 FUNC(put_h264_qpel ## SIZE ## _h_lowpass)(halfH, src, SIZE*sizeof(pixel), stride);\
1161 FUNC(copy_block ## SIZE )(full, src - stride*2 + sizeof(pixel), SIZE*sizeof(pixel), stride, SIZE + 5);\
1162 FUNC(put_h264_qpel ## SIZE ## _v_lowpass)(halfV, full_mid, SIZE*sizeof(pixel), SIZE*sizeof(pixel));\
1163 FUNC(OPNAME ## pixels ## SIZE ## _l2)(dst, halfH, halfV, stride, SIZE*sizeof(pixel), SIZE*sizeof(pixel), SIZE);\
/* mc13 = (1/4,3/4): as mc11, but the horizontal filter input is shifted   */\
/* one row down.                                                           */\
1166 static void FUNCC(OPNAME ## h264_qpel ## SIZE ## _mc13)(uint8_t *dst, uint8_t *src, int stride){\
1167 uint8_t full[SIZE*(SIZE+5)*sizeof(pixel)];\
1168 uint8_t * const full_mid= full + SIZE*2*sizeof(pixel);\
1169 uint8_t halfH[SIZE*SIZE*sizeof(pixel)];\
1170 uint8_t halfV[SIZE*SIZE*sizeof(pixel)];\
1171 FUNC(put_h264_qpel ## SIZE ## _h_lowpass)(halfH, src + stride, SIZE*sizeof(pixel), stride);\
1172 FUNC(copy_block ## SIZE )(full, src - stride*2, SIZE*sizeof(pixel), stride, SIZE + 5);\
1173 FUNC(put_h264_qpel ## SIZE ## _v_lowpass)(halfV, full_mid, SIZE*sizeof(pixel), SIZE*sizeof(pixel));\
1174 FUNC(OPNAME ## pixels ## SIZE ## _l2)(dst, halfH, halfV, stride, SIZE*sizeof(pixel), SIZE*sizeof(pixel), SIZE);\
/* mc33 = (3/4,3/4): horizontal input one row down, vertical input one     */\
/* pixel right.                                                            */\
1177 static void FUNCC(OPNAME ## h264_qpel ## SIZE ## _mc33)(uint8_t *dst, uint8_t *src, int stride){\
1178 uint8_t full[SIZE*(SIZE+5)*sizeof(pixel)];\
1179 uint8_t * const full_mid= full + SIZE*2*sizeof(pixel);\
1180 uint8_t halfH[SIZE*SIZE*sizeof(pixel)];\
1181 uint8_t halfV[SIZE*SIZE*sizeof(pixel)];\
1182 FUNC(put_h264_qpel ## SIZE ## _h_lowpass)(halfH, src + stride, SIZE*sizeof(pixel), stride);\
1183 FUNC(copy_block ## SIZE )(full, src - stride*2 + sizeof(pixel), SIZE*sizeof(pixel), stride, SIZE + 5);\
1184 FUNC(put_h264_qpel ## SIZE ## _v_lowpass)(halfV, full_mid, SIZE*sizeof(pixel), SIZE*sizeof(pixel));\
1185 FUNC(OPNAME ## pixels ## SIZE ## _l2)(dst, halfH, halfV, stride, SIZE*sizeof(pixel), SIZE*sizeof(pixel), SIZE);\
/* mc22 = (1/2,1/2): two-pass HV half-pel via hv_lowpass, using 'tmp' as   */\
/* the pixeltmp intermediate buffer.                                       */\
1188 static void FUNCC(OPNAME ## h264_qpel ## SIZE ## _mc22)(uint8_t *dst, uint8_t *src, int stride){\
1189 pixeltmp tmp[SIZE*(SIZE+5)*sizeof(pixel)];\
1190 FUNC(OPNAME ## h264_qpel ## SIZE ## _hv_lowpass)(dst, tmp, src, stride, SIZE*sizeof(pixel), stride);\
/* mc21 = (1/2,1/4): average of horizontal half-pel and HV half-pel.       */\
1193 static void FUNCC(OPNAME ## h264_qpel ## SIZE ## _mc21)(uint8_t *dst, uint8_t *src, int stride){\
1194 pixeltmp tmp[SIZE*(SIZE+5)*sizeof(pixel)];\
1195 uint8_t halfH[SIZE*SIZE*sizeof(pixel)];\
1196 uint8_t halfHV[SIZE*SIZE*sizeof(pixel)];\
1197 FUNC(put_h264_qpel ## SIZE ## _h_lowpass)(halfH, src, SIZE*sizeof(pixel), stride);\
1198 FUNC(put_h264_qpel ## SIZE ## _hv_lowpass)(halfHV, tmp, src, SIZE*sizeof(pixel), SIZE*sizeof(pixel), stride);\
1199 FUNC(OPNAME ## pixels ## SIZE ## _l2)(dst, halfH, halfHV, stride, SIZE*sizeof(pixel), SIZE*sizeof(pixel), SIZE);\
/* mc23 = (1/2,3/4): as mc21 with the horizontal input one row down.       */\
1202 static void FUNCC(OPNAME ## h264_qpel ## SIZE ## _mc23)(uint8_t *dst, uint8_t *src, int stride){\
1203 pixeltmp tmp[SIZE*(SIZE+5)*sizeof(pixel)];\
1204 uint8_t halfH[SIZE*SIZE*sizeof(pixel)];\
1205 uint8_t halfHV[SIZE*SIZE*sizeof(pixel)];\
1206 FUNC(put_h264_qpel ## SIZE ## _h_lowpass)(halfH, src + stride, SIZE*sizeof(pixel), stride);\
1207 FUNC(put_h264_qpel ## SIZE ## _hv_lowpass)(halfHV, tmp, src, SIZE*sizeof(pixel), SIZE*sizeof(pixel), stride);\
1208 FUNC(OPNAME ## pixels ## SIZE ## _l2)(dst, halfH, halfHV, stride, SIZE*sizeof(pixel), SIZE*sizeof(pixel), SIZE);\
/* mc12 = (1/4,1/2): average of vertical half-pel and HV half-pel.         */\
1211 static void FUNCC(OPNAME ## h264_qpel ## SIZE ## _mc12)(uint8_t *dst, uint8_t *src, int stride){\
1212 uint8_t full[SIZE*(SIZE+5)*sizeof(pixel)];\
1213 uint8_t * const full_mid= full + SIZE*2*sizeof(pixel);\
1214 pixeltmp tmp[SIZE*(SIZE+5)*sizeof(pixel)];\
1215 uint8_t halfV[SIZE*SIZE*sizeof(pixel)];\
1216 uint8_t halfHV[SIZE*SIZE*sizeof(pixel)];\
1217 FUNC(copy_block ## SIZE )(full, src - stride*2, SIZE*sizeof(pixel), stride, SIZE + 5);\
1218 FUNC(put_h264_qpel ## SIZE ## _v_lowpass)(halfV, full_mid, SIZE*sizeof(pixel), SIZE*sizeof(pixel));\
1219 FUNC(put_h264_qpel ## SIZE ## _hv_lowpass)(halfHV, tmp, src, SIZE*sizeof(pixel), SIZE*sizeof(pixel), stride);\
1220 FUNC(OPNAME ## pixels ## SIZE ## _l2)(dst, halfV, halfHV, stride, SIZE*sizeof(pixel), SIZE*sizeof(pixel), SIZE);\
/* mc32 = (3/4,1/2): as mc12 with the vertical input one pixel right.      */\
1223 static void FUNCC(OPNAME ## h264_qpel ## SIZE ## _mc32)(uint8_t *dst, uint8_t *src, int stride){\
1224 uint8_t full[SIZE*(SIZE+5)*sizeof(pixel)];\
1225 uint8_t * const full_mid= full + SIZE*2*sizeof(pixel);\
1226 pixeltmp tmp[SIZE*(SIZE+5)*sizeof(pixel)];\
1227 uint8_t halfV[SIZE*SIZE*sizeof(pixel)];\
1228 uint8_t halfHV[SIZE*SIZE*sizeof(pixel)];\
1229 FUNC(copy_block ## SIZE )(full, src - stride*2 + sizeof(pixel), SIZE*sizeof(pixel), stride, SIZE + 5);\
1230 FUNC(put_h264_qpel ## SIZE ## _v_lowpass)(halfV, full_mid, SIZE*sizeof(pixel), SIZE*sizeof(pixel));\
1231 FUNC(put_h264_qpel ## SIZE ## _hv_lowpass)(halfHV, tmp, src, SIZE*sizeof(pixel), SIZE*sizeof(pixel), stride);\
1232 FUNC(OPNAME ## pixels ## SIZE ## _l2)(dst, halfV, halfHV, stride, SIZE*sizeof(pixel), SIZE*sizeof(pixel), SIZE);\
/* Output macros for the lowpass filters:
 *   op_put / op_avg  finish the single-pass 6-tap filter: (b+16)>>5, clipped;
 *   op2_put / op2_avg finish the two-pass HV filter:       (b+512)>>10, clipped.
 * The *_avg variants additionally take the rounded mean with the value
 * already in dst. */
1235 #define op_avg(a, b) a = (((a)+CLIP(((b) + 16)>>5)+1)>>1)
1236 //#define op_avg2(a, b) a = (((a)*w1+cm[((b) + 16)>>5]*w2 + o + 64)>>7)
1237 #define op_put(a, b) a = CLIP(((b) + 16)>>5)
1238 #define op2_avg(a, b) a = (((a)+CLIP(((b) + 512)>>10)+1)>>1)
1239 #define op2_put(a, b) a = CLIP(((b) + 512)>>10)
/* Instantiate the put_* and avg_* families of h264_qpel lowpass functions. */
1241 H264_LOWPASS(put_ , op_put, op2_put)
1242 H264_LOWPASS(avg_ , op_avg, op2_avg)
/* For each supported bit depth, alias the trivial mc00 (integer-pel)
 * cases to the shared fixed-size ff_{put,avg}_pixels helpers below,
 * avoiding duplicate template expansions. */
1257 # define put_h264_qpel8_mc00_8_c ff_put_pixels8x8_8_c
1258 # define avg_h264_qpel8_mc00_8_c ff_avg_pixels8x8_8_c
1259 # define put_h264_qpel16_mc00_8_c ff_put_pixels16x16_8_c
1260 # define avg_h264_qpel16_mc00_8_c ff_avg_pixels16x16_8_c
1261 #elif BIT_DEPTH == 9
1262 # define put_h264_qpel8_mc00_9_c ff_put_pixels8x8_9_c
1263 # define avg_h264_qpel8_mc00_9_c ff_avg_pixels8x8_9_c
1264 # define put_h264_qpel16_mc00_9_c ff_put_pixels16x16_9_c
1265 # define avg_h264_qpel16_mc00_9_c ff_avg_pixels16x16_9_c
1266 #elif BIT_DEPTH == 10
1267 # define put_h264_qpel8_mc00_10_c ff_put_pixels8x8_10_c
1268 # define avg_h264_qpel8_mc00_10_c ff_avg_pixels8x8_10_c
1269 # define put_h264_qpel16_mc00_10_c ff_put_pixels16x16_10_c
1270 # define avg_h264_qpel16_mc00_10_c ff_avg_pixels16x16_10_c
1271 #elif BIT_DEPTH == 12
1272 # define put_h264_qpel8_mc00_12_c ff_put_pixels8x8_12_c
1273 # define avg_h264_qpel8_mc00_12_c ff_avg_pixels8x8_12_c
1274 # define put_h264_qpel16_mc00_12_c ff_put_pixels16x16_12_c
1275 # define avg_h264_qpel16_mc00_12_c ff_avg_pixels16x16_12_c
1276 #elif BIT_DEPTH == 14
1277 # define put_h264_qpel8_mc00_14_c ff_put_pixels8x8_14_c
1278 # define avg_h264_qpel8_mc00_14_c ff_avg_pixels8x8_14_c
1279 # define put_h264_qpel16_mc00_14_c ff_put_pixels16x16_14_c
1280 # define avg_h264_qpel16_mc00_14_c ff_avg_pixels16x16_14_c
/* Fixed-size wrappers: bind the variable-height put/avg_pixels templates
 * to concrete 8x8 and 16x16 block sizes (these are the targets of the
 * *_mc00 aliases defined for each bit depth). */
1283 void FUNCC(ff_put_pixels8x8)(uint8_t *dst, uint8_t *src, int stride) {
1284 FUNCC(put_pixels8)(dst, src, stride, 8);
/* 8x8 variant forwarding to avg_pixels8 with h=8. */
1286 void FUNCC(ff_avg_pixels8x8)(uint8_t *dst, uint8_t *src, int stride) {
1287 FUNCC(avg_pixels8)(dst, src, stride, 8);
/* 16x16 variant forwarding to put_pixels16 with h=16. */
1289 void FUNCC(ff_put_pixels16x16)(uint8_t *dst, uint8_t *src, int stride) {
1290 FUNCC(put_pixels16)(dst, src, stride, 16);
/* 16x16 variant forwarding to avg_pixels16 with h=16. */
1292 void FUNCC(ff_avg_pixels16x16)(uint8_t *dst, uint8_t *src, int stride) {
1293 FUNCC(avg_pixels16)(dst, src, stride, 16);