/*
 * Copyright (c) 2000, 2001 Fabrice Bellard
 * Copyright (c) 2002-2004 Michael Niedermayer <michaelni@gmx.at>
 *
 * gmc & q-pel & 32/64 bit based MC by Michael Niedermayer <michaelni@gmx.at>
 *
 * This file is part of Libav.
 *
 * Libav is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * Libav is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with Libav; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */
#include <assert.h>
#include <string.h>

#include "bit_depth_template.c"
32 static inline void FUNC(copy_block2)(uint8_t *dst, const uint8_t *src, int dstStride, int srcStride, int h)
37 AV_WN2P(dst , AV_RN2P(src ));
43 static inline void FUNC(copy_block4)(uint8_t *dst, const uint8_t *src, int dstStride, int srcStride, int h)
48 AV_WN4P(dst , AV_RN4P(src ));
54 static inline void FUNC(copy_block8)(uint8_t *dst, const uint8_t *src, int dstStride, int srcStride, int h)
59 AV_WN4P(dst , AV_RN4P(src ));
60 AV_WN4P(dst+4*sizeof(pixel), AV_RN4P(src+4*sizeof(pixel)));
66 static inline void FUNC(copy_block16)(uint8_t *dst, const uint8_t *src, int dstStride, int srcStride, int h)
71 AV_WN4P(dst , AV_RN4P(src ));
72 AV_WN4P(dst+ 4*sizeof(pixel), AV_RN4P(src+ 4*sizeof(pixel)));
73 AV_WN4P(dst+ 8*sizeof(pixel), AV_RN4P(src+ 8*sizeof(pixel)));
74 AV_WN4P(dst+12*sizeof(pixel), AV_RN4P(src+12*sizeof(pixel)));
80 /* draw the edges of width 'w' of an image of size width, height */
81 //FIXME check that this is ok for mpeg4 interlaced
82 static void FUNCC(draw_edges)(uint8_t *_buf, int _wrap, int width, int height, int w, int h, int sides)
84 pixel *buf = (pixel*)_buf;
85 int wrap = _wrap / sizeof(pixel);
86 pixel *ptr, *last_line;
91 for(i=0;i<height;i++) {
94 for (j = 0; j < w; j++) {
96 ptr[j+width] = ptr[width-1];
99 memset(ptr - w, ptr[0], w);
100 memset(ptr + width, ptr[width-1], w);
105 /* top and bottom + corners */
107 last_line = buf + (height - 1) * wrap;
108 if (sides & EDGE_TOP)
109 for(i = 0; i < h; i++)
110 memcpy(buf - (i + 1) * wrap, buf, (width + w + w) * sizeof(pixel)); // top
111 if (sides & EDGE_BOTTOM)
112 for (i = 0; i < h; i++)
113 memcpy(last_line + (i + 1) * wrap, last_line, (width + w + w) * sizeof(pixel)); // bottom
116 #define DCTELEM_FUNCS(dctcoef, suffix) \
117 static void FUNCC(get_pixels ## suffix)(int16_t *restrict _block, \
118 const uint8_t *_pixels, \
121 const pixel *pixels = (const pixel *) _pixels; \
122 dctcoef *restrict block = (dctcoef *) _block; \
125 /* read the pixels */ \
127 block[0] = pixels[0]; \
128 block[1] = pixels[1]; \
129 block[2] = pixels[2]; \
130 block[3] = pixels[3]; \
131 block[4] = pixels[4]; \
132 block[5] = pixels[5]; \
133 block[6] = pixels[6]; \
134 block[7] = pixels[7]; \
135 pixels += line_size / sizeof(pixel); \
140 static void FUNCC(add_pixels8 ## suffix)(uint8_t *restrict _pixels, \
145 pixel *restrict pixels = (pixel *restrict)_pixels; \
146 dctcoef *block = (dctcoef*)_block; \
147 line_size /= sizeof(pixel); \
150 pixels[0] += block[0]; \
151 pixels[1] += block[1]; \
152 pixels[2] += block[2]; \
153 pixels[3] += block[3]; \
154 pixels[4] += block[4]; \
155 pixels[5] += block[5]; \
156 pixels[6] += block[6]; \
157 pixels[7] += block[7]; \
158 pixels += line_size; \
163 static void FUNCC(add_pixels4 ## suffix)(uint8_t *restrict _pixels, \
168 pixel *restrict pixels = (pixel *restrict)_pixels; \
169 dctcoef *block = (dctcoef*)_block; \
170 line_size /= sizeof(pixel); \
173 pixels[0] += block[0]; \
174 pixels[1] += block[1]; \
175 pixels[2] += block[2]; \
176 pixels[3] += block[3]; \
177 pixels += line_size; \
182 static void FUNCC(clear_block ## suffix)(int16_t *block) \
184 memset(block, 0, sizeof(dctcoef)*64); \
188 * memset(blocks, 0, sizeof(int16_t)*6*64) \
190 static void FUNCC(clear_blocks ## suffix)(int16_t *blocks) \
192 memset(blocks, 0, sizeof(dctcoef)*6*64); \
195 DCTELEM_FUNCS(int16_t, _16)
197 DCTELEM_FUNCS(dctcoef, _32)
200 #include "hpel_template.c"
202 #define PIXOP2(OPNAME, OP) \
203 static inline void FUNC(OPNAME ## _no_rnd_pixels8_l2)(uint8_t *dst, const uint8_t *src1, const uint8_t *src2, int dst_stride, \
204 int src_stride1, int src_stride2, int h){\
208 a= AV_RN4P(&src1[i*src_stride1 ]);\
209 b= AV_RN4P(&src2[i*src_stride2 ]);\
210 OP(*((pixel4*)&dst[i*dst_stride ]), no_rnd_avg_pixel4(a, b));\
211 a= AV_RN4P(&src1[i*src_stride1+4*sizeof(pixel)]);\
212 b= AV_RN4P(&src2[i*src_stride2+4*sizeof(pixel)]);\
213 OP(*((pixel4*)&dst[i*dst_stride+4*sizeof(pixel)]), no_rnd_avg_pixel4(a, b));\
217 static inline void FUNC(OPNAME ## _no_rnd_pixels16_l2)(uint8_t *dst, const uint8_t *src1, const uint8_t *src2, int dst_stride, \
218 int src_stride1, int src_stride2, int h){\
219 FUNC(OPNAME ## _no_rnd_pixels8_l2)(dst , src1 , src2 , dst_stride, src_stride1, src_stride2, h);\
220 FUNC(OPNAME ## _no_rnd_pixels8_l2)(dst+8*sizeof(pixel), src1+8*sizeof(pixel), src2+8*sizeof(pixel), dst_stride, src_stride1, src_stride2, h);\
223 static inline void FUNCC(OPNAME ## _no_rnd_pixels8_x2)(uint8_t *block, const uint8_t *pixels, int line_size, int h){\
224 FUNC(OPNAME ## _no_rnd_pixels8_l2)(block, pixels, pixels+sizeof(pixel), line_size, line_size, line_size, h);\
227 static inline void FUNCC(OPNAME ## _pixels8_x2)(uint8_t *block, const uint8_t *pixels, int line_size, int h){\
228 FUNC(OPNAME ## _pixels8_l2)(block, pixels, pixels+sizeof(pixel), line_size, line_size, line_size, h);\
231 static inline void FUNCC(OPNAME ## _no_rnd_pixels8_y2)(uint8_t *block, const uint8_t *pixels, int line_size, int h){\
232 FUNC(OPNAME ## _no_rnd_pixels8_l2)(block, pixels, pixels+line_size, line_size, line_size, line_size, h);\
235 static inline void FUNCC(OPNAME ## _pixels8_y2)(uint8_t *block, const uint8_t *pixels, int line_size, int h){\
236 FUNC(OPNAME ## _pixels8_l2)(block, pixels, pixels+line_size, line_size, line_size, line_size, h);\
239 static inline void FUNC(OPNAME ## _pixels8_l4)(uint8_t *dst, const uint8_t *src1, const uint8_t *src2, const uint8_t *src3, const uint8_t *src4,\
240 int dst_stride, int src_stride1, int src_stride2,int src_stride3,int src_stride4, int h){\
241 /* FIXME HIGH BIT DEPTH */\
244 uint32_t a, b, c, d, l0, l1, h0, h1;\
245 a= AV_RN32(&src1[i*src_stride1]);\
246 b= AV_RN32(&src2[i*src_stride2]);\
247 c= AV_RN32(&src3[i*src_stride3]);\
248 d= AV_RN32(&src4[i*src_stride4]);\
249 l0= (a&0x03030303UL)\
252 h0= ((a&0xFCFCFCFCUL)>>2)\
253 + ((b&0xFCFCFCFCUL)>>2);\
254 l1= (c&0x03030303UL)\
256 h1= ((c&0xFCFCFCFCUL)>>2)\
257 + ((d&0xFCFCFCFCUL)>>2);\
258 OP(*((uint32_t*)&dst[i*dst_stride]), h0+h1+(((l0+l1)>>2)&0x0F0F0F0FUL));\
259 a= AV_RN32(&src1[i*src_stride1+4]);\
260 b= AV_RN32(&src2[i*src_stride2+4]);\
261 c= AV_RN32(&src3[i*src_stride3+4]);\
262 d= AV_RN32(&src4[i*src_stride4+4]);\
263 l0= (a&0x03030303UL)\
266 h0= ((a&0xFCFCFCFCUL)>>2)\
267 + ((b&0xFCFCFCFCUL)>>2);\
268 l1= (c&0x03030303UL)\
270 h1= ((c&0xFCFCFCFCUL)>>2)\
271 + ((d&0xFCFCFCFCUL)>>2);\
272 OP(*((uint32_t*)&dst[i*dst_stride+4]), h0+h1+(((l0+l1)>>2)&0x0F0F0F0FUL));\
276 static inline void FUNCC(OPNAME ## _pixels4_x2)(uint8_t *block, const uint8_t *pixels, int line_size, int h){\
277 FUNC(OPNAME ## _pixels4_l2)(block, pixels, pixels+sizeof(pixel), line_size, line_size, line_size, h);\
280 static inline void FUNCC(OPNAME ## _pixels4_y2)(uint8_t *block, const uint8_t *pixels, int line_size, int h){\
281 FUNC(OPNAME ## _pixels4_l2)(block, pixels, pixels+line_size, line_size, line_size, line_size, h);\
284 static inline void FUNCC(OPNAME ## _pixels2_x2)(uint8_t *block, const uint8_t *pixels, int line_size, int h){\
285 FUNC(OPNAME ## _pixels2_l2)(block, pixels, pixels+sizeof(pixel), line_size, line_size, line_size, h);\
288 static inline void FUNCC(OPNAME ## _pixels2_y2)(uint8_t *block, const uint8_t *pixels, int line_size, int h){\
289 FUNC(OPNAME ## _pixels2_l2)(block, pixels, pixels+line_size, line_size, line_size, line_size, h);\
292 static inline void FUNC(OPNAME ## _no_rnd_pixels8_l4)(uint8_t *dst, const uint8_t *src1, const uint8_t *src2, const uint8_t *src3, const uint8_t *src4,\
293 int dst_stride, int src_stride1, int src_stride2,int src_stride3,int src_stride4, int h){\
294 /* FIXME HIGH BIT DEPTH*/\
297 uint32_t a, b, c, d, l0, l1, h0, h1;\
298 a= AV_RN32(&src1[i*src_stride1]);\
299 b= AV_RN32(&src2[i*src_stride2]);\
300 c= AV_RN32(&src3[i*src_stride3]);\
301 d= AV_RN32(&src4[i*src_stride4]);\
302 l0= (a&0x03030303UL)\
305 h0= ((a&0xFCFCFCFCUL)>>2)\
306 + ((b&0xFCFCFCFCUL)>>2);\
307 l1= (c&0x03030303UL)\
309 h1= ((c&0xFCFCFCFCUL)>>2)\
310 + ((d&0xFCFCFCFCUL)>>2);\
311 OP(*((uint32_t*)&dst[i*dst_stride]), h0+h1+(((l0+l1)>>2)&0x0F0F0F0FUL));\
312 a= AV_RN32(&src1[i*src_stride1+4]);\
313 b= AV_RN32(&src2[i*src_stride2+4]);\
314 c= AV_RN32(&src3[i*src_stride3+4]);\
315 d= AV_RN32(&src4[i*src_stride4+4]);\
316 l0= (a&0x03030303UL)\
319 h0= ((a&0xFCFCFCFCUL)>>2)\
320 + ((b&0xFCFCFCFCUL)>>2);\
321 l1= (c&0x03030303UL)\
323 h1= ((c&0xFCFCFCFCUL)>>2)\
324 + ((d&0xFCFCFCFCUL)>>2);\
325 OP(*((uint32_t*)&dst[i*dst_stride+4]), h0+h1+(((l0+l1)>>2)&0x0F0F0F0FUL));\
328 static inline void FUNC(OPNAME ## _pixels16_l4)(uint8_t *dst, const uint8_t *src1, const uint8_t *src2, const uint8_t *src3, const uint8_t *src4,\
329 int dst_stride, int src_stride1, int src_stride2,int src_stride3,int src_stride4, int h){\
330 FUNC(OPNAME ## _pixels8_l4)(dst , src1 , src2 , src3 , src4 , dst_stride, src_stride1, src_stride2, src_stride3, src_stride4, h);\
331 FUNC(OPNAME ## _pixels8_l4)(dst+8*sizeof(pixel), src1+8*sizeof(pixel), src2+8*sizeof(pixel), src3+8*sizeof(pixel), src4+8*sizeof(pixel), dst_stride, src_stride1, src_stride2, src_stride3, src_stride4, h);\
333 static inline void FUNC(OPNAME ## _no_rnd_pixels16_l4)(uint8_t *dst, const uint8_t *src1, const uint8_t *src2, const uint8_t *src3, const uint8_t *src4,\
334 int dst_stride, int src_stride1, int src_stride2,int src_stride3,int src_stride4, int h){\
335 FUNC(OPNAME ## _no_rnd_pixels8_l4)(dst , src1 , src2 , src3 , src4 , dst_stride, src_stride1, src_stride2, src_stride3, src_stride4, h);\
336 FUNC(OPNAME ## _no_rnd_pixels8_l4)(dst+8*sizeof(pixel), src1+8*sizeof(pixel), src2+8*sizeof(pixel), src3+8*sizeof(pixel), src4+8*sizeof(pixel), dst_stride, src_stride1, src_stride2, src_stride3, src_stride4, h);\
339 static inline void FUNCC(OPNAME ## _pixels2_xy2)(uint8_t *_block, const uint8_t *_pixels, int line_size, int h)\
341 int i, a0, b0, a1, b1;\
342 pixel *block = (pixel*)_block;\
343 const pixel *pixels = (const pixel*)_pixels;\
344 line_size /= sizeof(pixel);\
351 for(i=0; i<h; i+=2){\
357 block[0]= (a1+a0)>>2; /* FIXME non put */\
358 block[1]= (b1+b0)>>2;\
368 block[0]= (a1+a0)>>2;\
369 block[1]= (b1+b0)>>2;\
375 static inline void FUNCC(OPNAME ## _pixels4_xy2)(uint8_t *block, const uint8_t *pixels, int line_size, int h)\
377 /* FIXME HIGH BIT DEPTH */\
379 const uint32_t a= AV_RN32(pixels );\
380 const uint32_t b= AV_RN32(pixels+1);\
381 uint32_t l0= (a&0x03030303UL)\
384 uint32_t h0= ((a&0xFCFCFCFCUL)>>2)\
385 + ((b&0xFCFCFCFCUL)>>2);\
389 for(i=0; i<h; i+=2){\
390 uint32_t a= AV_RN32(pixels );\
391 uint32_t b= AV_RN32(pixels+1);\
392 l1= (a&0x03030303UL)\
394 h1= ((a&0xFCFCFCFCUL)>>2)\
395 + ((b&0xFCFCFCFCUL)>>2);\
396 OP(*((uint32_t*)block), h0+h1+(((l0+l1)>>2)&0x0F0F0F0FUL));\
399 a= AV_RN32(pixels );\
400 b= AV_RN32(pixels+1);\
401 l0= (a&0x03030303UL)\
404 h0= ((a&0xFCFCFCFCUL)>>2)\
405 + ((b&0xFCFCFCFCUL)>>2);\
406 OP(*((uint32_t*)block), h0+h1+(((l0+l1)>>2)&0x0F0F0F0FUL));\
412 static inline void FUNCC(OPNAME ## _pixels8_xy2)(uint8_t *block, const uint8_t *pixels, int line_size, int h)\
414 /* FIXME HIGH BIT DEPTH */\
418 const uint32_t a= AV_RN32(pixels );\
419 const uint32_t b= AV_RN32(pixels+1);\
420 uint32_t l0= (a&0x03030303UL)\
423 uint32_t h0= ((a&0xFCFCFCFCUL)>>2)\
424 + ((b&0xFCFCFCFCUL)>>2);\
428 for(i=0; i<h; i+=2){\
429 uint32_t a= AV_RN32(pixels );\
430 uint32_t b= AV_RN32(pixels+1);\
431 l1= (a&0x03030303UL)\
433 h1= ((a&0xFCFCFCFCUL)>>2)\
434 + ((b&0xFCFCFCFCUL)>>2);\
435 OP(*((uint32_t*)block), h0+h1+(((l0+l1)>>2)&0x0F0F0F0FUL));\
438 a= AV_RN32(pixels );\
439 b= AV_RN32(pixels+1);\
440 l0= (a&0x03030303UL)\
443 h0= ((a&0xFCFCFCFCUL)>>2)\
444 + ((b&0xFCFCFCFCUL)>>2);\
445 OP(*((uint32_t*)block), h0+h1+(((l0+l1)>>2)&0x0F0F0F0FUL));\
449 pixels+=4-line_size*(h+1);\
450 block +=4-line_size*h;\
454 static inline void FUNCC(OPNAME ## _no_rnd_pixels8_xy2)(uint8_t *block, const uint8_t *pixels, int line_size, int h)\
456 /* FIXME HIGH BIT DEPTH */\
460 const uint32_t a= AV_RN32(pixels );\
461 const uint32_t b= AV_RN32(pixels+1);\
462 uint32_t l0= (a&0x03030303UL)\
465 uint32_t h0= ((a&0xFCFCFCFCUL)>>2)\
466 + ((b&0xFCFCFCFCUL)>>2);\
470 for(i=0; i<h; i+=2){\
471 uint32_t a= AV_RN32(pixels );\
472 uint32_t b= AV_RN32(pixels+1);\
473 l1= (a&0x03030303UL)\
475 h1= ((a&0xFCFCFCFCUL)>>2)\
476 + ((b&0xFCFCFCFCUL)>>2);\
477 OP(*((uint32_t*)block), h0+h1+(((l0+l1)>>2)&0x0F0F0F0FUL));\
480 a= AV_RN32(pixels );\
481 b= AV_RN32(pixels+1);\
482 l0= (a&0x03030303UL)\
485 h0= ((a&0xFCFCFCFCUL)>>2)\
486 + ((b&0xFCFCFCFCUL)>>2);\
487 OP(*((uint32_t*)block), h0+h1+(((l0+l1)>>2)&0x0F0F0F0FUL));\
491 pixels+=4-line_size*(h+1);\
492 block +=4-line_size*h;\
496 CALL_2X_PIXELS(FUNCC(OPNAME ## _pixels16_x2) , FUNCC(OPNAME ## _pixels8_x2) , 8*sizeof(pixel))\
497 CALL_2X_PIXELS(FUNCC(OPNAME ## _pixels16_y2) , FUNCC(OPNAME ## _pixels8_y2) , 8*sizeof(pixel))\
498 CALL_2X_PIXELS(FUNCC(OPNAME ## _pixels16_xy2), FUNCC(OPNAME ## _pixels8_xy2), 8*sizeof(pixel))\
499 av_unused CALL_2X_PIXELS(FUNCC(OPNAME ## _no_rnd_pixels16) , FUNCC(OPNAME ## _pixels8) , 8*sizeof(pixel))\
500 CALL_2X_PIXELS(FUNCC(OPNAME ## _no_rnd_pixels16_x2) , FUNCC(OPNAME ## _no_rnd_pixels8_x2) , 8*sizeof(pixel))\
501 CALL_2X_PIXELS(FUNCC(OPNAME ## _no_rnd_pixels16_y2) , FUNCC(OPNAME ## _no_rnd_pixels8_y2) , 8*sizeof(pixel))\
502 CALL_2X_PIXELS(FUNCC(OPNAME ## _no_rnd_pixels16_xy2), FUNCC(OPNAME ## _no_rnd_pixels8_xy2), 8*sizeof(pixel))\
504 #define op_avg(a, b) a = rnd_avg_pixel4(a, b)
505 #define op_put(a, b) a = b
507 #define put_no_rnd_pixels8_8_c put_pixels8_8_c
/*
 * Generates the H.264 chroma bilinear MC kernels (2/4/8 wide) for one
 * output op.  x,y are the 1/8-pel fractional offsets (0..7); the four
 * bilinear weights A..D sum to 64, so OP receives 6-bit-scaled sums.
 * When D==0 the filter degenerates to a 1-D blend along x or y (step).
 */
#define H264_CHROMA_MC(OPNAME, OP)\
static void FUNCC(OPNAME ## h264_chroma_mc2)(uint8_t *_dst/*align 8*/, uint8_t *_src/*align 1*/, int stride, int h, int x, int y){\
    pixel *dst = (pixel*)_dst;\
    pixel *src = (pixel*)_src;\
    const int A=(8-x)*(8-y);\
    const int B=(  x)*(8-y);\
    const int C=(8-x)*(  y);\
    const int D=(  x)*(  y);\
    int i;\
    \
    stride /= sizeof(pixel);\
    \
    assert(x<8 && y<8 && x>=0 && y>=0);\
\
    if(D){\
        for(i=0; i<h; i++){\
            OP(dst[0], (A*src[0] + B*src[1] + C*src[stride+0] + D*src[stride+1]));\
            OP(dst[1], (A*src[1] + B*src[2] + C*src[stride+1] + D*src[stride+2]));\
            dst+= stride;\
            src+= stride;\
        }\
    }else{\
        const int E= B+C;\
        const int step= C ? stride : 1;\
        for(i=0; i<h; i++){\
            OP(dst[0], (A*src[0] + E*src[step+0]));\
            OP(dst[1], (A*src[1] + E*src[step+1]));\
            dst+= stride;\
            src+= stride;\
        }\
    }\
}\
\
static void FUNCC(OPNAME ## h264_chroma_mc4)(uint8_t *_dst/*align 8*/, uint8_t *_src/*align 1*/, int stride, int h, int x, int y){\
    pixel *dst = (pixel*)_dst;\
    pixel *src = (pixel*)_src;\
    const int A=(8-x)*(8-y);\
    const int B=(  x)*(8-y);\
    const int C=(8-x)*(  y);\
    const int D=(  x)*(  y);\
    int i;\
    \
    stride /= sizeof(pixel);\
    \
    assert(x<8 && y<8 && x>=0 && y>=0);\
\
    if(D){\
        for(i=0; i<h; i++){\
            OP(dst[0], (A*src[0] + B*src[1] + C*src[stride+0] + D*src[stride+1]));\
            OP(dst[1], (A*src[1] + B*src[2] + C*src[stride+1] + D*src[stride+2]));\
            OP(dst[2], (A*src[2] + B*src[3] + C*src[stride+2] + D*src[stride+3]));\
            OP(dst[3], (A*src[3] + B*src[4] + C*src[stride+3] + D*src[stride+4]));\
            dst+= stride;\
            src+= stride;\
        }\
    }else{\
        const int E= B+C;\
        const int step= C ? stride : 1;\
        for(i=0; i<h; i++){\
            OP(dst[0], (A*src[0] + E*src[step+0]));\
            OP(dst[1], (A*src[1] + E*src[step+1]));\
            OP(dst[2], (A*src[2] + E*src[step+2]));\
            OP(dst[3], (A*src[3] + E*src[step+3]));\
            dst+= stride;\
            src+= stride;\
        }\
    }\
}\
\
static void FUNCC(OPNAME ## h264_chroma_mc8)(uint8_t *_dst/*align 8*/, uint8_t *_src/*align 1*/, int stride, int h, int x, int y){\
    pixel *dst = (pixel*)_dst;\
    pixel *src = (pixel*)_src;\
    const int A=(8-x)*(8-y);\
    const int B=(  x)*(8-y);\
    const int C=(8-x)*(  y);\
    const int D=(  x)*(  y);\
    int i;\
    \
    stride /= sizeof(pixel);\
    \
    assert(x<8 && y<8 && x>=0 && y>=0);\
\
    if(D){\
        for(i=0; i<h; i++){\
            OP(dst[0], (A*src[0] + B*src[1] + C*src[stride+0] + D*src[stride+1]));\
            OP(dst[1], (A*src[1] + B*src[2] + C*src[stride+1] + D*src[stride+2]));\
            OP(dst[2], (A*src[2] + B*src[3] + C*src[stride+2] + D*src[stride+3]));\
            OP(dst[3], (A*src[3] + B*src[4] + C*src[stride+3] + D*src[stride+4]));\
            OP(dst[4], (A*src[4] + B*src[5] + C*src[stride+4] + D*src[stride+5]));\
            OP(dst[5], (A*src[5] + B*src[6] + C*src[stride+5] + D*src[stride+6]));\
            OP(dst[6], (A*src[6] + B*src[7] + C*src[stride+6] + D*src[stride+7]));\
            OP(dst[7], (A*src[7] + B*src[8] + C*src[stride+7] + D*src[stride+8]));\
            dst+= stride;\
            src+= stride;\
        }\
    }else{\
        const int E= B+C;\
        const int step= C ? stride : 1;\
        for(i=0; i<h; i++){\
            OP(dst[0], (A*src[0] + E*src[step+0]));\
            OP(dst[1], (A*src[1] + E*src[step+1]));\
            OP(dst[2], (A*src[2] + E*src[step+2]));\
            OP(dst[3], (A*src[3] + E*src[step+3]));\
            OP(dst[4], (A*src[4] + E*src[step+4]));\
            OP(dst[5], (A*src[5] + E*src[step+5]));\
            OP(dst[6], (A*src[6] + E*src[step+6]));\
            OP(dst[7], (A*src[7] + E*src[step+7]));\
            dst+= stride;\
            src+= stride;\
        }\
    }\
}
624 #define op_avg(a, b) a = (((a)+(((b) + 32)>>6)+1)>>1)
625 #define op_put(a, b) a = (((b) + 32)>>6)
627 H264_CHROMA_MC(put_ , op_put)
628 H264_CHROMA_MC(avg_ , op_avg)
632 void FUNCC(ff_put_pixels8x8)(uint8_t *dst, uint8_t *src, int stride) {
633 FUNCC(put_pixels8)(dst, src, stride, 8);
635 void FUNCC(ff_avg_pixels8x8)(uint8_t *dst, uint8_t *src, int stride) {
636 FUNCC(avg_pixels8)(dst, src, stride, 8);
638 void FUNCC(ff_put_pixels16x16)(uint8_t *dst, uint8_t *src, int stride) {
639 FUNCC(put_pixels16)(dst, src, stride, 16);
641 void FUNCC(ff_avg_pixels16x16)(uint8_t *dst, uint8_t *src, int stride) {
642 FUNCC(avg_pixels16)(dst, src, stride, 16);