3 * Copyright (c) 2000, 2001 Fabrice Bellard
4 * Copyright (c) 2002-2004 Michael Niedermayer <michaelni@gmx.at>
6 * gmc & q-pel & 32/64 bit based MC by Michael Niedermayer <michaelni@gmx.at>
8 * This file is part of FFmpeg.
10 * FFmpeg is free software; you can redistribute it and/or
11 * modify it under the terms of the GNU Lesser General Public
12 * License as published by the Free Software Foundation; either
13 * version 2.1 of the License, or (at your option) any later version.
15 * FFmpeg is distributed in the hope that it will be useful,
16 * but WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
18 * Lesser General Public License for more details.
20 * You should have received a copy of the GNU Lesser General Public
21 * License along with FFmpeg; if not, write to the Free Software
22 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
30 #include "bit_depth_template.c"
33 /* draw the edges of width 'w' of an image of size width, height */
34 //FIXME check that this is ok for mpeg4 interlaced
/**
 * Replicate the outermost pixels of an image into the surrounding edge
 * region, so that motion compensation may read slightly outside the
 * visible picture without special-casing.
 *
 * @param p_buf   pointer to the top-left visible pixel (bytes)
 * @param p_wrap  line stride in bytes; converted to a stride in pixels below
 * @param width   visible width in pixels
 * @param height  visible height in pixels
 * @param w       horizontal edge width to fill, in pixels
 * @param h       vertical edge height to fill, in lines
 * @param sides   bitmask selecting which borders to draw (EDGE_BOTTOM tested
 *                below; presumably EDGE_TOP guards the first memcpy loop —
 *                that guard is not visible in this excerpt, TODO confirm)
 *
 * NOTE(review): this excerpt appears truncated — the declaration of the loop
 * index, the initialization of 'ptr', the EDGE_TOP guard and the closing
 * braces are not visible here. Verify against the full file before editing.
 */
static void FUNCC(draw_edges)(uint8_t *p_buf, int p_wrap, int width, int height, int w, int h, int sides)
pixel *buf = (pixel*)p_buf;
/* stride is given in bytes; work in units of 'pixel' from here on */
int wrap = p_wrap / sizeof(pixel);
pixel *ptr, *last_line;
/* left and right edges: smear the first/last pixel of each line outward.
 * NOTE(review): memset writes bytes, not pixels — for sizeof(pixel) > 1 this
 * only looks correct if all bytes of the edge pixel are equal; confirm how
 * the high-bit-depth instantiation handles this. */
for(i=0;i<height;i++) {
memset(ptr - w, ptr[0], w);
memset(ptr + width, ptr[width-1], w);
/* top and bottom + corners */
last_line = buf + (height - 1) * wrap;
/* replicate the first line upward h times; (width + w + w) covers the
 * already-filled left/right edges so the corners are replicated too */
for(i = 0; i < h; i++)
memcpy(buf - (i + 1) * wrap, buf, (width + w + w) * sizeof(pixel)); // top
if (sides & EDGE_BOTTOM)
/* replicate the last line downward h times, corners included */
for (i = 0; i < h; i++)
memcpy(last_line + (i + 1) * wrap, last_line, (width + w + w) * sizeof(pixel)); // bottom
/**
 * Read a block of pixels from an image plane into a 16-bit coefficient
 * block (the usual input layout for a DCT).
 *
 * @param block    destination, 16-bit values; av_restrict promises no
 *                 aliasing with the source
 * @param _pixels  source plane, reinterpreted as 'pixel' below
 *
 * NOTE(review): this excerpt is truncated — the line_size parameter, the
 * copy loop, and the braces are not visible here; only the pointer cast and
 * the per-line advance remain. Verify against the full file before editing.
 */
static void FUNCC(get_pixels)(int16_t *av_restrict block,
const uint8_t *_pixels,
const pixel *pixels = (const pixel *) _pixels;
/* line_size is in bytes; advance by one line in units of 'pixel' */
pixels += line_size / sizeof(pixel);
/**
 * Zero a single 64-entry (8x8) block of 16-bit coefficients.
 * NOTE(review): the surrounding braces are not visible in this excerpt.
 */
static void FUNCC(clear_block)(int16_t *block)
memset(block, 0, sizeof(int16_t)*64);
* Zero six consecutive 8x8 blocks of 16-bit coefficients (one macroblock's
* worth of 4:2:0 data: 4 luma + 2 chroma blocks); equivalent to
* memset(blocks, 0, sizeof(int16_t)*6*64)
static void FUNCC(clear_blocks)(int16_t *blocks)
memset(blocks, 0, sizeof(int16_t)*6*64);
100 #include "hpel_template.c"
/**
 * PIXOP2(OPNAME, OP) — template macro instantiating a family of pixel
 * averaging / motion-compensation helpers for a given output operation.
 * OPNAME is the name prefix (e.g. put, avg); OP(dest, value) is how the
 * computed value is committed (plain store for put, rounded average for avg
 * — see op_put/op_avg below).
 *
 * Functions generated (as visible in this excerpt):
 *  - OPNAME##_no_rnd_pixels8_l2 / _no_rnd_pixels16_l2:
 *      average two 8/16-pixel-wide sources per line without the rounding
 *      bias, using packed pixel4 loads (AV_RN4P) and no_rnd_avg_pixel4.
 *      The 16-wide variant is two 8-wide calls offset by 8*sizeof(pixel).
 *  - OPNAME##_pixels8_l4 / _no_rnd_pixels8_l4 (and the 16-wide wrappers):
 *      average FOUR sources per line. Each 32-bit load is split into the
 *      low 2 bits (l0/l1, plus a rounding constant on the lines not visible
 *      here) and the high 6 bits pre-shifted down (h0/h1); summing the two
 *      halves separately avoids inter-byte carry, then the low-bit carry is
 *      folded back in with (((l0+l1)>>2)&0x0F0F0F0F). Marked
 *      "FIXME HIGH BIT DEPTH": these use raw AV_RN32/uint32_t and byte
 *      masks, so they are only correct for 8-bit pixels.
 *  - OPNAME##_pixels8_xy2 (+ 16-wide via CALL_2X_PIXELS):
 *      half-pel interpolation in both x and y: averages the 2x2 pixel
 *      neighborhood by carrying l0/h0 (current line pair) across loop
 *      iterations and combining with l1/h1 (next line), two rows per
 *      iteration. Also 8-bit-only per the same FIXME.
 *
 * NOTE(review): this excerpt is truncated — many macro continuation lines
 * (rounding constants added to l0/l1, loop headers, pointer advances,
 * closing braces) are missing. Do not edit the macro body without the full
 * file; the difference between the rnd and no_rnd variants lives largely in
 * the lines not shown (the 0x02020202 vs 0x01010101 rounding constants).
 */
#define PIXOP2(OPNAME, OP) \
static inline void FUNC(OPNAME ## _no_rnd_pixels8_l2)(uint8_t *dst, const uint8_t *src1, const uint8_t *src2, int dst_stride, \
int src_stride1, int src_stride2, int h){\
a= AV_RN4P(&src1[i*src_stride1 ]);\
b= AV_RN4P(&src2[i*src_stride2 ]);\
OP(*((pixel4*)&dst[i*dst_stride ]), no_rnd_avg_pixel4(a, b));\
a= AV_RN4P(&src1[i*src_stride1+4*sizeof(pixel)]);\
b= AV_RN4P(&src2[i*src_stride2+4*sizeof(pixel)]);\
OP(*((pixel4*)&dst[i*dst_stride+4*sizeof(pixel)]), no_rnd_avg_pixel4(a, b));\
static inline void FUNC(OPNAME ## _no_rnd_pixels16_l2)(uint8_t *dst, const uint8_t *src1, const uint8_t *src2, int dst_stride, \
int src_stride1, int src_stride2, int h){\
FUNC(OPNAME ## _no_rnd_pixels8_l2)(dst , src1 , src2 , dst_stride, src_stride1, src_stride2, h);\
FUNC(OPNAME ## _no_rnd_pixels8_l2)(dst+8*sizeof(pixel), src1+8*sizeof(pixel), src2+8*sizeof(pixel), dst_stride, src_stride1, src_stride2, h);\
static inline void FUNC(OPNAME ## _pixels8_l4)(uint8_t *dst, const uint8_t *src1, const uint8_t *src2, const uint8_t *src3, const uint8_t *src4,\
int dst_stride, int src_stride1, int src_stride2,int src_stride3,int src_stride4, int h){\
/* FIXME HIGH BIT DEPTH */\
uint32_t a, b, c, d, l0, l1, h0, h1;\
a= AV_RN32(&src1[i*src_stride1]);\
b= AV_RN32(&src2[i*src_stride2]);\
c= AV_RN32(&src3[i*src_stride3]);\
d= AV_RN32(&src4[i*src_stride4]);\
l0= (a&0x03030303UL)\
h0= ((a&0xFCFCFCFCUL)>>2)\
+ ((b&0xFCFCFCFCUL)>>2);\
l1= (c&0x03030303UL)\
h1= ((c&0xFCFCFCFCUL)>>2)\
+ ((d&0xFCFCFCFCUL)>>2);\
OP(*((uint32_t*)&dst[i*dst_stride]), h0+h1+(((l0+l1)>>2)&0x0F0F0F0FUL));\
a= AV_RN32(&src1[i*src_stride1+4]);\
b= AV_RN32(&src2[i*src_stride2+4]);\
c= AV_RN32(&src3[i*src_stride3+4]);\
d= AV_RN32(&src4[i*src_stride4+4]);\
l0= (a&0x03030303UL)\
h0= ((a&0xFCFCFCFCUL)>>2)\
+ ((b&0xFCFCFCFCUL)>>2);\
l1= (c&0x03030303UL)\
h1= ((c&0xFCFCFCFCUL)>>2)\
+ ((d&0xFCFCFCFCUL)>>2);\
OP(*((uint32_t*)&dst[i*dst_stride+4]), h0+h1+(((l0+l1)>>2)&0x0F0F0F0FUL));\
static inline void FUNC(OPNAME ## _no_rnd_pixels8_l4)(uint8_t *dst, const uint8_t *src1, const uint8_t *src2, const uint8_t *src3, const uint8_t *src4,\
int dst_stride, int src_stride1, int src_stride2,int src_stride3,int src_stride4, int h){\
/* FIXME HIGH BIT DEPTH*/\
uint32_t a, b, c, d, l0, l1, h0, h1;\
a= AV_RN32(&src1[i*src_stride1]);\
b= AV_RN32(&src2[i*src_stride2]);\
c= AV_RN32(&src3[i*src_stride3]);\
d= AV_RN32(&src4[i*src_stride4]);\
l0= (a&0x03030303UL)\
h0= ((a&0xFCFCFCFCUL)>>2)\
+ ((b&0xFCFCFCFCUL)>>2);\
l1= (c&0x03030303UL)\
h1= ((c&0xFCFCFCFCUL)>>2)\
+ ((d&0xFCFCFCFCUL)>>2);\
OP(*((uint32_t*)&dst[i*dst_stride]), h0+h1+(((l0+l1)>>2)&0x0F0F0F0FUL));\
a= AV_RN32(&src1[i*src_stride1+4]);\
b= AV_RN32(&src2[i*src_stride2+4]);\
c= AV_RN32(&src3[i*src_stride3+4]);\
d= AV_RN32(&src4[i*src_stride4+4]);\
l0= (a&0x03030303UL)\
h0= ((a&0xFCFCFCFCUL)>>2)\
+ ((b&0xFCFCFCFCUL)>>2);\
l1= (c&0x03030303UL)\
h1= ((c&0xFCFCFCFCUL)>>2)\
+ ((d&0xFCFCFCFCUL)>>2);\
OP(*((uint32_t*)&dst[i*dst_stride+4]), h0+h1+(((l0+l1)>>2)&0x0F0F0F0FUL));\
static inline void FUNC(OPNAME ## _pixels16_l4)(uint8_t *dst, const uint8_t *src1, const uint8_t *src2, const uint8_t *src3, const uint8_t *src4,\
int dst_stride, int src_stride1, int src_stride2,int src_stride3,int src_stride4, int h){\
FUNC(OPNAME ## _pixels8_l4)(dst , src1 , src2 , src3 , src4 , dst_stride, src_stride1, src_stride2, src_stride3, src_stride4, h);\
FUNC(OPNAME ## _pixels8_l4)(dst+8*sizeof(pixel), src1+8*sizeof(pixel), src2+8*sizeof(pixel), src3+8*sizeof(pixel), src4+8*sizeof(pixel), dst_stride, src_stride1, src_stride2, src_stride3, src_stride4, h);\
static inline void FUNC(OPNAME ## _no_rnd_pixels16_l4)(uint8_t *dst, const uint8_t *src1, const uint8_t *src2, const uint8_t *src3, const uint8_t *src4,\
int dst_stride, int src_stride1, int src_stride2,int src_stride3,int src_stride4, int h){\
FUNC(OPNAME ## _no_rnd_pixels8_l4)(dst , src1 , src2 , src3 , src4 , dst_stride, src_stride1, src_stride2, src_stride3, src_stride4, h);\
FUNC(OPNAME ## _no_rnd_pixels8_l4)(dst+8*sizeof(pixel), src1+8*sizeof(pixel), src2+8*sizeof(pixel), src3+8*sizeof(pixel), src4+8*sizeof(pixel), dst_stride, src_stride1, src_stride2, src_stride3, src_stride4, h);\
static inline void FUNCC(OPNAME ## _pixels8_xy2)(uint8_t *block, const uint8_t *pixels, ptrdiff_t line_size, int h)\
{\
/* FIXME HIGH BIT DEPTH */\
const uint32_t a= AV_RN32(pixels );\
const uint32_t b= AV_RN32(pixels+1);\
uint32_t l0= (a&0x03030303UL)\
uint32_t h0= ((a&0xFCFCFCFCUL)>>2)\
+ ((b&0xFCFCFCFCUL)>>2);\
for(i=0; i<h; i+=2){\
uint32_t a= AV_RN32(pixels );\
uint32_t b= AV_RN32(pixels+1);\
l1= (a&0x03030303UL)\
h1= ((a&0xFCFCFCFCUL)>>2)\
+ ((b&0xFCFCFCFCUL)>>2);\
OP(*((uint32_t*)block), h0+h1+(((l0+l1)>>2)&0x0F0F0F0FUL));\
a= AV_RN32(pixels );\
b= AV_RN32(pixels+1);\
l0= (a&0x03030303UL)\
h0= ((a&0xFCFCFCFCUL)>>2)\
+ ((b&0xFCFCFCFCUL)>>2);\
OP(*((uint32_t*)block), h0+h1+(((l0+l1)>>2)&0x0F0F0F0FUL));\
pixels+=4-line_size*(h+1);\
block +=4-line_size*h;\
CALL_2X_PIXELS(FUNCC(OPNAME ## _pixels16_xy2), FUNCC(OPNAME ## _pixels8_xy2), 8*sizeof(pixel))\
/* OP implementations fed to PIXOP2 above:
 * op_avg blends the new value into the destination with a rounded average
 * (used for B/P-frame averaging MC); op_put simply stores it. */
#define op_avg(a, b) a = rnd_avg_pixel4(a, b)
#define op_put(a, b) a = b
/* For 8x8 blocks the rounded and non-rounded put are identical, so alias
 * the no_rnd name to the plain put implementation. */
#define put_no_rnd_pixels8_8_c put_pixels8_8_c