/*
 * H.26L/H.264/AVC/JVT/14496-10/... encoder/decoder
 * Copyright (c) 2003 Michael Niedermayer <michaelni@gmx.at>
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

/**
 * @file
 * H.264 / AVC / MPEG-4 part10 prediction functions.
 * @author Michael Niedermayer <michaelni@gmx.at>
 */

#include "libavutil/attributes.h"
#include "libavutil/avassert.h"
#include "libavutil/intreadwrite.h"
#include "avcodec.h"
#include "h264pred.h"

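/* The bit-depth template below is expanded once per supported depth; each
 * pass generates the pred*_<depth>_c functions that ff_h264_pred_init()
 * selects through the FUNC()/FUNCC() macros further down. */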
#define BIT_DEPTH 8
#include "h264pred_template.c"
#undef BIT_DEPTH

#define BIT_DEPTH 9
#include "h264pred_template.c"
#undef BIT_DEPTH

#define BIT_DEPTH 10
#include "h264pred_template.c"
#undef BIT_DEPTH

#define BIT_DEPTH 12
#include "h264pred_template.c"
#undef BIT_DEPTH

#define BIT_DEPTH 14
#include "h264pred_template.c"
#undef BIT_DEPTH

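/* VP7/VP8 vertical 4x4 prediction: smooth the top edge with a 1-2-1 filter
 * (taking the top-left and first top-right sample into account) and
 * replicate the resulting row over all four lines. */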
static void pred4x4_vertical_vp8_c(uint8_t *src, const uint8_t *topright,
                                   ptrdiff_t stride)
{
    const unsigned lt = src[-1-1*stride];
    LOAD_TOP_EDGE
    LOAD_TOP_RIGHT_EDGE
    uint32_t v = PACK_4U8((lt + 2*t0 + t1 + 2) >> 2,
                          (t0 + 2*t1 + t2 + 2) >> 2,
                          (t1 + 2*t2 + t3 + 2) >> 2,
                          (t2 + 2*t3 + t4 + 2) >> 2);

    AV_WN32A(src+0*stride, v);
    AV_WN32A(src+1*stride, v);
    AV_WN32A(src+2*stride, v);
    AV_WN32A(src+3*stride, v);
}

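/* VP7/VP8 horizontal 4x4 prediction: each row is filled with one 1-2-1
 * filtered left-edge sample; the last row reuses l3 because no sample
 * below the block is available. */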
static void pred4x4_horizontal_vp8_c(uint8_t *src, const uint8_t *topright,
                                     ptrdiff_t stride)
{
    const unsigned lt = src[-1-1*stride];
    LOAD_LEFT_EDGE

    AV_WN32A(src+0*stride, ((lt + 2*l0 + l1 + 2) >> 2)*0x01010101);
    AV_WN32A(src+1*stride, ((l0 + 2*l1 + l2 + 2) >> 2)*0x01010101);
    AV_WN32A(src+2*stride, ((l1 + 2*l2 + l3 + 2) >> 2)*0x01010101);
    AV_WN32A(src+3*stride, ((l2 + 2*l3 + l3 + 2) >> 2)*0x01010101);
}

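/* SVQ3 down-left 4x4 prediction: instead of the H.264 down-left filter,
 * each anti-diagonal is the plain average of one left and one top sample,
 * with the sample index clamped to 3. */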
static void pred4x4_down_left_svq3_c(uint8_t *src, const uint8_t *topright,
                                     ptrdiff_t stride)
{
    LOAD_TOP_EDGE
    LOAD_LEFT_EDGE

    src[0+0*stride]=(l1 + t1)>>1;
    src[1+0*stride]=
    src[0+1*stride]=(l2 + t2)>>1;
    src[2+0*stride]=
    src[1+1*stride]=
    src[0+2*stride]=
    src[3+0*stride]=
    src[2+1*stride]=
    src[1+2*stride]=
    src[0+3*stride]=
    src[3+1*stride]=
    src[2+2*stride]=
    src[1+3*stride]=
    src[3+2*stride]=
    src[2+3*stride]=
    src[3+3*stride]=(l3 + t3)>>1;
}

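/* RV40 down-left 4x4 prediction: each anti-diagonal averages filtered
 * samples from the top/top-right edge with filtered samples from the
 * left/down-left edge. */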
static void pred4x4_down_left_rv40_c(uint8_t *src, const uint8_t *topright,
                                     ptrdiff_t stride)
{
    LOAD_TOP_EDGE
    LOAD_TOP_RIGHT_EDGE
    LOAD_LEFT_EDGE
    LOAD_DOWN_LEFT_EDGE

    src[0+0*stride]=(t0 + t2 + 2*t1 + 2 + l0 + l2 + 2*l1 + 2)>>3;
    src[1+0*stride]=
    src[0+1*stride]=(t1 + t3 + 2*t2 + 2 + l1 + l3 + 2*l2 + 2)>>3;
    src[2+0*stride]=
    src[1+1*stride]=
    src[0+2*stride]=(t2 + t4 + 2*t3 + 2 + l2 + l4 + 2*l3 + 2)>>3;
    src[3+0*stride]=
    src[2+1*stride]=
    src[1+2*stride]=
    src[0+3*stride]=(t3 + t5 + 2*t4 + 2 + l3 + l5 + 2*l4 + 2)>>3;
    src[3+1*stride]=
    src[2+2*stride]=
    src[1+3*stride]=(t4 + t6 + 2*t5 + 2 + l4 + l6 + 2*l5 + 2)>>3;
    src[3+2*stride]=
    src[2+3*stride]=(t5 + t7 + 2*t6 + 2 + l5 + l7 + 2*l6 + 2)>>3;
    src[3+3*stride]=(t6 + t7 + 1 + l6 + l7 + 1)>>2;
}

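/* Same as pred4x4_down_left_rv40_c, but for blocks whose down-left
 * neighbours are unavailable: l3 is replicated in place of l4..l7. */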
static void pred4x4_down_left_rv40_nodown_c(uint8_t *src,
                                            const uint8_t *topright,
                                            ptrdiff_t stride)
{
    LOAD_TOP_EDGE
    LOAD_TOP_RIGHT_EDGE
    LOAD_LEFT_EDGE

    src[0+0*stride]=(t0 + t2 + 2*t1 + 2 + l0 + l2 + 2*l1 + 2)>>3;
    src[1+0*stride]=
    src[0+1*stride]=(t1 + t3 + 2*t2 + 2 + l1 + l3 + 2*l2 + 2)>>3;
    src[2+0*stride]=
    src[1+1*stride]=
    src[0+2*stride]=(t2 + t4 + 2*t3 + 2 + l2 + 3*l3 + 2)>>3;
    src[3+0*stride]=
    src[2+1*stride]=
    src[1+2*stride]=
    src[0+3*stride]=(t3 + t5 + 2*t4 + 2 + l3*4 + 2)>>3;
    src[3+1*stride]=
    src[2+2*stride]=
    src[1+3*stride]=(t4 + t6 + 2*t5 + 2 + l3*4 + 2)>>3;
    src[3+2*stride]=
    src[2+3*stride]=(t5 + t7 + 2*t6 + 2 + l3*4 + 2)>>3;
    src[3+3*stride]=(t6 + t7 + 1 + 2*l3 + 1)>>2;
}

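/* Shared body for the RV40 vertical-left prediction: callers pass the
 * left-edge samples plus either the real down-left sample (l4) or l3
 * repeated when it is unavailable. */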
static void pred4x4_vertical_left_rv40(uint8_t *src, const uint8_t *topright,
                                       ptrdiff_t stride,
                                       const int l0, const int l1, const int l2,
                                       const int l3, const int l4)
{
    LOAD_TOP_EDGE
    LOAD_TOP_RIGHT_EDGE

    src[0+0*stride]=(2*t0 + 2*t1 + l1 + 2*l2 + l3 + 4)>>3;
    src[1+0*stride]=
    src[0+2*stride]=(t1 + t2 + 1)>>1;
    src[2+0*stride]=
    src[1+2*stride]=(t2 + t3 + 1)>>1;
    src[3+0*stride]=
    src[2+2*stride]=(t3 + t4+ 1)>>1;
    src[3+2*stride]=(t4 + t5+ 1)>>1;
    src[0+1*stride]=(t0 + 2*t1 + t2 + l2 + 2*l3 + l4 + 4)>>3;
    src[1+1*stride]=
    src[0+3*stride]=(t1 + 2*t2 + t3 + 2)>>2;
    src[2+1*stride]=
    src[1+3*stride]=(t2 + 2*t3 + t4 + 2)>>2;
    src[3+1*stride]=
    src[2+3*stride]=(t3 + 2*t4 + t5 + 2)>>2;
    src[3+3*stride]=(t4 + 2*t5 + t6 + 2)>>2;
}

static void pred4x4_vertical_left_rv40_c(uint8_t *src, const uint8_t *topright,
                                         ptrdiff_t stride)
{
    LOAD_LEFT_EDGE
    LOAD_DOWN_LEFT_EDGE

    pred4x4_vertical_left_rv40(src, topright, stride, l0, l1, l2, l3, l4);
}

static void pred4x4_vertical_left_rv40_nodown_c(uint8_t *src,
                                                const uint8_t *topright,
                                                ptrdiff_t stride)
{
    LOAD_LEFT_EDGE

    pred4x4_vertical_left_rv40(src, topright, stride, l0, l1, l2, l3, l3);
}

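/* VP7/VP8 vertical-left 4x4 prediction: fills the block from 2-tap and
 * 3-tap filtered top samples, using the full extended top edge t0..t7. */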
static void pred4x4_vertical_left_vp8_c(uint8_t *src, const uint8_t *topright,
                                        ptrdiff_t stride)
{
    LOAD_TOP_EDGE
    LOAD_TOP_RIGHT_EDGE

    src[0+0*stride]=(t0 + t1 + 1)>>1;
    src[1+0*stride]=
    src[0+2*stride]=(t1 + t2 + 1)>>1;
    src[2+0*stride]=
    src[1+2*stride]=(t2 + t3 + 1)>>1;
    src[3+0*stride]=
    src[2+2*stride]=(t3 + t4 + 1)>>1;
    src[0+1*stride]=(t0 + 2*t1 + t2 + 2)>>2;
    src[1+1*stride]=
    src[0+3*stride]=(t1 + 2*t2 + t3 + 2)>>2;
    src[2+1*stride]=
    src[1+3*stride]=(t2 + 2*t3 + t4 + 2)>>2;
    src[3+1*stride]=
    src[2+3*stride]=(t3 + 2*t4 + t5 + 2)>>2;
    src[3+2*stride]=(t4 + 2*t5 + t6 + 2)>>2;
    src[3+3*stride]=(t5 + 2*t6 + t7 + 2)>>2;
}

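/* RV40 horizontal-up 4x4 prediction: blends filtered samples from the
 * top/top-right edge with filtered samples from the left/down-left edge. */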
static void pred4x4_horizontal_up_rv40_c(uint8_t *src, const uint8_t *topright,
                                         ptrdiff_t stride)
{
    LOAD_LEFT_EDGE
    LOAD_DOWN_LEFT_EDGE
    LOAD_TOP_EDGE
    LOAD_TOP_RIGHT_EDGE

    src[0+0*stride]=(t1 + 2*t2 + t3 + 2*l0 + 2*l1 + 4)>>3;
    src[1+0*stride]=(t2 + 2*t3 + t4 + l0 + 2*l1 + l2 + 4)>>3;
    src[2+0*stride]=
    src[0+1*stride]=(t3 + 2*t4 + t5 + 2*l1 + 2*l2 + 4)>>3;
    src[3+0*stride]=
    src[1+1*stride]=(t4 + 2*t5 + t6 + l1 + 2*l2 + l3 + 4)>>3;
    src[2+1*stride]=
    src[0+2*stride]=(t5 + 2*t6 + t7 + 2*l2 + 2*l3 + 4)>>3;
    src[3+1*stride]=
    src[1+2*stride]=(t6 + 3*t7 + l2 + 3*l3 + 4)>>3;
    src[3+2*stride]=
    src[1+3*stride]=(l3 + 2*l4 + l5 + 2)>>2;
    src[0+3*stride]=
    src[2+2*stride]=(t6 + t7 + l3 + l4 + 2)>>2;
    src[2+3*stride]=(l4 + l5 + 1)>>1;
    src[3+3*stride]=(l4 + 2*l5 + l6 + 2)>>2;
}

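/* As above, but with no down-left neighbours: l3 substitutes for l4..l6,
 * so the affected bottom-row pixels collapse to plain l3. */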
static void pred4x4_horizontal_up_rv40_nodown_c(uint8_t *src,
                                                const uint8_t *topright,
                                                ptrdiff_t stride)
{
    LOAD_LEFT_EDGE
    LOAD_TOP_EDGE
    LOAD_TOP_RIGHT_EDGE

    src[0+0*stride]=(t1 + 2*t2 + t3 + 2*l0 + 2*l1 + 4)>>3;
    src[1+0*stride]=(t2 + 2*t3 + t4 + l0 + 2*l1 + l2 + 4)>>3;
    src[2+0*stride]=
    src[0+1*stride]=(t3 + 2*t4 + t5 + 2*l1 + 2*l2 + 4)>>3;
    src[3+0*stride]=
    src[1+1*stride]=(t4 + 2*t5 + t6 + l1 + 2*l2 + l3 + 4)>>3;
    src[2+1*stride]=
    src[0+2*stride]=(t5 + 2*t6 + t7 + 2*l2 + 2*l3 + 4)>>3;
    src[3+1*stride]=
    src[1+2*stride]=(t6 + 3*t7 + l2 + 3*l3 + 4)>>3;
    src[3+2*stride]=
    src[1+3*stride]=l3;
    src[0+3*stride]=
    src[2+2*stride]=(t6 + t7 + 2*l3 + 2)>>2;
    src[2+3*stride]=
    src[3+3*stride]=l3;
}

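/* VP7/VP8 "TrueMotion" (TM) prediction: each pixel is
 * clip(left + top - topleft), implemented via the ff_crop_tab clipping
 * table.  The 8x8 and 16x16 variants further down use the same scheme. */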
static void pred4x4_tm_vp8_c(uint8_t *src, const uint8_t *topright,
                             ptrdiff_t stride)
{
    const uint8_t *cm = ff_crop_tab + MAX_NEG_CROP - src[-1-stride];
    uint8_t *top = src-stride;
    int y;

    for (y = 0; y < 4; y++) {
        const uint8_t *cm_in = cm + src[-1];
        src[0] = cm_in[top[0]];
        src[1] = cm_in[top[1]];
        src[2] = cm_in[top[2]];
        src[3] = cm_in[top[3]];
        src += stride;
    }
}

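/* SVQ3 and RV40 reuse the H.264 16x16 plane prediction via the 8-bit
 * compat template, selected through its svq3/rv40 flags. */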
static void pred16x16_plane_svq3_c(uint8_t *src, ptrdiff_t stride)
{
    pred16x16_plane_compat_8_c(src, stride, 1, 0);
}

static void pred16x16_plane_rv40_c(uint8_t *src, ptrdiff_t stride)
{
    pred16x16_plane_compat_8_c(src, stride, 0, 1);
}

static void pred16x16_tm_vp8_c(uint8_t *src, ptrdiff_t stride)
{
    const uint8_t *cm = ff_crop_tab + MAX_NEG_CROP - src[-1-stride];
    uint8_t *top = src-stride;
    int y;

    for (y = 0; y < 16; y++) {
        const uint8_t *cm_in = cm + src[-1];
        src[0]  = cm_in[top[0]];
        src[1]  = cm_in[top[1]];
        src[2]  = cm_in[top[2]];
        src[3]  = cm_in[top[3]];
        src[4]  = cm_in[top[4]];
        src[5]  = cm_in[top[5]];
        src[6]  = cm_in[top[6]];
        src[7]  = cm_in[top[7]];
        src[8]  = cm_in[top[8]];
        src[9]  = cm_in[top[9]];
        src[10] = cm_in[top[10]];
        src[11] = cm_in[top[11]];
        src[12] = cm_in[top[12]];
        src[13] = cm_in[top[13]];
        src[14] = cm_in[top[14]];
        src[15] = cm_in[top[15]];
        src += stride;
    }
}

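/* RV40 8x8 chroma DC prediction: the three variants below derive a single
 * DC value from the left edge, the top edge, or both, and fill the whole
 * 8x8 block with it. */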
static void pred8x8_left_dc_rv40_c(uint8_t *src, ptrdiff_t stride)
{
    int i;
    unsigned dc0;

    dc0=0;
    for(i=0;i<8; i++)
        dc0+= src[-1+i*stride];
    dc0= 0x01010101*((dc0 + 4)>>3);

    for(i=0; i<8; i++){
        ((uint32_t*)(src+i*stride))[0]=
        ((uint32_t*)(src+i*stride))[1]= dc0;
    }
}

static void pred8x8_top_dc_rv40_c(uint8_t *src, ptrdiff_t stride)
{
    int i;
    unsigned dc0;

    dc0=0;
    for(i=0;i<8; i++)
        dc0+= src[i-stride];
    dc0= 0x01010101*((dc0 + 4)>>3);

    for(i=0; i<8; i++){
        ((uint32_t*)(src+i*stride))[0]=
        ((uint32_t*)(src+i*stride))[1]= dc0;
    }
}

static void pred8x8_dc_rv40_c(uint8_t *src, ptrdiff_t stride)
{
    int i;
    unsigned dc0 = 0;

    for(i=0;i<4; i++){
        dc0+= src[-1+i*stride] + src[i-stride];
        dc0+= src[4+i-stride];
        dc0+= src[-1+(i+4)*stride];
    }
    dc0= 0x01010101*((dc0 + 8)>>4);

    for(i=0; i<4; i++){
        ((uint32_t*)(src+i*stride))[0]= dc0;
        ((uint32_t*)(src+i*stride))[1]= dc0;
    }
    for(i=4; i<8; i++){
        ((uint32_t*)(src+i*stride))[0]= dc0;
        ((uint32_t*)(src+i*stride))[1]= dc0;
    }
}

static void pred8x8_tm_vp8_c(uint8_t *src, ptrdiff_t stride)
{
    const uint8_t *cm = ff_crop_tab + MAX_NEG_CROP - src[-1-stride];
    uint8_t *top = src-stride;
    int y;

    for (y = 0; y < 8; y++) {
        const uint8_t *cm_in = cm + src[-1];
        src[0] = cm_in[top[0]];
        src[1] = cm_in[top[1]];
        src[2] = cm_in[top[2]];
        src[3] = cm_in[top[3]];
        src[4] = cm_in[top[4]];
        src[5] = cm_in[top[5]];
        src[6] = cm_in[top[6]];
        src[7] = cm_in[top[7]];
        src += stride;
    }
}

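/* Minimal usage sketch (illustrative only; dst, topright and stride are
 * assumed to come from the caller's decoding context):
 *
 *     H264PredContext pred;
 *     ff_h264_pred_init(&pred, AV_CODEC_ID_H264, 8, 1);
 *     // predict one 4x4 luma block from its decoded neighbours
 *     pred.pred4x4[VERT_PRED](dst, topright, stride);
 */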
/**
 * Set the intra prediction function pointers.
 */
av_cold void ff_h264_pred_init(H264PredContext *h, int codec_id,
                               const int bit_depth,
                               int chroma_format_idc)
{
#undef FUNC
#undef FUNCC
#define FUNC(a, depth) a ## _ ## depth
#define FUNCC(a, depth) a ## _ ## depth ## _c
#define FUNCD(a) a ## _c

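/* H264_PRED(depth) wires up every function pointer for one bit depth:
 * FUNCC() picks the templated _<depth>_c implementations, FUNC() the
 * _<depth> names without the _c suffix, and FUNCD() the depth-independent
 * _c helpers defined earlier in this file. */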
#define H264_PRED(depth) \
    if(codec_id != AV_CODEC_ID_RV40){\
        if (codec_id == AV_CODEC_ID_VP7 || codec_id == AV_CODEC_ID_VP8) {\
            h->pred4x4[VERT_PRED       ]= FUNCD(pred4x4_vertical_vp8);\
            h->pred4x4[HOR_PRED        ]= FUNCD(pred4x4_horizontal_vp8);\
        } else {\
            h->pred4x4[VERT_PRED       ]= FUNCC(pred4x4_vertical          , depth);\
            h->pred4x4[HOR_PRED        ]= FUNCC(pred4x4_horizontal        , depth);\
        }\
        h->pred4x4[DC_PRED             ]= FUNCC(pred4x4_dc                , depth);\
        if(codec_id == AV_CODEC_ID_SVQ3)\
            h->pred4x4[DIAG_DOWN_LEFT_PRED ]= FUNCD(pred4x4_down_left_svq3);\
        else\
            h->pred4x4[DIAG_DOWN_LEFT_PRED ]= FUNCC(pred4x4_down_left     , depth);\
        h->pred4x4[DIAG_DOWN_RIGHT_PRED]= FUNCC(pred4x4_down_right        , depth);\
        h->pred4x4[VERT_RIGHT_PRED     ]= FUNCC(pred4x4_vertical_right    , depth);\
        h->pred4x4[HOR_DOWN_PRED       ]= FUNCC(pred4x4_horizontal_down   , depth);\
        if (codec_id == AV_CODEC_ID_VP7 || codec_id == AV_CODEC_ID_VP8) {\
            h->pred4x4[VERT_LEFT_PRED  ]= FUNCD(pred4x4_vertical_left_vp8);\
        } else\
            h->pred4x4[VERT_LEFT_PRED  ]= FUNCC(pred4x4_vertical_left     , depth);\
        h->pred4x4[HOR_UP_PRED         ]= FUNCC(pred4x4_horizontal_up     , depth);\
        if (codec_id != AV_CODEC_ID_VP7 && codec_id != AV_CODEC_ID_VP8) {\
            h->pred4x4[LEFT_DC_PRED    ]= FUNCC(pred4x4_left_dc           , depth);\
            h->pred4x4[TOP_DC_PRED     ]= FUNCC(pred4x4_top_dc            , depth);\
        } else {\
            h->pred4x4[TM_VP8_PRED     ]= FUNCD(pred4x4_tm_vp8);\
            h->pred4x4[DC_127_PRED     ]= FUNCC(pred4x4_127_dc            , depth);\
            h->pred4x4[DC_129_PRED     ]= FUNCC(pred4x4_129_dc            , depth);\
            h->pred4x4[VERT_VP8_PRED   ]= FUNCC(pred4x4_vertical          , depth);\
            h->pred4x4[HOR_VP8_PRED    ]= FUNCC(pred4x4_horizontal        , depth);\
        }\
        if (codec_id != AV_CODEC_ID_VP8)\
            h->pred4x4[DC_128_PRED     ]= FUNCC(pred4x4_128_dc            , depth);\
    }else{\
        h->pred4x4[VERT_PRED           ]= FUNCC(pred4x4_vertical          , depth);\
        h->pred4x4[HOR_PRED            ]= FUNCC(pred4x4_horizontal        , depth);\
        h->pred4x4[DC_PRED             ]= FUNCC(pred4x4_dc                , depth);\
        h->pred4x4[DIAG_DOWN_LEFT_PRED ]= FUNCD(pred4x4_down_left_rv40);\
        h->pred4x4[DIAG_DOWN_RIGHT_PRED]= FUNCC(pred4x4_down_right        , depth);\
        h->pred4x4[VERT_RIGHT_PRED     ]= FUNCC(pred4x4_vertical_right    , depth);\
        h->pred4x4[HOR_DOWN_PRED       ]= FUNCC(pred4x4_horizontal_down   , depth);\
        h->pred4x4[VERT_LEFT_PRED      ]= FUNCD(pred4x4_vertical_left_rv40);\
        h->pred4x4[HOR_UP_PRED         ]= FUNCD(pred4x4_horizontal_up_rv40);\
        h->pred4x4[LEFT_DC_PRED        ]= FUNCC(pred4x4_left_dc           , depth);\
        h->pred4x4[TOP_DC_PRED         ]= FUNCC(pred4x4_top_dc            , depth);\
        h->pred4x4[DC_128_PRED         ]= FUNCC(pred4x4_128_dc            , depth);\
        h->pred4x4[DIAG_DOWN_LEFT_PRED_RV40_NODOWN]= FUNCD(pred4x4_down_left_rv40_nodown);\
        h->pred4x4[HOR_UP_PRED_RV40_NODOWN]= FUNCD(pred4x4_horizontal_up_rv40_nodown);\
        h->pred4x4[VERT_LEFT_PRED_RV40_NODOWN]= FUNCD(pred4x4_vertical_left_rv40_nodown);\
    }\
\
    h->pred8x8l[VERT_PRED           ]= FUNCC(pred8x8l_vertical            , depth);\
    h->pred8x8l[HOR_PRED            ]= FUNCC(pred8x8l_horizontal          , depth);\
    h->pred8x8l[DC_PRED             ]= FUNCC(pred8x8l_dc                  , depth);\
    h->pred8x8l[DIAG_DOWN_LEFT_PRED ]= FUNCC(pred8x8l_down_left           , depth);\
    h->pred8x8l[DIAG_DOWN_RIGHT_PRED]= FUNCC(pred8x8l_down_right          , depth);\
    h->pred8x8l[VERT_RIGHT_PRED     ]= FUNCC(pred8x8l_vertical_right      , depth);\
    h->pred8x8l[HOR_DOWN_PRED       ]= FUNCC(pred8x8l_horizontal_down     , depth);\
    h->pred8x8l[VERT_LEFT_PRED      ]= FUNCC(pred8x8l_vertical_left       , depth);\
    h->pred8x8l[HOR_UP_PRED         ]= FUNCC(pred8x8l_horizontal_up       , depth);\
    h->pred8x8l[LEFT_DC_PRED        ]= FUNCC(pred8x8l_left_dc             , depth);\
    h->pred8x8l[TOP_DC_PRED         ]= FUNCC(pred8x8l_top_dc              , depth);\
    h->pred8x8l[DC_128_PRED         ]= FUNCC(pred8x8l_128_dc              , depth);\
\
    if (chroma_format_idc <= 1) {\
        h->pred8x8[VERT_PRED8x8   ]= FUNCC(pred8x8_vertical               , depth);\
        h->pred8x8[HOR_PRED8x8    ]= FUNCC(pred8x8_horizontal             , depth);\
    } else {\
        h->pred8x8[VERT_PRED8x8   ]= FUNCC(pred8x16_vertical              , depth);\
        h->pred8x8[HOR_PRED8x8    ]= FUNCC(pred8x16_horizontal            , depth);\
    }\
    if (codec_id != AV_CODEC_ID_VP7 && codec_id != AV_CODEC_ID_VP8) {\
        if (chroma_format_idc <= 1) {\
            h->pred8x8[PLANE_PRED8x8]= FUNCC(pred8x8_plane                , depth);\
        } else {\
            h->pred8x8[PLANE_PRED8x8]= FUNCC(pred8x16_plane               , depth);\
        }\
    } else\
        h->pred8x8[PLANE_PRED8x8]= FUNCD(pred8x8_tm_vp8);\
    if (codec_id != AV_CODEC_ID_RV40 && codec_id != AV_CODEC_ID_VP7 && \
        codec_id != AV_CODEC_ID_VP8) {\
        if (chroma_format_idc <= 1) {\
            h->pred8x8[DC_PRED8x8     ]= FUNCC(pred8x8_dc                     , depth);\
            h->pred8x8[LEFT_DC_PRED8x8]= FUNCC(pred8x8_left_dc                , depth);\
            h->pred8x8[TOP_DC_PRED8x8 ]= FUNCC(pred8x8_top_dc                 , depth);\
            h->pred8x8[ALZHEIMER_DC_L0T_PRED8x8 ]= FUNC(pred8x8_mad_cow_dc_l0t, depth);\
            h->pred8x8[ALZHEIMER_DC_0LT_PRED8x8 ]= FUNC(pred8x8_mad_cow_dc_0lt, depth);\
            h->pred8x8[ALZHEIMER_DC_L00_PRED8x8 ]= FUNC(pred8x8_mad_cow_dc_l00, depth);\
            h->pred8x8[ALZHEIMER_DC_0L0_PRED8x8 ]= FUNC(pred8x8_mad_cow_dc_0l0, depth);\
        } else {\
            h->pred8x8[DC_PRED8x8     ]= FUNCC(pred8x16_dc                    , depth);\
            h->pred8x8[LEFT_DC_PRED8x8]= FUNCC(pred8x16_left_dc               , depth);\
            h->pred8x8[TOP_DC_PRED8x8 ]= FUNCC(pred8x16_top_dc                , depth);\
            h->pred8x8[ALZHEIMER_DC_L0T_PRED8x8 ]= FUNC(pred8x16_mad_cow_dc_l0t, depth);\
            h->pred8x8[ALZHEIMER_DC_0LT_PRED8x8 ]= FUNC(pred8x16_mad_cow_dc_0lt, depth);\
            h->pred8x8[ALZHEIMER_DC_L00_PRED8x8 ]= FUNC(pred8x16_mad_cow_dc_l00, depth);\
            h->pred8x8[ALZHEIMER_DC_0L0_PRED8x8 ]= FUNC(pred8x16_mad_cow_dc_0l0, depth);\
        }\
    }else{\
        h->pred8x8[DC_PRED8x8     ]= FUNCD(pred8x8_dc_rv40);\
        h->pred8x8[LEFT_DC_PRED8x8]= FUNCD(pred8x8_left_dc_rv40);\
        h->pred8x8[TOP_DC_PRED8x8 ]= FUNCD(pred8x8_top_dc_rv40);\
        if (codec_id == AV_CODEC_ID_VP7 || codec_id == AV_CODEC_ID_VP8) {\
            h->pred8x8[DC_127_PRED8x8]= FUNCC(pred8x8_127_dc              , depth);\
            h->pred8x8[DC_129_PRED8x8]= FUNCC(pred8x8_129_dc              , depth);\
        }\
    }\
    if (chroma_format_idc <= 1) {\
        h->pred8x8[DC_128_PRED8x8 ]= FUNCC(pred8x8_128_dc                 , depth);\
    } else {\
        h->pred8x8[DC_128_PRED8x8 ]= FUNCC(pred8x16_128_dc                , depth);\
    }\
\
    h->pred16x16[DC_PRED8x8     ]= FUNCC(pred16x16_dc                     , depth);\
    h->pred16x16[VERT_PRED8x8   ]= FUNCC(pred16x16_vertical               , depth);\
    h->pred16x16[HOR_PRED8x8    ]= FUNCC(pred16x16_horizontal             , depth);\
    switch(codec_id){\
    case AV_CODEC_ID_SVQ3:\
       h->pred16x16[PLANE_PRED8x8  ]= FUNCD(pred16x16_plane_svq3);\
       break;\
    case AV_CODEC_ID_RV40:\
       h->pred16x16[PLANE_PRED8x8  ]= FUNCD(pred16x16_plane_rv40);\
       break;\
    case AV_CODEC_ID_VP7:\
    case AV_CODEC_ID_VP8:\
       h->pred16x16[PLANE_PRED8x8  ]= FUNCD(pred16x16_tm_vp8);\
       h->pred16x16[DC_127_PRED8x8]= FUNCC(pred16x16_127_dc               , depth);\
       h->pred16x16[DC_129_PRED8x8]= FUNCC(pred16x16_129_dc               , depth);\
       break;\
    default:\
       h->pred16x16[PLANE_PRED8x8  ]= FUNCC(pred16x16_plane               , depth);\
       break;\
    }\
    h->pred16x16[LEFT_DC_PRED8x8]= FUNCC(pred16x16_left_dc                , depth);\
    h->pred16x16[TOP_DC_PRED8x8 ]= FUNCC(pred16x16_top_dc                 , depth);\
    h->pred16x16[DC_128_PRED8x8 ]= FUNCC(pred16x16_128_dc                 , depth);\
\
    /* special lossless h/v prediction for H.264 */ \
    h->pred4x4_add  [VERT_PRED   ]= FUNCC(pred4x4_vertical_add            , depth);\
    h->pred4x4_add  [ HOR_PRED   ]= FUNCC(pred4x4_horizontal_add          , depth);\
    h->pred8x8l_add [VERT_PRED   ]= FUNCC(pred8x8l_vertical_add           , depth);\
    h->pred8x8l_add [ HOR_PRED   ]= FUNCC(pred8x8l_horizontal_add         , depth);\
    h->pred8x8l_filter_add [VERT_PRED   ]= FUNCC(pred8x8l_vertical_filter_add           , depth);\
    h->pred8x8l_filter_add [ HOR_PRED   ]= FUNCC(pred8x8l_horizontal_filter_add         , depth);\
    if (chroma_format_idc <= 1) {\
        h->pred8x8_add  [VERT_PRED8x8]= FUNCC(pred8x8_vertical_add            , depth);\
        h->pred8x8_add  [ HOR_PRED8x8]= FUNCC(pred8x8_horizontal_add          , depth);\
    } else {\
        h->pred8x8_add  [VERT_PRED8x8]= FUNCC(pred8x16_vertical_add            , depth);\
        h->pred8x8_add  [ HOR_PRED8x8]= FUNCC(pred8x16_horizontal_add          , depth);\
    }\
    h->pred16x16_add[VERT_PRED8x8]= FUNCC(pred16x16_vertical_add          , depth);\
    h->pred16x16_add[ HOR_PRED8x8]= FUNCC(pred16x16_horizontal_add        , depth);\

    switch (bit_depth) {
        case 9:
            H264_PRED(9)
            break;
        case 10:
            H264_PRED(10)
            break;
        case 12:
            H264_PRED(12)
            break;
        case 14:
            H264_PRED(14)
            break;
        default:
            av_assert0(bit_depth<=8);
            H264_PRED(8)
            break;
    }

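    /* Let per-architecture code replace the C functions with optimized
     * versions. */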
    if (ARCH_AARCH64)
        ff_h264_pred_init_aarch64(h, codec_id, bit_depth, chroma_format_idc);
    if (ARCH_ARM)
        ff_h264_pred_init_arm(h, codec_id, bit_depth, chroma_format_idc);
    if (ARCH_X86)
        ff_h264_pred_init_x86(h, codec_id, bit_depth, chroma_format_idc);
    if (ARCH_MIPS)
        ff_h264_pred_init_mips(h, codec_id, bit_depth, chroma_format_idc);
}