/*
 * Copyright (c) 2004-2005 Michael Niedermayer, Loren Merritt
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include "libavutil/cpu.h"
#include "libavutil/x86_cpu.h"
#include "libavcodec/h264dsp.h"
#include "dsputil_mmx.h"

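/* Alternating {3,1} bytes: loaded into %%mm6 in place of ff_pb_3 when
 * filtering field macroblocks, where the mv-difference limit on the
 * vertical component is half the one on the horizontal component. */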
DECLARE_ALIGNED(8, static const uint64_t, ff_pb_3_1  ) = 0x0103010301030103ULL;

/***********************************/
/* IDCT */
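/* Prototypes for the external (yasm) IDCT-add functions: NUM picks the 4x4,
 * 8x8 or DC-only variant, DEPTH the bit depth, OPT the instruction set. */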
#define IDCT_ADD_FUNC(NUM, DEPTH, OPT) \
void ff_h264_idct ## NUM ## _add_ ## DEPTH ## _ ## OPT (uint8_t *dst, int16_t *block, int stride);

IDCT_ADD_FUNC(, 8, mmx)
IDCT_ADD_FUNC(, 10, sse2)
IDCT_ADD_FUNC(_dc, 8, mmx2)
IDCT_ADD_FUNC(_dc, 10, mmx2)
IDCT_ADD_FUNC(8_dc, 8, mmx2)
IDCT_ADD_FUNC(8_dc, 10, sse2)
IDCT_ADD_FUNC(8, 8, mmx)
IDCT_ADD_FUNC(8, 8, sse2)
IDCT_ADD_FUNC(8, 10, sse2)
#if HAVE_AVX
IDCT_ADD_FUNC(, 10, avx)
IDCT_ADD_FUNC(8_dc, 10, avx)
IDCT_ADD_FUNC(8, 10, avx)
#endif


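/* Multi-block variants: add up to REP blocks in one call, using the
 * non-zero-coefficient counts in nnzc[] to skip blocks with nothing coded. */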
#define IDCT_ADD_REP_FUNC(NUM, REP, DEPTH, OPT) \
void ff_h264_idct ## NUM ## _add ## REP ## _ ## DEPTH ## _ ## OPT \
                              (uint8_t *dst, const int *block_offset, \
                              DCTELEM *block, int stride, const uint8_t nnzc[6*8]);

IDCT_ADD_REP_FUNC(8, 4, 8, mmx)
IDCT_ADD_REP_FUNC(8, 4, 8, mmx2)
IDCT_ADD_REP_FUNC(8, 4, 8, sse2)
IDCT_ADD_REP_FUNC(8, 4, 10, sse2)
IDCT_ADD_REP_FUNC(8, 4, 10, avx)
IDCT_ADD_REP_FUNC(, 16, 8, mmx)
IDCT_ADD_REP_FUNC(, 16, 8, mmx2)
IDCT_ADD_REP_FUNC(, 16, 8, sse2)
IDCT_ADD_REP_FUNC(, 16, 10, sse2)
IDCT_ADD_REP_FUNC(, 16intra, 8, mmx)
IDCT_ADD_REP_FUNC(, 16intra, 8, mmx2)
IDCT_ADD_REP_FUNC(, 16intra, 8, sse2)
IDCT_ADD_REP_FUNC(, 16intra, 10, sse2)
#if HAVE_AVX
IDCT_ADD_REP_FUNC(, 16, 10, avx)
IDCT_ADD_REP_FUNC(, 16intra, 10, avx)
#endif


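/* As above, but dst is an array of plane pointers; used for the chroma
 * add8 functions, which write to both chroma planes. */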
#define IDCT_ADD_REP_FUNC2(NUM, REP, DEPTH, OPT) \
void ff_h264_idct ## NUM ## _add ## REP ## _ ## DEPTH ## _ ## OPT \
                              (uint8_t **dst, const int *block_offset, \
                              DCTELEM *block, int stride, const uint8_t nnzc[6*8]);
IDCT_ADD_REP_FUNC2(, 8, 8, mmx)
IDCT_ADD_REP_FUNC2(, 8, 8, mmx2)
IDCT_ADD_REP_FUNC2(, 8, 8, sse2)
IDCT_ADD_REP_FUNC2(, 8, 10, sse2)
#if HAVE_AVX
IDCT_ADD_REP_FUNC2(, 8, 10, avx)
#endif

void ff_h264_luma_dc_dequant_idct_mmx (DCTELEM *output, DCTELEM *input, int qmul);
void ff_h264_luma_dc_dequant_idct_sse2(DCTELEM *output, DCTELEM *input, int qmul);

/***********************************/
/* deblocking */

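/* One boundary-strength pass for one filtering direction (dir): for each
 * edge, bS is 2 where either neighbouring block has non-zero coefficients
 * (nnz), else 1 where the references differ or an mv component differs by
 * at least the limit, else 0. d_idx is the offset from a block to its
 * neighbour across the edge; mask_mv selects edges that skip the mv/ref
 * test. */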
#define h264_loop_filter_strength_iteration_mmx2(bS, nnz, ref, mv, bidir, edges, step, mask_mv, dir, d_idx, mask_dir) \
    do { \
        x86_reg b_idx; \
        mask_mv <<= 3; \
        for( b_idx=0; b_idx<edges; b_idx+=step ) { \
            if (!mask_dir) \
            __asm__ volatile( \
                    "pxor %%mm0, %%mm0 \n\t" \
                    :: \
            ); \
            if(!(mask_mv & b_idx)) { \
                if(bidir) { \
                    __asm__ volatile( \
                        "movd         %a3(%0,%2), %%mm2 \n" \
                        "punpckldq    %a4(%0,%2), %%mm2 \n" /* { ref0[bn], ref1[bn] } */ \
                        "pshufw $0x44, 12(%0,%2), %%mm0 \n" /* { ref0[b], ref0[b] } */ \
                        "pshufw $0x44, 52(%0,%2), %%mm1 \n" /* { ref1[b], ref1[b] } */ \
                        "pshufw $0x4E, %%mm2, %%mm3 \n" \
                        "psubb         %%mm2, %%mm0 \n" /* { ref0[b]!=ref0[bn], ref0[b]!=ref1[bn] } */ \
                        "psubb         %%mm3, %%mm1 \n" /* { ref1[b]!=ref1[bn], ref1[b]!=ref0[bn] } */ \
\
                        "por           %%mm1, %%mm0 \n" \
                        "movq   %a5(%1,%2,4), %%mm1 \n" \
                        "movq   %a6(%1,%2,4), %%mm2 \n" \
                        "movq          %%mm1, %%mm3 \n" \
                        "movq          %%mm2, %%mm4 \n" \
                        "psubw   48(%1,%2,4), %%mm1 \n" \
                        "psubw   56(%1,%2,4), %%mm2 \n" \
                        "psubw  208(%1,%2,4), %%mm3 \n" \
                        "psubw  216(%1,%2,4), %%mm4 \n" \
                        "packsswb      %%mm2, %%mm1 \n" \
                        "packsswb      %%mm4, %%mm3 \n" \
                        "paddb         %%mm6, %%mm1 \n" \
                        "paddb         %%mm6, %%mm3 \n" \
                        "psubusb       %%mm5, %%mm1 \n" /* abs(mv[b] - mv[bn]) >= limit */ \
                        "psubusb       %%mm5, %%mm3 \n" \
                        "packsswb      %%mm3, %%mm1 \n" \
\
                        "por           %%mm1, %%mm0 \n" \
                        "movq   %a7(%1,%2,4), %%mm1 \n" \
                        "movq   %a8(%1,%2,4), %%mm2 \n" \
                        "movq          %%mm1, %%mm3 \n" \
                        "movq          %%mm2, %%mm4 \n" \
                        "psubw   48(%1,%2,4), %%mm1 \n" \
                        "psubw   56(%1,%2,4), %%mm2 \n" \
                        "psubw  208(%1,%2,4), %%mm3 \n" \
                        "psubw  216(%1,%2,4), %%mm4 \n" \
                        "packsswb      %%mm2, %%mm1 \n" \
                        "packsswb      %%mm4, %%mm3 \n" \
                        "paddb         %%mm6, %%mm1 \n" \
                        "paddb         %%mm6, %%mm3 \n" \
                        "psubusb       %%mm5, %%mm1 \n" /* abs(mv[b] - mv[bn]) >= limit */ \
                        "psubusb       %%mm5, %%mm3 \n" \
                        "packsswb      %%mm3, %%mm1 \n" \
\
                        "pshufw $0x4E, %%mm1, %%mm1 \n" \
                        "por           %%mm1, %%mm0 \n" \
                        "pshufw $0x4E, %%mm0, %%mm1 \n" \
                        "pminub        %%mm1, %%mm0 \n" \
                        ::"r"(ref), \
                          "r"(mv), \
                          "r"(b_idx), \
                          "i"(d_idx+12), \
                          "i"(d_idx+52), \
                          "i"(d_idx*4+48), \
                          "i"(d_idx*4+56), \
                          "i"(d_idx*4+208), \
                          "i"(d_idx*4+216) \
                    ); \
                } else { \
                    __asm__ volatile( \
                        "movd   12(%0,%2), %%mm0 \n" \
                        "psubb %a3(%0,%2), %%mm0 \n" /* ref[b] != ref[bn] */ \
                        "movq   48(%1,%2,4), %%mm1 \n" \
                        "movq   56(%1,%2,4), %%mm2 \n" \
                        "psubw %a4(%1,%2,4), %%mm1 \n" \
                        "psubw %a5(%1,%2,4), %%mm2 \n" \
                        "packsswb   %%mm2, %%mm1 \n" \
                        "paddb      %%mm6, %%mm1 \n" \
                        "psubusb    %%mm5, %%mm1 \n" /* abs(mv[b] - mv[bn]) >= limit */ \
                        "packsswb   %%mm1, %%mm1 \n" \
                        "por        %%mm1, %%mm0 \n" \
                        ::"r"(ref), \
                          "r"(mv), \
                          "r"(b_idx), \
                          "i"(d_idx+12), \
                          "i"(d_idx*4+48), \
                          "i"(d_idx*4+56) \
                    ); \
                } \
            } \
            __asm__ volatile( \
                "movd 12(%0,%1), %%mm1 \n" \
                "por %a2(%0,%1), %%mm1 \n" /* nnz[b] || nnz[bn] */ \
                ::"r"(nnz), \
                  "r"(b_idx), \
                  "i"(d_idx+12) \
            ); \
            __asm__ volatile( \
                "pminub    %%mm7, %%mm1 \n" \
                "pminub    %%mm7, %%mm0 \n" \
                "psllw        $1, %%mm1 \n" \
                "pxor      %%mm2, %%mm2 \n" \
                "pmaxub    %%mm0, %%mm1 \n" \
                "punpcklbw %%mm2, %%mm1 \n" \
                "movq      %%mm1, %a1(%0,%2) \n" \
                ::"r"(bS), \
                  "i"(32*dir), \
                  "r"(b_idx) \
                :"memory" \
            ); \
        } \
    } while (0)

static void h264_loop_filter_strength_mmx2( int16_t bS[2][4][4], uint8_t nnz[40], int8_t ref[2][40], int16_t mv[2][40][2],
                                            int bidir, int edges, int step, int mask_mv0, int mask_mv1, int field ) {
    __asm__ volatile(
        "movq %0, %%mm7 \n"
        "movq %1, %%mm6 \n"
        ::"m"(ff_pb_1), "m"(ff_pb_3)
    );
    if(field)
        __asm__ volatile(
            "movq %0, %%mm6 \n"
            ::"m"(ff_pb_3_1)
        );
    __asm__ volatile(
        "movq  %%mm6, %%mm5 \n"
        "paddb %%mm5, %%mm5 \n"
    :);

    // could do a special case for dir==0 && edges==1, but it only reduces the
    // average filter time by 1.2%
    step  <<= 3;
    edges <<= 3;
    h264_loop_filter_strength_iteration_mmx2(bS, nnz, ref, mv, bidir, edges, step, mask_mv1, 1, -8,  0);
    h264_loop_filter_strength_iteration_mmx2(bS, nnz, ref, mv, bidir,    32,    8, mask_mv0, 0, -1, -1);

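    /* The dir==0 pass computes the vertical-edge strengths in transposed
     * order; bring bS[0] back into the layout the caller expects. */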
    __asm__ volatile(
        "movq   (%0), %%mm0 \n\t"
        "movq  8(%0), %%mm1 \n\t"
        "movq 16(%0), %%mm2 \n\t"
        "movq 24(%0), %%mm3 \n\t"
        TRANSPOSE4(%%mm0, %%mm1, %%mm2, %%mm3, %%mm4)
        "movq %%mm0,   (%0) \n\t"
        "movq %%mm3,  8(%0) \n\t"
        "movq %%mm4, 16(%0) \n\t"
        "movq %%mm2, 24(%0) \n\t"
        ::"r"(bS[0])
        :"memory"
    );
}

#define LF_FUNC(DIR, TYPE, DEPTH, OPT) \
void ff_deblock_ ## DIR ## _ ## TYPE ## _ ## DEPTH ## _ ## OPT (uint8_t *pix, int stride, \
                                                                int alpha, int beta, int8_t *tc0);
#define LF_IFUNC(DIR, TYPE, DEPTH, OPT) \
void ff_deblock_ ## DIR ## _ ## TYPE ## _ ## DEPTH ## _ ## OPT (uint8_t *pix, int stride, \
                                                                int alpha, int beta);

#define LF_FUNCS(type, depth)\
LF_FUNC (h,  chroma,       depth, mmxext)\
LF_IFUNC(h,  chroma_intra, depth, mmxext)\
LF_FUNC (v,  chroma,       depth, mmxext)\
LF_IFUNC(v,  chroma_intra, depth, mmxext)\
LF_FUNC (h,  luma,         depth, mmxext)\
LF_IFUNC(h,  luma_intra,   depth, mmxext)\
LF_FUNC (h,  luma,         depth, sse2)\
LF_IFUNC(h,  luma_intra,   depth, sse2)\
LF_FUNC (v,  luma,         depth, sse2)\
LF_IFUNC(v,  luma_intra,   depth, sse2)\
LF_FUNC (h,  chroma,       depth, sse2)\
LF_IFUNC(h,  chroma_intra, depth, sse2)\
LF_FUNC (v,  chroma,       depth, sse2)\
LF_IFUNC(v,  chroma_intra, depth, sse2)\
LF_FUNC (h,  luma,         depth,  avx)\
LF_IFUNC(h,  luma_intra,   depth,  avx)\
LF_FUNC (v,  luma,         depth,  avx)\
LF_IFUNC(v,  luma_intra,   depth,  avx)\
LF_FUNC (h,  chroma,       depth,  avx)\
LF_IFUNC(h,  chroma_intra, depth,  avx)\
LF_FUNC (v,  chroma,       depth,  avx)\
LF_IFUNC(v,  chroma_intra, depth,  avx)

LF_FUNCS( uint8_t,  8)
LF_FUNCS(uint16_t, 10)

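/* The mmxext luma deblock kernel handles 8 pixels per call; on x86-32,
 * where SSE2 cannot be assumed, the full 16-pixel edge is filtered by
 * calling the 8-pixel kernel twice. */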
#if ARCH_X86_32
LF_FUNC (v8, luma,             8, mmxext)
static void ff_deblock_v_luma_8_mmxext(uint8_t *pix, int stride, int alpha, int beta, int8_t *tc0)
{
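    /* tc0 == -1 marks a part of the edge to leave unfiltered; the AND has
     * its sign bit set only when both entries are negative, so each 8-pixel
     * half is skipped only when both of its tc0 values are -1. */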
    if((tc0[0] & tc0[1]) >= 0)
        ff_deblock_v8_luma_8_mmxext(pix+0, stride, alpha, beta, tc0);
    if((tc0[2] & tc0[3]) >= 0)
        ff_deblock_v8_luma_8_mmxext(pix+8, stride, alpha, beta, tc0+2);
}
LF_IFUNC(v8, luma_intra,        8, mmxext)
static void ff_deblock_v_luma_intra_8_mmxext(uint8_t *pix, int stride, int alpha, int beta)
{
    ff_deblock_v8_luma_intra_8_mmxext(pix+0, stride, alpha, beta);
    ff_deblock_v8_luma_intra_8_mmxext(pix+8, stride, alpha, beta);
}
#endif /* ARCH_X86_32 */

LF_FUNC (v,  luma,            10, mmxext)
LF_IFUNC(v,  luma_intra,      10, mmxext)

/***********************************/
/* weighted prediction */

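/* Prototypes for the (bi)weighted-prediction kernels; one function per
 * W x H block size, bit depth and instruction set. */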
#define H264_WEIGHT(W, H, OPT) \
void ff_h264_weight_ ## W ## x ## H ## _ ## OPT(uint8_t *dst, \
    int stride, int log2_denom, int weight, int offset);

#define H264_BIWEIGHT(W, H, OPT) \
void ff_h264_biweight_ ## W ## x ## H ## _ ## OPT(uint8_t *dst, \
    uint8_t *src, int stride, int log2_denom, int weightd, \
    int weights, int offset);

#define H264_BIWEIGHT_MMX(W,H) \
H264_WEIGHT  (W, H, mmx2) \
H264_BIWEIGHT(W, H, mmx2)

#define H264_BIWEIGHT_MMX_SSE(W,H) \
H264_BIWEIGHT_MMX(W, H) \
H264_WEIGHT      (W, H, sse2) \
H264_BIWEIGHT    (W, H, sse2) \
H264_BIWEIGHT    (W, H, ssse3)

H264_BIWEIGHT_MMX_SSE(16, 16)
H264_BIWEIGHT_MMX_SSE(16,  8)
H264_BIWEIGHT_MMX_SSE( 8, 16)
H264_BIWEIGHT_MMX_SSE( 8,  8)
H264_BIWEIGHT_MMX_SSE( 8,  4)
H264_BIWEIGHT_MMX    ( 4,  8)
H264_BIWEIGHT_MMX    ( 4,  4)
H264_BIWEIGHT_MMX    ( 4,  2)

#define H264_WEIGHT_10(W, H, DEPTH, OPT) \
void ff_h264_weight_ ## W ## x ## H ## _ ## DEPTH ## _ ## OPT(uint8_t *dst, \
    int stride, int log2_denom, int weight, int offset);

#define H264_BIWEIGHT_10(W, H, DEPTH, OPT) \
void ff_h264_biweight_ ## W ## x ## H ## _ ## DEPTH ## _ ## OPT \
    (uint8_t *dst, uint8_t *src, int stride, int log2_denom, \
     int weightd, int weights, int offset);

#define H264_BIWEIGHT_10_SSE(W, H, DEPTH) \
H264_WEIGHT_10  (W, H, DEPTH, sse2) \
H264_WEIGHT_10  (W, H, DEPTH, sse4) \
H264_BIWEIGHT_10(W, H, DEPTH, sse2) \
H264_BIWEIGHT_10(W, H, DEPTH, sse4)

H264_BIWEIGHT_10_SSE(16, 16, 10)
H264_BIWEIGHT_10_SSE(16,  8, 10)
H264_BIWEIGHT_10_SSE( 8, 16, 10)
H264_BIWEIGHT_10_SSE( 8,  8, 10)
H264_BIWEIGHT_10_SSE( 8,  4, 10)
H264_BIWEIGHT_10_SSE( 4,  8, 10)
H264_BIWEIGHT_10_SSE( 4,  4, 10)
H264_BIWEIGHT_10_SSE( 4,  2, 10)

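/* Runtime CPU dispatch: point the H264DSPContext members at the fastest
 * implementation the host CPU supports; entries left untouched keep the
 * C versions installed by the generic init. */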
void ff_h264dsp_init_x86(H264DSPContext *c, const int bit_depth, const int chroma_format_idc)
{
    int mm_flags = av_get_cpu_flags();

    if (mm_flags & AV_CPU_FLAG_MMX2) {
        c->h264_loop_filter_strength= h264_loop_filter_strength_mmx2;
    }

    if (bit_depth == 8) {
#if HAVE_YASM
    if (mm_flags & AV_CPU_FLAG_MMX) {
        c->h264_idct_dc_add         =
        c->h264_idct_add            = ff_h264_idct_add_8_mmx;
        c->h264_idct8_dc_add        =
        c->h264_idct8_add           = ff_h264_idct8_add_8_mmx;

        c->h264_idct_add16          = ff_h264_idct_add16_8_mmx;
        c->h264_idct8_add4          = ff_h264_idct8_add4_8_mmx;
        if (chroma_format_idc == 1)
            c->h264_idct_add8       = ff_h264_idct_add8_8_mmx;
        c->h264_idct_add16intra     = ff_h264_idct_add16intra_8_mmx;
        c->h264_luma_dc_dequant_idct= ff_h264_luma_dc_dequant_idct_mmx;

        if (mm_flags & AV_CPU_FLAG_MMX2) {
            c->h264_idct_dc_add    = ff_h264_idct_dc_add_8_mmx2;
            c->h264_idct8_dc_add   = ff_h264_idct8_dc_add_8_mmx2;
            c->h264_idct_add16     = ff_h264_idct_add16_8_mmx2;
            c->h264_idct8_add4     = ff_h264_idct8_add4_8_mmx2;
            if (chroma_format_idc == 1)
                c->h264_idct_add8  = ff_h264_idct_add8_8_mmx2;
            c->h264_idct_add16intra= ff_h264_idct_add16intra_8_mmx2;

            c->h264_v_loop_filter_chroma= ff_deblock_v_chroma_8_mmxext;
            c->h264_v_loop_filter_chroma_intra= ff_deblock_v_chroma_intra_8_mmxext;
            if (chroma_format_idc == 1) {
                c->h264_h_loop_filter_chroma= ff_deblock_h_chroma_8_mmxext;
                c->h264_h_loop_filter_chroma_intra= ff_deblock_h_chroma_intra_8_mmxext;
            }
#if ARCH_X86_32
            c->h264_v_loop_filter_luma= ff_deblock_v_luma_8_mmxext;
            c->h264_h_loop_filter_luma= ff_deblock_h_luma_8_mmxext;
            c->h264_v_loop_filter_luma_intra = ff_deblock_v_luma_intra_8_mmxext;
            c->h264_h_loop_filter_luma_intra = ff_deblock_h_luma_intra_8_mmxext;
#endif
            c->weight_h264_pixels_tab[0]= ff_h264_weight_16x16_mmx2;
            c->weight_h264_pixels_tab[1]= ff_h264_weight_16x8_mmx2;
            c->weight_h264_pixels_tab[2]= ff_h264_weight_8x16_mmx2;
            c->weight_h264_pixels_tab[3]= ff_h264_weight_8x8_mmx2;
            c->weight_h264_pixels_tab[4]= ff_h264_weight_8x4_mmx2;
            c->weight_h264_pixels_tab[5]= ff_h264_weight_4x8_mmx2;
            c->weight_h264_pixels_tab[6]= ff_h264_weight_4x4_mmx2;
            c->weight_h264_pixels_tab[7]= ff_h264_weight_4x2_mmx2;

            c->biweight_h264_pixels_tab[0]= ff_h264_biweight_16x16_mmx2;
            c->biweight_h264_pixels_tab[1]= ff_h264_biweight_16x8_mmx2;
            c->biweight_h264_pixels_tab[2]= ff_h264_biweight_8x16_mmx2;
            c->biweight_h264_pixels_tab[3]= ff_h264_biweight_8x8_mmx2;
            c->biweight_h264_pixels_tab[4]= ff_h264_biweight_8x4_mmx2;
            c->biweight_h264_pixels_tab[5]= ff_h264_biweight_4x8_mmx2;
            c->biweight_h264_pixels_tab[6]= ff_h264_biweight_4x4_mmx2;
            c->biweight_h264_pixels_tab[7]= ff_h264_biweight_4x2_mmx2;

            if (mm_flags&AV_CPU_FLAG_SSE2) {
                c->h264_idct8_add           = ff_h264_idct8_add_8_sse2;

                c->h264_idct_add16          = ff_h264_idct_add16_8_sse2;
                c->h264_idct8_add4          = ff_h264_idct8_add4_8_sse2;
                if (chroma_format_idc == 1)
                    c->h264_idct_add8       = ff_h264_idct_add8_8_sse2;
                c->h264_idct_add16intra     = ff_h264_idct_add16intra_8_sse2;
                c->h264_luma_dc_dequant_idct= ff_h264_luma_dc_dequant_idct_sse2;

                c->weight_h264_pixels_tab[0]= ff_h264_weight_16x16_sse2;
                c->weight_h264_pixels_tab[1]= ff_h264_weight_16x8_sse2;
                c->weight_h264_pixels_tab[2]= ff_h264_weight_8x16_sse2;
                c->weight_h264_pixels_tab[3]= ff_h264_weight_8x8_sse2;
                c->weight_h264_pixels_tab[4]= ff_h264_weight_8x4_sse2;

                c->biweight_h264_pixels_tab[0]= ff_h264_biweight_16x16_sse2;
                c->biweight_h264_pixels_tab[1]= ff_h264_biweight_16x8_sse2;
                c->biweight_h264_pixels_tab[2]= ff_h264_biweight_8x16_sse2;
                c->biweight_h264_pixels_tab[3]= ff_h264_biweight_8x8_sse2;
                c->biweight_h264_pixels_tab[4]= ff_h264_biweight_8x4_sse2;

#if HAVE_ALIGNED_STACK
                c->h264_v_loop_filter_luma = ff_deblock_v_luma_8_sse2;
                c->h264_h_loop_filter_luma = ff_deblock_h_luma_8_sse2;
                c->h264_v_loop_filter_luma_intra = ff_deblock_v_luma_intra_8_sse2;
                c->h264_h_loop_filter_luma_intra = ff_deblock_h_luma_intra_8_sse2;
#endif
            }
            if (mm_flags&AV_CPU_FLAG_SSSE3) {
                c->biweight_h264_pixels_tab[0]= ff_h264_biweight_16x16_ssse3;
                c->biweight_h264_pixels_tab[1]= ff_h264_biweight_16x8_ssse3;
                c->biweight_h264_pixels_tab[2]= ff_h264_biweight_8x16_ssse3;
                c->biweight_h264_pixels_tab[3]= ff_h264_biweight_8x8_ssse3;
                c->biweight_h264_pixels_tab[4]= ff_h264_biweight_8x4_ssse3;
            }
            if (HAVE_AVX && mm_flags&AV_CPU_FLAG_AVX) {
#if HAVE_ALIGNED_STACK
                c->h264_v_loop_filter_luma = ff_deblock_v_luma_8_avx;
                c->h264_h_loop_filter_luma = ff_deblock_h_luma_8_avx;
                c->h264_v_loop_filter_luma_intra = ff_deblock_v_luma_intra_8_avx;
                c->h264_h_loop_filter_luma_intra = ff_deblock_h_luma_intra_8_avx;
#endif
            }
        }
    }
#endif
    } else if (bit_depth == 10) {
#if HAVE_YASM
    if (mm_flags & AV_CPU_FLAG_MMX) {
        if (mm_flags & AV_CPU_FLAG_MMX2) {
#if ARCH_X86_32
            c->h264_v_loop_filter_chroma= ff_deblock_v_chroma_10_mmxext;
            c->h264_v_loop_filter_chroma_intra= ff_deblock_v_chroma_intra_10_mmxext;
            c->h264_v_loop_filter_luma= ff_deblock_v_luma_10_mmxext;
            c->h264_h_loop_filter_luma= ff_deblock_h_luma_10_mmxext;
            c->h264_v_loop_filter_luma_intra = ff_deblock_v_luma_intra_10_mmxext;
            c->h264_h_loop_filter_luma_intra = ff_deblock_h_luma_intra_10_mmxext;
#endif
            c->h264_idct_dc_add= ff_h264_idct_dc_add_10_mmx2;
            if (mm_flags&AV_CPU_FLAG_SSE2) {
                c->h264_idct_add       = ff_h264_idct_add_10_sse2;
                c->h264_idct8_dc_add   = ff_h264_idct8_dc_add_10_sse2;

                c->h264_idct_add16     = ff_h264_idct_add16_10_sse2;
                if (chroma_format_idc == 1)
                    c->h264_idct_add8  = ff_h264_idct_add8_10_sse2;
                c->h264_idct_add16intra= ff_h264_idct_add16intra_10_sse2;
#if HAVE_ALIGNED_STACK
                c->h264_idct8_add      = ff_h264_idct8_add_10_sse2;
                c->h264_idct8_add4     = ff_h264_idct8_add4_10_sse2;
#endif

                c->weight_h264_pixels_tab[0] = ff_h264_weight_16x16_10_sse2;
                c->weight_h264_pixels_tab[1] = ff_h264_weight_16x8_10_sse2;
                c->weight_h264_pixels_tab[2] = ff_h264_weight_8x16_10_sse2;
                c->weight_h264_pixels_tab[3] = ff_h264_weight_8x8_10_sse2;
                c->weight_h264_pixels_tab[4] = ff_h264_weight_8x4_10_sse2;
                c->weight_h264_pixels_tab[5] = ff_h264_weight_4x8_10_sse2;
                c->weight_h264_pixels_tab[6] = ff_h264_weight_4x4_10_sse2;
                c->weight_h264_pixels_tab[7] = ff_h264_weight_4x2_10_sse2;

                c->biweight_h264_pixels_tab[0] = ff_h264_biweight_16x16_10_sse2;
                c->biweight_h264_pixels_tab[1] = ff_h264_biweight_16x8_10_sse2;
                c->biweight_h264_pixels_tab[2] = ff_h264_biweight_8x16_10_sse2;
                c->biweight_h264_pixels_tab[3] = ff_h264_biweight_8x8_10_sse2;
                c->biweight_h264_pixels_tab[4] = ff_h264_biweight_8x4_10_sse2;
                c->biweight_h264_pixels_tab[5] = ff_h264_biweight_4x8_10_sse2;
                c->biweight_h264_pixels_tab[6] = ff_h264_biweight_4x4_10_sse2;
                c->biweight_h264_pixels_tab[7] = ff_h264_biweight_4x2_10_sse2;

                c->h264_v_loop_filter_chroma= ff_deblock_v_chroma_10_sse2;
                c->h264_v_loop_filter_chroma_intra= ff_deblock_v_chroma_intra_10_sse2;
#if HAVE_ALIGNED_STACK
                c->h264_v_loop_filter_luma = ff_deblock_v_luma_10_sse2;
                c->h264_h_loop_filter_luma = ff_deblock_h_luma_10_sse2;
                c->h264_v_loop_filter_luma_intra = ff_deblock_v_luma_intra_10_sse2;
                c->h264_h_loop_filter_luma_intra = ff_deblock_h_luma_intra_10_sse2;
#endif
            }
            if (mm_flags&AV_CPU_FLAG_SSE4) {
                c->weight_h264_pixels_tab[0] = ff_h264_weight_16x16_10_sse4;
                c->weight_h264_pixels_tab[1] = ff_h264_weight_16x8_10_sse4;
                c->weight_h264_pixels_tab[2] = ff_h264_weight_8x16_10_sse4;
                c->weight_h264_pixels_tab[3] = ff_h264_weight_8x8_10_sse4;
                c->weight_h264_pixels_tab[4] = ff_h264_weight_8x4_10_sse4;
                c->weight_h264_pixels_tab[5] = ff_h264_weight_4x8_10_sse4;
                c->weight_h264_pixels_tab[6] = ff_h264_weight_4x4_10_sse4;
                c->weight_h264_pixels_tab[7] = ff_h264_weight_4x2_10_sse4;

                c->biweight_h264_pixels_tab[0] = ff_h264_biweight_16x16_10_sse4;
                c->biweight_h264_pixels_tab[1] = ff_h264_biweight_16x8_10_sse4;
                c->biweight_h264_pixels_tab[2] = ff_h264_biweight_8x16_10_sse4;
                c->biweight_h264_pixels_tab[3] = ff_h264_biweight_8x8_10_sse4;
                c->biweight_h264_pixels_tab[4] = ff_h264_biweight_8x4_10_sse4;
                c->biweight_h264_pixels_tab[5] = ff_h264_biweight_4x8_10_sse4;
                c->biweight_h264_pixels_tab[6] = ff_h264_biweight_4x4_10_sse4;
                c->biweight_h264_pixels_tab[7] = ff_h264_biweight_4x2_10_sse4;
            }
#if HAVE_AVX
            if (mm_flags&AV_CPU_FLAG_AVX) {
                c->h264_idct_dc_add    =
                c->h264_idct_add       = ff_h264_idct_add_10_avx;
                c->h264_idct8_dc_add   = ff_h264_idct8_dc_add_10_avx;

                c->h264_idct_add16     = ff_h264_idct_add16_10_avx;
                if (chroma_format_idc == 1)
                    c->h264_idct_add8  = ff_h264_idct_add8_10_avx;
                c->h264_idct_add16intra= ff_h264_idct_add16intra_10_avx;
#if HAVE_ALIGNED_STACK
                c->h264_idct8_add      = ff_h264_idct8_add_10_avx;
                c->h264_idct8_add4     = ff_h264_idct8_add4_10_avx;
#endif

                c->h264_v_loop_filter_chroma= ff_deblock_v_chroma_10_avx;
                c->h264_v_loop_filter_chroma_intra= ff_deblock_v_chroma_intra_10_avx;
#if HAVE_ALIGNED_STACK
                c->h264_v_loop_filter_luma = ff_deblock_v_luma_10_avx;
                c->h264_h_loop_filter_luma = ff_deblock_h_luma_10_avx;
                c->h264_v_loop_filter_luma_intra = ff_deblock_v_luma_intra_10_avx;
                c->h264_h_loop_filter_luma_intra = ff_deblock_h_luma_intra_10_avx;
#endif
            }
#endif /* HAVE_AVX */
        }
    }
#endif
    }
}