/*
 * Copyright (c) 2004-2005 Michael Niedermayer, Loren Merritt
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include "libavutil/cpu.h"
#include "libavutil/x86_cpu.h"
#include "libavcodec/h264dsp.h"
#include "dsputil_mmx.h"

DECLARE_ALIGNED(8, static const uint64_t, ff_pb_3_1  ) = 0x0103010301030103ULL;

/***********************************/
/* IDCT */
#define IDCT_ADD_FUNC(NUM, DEPTH, OPT) \
void ff_h264_idct ## NUM ## _add_ ## DEPTH ## _ ## OPT (uint8_t *dst, int16_t *block, int stride);
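/* e.g. IDCT_ADD_FUNC(8_dc, 10, sse2) declares
 * void ff_h264_idct8_dc_add_10_sse2(uint8_t *dst, int16_t *block, int stride); */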

IDCT_ADD_FUNC(, 8, mmx)
IDCT_ADD_FUNC(, 10, sse2)
IDCT_ADD_FUNC(_dc, 8, mmx2)
IDCT_ADD_FUNC(_dc, 10, mmx2)
IDCT_ADD_FUNC(8_dc, 8, mmx2)
IDCT_ADD_FUNC(8_dc, 10, sse2)
IDCT_ADD_FUNC(8, 8, mmx)
IDCT_ADD_FUNC(8, 8, sse2)
IDCT_ADD_FUNC(8, 10, sse2)
#if HAVE_AVX
IDCT_ADD_FUNC(, 10, avx)
IDCT_ADD_FUNC(8_dc, 10, avx)
IDCT_ADD_FUNC(8, 10, avx)
#endif


#define IDCT_ADD_REP_FUNC(NUM, REP, DEPTH, OPT) \
void ff_h264_idct ## NUM ## _add ## REP ## _ ## DEPTH ## _ ## OPT \
                              (uint8_t *dst, const int *block_offset, \
                              DCTELEM *block, int stride, const uint8_t nnzc[6*8]);
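/* e.g. IDCT_ADD_REP_FUNC(8, 4, 8, mmx) declares ff_h264_idct8_add4_8_mmx(),
 * which adds up to 4 decoded 8x8 blocks at the offsets given in block_offset,
 * skipping blocks whose nnzc entry is zero. */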

IDCT_ADD_REP_FUNC(8, 4, 8, mmx)
IDCT_ADD_REP_FUNC(8, 4, 8, mmx2)
IDCT_ADD_REP_FUNC(8, 4, 8, sse2)
IDCT_ADD_REP_FUNC(8, 4, 10, sse2)
IDCT_ADD_REP_FUNC(8, 4, 10, avx)
IDCT_ADD_REP_FUNC(, 16, 8, mmx)
IDCT_ADD_REP_FUNC(, 16, 8, mmx2)
IDCT_ADD_REP_FUNC(, 16, 8, sse2)
IDCT_ADD_REP_FUNC(, 16, 10, sse2)
IDCT_ADD_REP_FUNC(, 16intra, 8, mmx)
IDCT_ADD_REP_FUNC(, 16intra, 8, mmx2)
IDCT_ADD_REP_FUNC(, 16intra, 8, sse2)
IDCT_ADD_REP_FUNC(, 16intra, 10, sse2)
#if HAVE_AVX
IDCT_ADD_REP_FUNC(, 16, 10, avx)
IDCT_ADD_REP_FUNC(, 16intra, 10, avx)
#endif


#define IDCT_ADD_REP_FUNC2(NUM, REP, DEPTH, OPT) \
void ff_h264_idct ## NUM ## _add ## REP ## _ ## DEPTH ## _ ## OPT \
                              (uint8_t **dst, const int *block_offset, \
                              DCTELEM *block, int stride, const uint8_t nnzc[6*8]);
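/* Same as IDCT_ADD_REP_FUNC, but dst is an array of plane pointers; used for
 * idct_add8, which covers both chroma planes in one call. */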
IDCT_ADD_REP_FUNC2(, 8, 8, mmx)
IDCT_ADD_REP_FUNC2(, 8, 8, mmx2)
IDCT_ADD_REP_FUNC2(, 8, 8, sse2)
IDCT_ADD_REP_FUNC2(, 8, 10, sse2)
#if HAVE_AVX
IDCT_ADD_REP_FUNC2(, 8, 10, avx)
#endif

void ff_h264_luma_dc_dequant_idct_mmx (DCTELEM *output, DCTELEM *input, int qmul);
void ff_h264_luma_dc_dequant_idct_sse2(DCTELEM *output, DCTELEM *input, int qmul);

/***********************************/
/* deblocking */

#define h264_loop_filter_strength_iteration_mmx2(bS, nnz, ref, mv, bidir, edges, step, mask_mv, dir, d_idx, mask_dir) \
    do { \
        x86_reg b_idx; \
        mask_mv <<= 3; \
        for( b_idx=0; b_idx<edges; b_idx+=step ) { \
            if (!mask_dir) \
            __asm__ volatile( \
                    "pxor %%mm0, %%mm0 \n\t" \
                    :: \
            ); \
            if(!(mask_mv & b_idx)) { \
                if(bidir) { \
                    __asm__ volatile( \
                        "movd         %a3(%0,%2), %%mm2 \n" \
                        "punpckldq    %a4(%0,%2), %%mm2 \n" /* { ref0[bn], ref1[bn] } */ \
                        "pshufw $0x44, 12(%0,%2), %%mm0 \n" /* { ref0[b], ref0[b] } */ \
                        "pshufw $0x44, 52(%0,%2), %%mm1 \n" /* { ref1[b], ref1[b] } */ \
                        "pshufw $0x4E, %%mm2, %%mm3 \n" \
                        "psubb         %%mm2, %%mm0 \n" /* { ref0[b]!=ref0[bn], ref0[b]!=ref1[bn] } */ \
                        "psubb         %%mm3, %%mm1 \n" /* { ref1[b]!=ref1[bn], ref1[b]!=ref0[bn] } */ \
 \
                        "por           %%mm1, %%mm0 \n" \
                        "movq   %a5(%1,%2,4), %%mm1 \n" \
                        "movq   %a6(%1,%2,4), %%mm2 \n" \
                        "movq          %%mm1, %%mm3 \n" \
                        "movq          %%mm2, %%mm4 \n" \
                        "psubw   48(%1,%2,4), %%mm1 \n" \
                        "psubw   56(%1,%2,4), %%mm2 \n" \
                        "psubw  208(%1,%2,4), %%mm3 \n" \
                        "psubw  216(%1,%2,4), %%mm4 \n" \
                        "packsswb      %%mm2, %%mm1 \n" \
                        "packsswb      %%mm4, %%mm3 \n" \
                        "paddb         %%mm6, %%mm1 \n" \
                        "paddb         %%mm6, %%mm3 \n" \
                        "psubusb       %%mm5, %%mm1 \n" /* abs(mv[b] - mv[bn]) >= limit */ \
                        "psubusb       %%mm5, %%mm3 \n" \
                        "packsswb      %%mm3, %%mm1 \n" \
 \
                        "por           %%mm1, %%mm0 \n" \
                        "movq   %a7(%1,%2,4), %%mm1 \n" \
                        "movq   %a8(%1,%2,4), %%mm2 \n" \
                        "movq          %%mm1, %%mm3 \n" \
                        "movq          %%mm2, %%mm4 \n" \
                        "psubw   48(%1,%2,4), %%mm1 \n" \
                        "psubw   56(%1,%2,4), %%mm2 \n" \
                        "psubw  208(%1,%2,4), %%mm3 \n" \
                        "psubw  216(%1,%2,4), %%mm4 \n" \
                        "packsswb      %%mm2, %%mm1 \n" \
                        "packsswb      %%mm4, %%mm3 \n" \
                        "paddb         %%mm6, %%mm1 \n" \
                        "paddb         %%mm6, %%mm3 \n" \
                        "psubusb       %%mm5, %%mm1 \n" /* abs(mv[b] - mv[bn]) >= limit */ \
                        "psubusb       %%mm5, %%mm3 \n" \
                        "packsswb      %%mm3, %%mm1 \n" \
 \
                        "pshufw $0x4E, %%mm1, %%mm1 \n" \
                        "por           %%mm1, %%mm0 \n" \
                        "pshufw $0x4E, %%mm0, %%mm1 \n" \
                        "pminub        %%mm1, %%mm0 \n" \
                        ::"r"(ref), \
                          "r"(mv), \
                          "r"(b_idx), \
                          "i"(d_idx+12), \
                          "i"(d_idx+52), \
                          "i"(d_idx*4+48), \
                          "i"(d_idx*4+56), \
                          "i"(d_idx*4+208), \
                          "i"(d_idx*4+216) \
                    ); \
                } else { \
                    __asm__ volatile( \
                        "movd   12(%0,%2), %%mm0 \n" \
                        "psubb %a3(%0,%2), %%mm0 \n" /* ref[b] != ref[bn] */ \
                        "movq   48(%1,%2,4), %%mm1 \n" \
                        "movq   56(%1,%2,4), %%mm2 \n" \
                        "psubw %a4(%1,%2,4), %%mm1 \n" \
                        "psubw %a5(%1,%2,4), %%mm2 \n" \
                        "packsswb   %%mm2, %%mm1 \n" \
                        "paddb      %%mm6, %%mm1 \n" \
                        "psubusb    %%mm5, %%mm1 \n" /* abs(mv[b] - mv[bn]) >= limit */ \
                        "packsswb   %%mm1, %%mm1 \n" \
                        "por        %%mm1, %%mm0 \n" \
                        ::"r"(ref), \
                          "r"(mv), \
                          "r"(b_idx), \
                          "i"(d_idx+12), \
                          "i"(d_idx*4+48), \
                          "i"(d_idx*4+56) \
                    ); \
                } \
            } \
            __asm__ volatile( \
                "movd 12(%0,%1), %%mm1 \n" \
                "por %a2(%0,%1), %%mm1 \n" /* nnz[b] || nnz[bn] */ \
                ::"r"(nnz), \
                  "r"(b_idx), \
                  "i"(d_idx+12) \
            ); \
            __asm__ volatile( \
                "pminub    %%mm7, %%mm1 \n" \
                "pminub    %%mm7, %%mm0 \n" \
                "psllw        $1, %%mm1 \n" \
                "pxor      %%mm2, %%mm2 \n" \
                "pmaxub    %%mm0, %%mm1 \n" \
                "punpcklbw %%mm2, %%mm1 \n" \
                "movq      %%mm1, %a1(%0,%2) \n" \
                ::"r"(bS), \
                  "i"(32*dir), \
                  "r"(b_idx) \
                :"memory" \
            ); \
        } \
    } while (0)

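/* Computes inter boundary strengths for one filter direction: roughly, bS is
 * 2 where either adjacent 4x4 block has nonzero coefficients, 1 where the
 * blocks differ in reference or by 4 or more quarter-pel units in some mv
 * component (field mode halves the vertical mv threshold), and 0 otherwise;
 * intra bS values are handled by the caller. */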
static void h264_loop_filter_strength_mmx2( int16_t bS[2][4][4], uint8_t nnz[40], int8_t ref[2][40], int16_t mv[2][40][2],
                                            int bidir, int edges, int step, int mask_mv0, int mask_mv1, int field ) {
    __asm__ volatile(
        "movq %0, %%mm7 \n"
        "movq %1, %%mm6 \n"
        ::"m"(ff_pb_1), "m"(ff_pb_3)
    );
    if(field)
        __asm__ volatile(
            "movq %0, %%mm6 \n"
            ::"m"(ff_pb_3_1)
        );
    __asm__ volatile(
        "movq  %%mm6, %%mm5 \n"
        "paddb %%mm5, %%mm5 \n"
    :);

    // could do a special case for dir==0 && edges==1, but it only reduces the
    // average filter time by 1.2%
    step  <<= 3;
    edges <<= 3;
    h264_loop_filter_strength_iteration_mmx2(bS, nnz, ref, mv, bidir, edges, step, mask_mv1, 1, -8,  0);
    h264_loop_filter_strength_iteration_mmx2(bS, nnz, ref, mv, bidir,    32,    8, mask_mv0, 0, -1, -1);

    __asm__ volatile(
        "movq   (%0), %%mm0 \n\t"
        "movq  8(%0), %%mm1 \n\t"
        "movq 16(%0), %%mm2 \n\t"
        "movq 24(%0), %%mm3 \n\t"
        TRANSPOSE4(%%mm0, %%mm1, %%mm2, %%mm3, %%mm4)
        "movq %%mm0,   (%0) \n\t"
        "movq %%mm3,  8(%0) \n\t"
        "movq %%mm4, 16(%0) \n\t"
        "movq %%mm2, 24(%0) \n\t"
        ::"r"(bS[0])
        :"memory"
    );
}

#define LF_FUNC(DIR, TYPE, DEPTH, OPT) \
void ff_deblock_ ## DIR ## _ ## TYPE ## _ ## DEPTH ## _ ## OPT (uint8_t *pix, int stride, \
                                                                int alpha, int beta, int8_t *tc0);
#define LF_IFUNC(DIR, TYPE, DEPTH, OPT) \
void ff_deblock_ ## DIR ## _ ## TYPE ## _ ## DEPTH ## _ ## OPT (uint8_t *pix, int stride, \
                                                                int alpha, int beta);
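/* e.g. LF_FUNC(v, luma, 8, sse2) declares ff_deblock_v_luma_8_sse2(); the
 * intra variants (LF_IFUNC) take no tc0 array since intra edges use the
 * stronger filter derived from alpha/beta alone. */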

#define LF_FUNCS(type, depth)\
LF_FUNC (h,  chroma,       depth, mmxext)\
LF_IFUNC(h,  chroma_intra, depth, mmxext)\
LF_FUNC (v,  chroma,       depth, mmxext)\
LF_IFUNC(v,  chroma_intra, depth, mmxext)\
LF_FUNC (h,  luma,         depth, mmxext)\
LF_IFUNC(h,  luma_intra,   depth, mmxext)\
LF_FUNC (h,  luma,         depth, sse2)\
LF_IFUNC(h,  luma_intra,   depth, sse2)\
LF_FUNC (v,  luma,         depth, sse2)\
LF_IFUNC(v,  luma_intra,   depth, sse2)\
LF_FUNC (h,  chroma,       depth, sse2)\
LF_IFUNC(h,  chroma_intra, depth, sse2)\
LF_FUNC (v,  chroma,       depth, sse2)\
LF_IFUNC(v,  chroma_intra, depth, sse2)\
LF_FUNC (h,  luma,         depth,  avx)\
LF_IFUNC(h,  luma_intra,   depth,  avx)\
LF_FUNC (v,  luma,         depth,  avx)\
LF_IFUNC(v,  luma_intra,   depth,  avx)\
LF_FUNC (h,  chroma,       depth,  avx)\
LF_IFUNC(h,  chroma_intra, depth,  avx)\
LF_FUNC (v,  chroma,       depth,  avx)\
LF_IFUNC(v,  chroma_intra, depth,  avx)

LF_FUNCS( uint8_t,  8)
LF_FUNCS(uint16_t, 10)

#if ARCH_X86_32
LF_FUNC (v8, luma,             8, mmxext)
static void ff_deblock_v_luma_8_mmxext(uint8_t *pix, int stride, int alpha, int beta, int8_t *tc0)
{
    if((tc0[0] & tc0[1]) >= 0)
        ff_deblock_v8_luma_8_mmxext(pix+0, stride, alpha, beta, tc0);
    if((tc0[2] & tc0[3]) >= 0)
        ff_deblock_v8_luma_8_mmxext(pix+8, stride, alpha, beta, tc0+2);
}
LF_IFUNC(v8, luma_intra,        8, mmxext)
static void ff_deblock_v_luma_intra_8_mmxext(uint8_t *pix, int stride, int alpha, int beta)
{
    ff_deblock_v8_luma_intra_8_mmxext(pix+0, stride, alpha, beta);
    ff_deblock_v8_luma_intra_8_mmxext(pix+8, stride, alpha, beta);
}
#endif /* ARCH_X86_32 */

LF_FUNC (v,  luma,            10, mmxext)
LF_IFUNC(v,  luma_intra,      10, mmxext)

/***********************************/
/* weighted prediction */

#define H264_WEIGHT(W, H, OPT) \
void ff_h264_weight_ ## W ## x ## H ## _ ## OPT(uint8_t *dst, \
    int stride, int log2_denom, int weight, int offset);

#define H264_BIWEIGHT(W, H, OPT) \
void ff_h264_biweight_ ## W ## x ## H ## _ ## OPT(uint8_t *dst, \
    uint8_t *src, int stride, int log2_denom, int weightd, \
    int weights, int offset);
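/* e.g. H264_WEIGHT(16, 16, mmx2) declares ff_h264_weight_16x16_mmx2(), which
 * applies explicit weighted prediction in place on a 16x16 block; the
 * biweight variants blend a second source block into dst. */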

#define H264_BIWEIGHT_MMX(W,H) \
H264_WEIGHT  (W, H, mmx2) \
H264_BIWEIGHT(W, H, mmx2)

#define H264_BIWEIGHT_MMX_SSE(W,H) \
H264_BIWEIGHT_MMX(W, H) \
H264_WEIGHT      (W, H, sse2) \
H264_BIWEIGHT    (W, H, sse2) \
H264_BIWEIGHT    (W, H, ssse3)

H264_BIWEIGHT_MMX_SSE(16, 16)
H264_BIWEIGHT_MMX_SSE(16,  8)
H264_BIWEIGHT_MMX_SSE( 8, 16)
H264_BIWEIGHT_MMX_SSE( 8,  8)
H264_BIWEIGHT_MMX_SSE( 8,  4)
H264_BIWEIGHT_MMX    ( 4,  8)
H264_BIWEIGHT_MMX    ( 4,  4)
H264_BIWEIGHT_MMX    ( 4,  2)

#define H264_WEIGHT_10(W, H, DEPTH, OPT) \
void ff_h264_weight_ ## W ## x ## H ## _ ## DEPTH ## _ ## OPT(uint8_t *dst, \
    int stride, int log2_denom, int weight, int offset);

#define H264_BIWEIGHT_10(W, H, DEPTH, OPT) \
void ff_h264_biweight_ ## W ## x ## H ## _ ## DEPTH ## _ ## OPT \
    (uint8_t *dst, uint8_t *src, int stride, int log2_denom, \
     int weightd, int weights, int offset);

#define H264_BIWEIGHT_10_SSE(W, H, DEPTH) \
H264_WEIGHT_10  (W, H, DEPTH, sse2) \
H264_WEIGHT_10  (W, H, DEPTH, sse4) \
H264_BIWEIGHT_10(W, H, DEPTH, sse2) \
H264_BIWEIGHT_10(W, H, DEPTH, sse4)

H264_BIWEIGHT_10_SSE(16, 16, 10)
H264_BIWEIGHT_10_SSE(16,  8, 10)
H264_BIWEIGHT_10_SSE( 8, 16, 10)
H264_BIWEIGHT_10_SSE( 8,  8, 10)
H264_BIWEIGHT_10_SSE( 8,  4, 10)
H264_BIWEIGHT_10_SSE( 4,  8, 10)
H264_BIWEIGHT_10_SSE( 4,  4, 10)
H264_BIWEIGHT_10_SSE( 4,  2, 10)

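/* Runtime dispatch: query the CPU features once and install the most capable
 * implementation of each function pointer; later assignments (SSE2, AVX)
 * deliberately override the earlier, more widely supported ones. */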
void ff_h264dsp_init_x86(H264DSPContext *c, const int bit_depth)
{
    int mm_flags = av_get_cpu_flags();

    if (mm_flags & AV_CPU_FLAG_MMX2) {
        c->h264_loop_filter_strength= h264_loop_filter_strength_mmx2;
    }

    if (bit_depth == 8) {
#if HAVE_YASM
    if (mm_flags & AV_CPU_FLAG_MMX) {
        c->h264_idct_dc_add         =
        c->h264_idct_add            = ff_h264_idct_add_8_mmx;
        c->h264_idct8_dc_add        =
        c->h264_idct8_add           = ff_h264_idct8_add_8_mmx;

        c->h264_idct_add16          = ff_h264_idct_add16_8_mmx;
        c->h264_idct8_add4          = ff_h264_idct8_add4_8_mmx;
        c->h264_idct_add8           = ff_h264_idct_add8_8_mmx;
        c->h264_idct_add16intra     = ff_h264_idct_add16intra_8_mmx;
        c->h264_luma_dc_dequant_idct= ff_h264_luma_dc_dequant_idct_mmx;

        if (mm_flags & AV_CPU_FLAG_MMX2) {
            c->h264_idct_dc_add    = ff_h264_idct_dc_add_8_mmx2;
            c->h264_idct8_dc_add   = ff_h264_idct8_dc_add_8_mmx2;
            c->h264_idct_add16     = ff_h264_idct_add16_8_mmx2;
            c->h264_idct8_add4     = ff_h264_idct8_add4_8_mmx2;
            c->h264_idct_add8      = ff_h264_idct_add8_8_mmx2;
            c->h264_idct_add16intra= ff_h264_idct_add16intra_8_mmx2;

            c->h264_v_loop_filter_chroma= ff_deblock_v_chroma_8_mmxext;
            c->h264_h_loop_filter_chroma= ff_deblock_h_chroma_8_mmxext;
            c->h264_v_loop_filter_chroma_intra= ff_deblock_v_chroma_intra_8_mmxext;
            c->h264_h_loop_filter_chroma_intra= ff_deblock_h_chroma_intra_8_mmxext;
#if ARCH_X86_32
            c->h264_v_loop_filter_luma= ff_deblock_v_luma_8_mmxext;
            c->h264_h_loop_filter_luma= ff_deblock_h_luma_8_mmxext;
            c->h264_v_loop_filter_luma_intra = ff_deblock_v_luma_intra_8_mmxext;
            c->h264_h_loop_filter_luma_intra = ff_deblock_h_luma_intra_8_mmxext;
#endif
            c->weight_h264_pixels_tab[0]= ff_h264_weight_16x16_mmx2;
            c->weight_h264_pixels_tab[1]= ff_h264_weight_16x8_mmx2;
            c->weight_h264_pixels_tab[2]= ff_h264_weight_8x16_mmx2;
            c->weight_h264_pixels_tab[3]= ff_h264_weight_8x8_mmx2;
            c->weight_h264_pixels_tab[4]= ff_h264_weight_8x4_mmx2;
            c->weight_h264_pixels_tab[5]= ff_h264_weight_4x8_mmx2;
            c->weight_h264_pixels_tab[6]= ff_h264_weight_4x4_mmx2;
            c->weight_h264_pixels_tab[7]= ff_h264_weight_4x2_mmx2;

            c->biweight_h264_pixels_tab[0]= ff_h264_biweight_16x16_mmx2;
            c->biweight_h264_pixels_tab[1]= ff_h264_biweight_16x8_mmx2;
            c->biweight_h264_pixels_tab[2]= ff_h264_biweight_8x16_mmx2;
            c->biweight_h264_pixels_tab[3]= ff_h264_biweight_8x8_mmx2;
            c->biweight_h264_pixels_tab[4]= ff_h264_biweight_8x4_mmx2;
            c->biweight_h264_pixels_tab[5]= ff_h264_biweight_4x8_mmx2;
            c->biweight_h264_pixels_tab[6]= ff_h264_biweight_4x4_mmx2;
            c->biweight_h264_pixels_tab[7]= ff_h264_biweight_4x2_mmx2;

            if (mm_flags&AV_CPU_FLAG_SSE2) {
                c->h264_idct8_add           = ff_h264_idct8_add_8_sse2;

                c->h264_idct_add16          = ff_h264_idct_add16_8_sse2;
                c->h264_idct8_add4          = ff_h264_idct8_add4_8_sse2;
                c->h264_idct_add8           = ff_h264_idct_add8_8_sse2;
                c->h264_idct_add16intra     = ff_h264_idct_add16intra_8_sse2;
                c->h264_luma_dc_dequant_idct= ff_h264_luma_dc_dequant_idct_sse2;

                c->weight_h264_pixels_tab[0]= ff_h264_weight_16x16_sse2;
                c->weight_h264_pixels_tab[1]= ff_h264_weight_16x8_sse2;
                c->weight_h264_pixels_tab[2]= ff_h264_weight_8x16_sse2;
                c->weight_h264_pixels_tab[3]= ff_h264_weight_8x8_sse2;
                c->weight_h264_pixels_tab[4]= ff_h264_weight_8x4_sse2;

                c->biweight_h264_pixels_tab[0]= ff_h264_biweight_16x16_sse2;
                c->biweight_h264_pixels_tab[1]= ff_h264_biweight_16x8_sse2;
                c->biweight_h264_pixels_tab[2]= ff_h264_biweight_8x16_sse2;
                c->biweight_h264_pixels_tab[3]= ff_h264_biweight_8x8_sse2;
                c->biweight_h264_pixels_tab[4]= ff_h264_biweight_8x4_sse2;

#if HAVE_ALIGNED_STACK
                c->h264_v_loop_filter_luma = ff_deblock_v_luma_8_sse2;
                c->h264_h_loop_filter_luma = ff_deblock_h_luma_8_sse2;
                c->h264_v_loop_filter_luma_intra = ff_deblock_v_luma_intra_8_sse2;
                c->h264_h_loop_filter_luma_intra = ff_deblock_h_luma_intra_8_sse2;
#endif
            }
            if (mm_flags&AV_CPU_FLAG_SSSE3) {
                c->biweight_h264_pixels_tab[0]= ff_h264_biweight_16x16_ssse3;
                c->biweight_h264_pixels_tab[1]= ff_h264_biweight_16x8_ssse3;
                c->biweight_h264_pixels_tab[2]= ff_h264_biweight_8x16_ssse3;
                c->biweight_h264_pixels_tab[3]= ff_h264_biweight_8x8_ssse3;
                c->biweight_h264_pixels_tab[4]= ff_h264_biweight_8x4_ssse3;
            }
            if (HAVE_AVX && mm_flags&AV_CPU_FLAG_AVX) {
#if HAVE_ALIGNED_STACK
                c->h264_v_loop_filter_luma = ff_deblock_v_luma_8_avx;
                c->h264_h_loop_filter_luma = ff_deblock_h_luma_8_avx;
                c->h264_v_loop_filter_luma_intra = ff_deblock_v_luma_intra_8_avx;
                c->h264_h_loop_filter_luma_intra = ff_deblock_h_luma_intra_8_avx;
#endif
            }
        }
    }
#endif
    } else if (bit_depth == 10) {
#if HAVE_YASM
    if (mm_flags & AV_CPU_FLAG_MMX) {
        if (mm_flags & AV_CPU_FLAG_MMX2) {
#if ARCH_X86_32
            c->h264_v_loop_filter_chroma= ff_deblock_v_chroma_10_mmxext;
            c->h264_v_loop_filter_chroma_intra= ff_deblock_v_chroma_intra_10_mmxext;
            c->h264_v_loop_filter_luma= ff_deblock_v_luma_10_mmxext;
            c->h264_h_loop_filter_luma= ff_deblock_h_luma_10_mmxext;
            c->h264_v_loop_filter_luma_intra = ff_deblock_v_luma_intra_10_mmxext;
            c->h264_h_loop_filter_luma_intra = ff_deblock_h_luma_intra_10_mmxext;
#endif
            c->h264_idct_dc_add= ff_h264_idct_dc_add_10_mmx2;
            if (mm_flags&AV_CPU_FLAG_SSE2) {
                c->h264_idct_add       = ff_h264_idct_add_10_sse2;
                c->h264_idct8_dc_add   = ff_h264_idct8_dc_add_10_sse2;

                c->h264_idct_add16     = ff_h264_idct_add16_10_sse2;
                c->h264_idct_add8      = ff_h264_idct_add8_10_sse2;
                c->h264_idct_add16intra= ff_h264_idct_add16intra_10_sse2;
#if HAVE_ALIGNED_STACK
                c->h264_idct8_add      = ff_h264_idct8_add_10_sse2;
                c->h264_idct8_add4     = ff_h264_idct8_add4_10_sse2;
#endif

                c->weight_h264_pixels_tab[0] = ff_h264_weight_16x16_10_sse2;
                c->weight_h264_pixels_tab[1] = ff_h264_weight_16x8_10_sse2;
                c->weight_h264_pixels_tab[2] = ff_h264_weight_8x16_10_sse2;
                c->weight_h264_pixels_tab[3] = ff_h264_weight_8x8_10_sse2;
                c->weight_h264_pixels_tab[4] = ff_h264_weight_8x4_10_sse2;
                c->weight_h264_pixels_tab[5] = ff_h264_weight_4x8_10_sse2;
                c->weight_h264_pixels_tab[6] = ff_h264_weight_4x4_10_sse2;
                c->weight_h264_pixels_tab[7] = ff_h264_weight_4x2_10_sse2;

                c->biweight_h264_pixels_tab[0] = ff_h264_biweight_16x16_10_sse2;
                c->biweight_h264_pixels_tab[1] = ff_h264_biweight_16x8_10_sse2;
                c->biweight_h264_pixels_tab[2] = ff_h264_biweight_8x16_10_sse2;
                c->biweight_h264_pixels_tab[3] = ff_h264_biweight_8x8_10_sse2;
                c->biweight_h264_pixels_tab[4] = ff_h264_biweight_8x4_10_sse2;
                c->biweight_h264_pixels_tab[5] = ff_h264_biweight_4x8_10_sse2;
                c->biweight_h264_pixels_tab[6] = ff_h264_biweight_4x4_10_sse2;
                c->biweight_h264_pixels_tab[7] = ff_h264_biweight_4x2_10_sse2;

                c->h264_v_loop_filter_chroma= ff_deblock_v_chroma_10_sse2;
                c->h264_v_loop_filter_chroma_intra= ff_deblock_v_chroma_intra_10_sse2;
#if HAVE_ALIGNED_STACK
                c->h264_v_loop_filter_luma = ff_deblock_v_luma_10_sse2;
                c->h264_h_loop_filter_luma = ff_deblock_h_luma_10_sse2;
                c->h264_v_loop_filter_luma_intra = ff_deblock_v_luma_intra_10_sse2;
                c->h264_h_loop_filter_luma_intra = ff_deblock_h_luma_intra_10_sse2;
#endif
            }
            if (mm_flags&AV_CPU_FLAG_SSE4) {
                c->weight_h264_pixels_tab[0] = ff_h264_weight_16x16_10_sse4;
                c->weight_h264_pixels_tab[1] = ff_h264_weight_16x8_10_sse4;
                c->weight_h264_pixels_tab[2] = ff_h264_weight_8x16_10_sse4;
                c->weight_h264_pixels_tab[3] = ff_h264_weight_8x8_10_sse4;
                c->weight_h264_pixels_tab[4] = ff_h264_weight_8x4_10_sse4;
                c->weight_h264_pixels_tab[5] = ff_h264_weight_4x8_10_sse4;
                c->weight_h264_pixels_tab[6] = ff_h264_weight_4x4_10_sse4;
                c->weight_h264_pixels_tab[7] = ff_h264_weight_4x2_10_sse4;

                c->biweight_h264_pixels_tab[0] = ff_h264_biweight_16x16_10_sse4;
                c->biweight_h264_pixels_tab[1] = ff_h264_biweight_16x8_10_sse4;
                c->biweight_h264_pixels_tab[2] = ff_h264_biweight_8x16_10_sse4;
                c->biweight_h264_pixels_tab[3] = ff_h264_biweight_8x8_10_sse4;
                c->biweight_h264_pixels_tab[4] = ff_h264_biweight_8x4_10_sse4;
                c->biweight_h264_pixels_tab[5] = ff_h264_biweight_4x8_10_sse4;
                c->biweight_h264_pixels_tab[6] = ff_h264_biweight_4x4_10_sse4;
                c->biweight_h264_pixels_tab[7] = ff_h264_biweight_4x2_10_sse4;
            }
#if HAVE_AVX
            if (mm_flags&AV_CPU_FLAG_AVX) {
                c->h264_idct_dc_add    =
                c->h264_idct_add       = ff_h264_idct_add_10_avx;
                c->h264_idct8_dc_add   = ff_h264_idct8_dc_add_10_avx;

                c->h264_idct_add16     = ff_h264_idct_add16_10_avx;
                c->h264_idct_add8      = ff_h264_idct_add8_10_avx;
                c->h264_idct_add16intra= ff_h264_idct_add16intra_10_avx;
#if HAVE_ALIGNED_STACK
                c->h264_idct8_add      = ff_h264_idct8_add_10_avx;
                c->h264_idct8_add4     = ff_h264_idct8_add4_10_avx;
#endif

                c->h264_v_loop_filter_chroma= ff_deblock_v_chroma_10_avx;
                c->h264_v_loop_filter_chroma_intra= ff_deblock_v_chroma_intra_10_avx;
#if HAVE_ALIGNED_STACK
                c->h264_v_loop_filter_luma = ff_deblock_v_luma_10_avx;
                c->h264_h_loop_filter_luma = ff_deblock_h_luma_10_avx;
                c->h264_v_loop_filter_luma_intra = ff_deblock_v_luma_intra_10_avx;
                c->h264_h_loop_filter_luma_intra = ff_deblock_h_luma_intra_10_avx;
#endif
            }
#endif /* HAVE_AVX */
        }
    }
#endif
    }
}