/*
 * Copyright (c) 2004-2005 Michael Niedermayer, Loren Merritt
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include "libavutil/cpu.h"
#include "libavutil/x86_cpu.h"
#include "libavcodec/h264dsp.h"
#include "dsputil_mmx.h"

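/* mv threshold bytes for field macroblocks: after packsswb the mv deltas
 * interleave as {x,y,x,y,...}, so the alternating 3/1 pattern (doubled into
 * %%mm5) tests |dx| >= 4 but |dy| >= 2; frame MBs load ff_pb_3 instead for
 * |d| >= 4 on both components (see h264_loop_filter_strength_mmx2 below) */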
DECLARE_ALIGNED(8, static const uint64_t, ff_pb_3_1  ) = 0x0103010301030103ULL;

/***********************************/
/* IDCT */

void ff_h264_idct_add_mmx     (uint8_t *dst, int16_t *block, int stride);
void ff_h264_idct8_add_mmx    (uint8_t *dst, int16_t *block, int stride);
void ff_h264_idct8_add_sse2   (uint8_t *dst, int16_t *block, int stride);
void ff_h264_idct_dc_add_mmx2 (uint8_t *dst, int16_t *block, int stride);
void ff_h264_idct8_dc_add_mmx2(uint8_t *dst, int16_t *block, int stride);

void ff_h264_idct_add16_mmx      (uint8_t *dst, const int *block_offset,
                                  DCTELEM *block, int stride, const uint8_t nnzc[6*8]);
void ff_h264_idct8_add4_mmx      (uint8_t *dst, const int *block_offset,
                                  DCTELEM *block, int stride, const uint8_t nnzc[6*8]);
void ff_h264_idct_add16_mmx2     (uint8_t *dst, const int *block_offset,
                                  DCTELEM *block, int stride, const uint8_t nnzc[6*8]);
void ff_h264_idct_add16intra_mmx (uint8_t *dst, const int *block_offset,
                                  DCTELEM *block, int stride, const uint8_t nnzc[6*8]);
void ff_h264_idct_add16intra_mmx2(uint8_t *dst, const int *block_offset,
                                  DCTELEM *block, int stride, const uint8_t nnzc[6*8]);
void ff_h264_idct8_add4_mmx2     (uint8_t *dst, const int *block_offset,
                                  DCTELEM *block, int stride, const uint8_t nnzc[6*8]);
void ff_h264_idct8_add4_sse2     (uint8_t *dst, const int *block_offset,
                                  DCTELEM *block, int stride, const uint8_t nnzc[6*8]);
void ff_h264_idct_add8_mmx       (uint8_t **dest, const int *block_offset,
                                  DCTELEM *block, int stride, const uint8_t nnzc[6*8]);
void ff_h264_idct_add8_mmx2      (uint8_t **dest, const int *block_offset,
                                  DCTELEM *block, int stride, const uint8_t nnzc[6*8]);

void ff_h264_idct_add16_sse2     (uint8_t *dst, const int *block_offset, DCTELEM *block,
                                  int stride, const uint8_t nnzc[6*8]);
void ff_h264_idct_add16intra_sse2(uint8_t *dst, const int *block_offset, DCTELEM *block,
                                  int stride, const uint8_t nnzc[6*8]);
void ff_h264_idct_add8_sse2      (uint8_t **dest, const int *block_offset, DCTELEM *block,
                                  int stride, const uint8_t nnzc[6*8]);

/***********************************/
/* deblocking */

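/*
 * One pass of boundary-strength computation for one filtering direction:
 * dir==1 compares each 4x4 block with its neighbour above (d_idx==-8),
 * dir==0 with the neighbour to its left (d_idx==-1).  Per 4-pixel edge
 * segment the non-intra H.264 rules reduce to roughly:
 *
 *     bS = 2  if nnz[b] || nnz[bn]                  (coded coefficients)
 *     bS = 1  if ref[b] != ref[bn]
 *             or any |mv[b][i] - mv[bn][i]| >= 4    (>= 2 vertically in
 *                                                    field mode)
 *     bS = 0  otherwise
 *
 * The asm evaluates the mv test with byte arithmetic: packsswb saturates
 * the deltas, then paddb %%mm6 / psubusb %%mm5 leaves a nonzero byte
 * exactly when a delta reaches the threshold.  pminub against ff_pb_1 in
 * %%mm7 clamps the flags to 0/1, and psllw $1 turns the nnz flag into 2.
 */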
#define h264_loop_filter_strength_iteration_mmx2(bS, nnz, ref, mv, bidir, edges, step, mask_mv, dir, d_idx, mask_dir) \
    do { \
        x86_reg b_idx; \
        mask_mv <<= 3; \
        for( b_idx=0; b_idx<edges; b_idx+=step ) { \
            if (!mask_dir) \
            __asm__ volatile( \
                    "pxor %%mm0, %%mm0 \n\t" \
                    :: \
            ); \
            if(!(mask_mv & b_idx)) { \
                if(bidir) { \
                    __asm__ volatile( \
                        "movd         %a3(%0,%2), %%mm2 \n" \
                        "punpckldq    %a4(%0,%2), %%mm2 \n" /* { ref0[bn], ref1[bn] } */ \
                        "pshufw $0x44, 12(%0,%2), %%mm0 \n" /* { ref0[b], ref0[b] } */ \
                        "pshufw $0x44, 52(%0,%2), %%mm1 \n" /* { ref1[b], ref1[b] } */ \
                        "pshufw $0x4E, %%mm2, %%mm3 \n" \
                        "psubb         %%mm2, %%mm0 \n" /* { ref0[b]!=ref0[bn], ref0[b]!=ref1[bn] } */ \
                        "psubb         %%mm3, %%mm1 \n" /* { ref1[b]!=ref1[bn], ref1[b]!=ref0[bn] } */ \
 \
                        "por           %%mm1, %%mm0 \n" \
                        "movq   %a5(%1,%2,4), %%mm1 \n" \
                        "movq   %a6(%1,%2,4), %%mm2 \n" \
                        "movq          %%mm1, %%mm3 \n" \
                        "movq          %%mm2, %%mm4 \n" \
                        "psubw   48(%1,%2,4), %%mm1 \n" \
                        "psubw   56(%1,%2,4), %%mm2 \n" \
                        "psubw  208(%1,%2,4), %%mm3 \n" \
                        "psubw  216(%1,%2,4), %%mm4 \n" \
                        "packsswb      %%mm2, %%mm1 \n" \
                        "packsswb      %%mm4, %%mm3 \n" \
                        "paddb         %%mm6, %%mm1 \n" \
                        "paddb         %%mm6, %%mm3 \n" \
                        "psubusb       %%mm5, %%mm1 \n" /* abs(mv[b] - mv[bn]) >= limit */ \
                        "psubusb       %%mm5, %%mm3 \n" \
                        "packsswb      %%mm3, %%mm1 \n" \
 \
                        "por           %%mm1, %%mm0 \n" \
                        "movq   %a7(%1,%2,4), %%mm1 \n" \
                        "movq   %a8(%1,%2,4), %%mm2 \n" \
                        "movq          %%mm1, %%mm3 \n" \
                        "movq          %%mm2, %%mm4 \n" \
                        "psubw   48(%1,%2,4), %%mm1 \n" \
                        "psubw   56(%1,%2,4), %%mm2 \n" \
                        "psubw  208(%1,%2,4), %%mm3 \n" \
                        "psubw  216(%1,%2,4), %%mm4 \n" \
                        "packsswb      %%mm2, %%mm1 \n" \
                        "packsswb      %%mm4, %%mm3 \n" \
                        "paddb         %%mm6, %%mm1 \n" \
                        "paddb         %%mm6, %%mm3 \n" \
                        "psubusb       %%mm5, %%mm1 \n" /* abs(mv[b] - mv[bn]) >= limit */ \
                        "psubusb       %%mm5, %%mm3 \n" \
                        "packsswb      %%mm3, %%mm1 \n" \
 \
                        "pshufw $0x4E, %%mm1, %%mm1 \n" \
                        "por           %%mm1, %%mm0 \n" \
                        "pshufw $0x4E, %%mm0, %%mm1 \n" \
                        "pminub        %%mm1, %%mm0 \n" \
                        ::"r"(ref), \
                          "r"(mv), \
                          "r"(b_idx), \
                          "i"(d_idx+12), \
                          "i"(d_idx+52), \
                          "i"(d_idx*4+48), \
                          "i"(d_idx*4+56), \
                          "i"(d_idx*4+208), \
                          "i"(d_idx*4+216) \
                    ); \
                } else { \
                    __asm__ volatile( \
                        "movd   12(%0,%2), %%mm0 \n" \
                        "psubb %a3(%0,%2), %%mm0 \n" /* ref[b] != ref[bn] */ \
                        "movq   48(%1,%2,4), %%mm1 \n" \
                        "movq   56(%1,%2,4), %%mm2 \n" \
                        "psubw %a4(%1,%2,4), %%mm1 \n" \
                        "psubw %a5(%1,%2,4), %%mm2 \n" \
                        "packsswb   %%mm2, %%mm1 \n" \
                        "paddb      %%mm6, %%mm1 \n" \
                        "psubusb    %%mm5, %%mm1 \n" /* abs(mv[b] - mv[bn]) >= limit */ \
                        "packsswb   %%mm1, %%mm1 \n" \
                        "por        %%mm1, %%mm0 \n" \
                        ::"r"(ref), \
                          "r"(mv), \
                          "r"(b_idx), \
                          "i"(d_idx+12), \
                          "i"(d_idx*4+48), \
                          "i"(d_idx*4+56) \
                    ); \
                } \
            } \
            __asm__ volatile( \
                "movd 12(%0,%1), %%mm1 \n" \
                "por %a2(%0,%1), %%mm1 \n" /* nnz[b] || nnz[bn] */ \
                ::"r"(nnz), \
                  "r"(b_idx), \
                  "i"(d_idx+12) \
            ); \
            __asm__ volatile( \
                "pminub    %%mm7, %%mm1 \n" \
                "pminub    %%mm7, %%mm0 \n" \
                "psllw        $1, %%mm1 \n" \
                "pxor      %%mm2, %%mm2 \n" \
                "pmaxub    %%mm0, %%mm1 \n" \
                "punpcklbw %%mm2, %%mm1 \n" \
                "movq      %%mm1, %a1(%0,%2) \n" \
                ::"r"(bS), \
                  "i"(32*dir), \
                  "r"(b_idx) \
                :"memory" \
            ); \
        } \
    } while (0)

static void h264_loop_filter_strength_mmx2( int16_t bS[2][4][4], uint8_t nnz[40], int8_t ref[2][40], int16_t mv[2][40][2],
                                            int bidir, int edges, int step, int mask_mv0, int mask_mv1, int field ) {
    __asm__ volatile(
        "movq %0, %%mm7 \n"
        "movq %1, %%mm6 \n"
        ::"m"(ff_pb_1), "m"(ff_pb_3)
    );
    if(field)
        __asm__ volatile(
            "movq %0, %%mm6 \n"
            ::"m"(ff_pb_3_1)
        );
    __asm__ volatile(
        "movq  %%mm6, %%mm5 \n"
        "paddb %%mm5, %%mm5 \n"
    :);

    // could do a special case for dir==0 && edges==1, but it only reduces the
    // average filter time by 1.2%
    step  <<= 3;
    edges <<= 3;
    h264_loop_filter_strength_iteration_mmx2(bS, nnz, ref, mv, bidir, edges, step, mask_mv1, 1, -8,  0);
    h264_loop_filter_strength_iteration_mmx2(bS, nnz, ref, mv, bidir,    32,    8, mask_mv0, 0, -1, -1);

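    /* the dir==0 pass above stores the vertical-edge strengths transposed;
     * flip bS[0] back to the row-major layout the callers expect */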
    __asm__ volatile(
        "movq   (%0), %%mm0 \n\t"
        "movq  8(%0), %%mm1 \n\t"
        "movq 16(%0), %%mm2 \n\t"
        "movq 24(%0), %%mm3 \n\t"
        TRANSPOSE4(%%mm0, %%mm1, %%mm2, %%mm3, %%mm4)
        "movq %%mm0,   (%0) \n\t"
        "movq %%mm3,  8(%0) \n\t"
        "movq %%mm4, 16(%0) \n\t"
        "movq %%mm2, 24(%0) \n\t"
        ::"r"(bS[0])
        :"memory"
    );
}

#define LF_FUNC(DIR, TYPE, OPT) \
void ff_x264_deblock_ ## DIR ## _ ## TYPE ## _ ## OPT (uint8_t *pix, int stride, \
                                               int alpha, int beta, int8_t *tc0);
#define LF_IFUNC(DIR, TYPE, OPT) \
void ff_x264_deblock_ ## DIR ## _ ## TYPE ## _ ## OPT (uint8_t *pix, int stride, \
                                               int alpha, int beta);

LF_FUNC (h,  chroma,       mmxext)
LF_IFUNC(h,  chroma_intra, mmxext)
LF_FUNC (v,  chroma,       mmxext)
LF_IFUNC(v,  chroma_intra, mmxext)

LF_FUNC (h,  luma,         mmxext)
LF_IFUNC(h,  luma_intra,   mmxext)
#if HAVE_YASM && ARCH_X86_32
LF_FUNC (v8, luma,         mmxext)
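/* the mmxext yasm kernel filters only an 8-pixel-wide strip, so a full
 * 16-wide luma edge takes two calls.  tc0 entries are -1 where filtering
 * is disabled, so (tc0[i] & tc0[i+1]) >= 0 is a branch-free check that at
 * least one of the two 4-pixel segments is actually filtered. */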
static void ff_x264_deblock_v_luma_mmxext(uint8_t *pix, int stride, int alpha, int beta, int8_t *tc0)
{
    if((tc0[0] & tc0[1]) >= 0)
        ff_x264_deblock_v8_luma_mmxext(pix+0, stride, alpha, beta, tc0);
    if((tc0[2] & tc0[3]) >= 0)
        ff_x264_deblock_v8_luma_mmxext(pix+8, stride, alpha, beta, tc0+2);
}
LF_IFUNC(v8, luma_intra,   mmxext)
static void ff_x264_deblock_v_luma_intra_mmxext(uint8_t *pix, int stride, int alpha, int beta)
{
    ff_x264_deblock_v8_luma_intra_mmxext(pix+0, stride, alpha, beta);
    ff_x264_deblock_v8_luma_intra_mmxext(pix+8, stride, alpha, beta);
}
#endif

LF_FUNC (h,  luma,         sse2)
LF_IFUNC(h,  luma_intra,   sse2)
LF_FUNC (v,  luma,         sse2)
LF_IFUNC(v,  luma_intra,   sse2)

/***********************************/
/* weighted prediction */

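/* prototypes for the yasm (bi)weighted prediction kernels; following the
 * spec, explicit weighting computes approximately
 *     dst[x] = clip8(((dst[x]*weight + (1 << (log2_denom-1))) >> log2_denom) + offset)
 * in place, and the biweight variant blends src into dst with weights
 * weights/weightd and a combined rounding offset */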
#define H264_WEIGHT(W, H, OPT) \
void ff_h264_weight_ ## W ## x ## H ## _ ## OPT(uint8_t *dst, \
    int stride, int log2_denom, int weight, int offset);

#define H264_BIWEIGHT(W, H, OPT) \
void ff_h264_biweight_ ## W ## x ## H ## _ ## OPT(uint8_t *dst, \
    uint8_t *src, int stride, int log2_denom, int weightd, \
    int weights, int offset);

#define H264_BIWEIGHT_MMX(W,H) \
H264_WEIGHT  (W, H, mmx2) \
H264_BIWEIGHT(W, H, mmx2)

#define H264_BIWEIGHT_MMX_SSE(W,H) \
H264_BIWEIGHT_MMX(W, H) \
H264_WEIGHT      (W, H, sse2) \
H264_BIWEIGHT    (W, H, sse2) \
H264_BIWEIGHT    (W, H, ssse3)

H264_BIWEIGHT_MMX_SSE(16, 16)
H264_BIWEIGHT_MMX_SSE(16,  8)
H264_BIWEIGHT_MMX_SSE( 8, 16)
H264_BIWEIGHT_MMX_SSE( 8,  8)
H264_BIWEIGHT_MMX_SSE( 8,  4)
H264_BIWEIGHT_MMX    ( 4,  8)
H264_BIWEIGHT_MMX    ( 4,  4)
H264_BIWEIGHT_MMX    ( 4,  2)

void ff_h264dsp_init_x86(H264DSPContext *c)
{
    int mm_flags = av_get_cpu_flags();

    if (mm_flags & AV_CPU_FLAG_MMX2) {
        c->h264_loop_filter_strength= h264_loop_filter_strength_mmx2;
    }
#if HAVE_YASM
    if (mm_flags & AV_CPU_FLAG_MMX) {
        c->h264_idct_dc_add=
        c->h264_idct_add= ff_h264_idct_add_mmx;
        c->h264_idct8_dc_add=
        c->h264_idct8_add= ff_h264_idct8_add_mmx;

        c->h264_idct_add16     = ff_h264_idct_add16_mmx;
        c->h264_idct8_add4     = ff_h264_idct8_add4_mmx;
        c->h264_idct_add8      = ff_h264_idct_add8_mmx;
        c->h264_idct_add16intra= ff_h264_idct_add16intra_mmx;

        if (mm_flags & AV_CPU_FLAG_MMX2) {
            c->h264_idct_dc_add= ff_h264_idct_dc_add_mmx2;
            c->h264_idct8_dc_add= ff_h264_idct8_dc_add_mmx2;
            c->h264_idct_add16     = ff_h264_idct_add16_mmx2;
            c->h264_idct8_add4     = ff_h264_idct8_add4_mmx2;
            c->h264_idct_add8      = ff_h264_idct_add8_mmx2;
            c->h264_idct_add16intra= ff_h264_idct_add16intra_mmx2;

            c->h264_v_loop_filter_chroma= ff_x264_deblock_v_chroma_mmxext;
            c->h264_h_loop_filter_chroma= ff_x264_deblock_h_chroma_mmxext;
            c->h264_v_loop_filter_chroma_intra= ff_x264_deblock_v_chroma_intra_mmxext;
            c->h264_h_loop_filter_chroma_intra= ff_x264_deblock_h_chroma_intra_mmxext;
#if ARCH_X86_32
            c->h264_v_loop_filter_luma= ff_x264_deblock_v_luma_mmxext;
            c->h264_h_loop_filter_luma= ff_x264_deblock_h_luma_mmxext;
            c->h264_v_loop_filter_luma_intra = ff_x264_deblock_v_luma_intra_mmxext;
            c->h264_h_loop_filter_luma_intra = ff_x264_deblock_h_luma_intra_mmxext;
#endif
            c->weight_h264_pixels_tab[0]= ff_h264_weight_16x16_mmx2;
            c->weight_h264_pixels_tab[1]= ff_h264_weight_16x8_mmx2;
            c->weight_h264_pixels_tab[2]= ff_h264_weight_8x16_mmx2;
            c->weight_h264_pixels_tab[3]= ff_h264_weight_8x8_mmx2;
            c->weight_h264_pixels_tab[4]= ff_h264_weight_8x4_mmx2;
            c->weight_h264_pixels_tab[5]= ff_h264_weight_4x8_mmx2;
            c->weight_h264_pixels_tab[6]= ff_h264_weight_4x4_mmx2;
            c->weight_h264_pixels_tab[7]= ff_h264_weight_4x2_mmx2;

            c->biweight_h264_pixels_tab[0]= ff_h264_biweight_16x16_mmx2;
            c->biweight_h264_pixels_tab[1]= ff_h264_biweight_16x8_mmx2;
            c->biweight_h264_pixels_tab[2]= ff_h264_biweight_8x16_mmx2;
            c->biweight_h264_pixels_tab[3]= ff_h264_biweight_8x8_mmx2;
            c->biweight_h264_pixels_tab[4]= ff_h264_biweight_8x4_mmx2;
            c->biweight_h264_pixels_tab[5]= ff_h264_biweight_4x8_mmx2;
            c->biweight_h264_pixels_tab[6]= ff_h264_biweight_4x4_mmx2;
            c->biweight_h264_pixels_tab[7]= ff_h264_biweight_4x2_mmx2;

            if (mm_flags&AV_CPU_FLAG_SSE2) {
                c->h264_idct8_add = ff_h264_idct8_add_sse2;
                c->h264_idct8_add4= ff_h264_idct8_add4_sse2;

                c->weight_h264_pixels_tab[0]= ff_h264_weight_16x16_sse2;
                c->weight_h264_pixels_tab[1]= ff_h264_weight_16x8_sse2;
                c->weight_h264_pixels_tab[2]= ff_h264_weight_8x16_sse2;
                c->weight_h264_pixels_tab[3]= ff_h264_weight_8x8_sse2;
                c->weight_h264_pixels_tab[4]= ff_h264_weight_8x4_sse2;

                c->biweight_h264_pixels_tab[0]= ff_h264_biweight_16x16_sse2;
                c->biweight_h264_pixels_tab[1]= ff_h264_biweight_16x8_sse2;
                c->biweight_h264_pixels_tab[2]= ff_h264_biweight_8x16_sse2;
                c->biweight_h264_pixels_tab[3]= ff_h264_biweight_8x8_sse2;
                c->biweight_h264_pixels_tab[4]= ff_h264_biweight_8x4_sse2;

#if HAVE_ALIGNED_STACK
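                /* the x264-derived sse2 luma deblock kernels keep
                 * temporaries on the stack and assume it is 16-byte
                 * aligned, which not every ABI guarantees */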
                c->h264_v_loop_filter_luma = ff_x264_deblock_v_luma_sse2;
                c->h264_h_loop_filter_luma = ff_x264_deblock_h_luma_sse2;
                c->h264_v_loop_filter_luma_intra = ff_x264_deblock_v_luma_intra_sse2;
                c->h264_h_loop_filter_luma_intra = ff_x264_deblock_h_luma_intra_sse2;
#endif

                c->h264_idct_add16 = ff_h264_idct_add16_sse2;
                c->h264_idct_add8  = ff_h264_idct_add8_sse2;
                c->h264_idct_add16intra = ff_h264_idct_add16intra_sse2;
            }
            if (mm_flags&AV_CPU_FLAG_SSSE3) {
                c->biweight_h264_pixels_tab[0]= ff_h264_biweight_16x16_ssse3;
                c->biweight_h264_pixels_tab[1]= ff_h264_biweight_16x8_ssse3;
                c->biweight_h264_pixels_tab[2]= ff_h264_biweight_8x16_ssse3;
                c->biweight_h264_pixels_tab[3]= ff_h264_biweight_8x8_ssse3;
                c->biweight_h264_pixels_tab[4]= ff_h264_biweight_8x4_ssse3;
            }
        }
    }
#endif
}