/*
 * Copyright (c) 2004-2005 Michael Niedermayer, Loren Merritt
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include "libavutil/cpu.h"
#include "libavutil/x86_cpu.h"
#include "libavcodec/h264dsp.h"
#include "dsputil_mmx.h"

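/* Byte pattern {3,1,3,1,...}: replaces ff_pb_3 as the mv-difference limit
 * in field mode, where (judging from the paddb/psubusb test below) the
 * threshold for the vertical mv component is halved relative to the
 * horizontal one. */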
DECLARE_ALIGNED(8, static const uint64_t, ff_pb_3_1  ) = 0x0103010301030103ULL;

/***********************************/
/* IDCT */

void ff_h264_idct_add_mmx     (uint8_t *dst, int16_t *block, int stride);
void ff_h264_idct8_add_mmx    (uint8_t *dst, int16_t *block, int stride);
void ff_h264_idct8_add_sse2   (uint8_t *dst, int16_t *block, int stride);
void ff_h264_idct_dc_add_mmx2 (uint8_t *dst, int16_t *block, int stride);
void ff_h264_idct8_dc_add_mmx2(uint8_t *dst, int16_t *block, int stride);

void ff_h264_idct_add16_mmx      (uint8_t *dst, const int *block_offset,
                                  DCTELEM *block, int stride, const uint8_t nnzc[6*8]);
void ff_h264_idct8_add4_mmx      (uint8_t *dst, const int *block_offset,
                                  DCTELEM *block, int stride, const uint8_t nnzc[6*8]);
void ff_h264_idct_add16_mmx2     (uint8_t *dst, const int *block_offset,
                                  DCTELEM *block, int stride, const uint8_t nnzc[6*8]);
void ff_h264_idct_add16intra_mmx (uint8_t *dst, const int *block_offset,
                                  DCTELEM *block, int stride, const uint8_t nnzc[6*8]);
void ff_h264_idct_add16intra_mmx2(uint8_t *dst, const int *block_offset,
                                  DCTELEM *block, int stride, const uint8_t nnzc[6*8]);
void ff_h264_idct8_add4_mmx2     (uint8_t *dst, const int *block_offset,
                                  DCTELEM *block, int stride, const uint8_t nnzc[6*8]);
void ff_h264_idct8_add4_sse2     (uint8_t *dst, const int *block_offset,
                                  DCTELEM *block, int stride, const uint8_t nnzc[6*8]);
void ff_h264_idct_add8_mmx       (uint8_t **dest, const int *block_offset,
                                  DCTELEM *block, int stride, const uint8_t nnzc[6*8]);
void ff_h264_idct_add8_mmx2      (uint8_t **dest, const int *block_offset,
                                  DCTELEM *block, int stride, const uint8_t nnzc[6*8]);

void ff_h264_idct_add16_sse2     (uint8_t *dst, const int *block_offset, DCTELEM *block,
                                  int stride, const uint8_t nnzc[6*8]);
void ff_h264_idct_add16intra_sse2(uint8_t *dst, const int *block_offset, DCTELEM *block,
                                  int stride, const uint8_t nnzc[6*8]);
void ff_h264_idct_add8_sse2      (uint8_t **dest, const int *block_offset, DCTELEM *block,
                                  int stride, const uint8_t nnzc[6*8]);

/***********************************/
/* deblocking */

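/* Compute the boundary strengths bS for all edges in one filtering
 * direction: dir==0 compares each block with its left neighbour
 * (d_idx = -1), dir==1 with the block above (d_idx = -8, one row of the
 * 8-wide scan-order arrays).  b_idx starts at 12, the first block inside
 * the borders of the nnz/ref/mv arrays. */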
static av_always_inline void h264_loop_filter_strength_iteration_mmx2(int16_t bS[2][4][4], uint8_t nnz[40],
                                                                      int8_t ref[2][40],   int16_t mv[2][40][2],
                                                                      int bidir,   int edges, int step,
                                                                      int mask_mv, int dir)
{
        const x86_reg d_idx = dir ? -8 : -1;
        DECLARE_ALIGNED(8, const uint64_t, mask_dir) = dir ? 0 : 0xffffffffffffffffULL;
        int b_idx, edge;
        for( b_idx=12, edge=0; edge<edges; edge+=step, b_idx+=8*step ) {
            __asm__ volatile(
                "pand %0, %%mm0 \n\t"
                ::"m"(mask_dir)
            );
            if(!(mask_mv & edge)) {
                if(bidir) {
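                    /* With two reference lists, either list of the
                     * neighbour may pair with either list of the current
                     * block, so refs and mvs are compared in both orders
                     * (the pshufw $0x4E shuffles swap the two halves) and
                     * pminub keeps the smaller, i.e. more permissive,
                     * result. */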
                    __asm__ volatile(
                        "movd         (%1,%0), %%mm2 \n"
                        "punpckldq  40(%1,%0), %%mm2 \n" // { ref0[bn], ref1[bn] }
                        "pshufw $0x44,   (%1), %%mm0 \n" // { ref0[b], ref0[b] }
                        "pshufw $0x44, 40(%1), %%mm1 \n" // { ref1[b], ref1[b] }
                        "pshufw $0x4E, %%mm2, %%mm3 \n"
                        "psubb         %%mm2, %%mm0 \n" // { ref0[b]!=ref0[bn], ref0[b]!=ref1[bn] }
                        "psubb         %%mm3, %%mm1 \n" // { ref1[b]!=ref1[bn], ref1[b]!=ref0[bn] }

                        "por           %%mm1, %%mm0 \n"
                        "movq      (%2,%0,4), %%mm1 \n"
                        "movq     8(%2,%0,4), %%mm2 \n"
                        "movq          %%mm1, %%mm3 \n"
                        "movq          %%mm2, %%mm4 \n"
                        "psubw          (%2), %%mm1 \n"
                        "psubw         8(%2), %%mm2 \n"
                        "psubw       160(%2), %%mm3 \n"
                        "psubw       168(%2), %%mm4 \n"
                        "packsswb      %%mm2, %%mm1 \n"
                        "packsswb      %%mm4, %%mm3 \n"
                        "paddb         %%mm6, %%mm1 \n"
                        "paddb         %%mm6, %%mm3 \n"
                        "psubusb       %%mm5, %%mm1 \n" // abs(mv[b] - mv[bn]) >= limit
                        "psubusb       %%mm5, %%mm3 \n"
                        "packsswb      %%mm3, %%mm1 \n"

                        "por           %%mm1, %%mm0 \n"
                        "movq   160(%2,%0,4), %%mm1 \n"
                        "movq   168(%2,%0,4), %%mm2 \n"
                        "movq          %%mm1, %%mm3 \n"
                        "movq          %%mm2, %%mm4 \n"
                        "psubw          (%2), %%mm1 \n"
                        "psubw         8(%2), %%mm2 \n"
                        "psubw       160(%2), %%mm3 \n"
                        "psubw       168(%2), %%mm4 \n"
                        "packsswb      %%mm2, %%mm1 \n"
                        "packsswb      %%mm4, %%mm3 \n"
                        "paddb         %%mm6, %%mm1 \n"
                        "paddb         %%mm6, %%mm3 \n"
                        "psubusb       %%mm5, %%mm1 \n" // abs(mv[b] - mv[bn]) >= limit
                        "psubusb       %%mm5, %%mm3 \n"
                        "packsswb      %%mm3, %%mm1 \n"

                        "pshufw $0x4E, %%mm1, %%mm1 \n"
                        "por           %%mm1, %%mm0 \n"
                        "pshufw $0x4E, %%mm0, %%mm1 \n"
                        "pminub        %%mm1, %%mm0 \n"
                        ::"r"(d_idx),
                          "r"(ref[0]+b_idx),
                          "r"(mv[0]+b_idx)
                    );
                } else {
                    __asm__ volatile(
                        "movd        (%1), %%mm0 \n"
                        "psubb    (%1,%0), %%mm0 \n" // ref[b] != ref[bn]
                        "movq        (%2), %%mm1 \n"
                        "movq       8(%2), %%mm2 \n"
                        "psubw  (%2,%0,4), %%mm1 \n"
                        "psubw 8(%2,%0,4), %%mm2 \n"
                        "packsswb   %%mm2, %%mm1 \n"
                        "paddb      %%mm6, %%mm1 \n"
                        "psubusb    %%mm5, %%mm1 \n" // abs(mv[b] - mv[bn]) >= limit
                        "packsswb   %%mm1, %%mm1 \n"
                        "por        %%mm1, %%mm0 \n"
                        ::"r"(d_idx),
                          "r"(ref[0]+b_idx),
                          "r"(mv[0]+b_idx)
                    );
                }
            }
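            /* Combine into the final strength: bS = 2 if either side has
             * nonzero coefficients, else 1 if the ref/mv comparison above
             * flagged a difference, else 0.  mm7 = ff_pb_1 clamps both
             * byte masks to 0/1 before the shift and pmaxub. */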
            __asm__ volatile(
                "movd %0, %%mm1 \n"
                "por  %1, %%mm1 \n" // nnz[b] || nnz[bn]
                ::"m"(nnz[b_idx]),
                  "m"(nnz[b_idx+d_idx])
            );
            __asm__ volatile(
                "pminub    %%mm7, %%mm1 \n"
                "pminub    %%mm7, %%mm0 \n"
                "psllw        $1, %%mm1 \n"
                "pxor      %%mm2, %%mm2 \n"
                "pmaxub    %%mm0, %%mm1 \n"
                "punpcklbw %%mm2, %%mm1 \n"
                "movq      %%mm1, %0    \n"
                :"=m"(*bS[dir][edge])
                ::"memory"
            );
        }
}

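/* Entry point: loads the constants shared by both iterations (mm7 =
 * ff_pb_1 for the nnz clamp, mm6 = ff_pb_3 or ff_pb_3_1 for the mv limit,
 * mm5 = twice mm6), runs one iteration per filtering direction, then
 * transposes bS[0] (the vertical-edge strengths) into the layout the
 * caller expects. */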
static void h264_loop_filter_strength_mmx2( int16_t bS[2][4][4], uint8_t nnz[40], int8_t ref[2][40], int16_t mv[2][40][2],
                                            int bidir, int edges, int step, int mask_mv0, int mask_mv1, int field ) {
    __asm__ volatile(
        "movq %0, %%mm7 \n"
        "movq %1, %%mm6 \n"
        ::"m"(ff_pb_1), "m"(ff_pb_3)
    );
    if(field)
        __asm__ volatile(
            "movq %0, %%mm6 \n"
            ::"m"(ff_pb_3_1)
        );
    __asm__ volatile(
        "movq  %%mm6, %%mm5 \n"
        "paddb %%mm5, %%mm5 \n"
    :);

    // could do a special case for dir==0 && edges==1, but it only reduces the
    // average filter time by 1.2%
    h264_loop_filter_strength_iteration_mmx2(bS, nnz, ref, mv, bidir, edges, step, mask_mv1, 1);
    h264_loop_filter_strength_iteration_mmx2(bS, nnz, ref, mv, bidir,     4,    1, mask_mv0, 0);

    __asm__ volatile(
        "movq   (%0), %%mm0 \n\t"
        "movq  8(%0), %%mm1 \n\t"
        "movq 16(%0), %%mm2 \n\t"
        "movq 24(%0), %%mm3 \n\t"
        TRANSPOSE4(%%mm0, %%mm1, %%mm2, %%mm3, %%mm4)
        "movq %%mm0,   (%0) \n\t"
        "movq %%mm3,  8(%0) \n\t"
        "movq %%mm4, 16(%0) \n\t"
        "movq %%mm2, 24(%0) \n\t"
        ::"r"(bS[0])
        :"memory"
    );
}

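/* Prototypes for the deblocking filters implemented in yasm; the
 * ff_x264_ prefix reflects their origin in x264's assembly. */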
#define LF_FUNC(DIR, TYPE, OPT) \
void ff_x264_deblock_ ## DIR ## _ ## TYPE ## _ ## OPT (uint8_t *pix, int stride, \
                                               int alpha, int beta, int8_t *tc0);
#define LF_IFUNC(DIR, TYPE, OPT) \
void ff_x264_deblock_ ## DIR ## _ ## TYPE ## _ ## OPT (uint8_t *pix, int stride, \
                                               int alpha, int beta);

LF_FUNC (h,  chroma,       mmxext)
LF_IFUNC(h,  chroma_intra, mmxext)
LF_FUNC (v,  chroma,       mmxext)
LF_IFUNC(v,  chroma_intra, mmxext)

LF_FUNC (h,  luma,         mmxext)
LF_IFUNC(h,  luma_intra,   mmxext)
#if HAVE_YASM && ARCH_X86_32
LF_FUNC (v8, luma,         mmxext)
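/* The mmxext vertical luma filter only processes 8 pixels per call, so
 * the 16-pixel edge is filtered as two halves.  A half can be skipped
 * when both of its tc0 entries are negative (meaning no filtering): the
 * sign bit of tc0[i] & tc0[i+1] is set only if both values are
 * negative. */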
static void ff_x264_deblock_v_luma_mmxext(uint8_t *pix, int stride, int alpha, int beta, int8_t *tc0)
{
    if((tc0[0] & tc0[1]) >= 0)
        ff_x264_deblock_v8_luma_mmxext(pix+0, stride, alpha, beta, tc0);
    if((tc0[2] & tc0[3]) >= 0)
        ff_x264_deblock_v8_luma_mmxext(pix+8, stride, alpha, beta, tc0+2);
}
LF_IFUNC(v8, luma_intra,   mmxext)
static void ff_x264_deblock_v_luma_intra_mmxext(uint8_t *pix, int stride, int alpha, int beta)
{
    ff_x264_deblock_v8_luma_intra_mmxext(pix+0, stride, alpha, beta);
    ff_x264_deblock_v8_luma_intra_mmxext(pix+8, stride, alpha, beta);
}
#endif

LF_FUNC (h,  luma,         sse2)
LF_IFUNC(h,  luma_intra,   sse2)
LF_FUNC (v,  luma,         sse2)
LF_IFUNC(v,  luma_intra,   sse2)

/***********************************/
/* weighted prediction */

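/* Prototypes for the yasm weighted-prediction functions, one per block
 * size; as set up in ff_h264dsp_init_x86() below, the
 * [bi]weight_h264_pixels_tab[0..7] entries cover 16x16, 16x8, 8x16, 8x8,
 * 8x4, 4x8, 4x4 and 4x2. */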
#define H264_WEIGHT(W, H, OPT) \
void ff_h264_weight_ ## W ## x ## H ## _ ## OPT(uint8_t *dst, \
    int stride, int log2_denom, int weight, int offset);

#define H264_BIWEIGHT(W, H, OPT) \
void ff_h264_biweight_ ## W ## x ## H ## _ ## OPT(uint8_t *dst, \
    uint8_t *src, int stride, int log2_denom, int weightd, \
    int weights, int offset);

#define H264_BIWEIGHT_MMX(W,H) \
H264_WEIGHT  (W, H, mmx2) \
H264_BIWEIGHT(W, H, mmx2)

#define H264_BIWEIGHT_MMX_SSE(W,H) \
H264_BIWEIGHT_MMX(W, H) \
H264_WEIGHT      (W, H, sse2) \
H264_BIWEIGHT    (W, H, sse2) \
H264_BIWEIGHT    (W, H, ssse3)

H264_BIWEIGHT_MMX_SSE(16, 16)
H264_BIWEIGHT_MMX_SSE(16,  8)
H264_BIWEIGHT_MMX_SSE( 8, 16)
H264_BIWEIGHT_MMX_SSE( 8,  8)
H264_BIWEIGHT_MMX_SSE( 8,  4)
H264_BIWEIGHT_MMX    ( 4,  8)
H264_BIWEIGHT_MMX    ( 4,  4)
H264_BIWEIGHT_MMX    ( 4,  2)

void ff_h264dsp_init_x86(H264DSPContext *c)
{
    int mm_flags = av_get_cpu_flags();

    if (mm_flags & AV_CPU_FLAG_MMX2) {
        c->h264_loop_filter_strength= h264_loop_filter_strength_mmx2;
    }
#if HAVE_YASM
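    /* Everything below this point is implemented in yasm-assembled
     * files, hence the guard. */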
    if (mm_flags & AV_CPU_FLAG_MMX) {
        c->h264_idct_dc_add=
        c->h264_idct_add= ff_h264_idct_add_mmx;
        c->h264_idct8_dc_add=
        c->h264_idct8_add= ff_h264_idct8_add_mmx;

        c->h264_idct_add16     = ff_h264_idct_add16_mmx;
        c->h264_idct8_add4     = ff_h264_idct8_add4_mmx;
        c->h264_idct_add8      = ff_h264_idct_add8_mmx;
        c->h264_idct_add16intra= ff_h264_idct_add16intra_mmx;

        if (mm_flags & AV_CPU_FLAG_MMX2) {
            c->h264_idct_dc_add= ff_h264_idct_dc_add_mmx2;
            c->h264_idct8_dc_add= ff_h264_idct8_dc_add_mmx2;
            c->h264_idct_add16     = ff_h264_idct_add16_mmx2;
            c->h264_idct8_add4     = ff_h264_idct8_add4_mmx2;
            c->h264_idct_add8      = ff_h264_idct_add8_mmx2;
            c->h264_idct_add16intra= ff_h264_idct_add16intra_mmx2;

            c->h264_v_loop_filter_chroma= ff_x264_deblock_v_chroma_mmxext;
            c->h264_h_loop_filter_chroma= ff_x264_deblock_h_chroma_mmxext;
            c->h264_v_loop_filter_chroma_intra= ff_x264_deblock_v_chroma_intra_mmxext;
            c->h264_h_loop_filter_chroma_intra= ff_x264_deblock_h_chroma_intra_mmxext;
#if ARCH_X86_32
            c->h264_v_loop_filter_luma= ff_x264_deblock_v_luma_mmxext;
            c->h264_h_loop_filter_luma= ff_x264_deblock_h_luma_mmxext;
            c->h264_v_loop_filter_luma_intra = ff_x264_deblock_v_luma_intra_mmxext;
            c->h264_h_loop_filter_luma_intra = ff_x264_deblock_h_luma_intra_mmxext;
#endif
            c->weight_h264_pixels_tab[0]= ff_h264_weight_16x16_mmx2;
            c->weight_h264_pixels_tab[1]= ff_h264_weight_16x8_mmx2;
            c->weight_h264_pixels_tab[2]= ff_h264_weight_8x16_mmx2;
            c->weight_h264_pixels_tab[3]= ff_h264_weight_8x8_mmx2;
            c->weight_h264_pixels_tab[4]= ff_h264_weight_8x4_mmx2;
            c->weight_h264_pixels_tab[5]= ff_h264_weight_4x8_mmx2;
            c->weight_h264_pixels_tab[6]= ff_h264_weight_4x4_mmx2;
            c->weight_h264_pixels_tab[7]= ff_h264_weight_4x2_mmx2;

            c->biweight_h264_pixels_tab[0]= ff_h264_biweight_16x16_mmx2;
            c->biweight_h264_pixels_tab[1]= ff_h264_biweight_16x8_mmx2;
            c->biweight_h264_pixels_tab[2]= ff_h264_biweight_8x16_mmx2;
            c->biweight_h264_pixels_tab[3]= ff_h264_biweight_8x8_mmx2;
            c->biweight_h264_pixels_tab[4]= ff_h264_biweight_8x4_mmx2;
            c->biweight_h264_pixels_tab[5]= ff_h264_biweight_4x8_mmx2;
            c->biweight_h264_pixels_tab[6]= ff_h264_biweight_4x4_mmx2;
            c->biweight_h264_pixels_tab[7]= ff_h264_biweight_4x2_mmx2;

            if (mm_flags&AV_CPU_FLAG_SSE2) {
                c->h264_idct8_add = ff_h264_idct8_add_sse2;
                c->h264_idct8_add4= ff_h264_idct8_add4_sse2;

                c->weight_h264_pixels_tab[0]= ff_h264_weight_16x16_sse2;
                c->weight_h264_pixels_tab[1]= ff_h264_weight_16x8_sse2;
                c->weight_h264_pixels_tab[2]= ff_h264_weight_8x16_sse2;
                c->weight_h264_pixels_tab[3]= ff_h264_weight_8x8_sse2;
                c->weight_h264_pixels_tab[4]= ff_h264_weight_8x4_sse2;

                c->biweight_h264_pixels_tab[0]= ff_h264_biweight_16x16_sse2;
                c->biweight_h264_pixels_tab[1]= ff_h264_biweight_16x8_sse2;
                c->biweight_h264_pixels_tab[2]= ff_h264_biweight_8x16_sse2;
                c->biweight_h264_pixels_tab[3]= ff_h264_biweight_8x8_sse2;
                c->biweight_h264_pixels_tab[4]= ff_h264_biweight_8x4_sse2;

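                /* The sse2 luma deblock functions need a 16-byte aligned
                 * stack, so only use them when the build guarantees
                 * one. */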
#if HAVE_ALIGNED_STACK
                c->h264_v_loop_filter_luma = ff_x264_deblock_v_luma_sse2;
                c->h264_h_loop_filter_luma = ff_x264_deblock_h_luma_sse2;
                c->h264_v_loop_filter_luma_intra = ff_x264_deblock_v_luma_intra_sse2;
                c->h264_h_loop_filter_luma_intra = ff_x264_deblock_h_luma_intra_sse2;
#endif

                c->h264_idct_add16 = ff_h264_idct_add16_sse2;
                c->h264_idct_add8  = ff_h264_idct_add8_sse2;
                c->h264_idct_add16intra = ff_h264_idct_add16intra_sse2;
            }
            if (mm_flags&AV_CPU_FLAG_SSSE3) {
                c->biweight_h264_pixels_tab[0]= ff_h264_biweight_16x16_ssse3;
                c->biweight_h264_pixels_tab[1]= ff_h264_biweight_16x8_ssse3;
                c->biweight_h264_pixels_tab[2]= ff_h264_biweight_8x16_ssse3;
                c->biweight_h264_pixels_tab[3]= ff_h264_biweight_8x8_ssse3;
                c->biweight_h264_pixels_tab[4]= ff_h264_biweight_8x4_ssse3;
            }
        }
    }
#endif
}