/*
 * Optimized for ia32 CPUs by Nick Kurshev <nickols_k@mail.ru>
 * h263, mpeg1, mpeg2 dequantizer & draw_edges by Michael Niedermayer <michaelni@gmx.at>
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include "libavutil/attributes.h"
#include "libavutil/cpu.h"
#include "libavutil/x86/asm.h"
#include "libavcodec/avcodec.h"
#include "libavcodec/dsputil.h"
#include "libavcodec/mpegvideo.h"
#include "dsputil_mmx.h"

#if HAVE_INLINE_ASM

static void dct_unquantize_h263_intra_mmx(MpegEncContext *s,
                                  int16_t *block, int n, int qscale)
{
    x86_reg level, qmul, qadd, nCoeffs;

    qmul = qscale << 1;

    av_assert2(s->block_last_index[n]>=0 || s->h263_aic);

    if (!s->h263_aic) {
        if (n < 4)
            level = block[0] * s->y_dc_scale;
        else
            level = block[0] * s->c_dc_scale;
        qadd = (qscale - 1) | 1;
    }else{
        qadd = 0;
        level= block[0];
    }
    if(s->ac_pred)
        nCoeffs=63;
    else
        nCoeffs= s->inter_scantable.raster_end[ s->block_last_index[n] ];

__asm__ volatile(
                "movd %1, %%mm6                 \n\t" //qmul
                "packssdw %%mm6, %%mm6          \n\t"
                "packssdw %%mm6, %%mm6          \n\t"
                "movd %2, %%mm5                 \n\t" //qadd
                "pxor %%mm7, %%mm7              \n\t"
                "packssdw %%mm5, %%mm5          \n\t"
                "packssdw %%mm5, %%mm5          \n\t"
                "psubw %%mm5, %%mm7             \n\t"
                "pxor %%mm4, %%mm4              \n\t"
                ".p2align 4                     \n\t"
                "1:                             \n\t"
                "movq (%0, %3), %%mm0           \n\t"
                "movq 8(%0, %3), %%mm1          \n\t"

                "pmullw %%mm6, %%mm0            \n\t"
                "pmullw %%mm6, %%mm1            \n\t"

                "movq (%0, %3), %%mm2           \n\t"
                "movq 8(%0, %3), %%mm3          \n\t"

                "pcmpgtw %%mm4, %%mm2           \n\t" // block[i] < 0 ? -1 : 0
                "pcmpgtw %%mm4, %%mm3           \n\t" // block[i] < 0 ? -1 : 0

                "pxor %%mm2, %%mm0              \n\t"
                "pxor %%mm3, %%mm1              \n\t"

                "paddw %%mm7, %%mm0             \n\t"
                "paddw %%mm7, %%mm1             \n\t"

                "pxor %%mm0, %%mm2              \n\t"
                "pxor %%mm1, %%mm3              \n\t"

                "pcmpeqw %%mm7, %%mm0           \n\t" // block[i] == 0 ? -1 : 0
                "pcmpeqw %%mm7, %%mm1           \n\t" // block[i] == 0 ? -1 : 0

                "pandn %%mm2, %%mm0             \n\t"
                "pandn %%mm3, %%mm1             \n\t"

                "movq %%mm0, (%0, %3)           \n\t"
                "movq %%mm1, 8(%0, %3)          \n\t"

                "add $16, %3                    \n\t"
                "jng 1b                         \n\t"
                ::"r" (block+nCoeffs), "rm"(qmul), "rm" (qadd), "r" (2*(-nCoeffs))
                : "memory"
        );
        block[0]= level;
}


static void dct_unquantize_h263_inter_mmx(MpegEncContext *s,
                                  int16_t *block, int n, int qscale)
{
    x86_reg qmul, qadd, nCoeffs;

    qmul = qscale << 1;
    qadd = (qscale - 1) | 1;

    av_assert2(s->block_last_index[n]>=0 || s->h263_aic);

    nCoeffs= s->inter_scantable.raster_end[ s->block_last_index[n] ];

__asm__ volatile(
                "movd %1, %%mm6                 \n\t" //qmul
                "packssdw %%mm6, %%mm6          \n\t"
                "packssdw %%mm6, %%mm6          \n\t"
                "movd %2, %%mm5                 \n\t" //qadd
                "pxor %%mm7, %%mm7              \n\t"
                "packssdw %%mm5, %%mm5          \n\t"
                "packssdw %%mm5, %%mm5          \n\t"
                "psubw %%mm5, %%mm7             \n\t"
                "pxor %%mm4, %%mm4              \n\t"
                ".p2align 4                     \n\t"
                "1:                             \n\t"
                "movq (%0, %3), %%mm0           \n\t"
                "movq 8(%0, %3), %%mm1          \n\t"

                "pmullw %%mm6, %%mm0            \n\t"
                "pmullw %%mm6, %%mm1            \n\t"

                "movq (%0, %3), %%mm2           \n\t"
                "movq 8(%0, %3), %%mm3          \n\t"

                "pcmpgtw %%mm4, %%mm2           \n\t" // block[i] < 0 ? -1 : 0
                "pcmpgtw %%mm4, %%mm3           \n\t" // block[i] < 0 ? -1 : 0

                "pxor %%mm2, %%mm0              \n\t"
                "pxor %%mm3, %%mm1              \n\t"

                "paddw %%mm7, %%mm0             \n\t"
                "paddw %%mm7, %%mm1             \n\t"

                "pxor %%mm0, %%mm2              \n\t"
                "pxor %%mm1, %%mm3              \n\t"

                "pcmpeqw %%mm7, %%mm0           \n\t" // block[i] == 0 ? -1 : 0
                "pcmpeqw %%mm7, %%mm1           \n\t" // block[i] == 0 ? -1 : 0

                "pandn %%mm2, %%mm0             \n\t"
                "pandn %%mm3, %%mm1             \n\t"

                "movq %%mm0, (%0, %3)           \n\t"
                "movq %%mm1, 8(%0, %3)          \n\t"

                "add $16, %3                    \n\t"
                "jng 1b                         \n\t"
                ::"r" (block+nCoeffs), "rm"(qmul), "rm" (qadd), "r" (2*(-nCoeffs))
                : "memory"
        );
}
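
/* For reference, a plain-C sketch of what the two MMX loops above compute
 * (it roughly mirrors the scalar fallbacks dct_unquantize_h263_intra_c /
 * _inter_c; illustration only, not built or called from here). The asm
 * handles eight coefficients per iteration and uses the pxor/paddw/pxor
 * sequence to add qadd to positive values and subtract it from negative
 * ones without branching, while pcmpeqw/pandn keeps zero coefficients zero:
 *
 *     for (i = 0; i <= nCoeffs; i++) {
 *         int level = block[i];
 *         if (level < 0)
 *             level = level * qmul - qadd;
 *         else if (level > 0)
 *             level = level * qmul + qadd;
 *         block[i] = level;
 *     }
 *
 * In the intra version, block[0] is afterwards overwritten with the DC
 * value computed before the loop.
 */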


/*
  We can assume that the result of the two multiplications fits in 16 bits
  (i.e. cannot be greater than 0xFFFF), so only the PMULLW instruction is
  needed and a full-width multiplication can be avoided.
=====================================================
 Full formula for the multiplication of two integers
 which are represented as high:low word pairs:
 input:  value1 = high1:low1
         value2 = high2:low2
 output: value3 = value1*value2
 value3 = high3:low3 (on overflow: modulo 2^32 wrap-around)
 This means that for 0x123456 * 0x123456 the correct result is 0x14b66cb0ce4,
 but this algorithm will compute only 0x66cb0ce4;
 this is limited by the 16-bit size of the operands.
 ---------------------------------
 tlow1 = high1*low2
 tlow2 = high2*low1
 tlow1 = tlow1 + tlow2
 high3:low3 = low1*low2
 high3 += tlow1
*/
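
/* An illustrative C sketch of the high:low decomposition above (names are
 * purely for exposition; nothing here is built or used by this file):
 *
 *     static uint32_t mul32_from_16bit_halves(uint32_t a, uint32_t b)
 *     {
 *         uint16_t low1 = a, high1 = a >> 16;
 *         uint16_t low2 = b, high2 = b >> 16;
 *         uint32_t tlow  = (uint32_t)high1 * low2 + (uint32_t)high2 * low1; // cross terms
 *         uint32_t value = (uint32_t)low1 * low2;                           // high3:low3 = low1*low2
 *         value += tlow << 16;                                              // high3 += tlow1
 *         return value;                                                     // == a*b modulo 2^32
 *     }
 *
 * Because the dequantizers below rely on both operands and the product
 * fitting in 16 bits (as noted above), the high halves contribute nothing
 * and a single PMULLW per multiply suffices.
 */
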
static void dct_unquantize_mpeg1_intra_mmx(MpegEncContext *s,
                                     int16_t *block, int n, int qscale)
{
    x86_reg nCoeffs;
    const uint16_t *quant_matrix;
    int block0;

    av_assert2(s->block_last_index[n]>=0);

    nCoeffs= s->intra_scantable.raster_end[ s->block_last_index[n] ]+1;

    if (n < 4)
        block0 = block[0] * s->y_dc_scale;
    else
        block0 = block[0] * s->c_dc_scale;
    /* XXX: only mpeg1 */
    quant_matrix = s->intra_matrix;
__asm__ volatile(
                "pcmpeqw %%mm7, %%mm7           \n\t"
                "psrlw $15, %%mm7               \n\t"
                "movd %2, %%mm6                 \n\t"
                "packssdw %%mm6, %%mm6          \n\t"
                "packssdw %%mm6, %%mm6          \n\t"
                "mov %3, %%"REG_a"              \n\t"
                ".p2align 4                     \n\t"
                "1:                             \n\t"
                "movq (%0, %%"REG_a"), %%mm0    \n\t"
                "movq 8(%0, %%"REG_a"), %%mm1   \n\t"
                "movq (%1, %%"REG_a"), %%mm4    \n\t"
                "movq 8(%1, %%"REG_a"), %%mm5   \n\t"
                "pmullw %%mm6, %%mm4            \n\t" // q=qscale*quant_matrix[i]
                "pmullw %%mm6, %%mm5            \n\t" // q=qscale*quant_matrix[i]
                "pxor %%mm2, %%mm2              \n\t"
                "pxor %%mm3, %%mm3              \n\t"
                "pcmpgtw %%mm0, %%mm2           \n\t" // block[i] < 0 ? -1 : 0
                "pcmpgtw %%mm1, %%mm3           \n\t" // block[i] < 0 ? -1 : 0
                "pxor %%mm2, %%mm0              \n\t"
                "pxor %%mm3, %%mm1              \n\t"
                "psubw %%mm2, %%mm0             \n\t" // abs(block[i])
                "psubw %%mm3, %%mm1             \n\t" // abs(block[i])
                "pmullw %%mm4, %%mm0            \n\t" // abs(block[i])*q
                "pmullw %%mm5, %%mm1            \n\t" // abs(block[i])*q
                "pxor %%mm4, %%mm4              \n\t"
                "pxor %%mm5, %%mm5              \n\t" // FIXME slow
                "pcmpeqw (%0, %%"REG_a"), %%mm4 \n\t" // block[i] == 0 ? -1 : 0
                "pcmpeqw 8(%0, %%"REG_a"), %%mm5\n\t" // block[i] == 0 ? -1 : 0
                "psraw $3, %%mm0                \n\t"
                "psraw $3, %%mm1                \n\t"
                "psubw %%mm7, %%mm0             \n\t"
                "psubw %%mm7, %%mm1             \n\t"
                "por %%mm7, %%mm0               \n\t"
                "por %%mm7, %%mm1               \n\t"
                "pxor %%mm2, %%mm0              \n\t"
                "pxor %%mm3, %%mm1              \n\t"
                "psubw %%mm2, %%mm0             \n\t"
                "psubw %%mm3, %%mm1             \n\t"
                "pandn %%mm0, %%mm4             \n\t"
                "pandn %%mm1, %%mm5             \n\t"
                "movq %%mm4, (%0, %%"REG_a")    \n\t"
                "movq %%mm5, 8(%0, %%"REG_a")   \n\t"

                "add $16, %%"REG_a"             \n\t"
                "js 1b                          \n\t"
                ::"r" (block+nCoeffs), "r"(quant_matrix+nCoeffs), "rm" (qscale), "g" (-2*nCoeffs)
                : "%"REG_a, "memory"
        );
    block[0]= block0;
}

static void dct_unquantize_mpeg1_inter_mmx(MpegEncContext *s,
                                     int16_t *block, int n, int qscale)
{
    x86_reg nCoeffs;
    const uint16_t *quant_matrix;

    av_assert2(s->block_last_index[n]>=0);

    nCoeffs= s->intra_scantable.raster_end[ s->block_last_index[n] ]+1;

        quant_matrix = s->inter_matrix;
__asm__ volatile(
                "pcmpeqw %%mm7, %%mm7           \n\t"
                "psrlw $15, %%mm7               \n\t"
                "movd %2, %%mm6                 \n\t"
                "packssdw %%mm6, %%mm6          \n\t"
                "packssdw %%mm6, %%mm6          \n\t"
                "mov %3, %%"REG_a"              \n\t"
                ".p2align 4                     \n\t"
                "1:                             \n\t"
                "movq (%0, %%"REG_a"), %%mm0    \n\t"
                "movq 8(%0, %%"REG_a"), %%mm1   \n\t"
                "movq (%1, %%"REG_a"), %%mm4    \n\t"
                "movq 8(%1, %%"REG_a"), %%mm5   \n\t"
                "pmullw %%mm6, %%mm4            \n\t" // q=qscale*quant_matrix[i]
                "pmullw %%mm6, %%mm5            \n\t" // q=qscale*quant_matrix[i]
                "pxor %%mm2, %%mm2              \n\t"
                "pxor %%mm3, %%mm3              \n\t"
                "pcmpgtw %%mm0, %%mm2           \n\t" // block[i] < 0 ? -1 : 0
                "pcmpgtw %%mm1, %%mm3           \n\t" // block[i] < 0 ? -1 : 0
                "pxor %%mm2, %%mm0              \n\t"
                "pxor %%mm3, %%mm1              \n\t"
                "psubw %%mm2, %%mm0             \n\t" // abs(block[i])
                "psubw %%mm3, %%mm1             \n\t" // abs(block[i])
                "paddw %%mm0, %%mm0             \n\t" // abs(block[i])*2
                "paddw %%mm1, %%mm1             \n\t" // abs(block[i])*2
                "paddw %%mm7, %%mm0             \n\t" // abs(block[i])*2 + 1
                "paddw %%mm7, %%mm1             \n\t" // abs(block[i])*2 + 1
                "pmullw %%mm4, %%mm0            \n\t" // (abs(block[i])*2 + 1)*q
                "pmullw %%mm5, %%mm1            \n\t" // (abs(block[i])*2 + 1)*q
                "pxor %%mm4, %%mm4              \n\t"
                "pxor %%mm5, %%mm5              \n\t" // FIXME slow
                "pcmpeqw (%0, %%"REG_a"), %%mm4 \n\t" // block[i] == 0 ? -1 : 0
                "pcmpeqw 8(%0, %%"REG_a"), %%mm5\n\t" // block[i] == 0 ? -1 : 0
                "psraw $4, %%mm0                \n\t"
                "psraw $4, %%mm1                \n\t"
                "psubw %%mm7, %%mm0             \n\t"
                "psubw %%mm7, %%mm1             \n\t"
                "por %%mm7, %%mm0               \n\t"
                "por %%mm7, %%mm1               \n\t"
                "pxor %%mm2, %%mm0              \n\t"
                "pxor %%mm3, %%mm1              \n\t"
                "psubw %%mm2, %%mm0             \n\t"
                "psubw %%mm3, %%mm1             \n\t"
                "pandn %%mm0, %%mm4             \n\t"
                "pandn %%mm1, %%mm5             \n\t"
                "movq %%mm4, (%0, %%"REG_a")    \n\t"
                "movq %%mm5, 8(%0, %%"REG_a")   \n\t"

                "add $16, %%"REG_a"             \n\t"
                "js 1b                          \n\t"
                ::"r" (block+nCoeffs), "r"(quant_matrix+nCoeffs), "rm" (qscale), "g" (-2*nCoeffs)
                : "%"REG_a, "memory"
        );
}
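
/* A rough scalar sketch of the two MPEG-1 dequantizers above (illustration
 * only; it follows the C fallbacks dct_unquantize_mpeg1_intra_c/_inter_c and
 * omits the eight-coefficients-per-iteration layout and the zero-skipping
 * masks of the asm):
 *
 *     // intra (DC is handled separately via block0):
 *     level = (abs(block[i]) * qscale * quant_matrix[i]) >> 3;
 *
 *     // inter:
 *     level = ((abs(block[i]) * 2 + 1) * qscale * quant_matrix[i]) >> 4;
 *
 *     // both: MPEG-1 mismatch control forces the magnitude odd,
 *     // then the original sign is restored and zeros stay zero
 *     level    = (level - 1) | 1;
 *     block[i] = block[i] < 0 ? -level : level;
 */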

static void dct_unquantize_mpeg2_intra_mmx(MpegEncContext *s,
                                     int16_t *block, int n, int qscale)
{
    x86_reg nCoeffs;
    const uint16_t *quant_matrix;
    int block0;

    av_assert2(s->block_last_index[n]>=0);

    if(s->alternate_scan) nCoeffs= 63; //FIXME
    else nCoeffs= s->intra_scantable.raster_end[ s->block_last_index[n] ];

    if (n < 4)
        block0 = block[0] * s->y_dc_scale;
    else
        block0 = block[0] * s->c_dc_scale;
    quant_matrix = s->intra_matrix;
__asm__ volatile(
                "pcmpeqw %%mm7, %%mm7           \n\t"
                "psrlw $15, %%mm7               \n\t"
                "movd %2, %%mm6                 \n\t"
                "packssdw %%mm6, %%mm6          \n\t"
                "packssdw %%mm6, %%mm6          \n\t"
                "mov %3, %%"REG_a"              \n\t"
                ".p2align 4                     \n\t"
                "1:                             \n\t"
                "movq (%0, %%"REG_a"), %%mm0    \n\t"
                "movq 8(%0, %%"REG_a"), %%mm1   \n\t"
                "movq (%1, %%"REG_a"), %%mm4    \n\t"
                "movq 8(%1, %%"REG_a"), %%mm5   \n\t"
                "pmullw %%mm6, %%mm4            \n\t" // q=qscale*quant_matrix[i]
                "pmullw %%mm6, %%mm5            \n\t" // q=qscale*quant_matrix[i]
                "pxor %%mm2, %%mm2              \n\t"
                "pxor %%mm3, %%mm3              \n\t"
                "pcmpgtw %%mm0, %%mm2           \n\t" // block[i] < 0 ? -1 : 0
                "pcmpgtw %%mm1, %%mm3           \n\t" // block[i] < 0 ? -1 : 0
                "pxor %%mm2, %%mm0              \n\t"
                "pxor %%mm3, %%mm1              \n\t"
                "psubw %%mm2, %%mm0             \n\t" // abs(block[i])
                "psubw %%mm3, %%mm1             \n\t" // abs(block[i])
                "pmullw %%mm4, %%mm0            \n\t" // abs(block[i])*q
                "pmullw %%mm5, %%mm1            \n\t" // abs(block[i])*q
                "pxor %%mm4, %%mm4              \n\t"
                "pxor %%mm5, %%mm5              \n\t" // FIXME slow
                "pcmpeqw (%0, %%"REG_a"), %%mm4 \n\t" // block[i] == 0 ? -1 : 0
                "pcmpeqw 8(%0, %%"REG_a"), %%mm5\n\t" // block[i] == 0 ? -1 : 0
                "psraw $3, %%mm0                \n\t"
                "psraw $3, %%mm1                \n\t"
                "pxor %%mm2, %%mm0              \n\t"
                "pxor %%mm3, %%mm1              \n\t"
                "psubw %%mm2, %%mm0             \n\t"
                "psubw %%mm3, %%mm1             \n\t"
                "pandn %%mm0, %%mm4             \n\t"
                "pandn %%mm1, %%mm5             \n\t"
                "movq %%mm4, (%0, %%"REG_a")    \n\t"
                "movq %%mm5, 8(%0, %%"REG_a")   \n\t"

                "add $16, %%"REG_a"             \n\t"
                "jng 1b                         \n\t"
                ::"r" (block+nCoeffs), "r"(quant_matrix+nCoeffs), "rm" (qscale), "g" (-2*nCoeffs)
                : "%"REG_a, "memory"
        );
    block[0]= block0;
        //Note, we do not do mismatch control for intra as errors cannot accumulate
}

static void dct_unquantize_mpeg2_inter_mmx(MpegEncContext *s,
                                     int16_t *block, int n, int qscale)
{
    x86_reg nCoeffs;
    const uint16_t *quant_matrix;

    av_assert2(s->block_last_index[n]>=0);

    if(s->alternate_scan) nCoeffs= 63; //FIXME
    else nCoeffs= s->intra_scantable.raster_end[ s->block_last_index[n] ];

        quant_matrix = s->inter_matrix;
__asm__ volatile(
                "pcmpeqw %%mm7, %%mm7           \n\t"
                "psrlq $48, %%mm7               \n\t"
                "movd %2, %%mm6                 \n\t"
                "packssdw %%mm6, %%mm6          \n\t"
                "packssdw %%mm6, %%mm6          \n\t"
                "mov %3, %%"REG_a"              \n\t"
                ".p2align 4                     \n\t"
                "1:                             \n\t"
                "movq (%0, %%"REG_a"), %%mm0    \n\t"
                "movq 8(%0, %%"REG_a"), %%mm1   \n\t"
                "movq (%1, %%"REG_a"), %%mm4    \n\t"
                "movq 8(%1, %%"REG_a"), %%mm5   \n\t"
                "pmullw %%mm6, %%mm4            \n\t" // q=qscale*quant_matrix[i]
                "pmullw %%mm6, %%mm5            \n\t" // q=qscale*quant_matrix[i]
                "pxor %%mm2, %%mm2              \n\t"
                "pxor %%mm3, %%mm3              \n\t"
                "pcmpgtw %%mm0, %%mm2           \n\t" // block[i] < 0 ? -1 : 0
                "pcmpgtw %%mm1, %%mm3           \n\t" // block[i] < 0 ? -1 : 0
                "pxor %%mm2, %%mm0              \n\t"
                "pxor %%mm3, %%mm1              \n\t"
                "psubw %%mm2, %%mm0             \n\t" // abs(block[i])
                "psubw %%mm3, %%mm1             \n\t" // abs(block[i])
                "paddw %%mm0, %%mm0             \n\t" // abs(block[i])*2
                "paddw %%mm1, %%mm1             \n\t" // abs(block[i])*2
                "pmullw %%mm4, %%mm0            \n\t" // abs(block[i])*2*q
                "pmullw %%mm5, %%mm1            \n\t" // abs(block[i])*2*q
                "paddw %%mm4, %%mm0             \n\t" // (abs(block[i])*2 + 1)*q
                "paddw %%mm5, %%mm1             \n\t" // (abs(block[i])*2 + 1)*q
                "pxor %%mm4, %%mm4              \n\t"
                "pxor %%mm5, %%mm5              \n\t" // FIXME slow
                "pcmpeqw (%0, %%"REG_a"), %%mm4 \n\t" // block[i] == 0 ? -1 : 0
                "pcmpeqw 8(%0, %%"REG_a"), %%mm5\n\t" // block[i] == 0 ? -1 : 0
                "psrlw $4, %%mm0                \n\t"
                "psrlw $4, %%mm1                \n\t"
                "pxor %%mm2, %%mm0              \n\t"
                "pxor %%mm3, %%mm1              \n\t"
                "psubw %%mm2, %%mm0             \n\t"
                "psubw %%mm3, %%mm1             \n\t"
                "pandn %%mm0, %%mm4             \n\t"
                "pandn %%mm1, %%mm5             \n\t"
                "pxor %%mm4, %%mm7              \n\t"
                "pxor %%mm5, %%mm7              \n\t"
                "movq %%mm4, (%0, %%"REG_a")    \n\t"
                "movq %%mm5, 8(%0, %%"REG_a")   \n\t"

                "add $16, %%"REG_a"             \n\t"
                "jng 1b                         \n\t"
                "movd 124(%0, %3), %%mm0        \n\t"
                "movq %%mm7, %%mm6              \n\t"
                "psrlq $32, %%mm7               \n\t"
                "pxor %%mm6, %%mm7              \n\t"
                "movq %%mm7, %%mm6              \n\t"
                "psrlq $16, %%mm7               \n\t"
                "pxor %%mm6, %%mm7              \n\t"
                "pslld $31, %%mm7               \n\t"
                "psrlq $15, %%mm7               \n\t"
                "pxor %%mm7, %%mm0              \n\t"
                "movd %%mm0, 124(%0, %3)        \n\t"

                ::"r" (block+nCoeffs), "r"(quant_matrix+nCoeffs), "rm" (qscale), "r" (-2*nCoeffs)
                : "%"REG_a, "memory"
        );
}
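
/* A rough scalar sketch of the two MPEG-2 dequantizers above (illustration
 * only; variable names are for exposition). Compared with MPEG-1 there is no
 * oddification; instead the inter path applies MPEG-2 mismatch control by
 * toggling the LSB of block[63] when the sum of all reconstructed
 * coefficients is even, which the asm tracks as a running XOR of the output
 * words in %mm7:
 *
 *     // intra (DC handled separately via block0):
 *     level = (abs(block[i]) * qscale * quant_matrix[i]) >> 3;
 *
 *     // inter:
 *     level = ((abs(block[i]) * 2 + 1) * qscale * quant_matrix[i]) >> 4;
 *
 *     block[i] = block[i] < 0 ? -level : level;   // zeros stay zero
 *     parity  ^= block[i];                        // LSB tracks the sum's parity
 *
 *     // after the loop (inter only):
 *     if (!(parity & 1))
 *         block[63] ^= 1;
 */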

static void  denoise_dct_mmx(MpegEncContext *s, int16_t *block){
    const int intra= s->mb_intra;
    int *sum= s->dct_error_sum[intra];
    uint16_t *offset= s->dct_offset[intra];

    s->dct_count[intra]++;

    __asm__ volatile(
        "pxor %%mm7, %%mm7                      \n\t"
        "1:                                     \n\t"
        "pxor %%mm0, %%mm0                      \n\t"
        "pxor %%mm1, %%mm1                      \n\t"
        "movq (%0), %%mm2                       \n\t"
        "movq 8(%0), %%mm3                      \n\t"
        "pcmpgtw %%mm2, %%mm0                   \n\t"
        "pcmpgtw %%mm3, %%mm1                   \n\t"
        "pxor %%mm0, %%mm2                      \n\t"
        "pxor %%mm1, %%mm3                      \n\t"
        "psubw %%mm0, %%mm2                     \n\t"
        "psubw %%mm1, %%mm3                     \n\t"
        "movq %%mm2, %%mm4                      \n\t"
        "movq %%mm3, %%mm5                      \n\t"
        "psubusw (%2), %%mm2                    \n\t"
        "psubusw 8(%2), %%mm3                   \n\t"
        "pxor %%mm0, %%mm2                      \n\t"
        "pxor %%mm1, %%mm3                      \n\t"
        "psubw %%mm0, %%mm2                     \n\t"
        "psubw %%mm1, %%mm3                     \n\t"
        "movq %%mm2, (%0)                       \n\t"
        "movq %%mm3, 8(%0)                      \n\t"
        "movq %%mm4, %%mm2                      \n\t"
        "movq %%mm5, %%mm3                      \n\t"
        "punpcklwd %%mm7, %%mm4                 \n\t"
        "punpckhwd %%mm7, %%mm2                 \n\t"
        "punpcklwd %%mm7, %%mm5                 \n\t"
        "punpckhwd %%mm7, %%mm3                 \n\t"
        "paddd (%1), %%mm4                      \n\t"
        "paddd 8(%1), %%mm2                     \n\t"
        "paddd 16(%1), %%mm5                    \n\t"
        "paddd 24(%1), %%mm3                    \n\t"
        "movq %%mm4, (%1)                       \n\t"
        "movq %%mm2, 8(%1)                      \n\t"
        "movq %%mm5, 16(%1)                     \n\t"
        "movq %%mm3, 24(%1)                     \n\t"
        "add $16, %0                            \n\t"
        "add $32, %1                            \n\t"
        "add $16, %2                            \n\t"
        "cmp %3, %0                             \n\t"
            " jb 1b                             \n\t"
        : "+r" (block), "+r" (sum), "+r" (offset)
        : "r"(block+64)
    );
}

static void  denoise_dct_sse2(MpegEncContext *s, int16_t *block){
    const int intra= s->mb_intra;
    int *sum= s->dct_error_sum[intra];
    uint16_t *offset= s->dct_offset[intra];

    s->dct_count[intra]++;

    __asm__ volatile(
        "pxor %%xmm7, %%xmm7                    \n\t"
        "1:                                     \n\t"
        "pxor %%xmm0, %%xmm0                    \n\t"
        "pxor %%xmm1, %%xmm1                    \n\t"
        "movdqa (%0), %%xmm2                    \n\t"
        "movdqa 16(%0), %%xmm3                  \n\t"
        "pcmpgtw %%xmm2, %%xmm0                 \n\t"
        "pcmpgtw %%xmm3, %%xmm1                 \n\t"
        "pxor %%xmm0, %%xmm2                    \n\t"
        "pxor %%xmm1, %%xmm3                    \n\t"
        "psubw %%xmm0, %%xmm2                   \n\t"
        "psubw %%xmm1, %%xmm3                   \n\t"
        "movdqa %%xmm2, %%xmm4                  \n\t"
        "movdqa %%xmm3, %%xmm5                  \n\t"
        "psubusw (%2), %%xmm2                   \n\t"
        "psubusw 16(%2), %%xmm3                 \n\t"
        "pxor %%xmm0, %%xmm2                    \n\t"
        "pxor %%xmm1, %%xmm3                    \n\t"
        "psubw %%xmm0, %%xmm2                   \n\t"
        "psubw %%xmm1, %%xmm3                   \n\t"
        "movdqa %%xmm2, (%0)                    \n\t"
        "movdqa %%xmm3, 16(%0)                  \n\t"
        "movdqa %%xmm4, %%xmm6                  \n\t"
        "movdqa %%xmm5, %%xmm0                  \n\t"
        "punpcklwd %%xmm7, %%xmm4               \n\t"
        "punpckhwd %%xmm7, %%xmm6               \n\t"
        "punpcklwd %%xmm7, %%xmm5               \n\t"
        "punpckhwd %%xmm7, %%xmm0               \n\t"
        "paddd (%1), %%xmm4                     \n\t"
        "paddd 16(%1), %%xmm6                   \n\t"
        "paddd 32(%1), %%xmm5                   \n\t"
        "paddd 48(%1), %%xmm0                   \n\t"
        "movdqa %%xmm4, (%1)                    \n\t"
        "movdqa %%xmm6, 16(%1)                  \n\t"
        "movdqa %%xmm5, 32(%1)                  \n\t"
        "movdqa %%xmm0, 48(%1)                  \n\t"
        "add $32, %0                            \n\t"
        "add $64, %1                            \n\t"
        "add $32, %2                            \n\t"
        "cmp %3, %0                             \n\t"
            " jb 1b                             \n\t"
        : "+r" (block), "+r" (sum), "+r" (offset)
        : "r"(block+64)
          XMM_CLOBBERS_ONLY("%xmm0", "%xmm1", "%xmm2", "%xmm3",
                            "%xmm4", "%xmm5", "%xmm6", "%xmm7")
    );
}
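
/* A plain-C sketch of what both denoise_dct versions above compute per
 * coefficient (it roughly mirrors the scalar C fallback, denoise_dct_c;
 * illustration only). The magnitude of each coefficient is accumulated into
 * dct_error_sum, then the per-coefficient offset is subtracted from the
 * magnitude, saturating at zero, before the sign is restored:
 *
 *     for (i = 0; i < 64; i++) {
 *         int level = block[i];
 *         if (level > 0) {
 *             sum[i]  += level;
 *             block[i] = FFMAX(level - offset[i], 0);
 *         } else if (level < 0) {
 *             sum[i]  -= level;
 *             block[i] = FFMIN(level + offset[i], 0);
 *         }
 *     }
 */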

#endif /* HAVE_INLINE_ASM */

av_cold void ff_MPV_common_init_x86(MpegEncContext *s)
{
#if HAVE_INLINE_ASM
    int mm_flags = av_get_cpu_flags();

    if (mm_flags & AV_CPU_FLAG_MMX) {
        s->dct_unquantize_h263_intra = dct_unquantize_h263_intra_mmx;
        s->dct_unquantize_h263_inter = dct_unquantize_h263_inter_mmx;
        s->dct_unquantize_mpeg1_intra = dct_unquantize_mpeg1_intra_mmx;
        s->dct_unquantize_mpeg1_inter = dct_unquantize_mpeg1_inter_mmx;
        if(!(s->flags & CODEC_FLAG_BITEXACT))
            s->dct_unquantize_mpeg2_intra = dct_unquantize_mpeg2_intra_mmx;
        s->dct_unquantize_mpeg2_inter = dct_unquantize_mpeg2_inter_mmx;

        if (mm_flags & AV_CPU_FLAG_SSE2) {
            s->denoise_dct= denoise_dct_sse2;
        } else {
            s->denoise_dct= denoise_dct_mmx;
        }
    }
#endif /* HAVE_INLINE_ASM */
}