/*
 * Optimized for ia32 CPUs by Nick Kurshev <nickols_k@mail.ru>
 * h263, mpeg1, mpeg2 dequantizer & draw_edges by Michael Niedermayer <michaelni@gmx.at>
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include "libavutil/cpu.h"
#include "libavutil/x86/asm.h"
#include "libavcodec/avcodec.h"
#include "libavcodec/dsputil.h"
#include "libavcodec/mpegvideo.h"
#include "dsputil_mmx.h"

#if HAVE_INLINE_ASM

static void dct_unquantize_h263_intra_mmx(MpegEncContext *s,
                                  int16_t *block, int n, int qscale)
{
    x86_reg level, qmul, qadd, nCoeffs;

    qmul = qscale << 1;

    av_assert2(s->block_last_index[n]>=0 || s->h263_aic);

    if (!s->h263_aic) {
        if (n < 4)
            level = block[0] * s->y_dc_scale;
        else
            level = block[0] * s->c_dc_scale;
        qadd = (qscale - 1) | 1;
    }else{
        qadd = 0;
        level= block[0];
    }
    if(s->ac_pred)
        nCoeffs=63;
    else
        nCoeffs= s->inter_scantable.raster_end[ s->block_last_index[n] ];

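    /* Illustrative scalar sketch of what the MMX loop below computes per
     * coefficient (not part of the build):
     *
     *     for (i = 0; i <= nCoeffs; i++) {
     *         int v = block[i];
     *         if (v < 0)      block[i] = v * qmul - qadd;
     *         else if (v > 0) block[i] = v * qmul + qadd;
     *         // zero coefficients stay zero
     *     }
     *
     * The DC coefficient is recomputed separately and written back as
     * 'level' after the asm block.
     */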
__asm__ volatile(
                "movd %1, %%mm6                 \n\t" //qmul
                "packssdw %%mm6, %%mm6          \n\t"
                "packssdw %%mm6, %%mm6          \n\t"
                "movd %2, %%mm5                 \n\t" //qadd
                "pxor %%mm7, %%mm7              \n\t"
                "packssdw %%mm5, %%mm5          \n\t"
                "packssdw %%mm5, %%mm5          \n\t"
                "psubw %%mm5, %%mm7             \n\t"
                "pxor %%mm4, %%mm4              \n\t"
                ".p2align 4                     \n\t"
                "1:                             \n\t"
                "movq (%0, %3), %%mm0           \n\t"
                "movq 8(%0, %3), %%mm1          \n\t"

                "pmullw %%mm6, %%mm0            \n\t"
                "pmullw %%mm6, %%mm1            \n\t"

                "movq (%0, %3), %%mm2           \n\t"
                "movq 8(%0, %3), %%mm3          \n\t"

                "pcmpgtw %%mm4, %%mm2           \n\t" // block[i] < 0 ? -1 : 0
                "pcmpgtw %%mm4, %%mm3           \n\t" // block[i] < 0 ? -1 : 0

                "pxor %%mm2, %%mm0              \n\t"
                "pxor %%mm3, %%mm1              \n\t"

                "paddw %%mm7, %%mm0             \n\t"
                "paddw %%mm7, %%mm1             \n\t"

                "pxor %%mm0, %%mm2              \n\t"
                "pxor %%mm1, %%mm3              \n\t"

                "pcmpeqw %%mm7, %%mm0           \n\t" // block[i] == 0 ? -1 : 0
                "pcmpeqw %%mm7, %%mm1           \n\t" // block[i] == 0 ? -1 : 0

                "pandn %%mm2, %%mm0             \n\t"
                "pandn %%mm3, %%mm1             \n\t"

                "movq %%mm0, (%0, %3)           \n\t"
                "movq %%mm1, 8(%0, %3)          \n\t"

                "add $16, %3                    \n\t"
                "jng 1b                         \n\t"
                ::"r" (block+nCoeffs), "rm"(qmul), "rm" (qadd), "r" (2*(-nCoeffs))
                : "memory"
        );
    block[0]= level;
}


static void dct_unquantize_h263_inter_mmx(MpegEncContext *s,
                                  int16_t *block, int n, int qscale)
{
    x86_reg qmul, qadd, nCoeffs;

    qmul = qscale << 1;
    qadd = (qscale - 1) | 1;

    av_assert2(s->block_last_index[n]>=0 || s->h263_aic);

    nCoeffs= s->inter_scantable.raster_end[ s->block_last_index[n] ];

__asm__ volatile(
                "movd %1, %%mm6                 \n\t" //qmul
                "packssdw %%mm6, %%mm6          \n\t"
                "packssdw %%mm6, %%mm6          \n\t"
                "movd %2, %%mm5                 \n\t" //qadd
                "pxor %%mm7, %%mm7              \n\t"
                "packssdw %%mm5, %%mm5          \n\t"
                "packssdw %%mm5, %%mm5          \n\t"
                "psubw %%mm5, %%mm7             \n\t"
                "pxor %%mm4, %%mm4              \n\t"
                ".p2align 4                     \n\t"
                "1:                             \n\t"
                "movq (%0, %3), %%mm0           \n\t"
                "movq 8(%0, %3), %%mm1          \n\t"

                "pmullw %%mm6, %%mm0            \n\t"
                "pmullw %%mm6, %%mm1            \n\t"

                "movq (%0, %3), %%mm2           \n\t"
                "movq 8(%0, %3), %%mm3          \n\t"

                "pcmpgtw %%mm4, %%mm2           \n\t" // block[i] < 0 ? -1 : 0
                "pcmpgtw %%mm4, %%mm3           \n\t" // block[i] < 0 ? -1 : 0

                "pxor %%mm2, %%mm0              \n\t"
                "pxor %%mm3, %%mm1              \n\t"

                "paddw %%mm7, %%mm0             \n\t"
                "paddw %%mm7, %%mm1             \n\t"

                "pxor %%mm0, %%mm2              \n\t"
                "pxor %%mm1, %%mm3              \n\t"

                "pcmpeqw %%mm7, %%mm0           \n\t" // block[i] == 0 ? -1 : 0
                "pcmpeqw %%mm7, %%mm1           \n\t" // block[i] == 0 ? -1 : 0

                "pandn %%mm2, %%mm0             \n\t"
                "pandn %%mm3, %%mm1             \n\t"

                "movq %%mm0, (%0, %3)           \n\t"
                "movq %%mm1, 8(%0, %3)          \n\t"

                "add $16, %3                    \n\t"
                "jng 1b                         \n\t"
                ::"r" (block+nCoeffs), "rm"(qmul), "rm" (qadd), "r" (2*(-nCoeffs))
                : "memory"
        );
}


/*
  We can assume that the result of the multiplication of the two operands
  cannot be greater than 0xFFFF, i.e. fits in 16 bits, so only the PMULLW
  instruction is needed here and a full-width multiplication can be avoided.
 =====================================================
  Full formula for the multiplication of two integers, each represented
  as high:low words:
  input:  value1 = high1:low1
          value2 = high2:low2
  output: value3 = value1*value2
  value3 = high3:low3 (on overflow: modulo 2^32 wrap-around)
  This means that for 0x123456 * 0x123456 the exact result is 0x14B66CB0CE4,
  but this algorithm only computes 0x66CB0CE4, limited by the 16-bit size
  of the operands.
  ---------------------------------
  tlow1 = high1*low2
  tlow2 = high2*low1
  tlow1 = tlow1 + tlow2
  high3:low3 = low1*low2
  high3 += tlow1
*/
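
/* As a concrete C sketch of the word decomposition described above
 * (illustrative only, not used by the code; the helper name is made up):
 *
 *     static uint32_t mul32_by_words(uint32_t value1, uint32_t value2)
 *     {
 *         uint16_t high1 = value1 >> 16, low1 = value1 & 0xFFFF;
 *         uint16_t high2 = value2 >> 16, low2 = value2 & 0xFFFF;
 *         uint32_t tlow  = (uint32_t)high1 * low2 + (uint32_t)high2 * low1;
 *         return (uint32_t)low1 * low2 + (tlow << 16); // high1*high2 is lost (mod 2^32)
 *     }
 *
 * In the dequantizers below the products are assumed to fit in 16 bits, so
 * the cross terms are not needed and a single PMULLW per vector suffices.
 */
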
static void dct_unquantize_mpeg1_intra_mmx(MpegEncContext *s,
                                     int16_t *block, int n, int qscale)
{
    x86_reg nCoeffs;
    const uint16_t *quant_matrix;
    int block0;

    av_assert2(s->block_last_index[n]>=0);

    nCoeffs= s->intra_scantable.raster_end[ s->block_last_index[n] ]+1;

    if (n < 4)
        block0 = block[0] * s->y_dc_scale;
    else
        block0 = block[0] * s->c_dc_scale;
    /* XXX: only mpeg1 */
    quant_matrix = s->intra_matrix;
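    /* Rough scalar equivalent of the loop below for the AC coefficients
     * (illustrative sketch only; the DC coefficient is restored from
     * 'block0' after the asm block):
     *
     *     level = (abs(block[i]) * qscale * quant_matrix[i]) >> 3;
     *     level = (level - 1) | 1;             // force the result odd
     *     block[i] = sign(block[i]) * level;   // zero stays zero
     */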
__asm__ volatile(
                "pcmpeqw %%mm7, %%mm7           \n\t"
                "psrlw $15, %%mm7               \n\t"
                "movd %2, %%mm6                 \n\t"
                "packssdw %%mm6, %%mm6          \n\t"
                "packssdw %%mm6, %%mm6          \n\t"
                "mov %3, %%"REG_a"              \n\t"
                ".p2align 4                     \n\t"
                "1:                             \n\t"
                "movq (%0, %%"REG_a"), %%mm0    \n\t"
                "movq 8(%0, %%"REG_a"), %%mm1   \n\t"
                "movq (%1, %%"REG_a"), %%mm4    \n\t"
                "movq 8(%1, %%"REG_a"), %%mm5   \n\t"
                "pmullw %%mm6, %%mm4            \n\t" // q=qscale*quant_matrix[i]
                "pmullw %%mm6, %%mm5            \n\t" // q=qscale*quant_matrix[i]
                "pxor %%mm2, %%mm2              \n\t"
                "pxor %%mm3, %%mm3              \n\t"
                "pcmpgtw %%mm0, %%mm2           \n\t" // block[i] < 0 ? -1 : 0
                "pcmpgtw %%mm1, %%mm3           \n\t" // block[i] < 0 ? -1 : 0
                "pxor %%mm2, %%mm0              \n\t"
                "pxor %%mm3, %%mm1              \n\t"
                "psubw %%mm2, %%mm0             \n\t" // abs(block[i])
                "psubw %%mm3, %%mm1             \n\t" // abs(block[i])
                "pmullw %%mm4, %%mm0            \n\t" // abs(block[i])*q
                "pmullw %%mm5, %%mm1            \n\t" // abs(block[i])*q
                "pxor %%mm4, %%mm4              \n\t"
                "pxor %%mm5, %%mm5              \n\t" // FIXME slow
                "pcmpeqw (%0, %%"REG_a"), %%mm4 \n\t" // block[i] == 0 ? -1 : 0
                "pcmpeqw 8(%0, %%"REG_a"), %%mm5\n\t" // block[i] == 0 ? -1 : 0
                "psraw $3, %%mm0                \n\t"
                "psraw $3, %%mm1                \n\t"
                "psubw %%mm7, %%mm0             \n\t"
                "psubw %%mm7, %%mm1             \n\t"
                "por %%mm7, %%mm0               \n\t"
                "por %%mm7, %%mm1               \n\t"
                "pxor %%mm2, %%mm0              \n\t"
                "pxor %%mm3, %%mm1              \n\t"
                "psubw %%mm2, %%mm0             \n\t"
                "psubw %%mm3, %%mm1             \n\t"
                "pandn %%mm0, %%mm4             \n\t"
                "pandn %%mm1, %%mm5             \n\t"
                "movq %%mm4, (%0, %%"REG_a")    \n\t"
                "movq %%mm5, 8(%0, %%"REG_a")   \n\t"

                "add $16, %%"REG_a"             \n\t"
                "js 1b                          \n\t"
                ::"r" (block+nCoeffs), "r"(quant_matrix+nCoeffs), "rm" (qscale), "g" (-2*nCoeffs)
                : "%"REG_a, "memory"
        );
    block[0]= block0;
}

static void dct_unquantize_mpeg1_inter_mmx(MpegEncContext *s,
                                     int16_t *block, int n, int qscale)
{
    x86_reg nCoeffs;
    const uint16_t *quant_matrix;

    av_assert2(s->block_last_index[n]>=0);

    nCoeffs= s->intra_scantable.raster_end[ s->block_last_index[n] ]+1;

    quant_matrix = s->inter_matrix;
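    /* Rough scalar equivalent of the loop below (illustrative sketch only):
     *
     *     level = ((2 * abs(block[i]) + 1) * qscale * quant_matrix[i]) >> 4;
     *     level = (level - 1) | 1;             // force the result odd
     *     block[i] = sign(block[i]) * level;   // zero stays zero
     */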
__asm__ volatile(
                "pcmpeqw %%mm7, %%mm7           \n\t"
                "psrlw $15, %%mm7               \n\t"
                "movd %2, %%mm6                 \n\t"
                "packssdw %%mm6, %%mm6          \n\t"
                "packssdw %%mm6, %%mm6          \n\t"
                "mov %3, %%"REG_a"              \n\t"
                ".p2align 4                     \n\t"
                "1:                             \n\t"
                "movq (%0, %%"REG_a"), %%mm0    \n\t"
                "movq 8(%0, %%"REG_a"), %%mm1   \n\t"
                "movq (%1, %%"REG_a"), %%mm4    \n\t"
                "movq 8(%1, %%"REG_a"), %%mm5   \n\t"
                "pmullw %%mm6, %%mm4            \n\t" // q=qscale*quant_matrix[i]
                "pmullw %%mm6, %%mm5            \n\t" // q=qscale*quant_matrix[i]
                "pxor %%mm2, %%mm2              \n\t"
                "pxor %%mm3, %%mm3              \n\t"
                "pcmpgtw %%mm0, %%mm2           \n\t" // block[i] < 0 ? -1 : 0
                "pcmpgtw %%mm1, %%mm3           \n\t" // block[i] < 0 ? -1 : 0
                "pxor %%mm2, %%mm0              \n\t"
                "pxor %%mm3, %%mm1              \n\t"
                "psubw %%mm2, %%mm0             \n\t" // abs(block[i])
                "psubw %%mm3, %%mm1             \n\t" // abs(block[i])
                "paddw %%mm0, %%mm0             \n\t" // abs(block[i])*2
                "paddw %%mm1, %%mm1             \n\t" // abs(block[i])*2
                "paddw %%mm7, %%mm0             \n\t" // abs(block[i])*2 + 1
                "paddw %%mm7, %%mm1             \n\t" // abs(block[i])*2 + 1
                "pmullw %%mm4, %%mm0            \n\t" // (abs(block[i])*2 + 1)*q
                "pmullw %%mm5, %%mm1            \n\t" // (abs(block[i])*2 + 1)*q
                "pxor %%mm4, %%mm4              \n\t"
                "pxor %%mm5, %%mm5              \n\t" // FIXME slow
                "pcmpeqw (%0, %%"REG_a"), %%mm4 \n\t" // block[i] == 0 ? -1 : 0
                "pcmpeqw 8(%0, %%"REG_a"), %%mm5\n\t" // block[i] == 0 ? -1 : 0
                "psraw $4, %%mm0                \n\t"
                "psraw $4, %%mm1                \n\t"
                "psubw %%mm7, %%mm0             \n\t"
                "psubw %%mm7, %%mm1             \n\t"
                "por %%mm7, %%mm0               \n\t"
                "por %%mm7, %%mm1               \n\t"
                "pxor %%mm2, %%mm0              \n\t"
                "pxor %%mm3, %%mm1              \n\t"
                "psubw %%mm2, %%mm0             \n\t"
                "psubw %%mm3, %%mm1             \n\t"
                "pandn %%mm0, %%mm4             \n\t"
                "pandn %%mm1, %%mm5             \n\t"
                "movq %%mm4, (%0, %%"REG_a")    \n\t"
                "movq %%mm5, 8(%0, %%"REG_a")   \n\t"

                "add $16, %%"REG_a"             \n\t"
                "js 1b                          \n\t"
                ::"r" (block+nCoeffs), "r"(quant_matrix+nCoeffs), "rm" (qscale), "g" (-2*nCoeffs)
                : "%"REG_a, "memory"
        );
}

static void dct_unquantize_mpeg2_intra_mmx(MpegEncContext *s,
                                     int16_t *block, int n, int qscale)
{
    x86_reg nCoeffs;
    const uint16_t *quant_matrix;
    int block0;

    av_assert2(s->block_last_index[n]>=0);

    if(s->alternate_scan) nCoeffs= 63; //FIXME
    else nCoeffs= s->intra_scantable.raster_end[ s->block_last_index[n] ];

    if (n < 4)
        block0 = block[0] * s->y_dc_scale;
    else
        block0 = block[0] * s->c_dc_scale;
    quant_matrix = s->intra_matrix;
__asm__ volatile(
                "pcmpeqw %%mm7, %%mm7           \n\t"
                "psrlw $15, %%mm7               \n\t"
                "movd %2, %%mm6                 \n\t"
                "packssdw %%mm6, %%mm6          \n\t"
                "packssdw %%mm6, %%mm6          \n\t"
                "mov %3, %%"REG_a"              \n\t"
                ".p2align 4                     \n\t"
                "1:                             \n\t"
                "movq (%0, %%"REG_a"), %%mm0    \n\t"
                "movq 8(%0, %%"REG_a"), %%mm1   \n\t"
                "movq (%1, %%"REG_a"), %%mm4    \n\t"
                "movq 8(%1, %%"REG_a"), %%mm5   \n\t"
                "pmullw %%mm6, %%mm4            \n\t" // q=qscale*quant_matrix[i]
                "pmullw %%mm6, %%mm5            \n\t" // q=qscale*quant_matrix[i]
                "pxor %%mm2, %%mm2              \n\t"
                "pxor %%mm3, %%mm3              \n\t"
                "pcmpgtw %%mm0, %%mm2           \n\t" // block[i] < 0 ? -1 : 0
                "pcmpgtw %%mm1, %%mm3           \n\t" // block[i] < 0 ? -1 : 0
                "pxor %%mm2, %%mm0              \n\t"
                "pxor %%mm3, %%mm1              \n\t"
                "psubw %%mm2, %%mm0             \n\t" // abs(block[i])
                "psubw %%mm3, %%mm1             \n\t" // abs(block[i])
                "pmullw %%mm4, %%mm0            \n\t" // abs(block[i])*q
                "pmullw %%mm5, %%mm1            \n\t" // abs(block[i])*q
                "pxor %%mm4, %%mm4              \n\t"
                "pxor %%mm5, %%mm5              \n\t" // FIXME slow
                "pcmpeqw (%0, %%"REG_a"), %%mm4 \n\t" // block[i] == 0 ? -1 : 0
                "pcmpeqw 8(%0, %%"REG_a"), %%mm5\n\t" // block[i] == 0 ? -1 : 0
                "psraw $3, %%mm0                \n\t"
                "psraw $3, %%mm1                \n\t"
                "pxor %%mm2, %%mm0              \n\t"
                "pxor %%mm3, %%mm1              \n\t"
                "psubw %%mm2, %%mm0             \n\t"
                "psubw %%mm3, %%mm1             \n\t"
                "pandn %%mm0, %%mm4             \n\t"
                "pandn %%mm1, %%mm5             \n\t"
                "movq %%mm4, (%0, %%"REG_a")    \n\t"
                "movq %%mm5, 8(%0, %%"REG_a")   \n\t"

                "add $16, %%"REG_a"             \n\t"
                "jng 1b                         \n\t"
                ::"r" (block+nCoeffs), "r"(quant_matrix+nCoeffs), "rm" (qscale), "g" (-2*nCoeffs)
                : "%"REG_a, "memory"
        );
    block[0]= block0;
    //Note, we do not do mismatch control for intra as errors cannot accumulate
}

static void dct_unquantize_mpeg2_inter_mmx(MpegEncContext *s,
                                     int16_t *block, int n, int qscale)
{
    x86_reg nCoeffs;
    const uint16_t *quant_matrix;

    av_assert2(s->block_last_index[n]>=0);

    if(s->alternate_scan) nCoeffs= 63; //FIXME
    else nCoeffs= s->intra_scantable.raster_end[ s->block_last_index[n] ];

    quant_matrix = s->inter_matrix;
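    /* Rough scalar equivalent of the loop below, including the final MPEG-2
     * mismatch control on block[63] (illustrative sketch only):
     *
     *     sum = 0;
     *     for (each coefficient i) {
     *         level = ((2 * abs(block[i]) + 1) * qscale * quant_matrix[i]) >> 4;
     *         block[i] = sign(block[i]) * level;   // zero stays zero
     *         sum += block[i];
     *     }
     *     if (!(sum & 1))
     *         block[63] ^= 1;                      // keep the coefficient sum odd
     */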
__asm__ volatile(
                "pcmpeqw %%mm7, %%mm7           \n\t"
                "psrlq $48, %%mm7               \n\t"
                "movd %2, %%mm6                 \n\t"
                "packssdw %%mm6, %%mm6          \n\t"
                "packssdw %%mm6, %%mm6          \n\t"
                "mov %3, %%"REG_a"              \n\t"
                ".p2align 4                     \n\t"
                "1:                             \n\t"
                "movq (%0, %%"REG_a"), %%mm0    \n\t"
                "movq 8(%0, %%"REG_a"), %%mm1   \n\t"
                "movq (%1, %%"REG_a"), %%mm4    \n\t"
                "movq 8(%1, %%"REG_a"), %%mm5   \n\t"
                "pmullw %%mm6, %%mm4            \n\t" // q=qscale*quant_matrix[i]
                "pmullw %%mm6, %%mm5            \n\t" // q=qscale*quant_matrix[i]
                "pxor %%mm2, %%mm2              \n\t"
                "pxor %%mm3, %%mm3              \n\t"
                "pcmpgtw %%mm0, %%mm2           \n\t" // block[i] < 0 ? -1 : 0
                "pcmpgtw %%mm1, %%mm3           \n\t" // block[i] < 0 ? -1 : 0
                "pxor %%mm2, %%mm0              \n\t"
                "pxor %%mm3, %%mm1              \n\t"
                "psubw %%mm2, %%mm0             \n\t" // abs(block[i])
                "psubw %%mm3, %%mm1             \n\t" // abs(block[i])
                "paddw %%mm0, %%mm0             \n\t" // abs(block[i])*2
                "paddw %%mm1, %%mm1             \n\t" // abs(block[i])*2
                "pmullw %%mm4, %%mm0            \n\t" // abs(block[i])*2*q
                "pmullw %%mm5, %%mm1            \n\t" // abs(block[i])*2*q
                "paddw %%mm4, %%mm0             \n\t" // (abs(block[i])*2 + 1)*q
                "paddw %%mm5, %%mm1             \n\t" // (abs(block[i])*2 + 1)*q
                "pxor %%mm4, %%mm4              \n\t"
                "pxor %%mm5, %%mm5              \n\t" // FIXME slow
                "pcmpeqw (%0, %%"REG_a"), %%mm4 \n\t" // block[i] == 0 ? -1 : 0
                "pcmpeqw 8(%0, %%"REG_a"), %%mm5\n\t" // block[i] == 0 ? -1 : 0
                "psrlw $4, %%mm0                \n\t"
                "psrlw $4, %%mm1                \n\t"
                "pxor %%mm2, %%mm0              \n\t"
                "pxor %%mm3, %%mm1              \n\t"
                "psubw %%mm2, %%mm0             \n\t"
                "psubw %%mm3, %%mm1             \n\t"
                "pandn %%mm0, %%mm4             \n\t"
                "pandn %%mm1, %%mm5             \n\t"
                "pxor %%mm4, %%mm7              \n\t"
                "pxor %%mm5, %%mm7              \n\t"
                "movq %%mm4, (%0, %%"REG_a")    \n\t"
                "movq %%mm5, 8(%0, %%"REG_a")   \n\t"

                "add $16, %%"REG_a"             \n\t"
                "jng 1b                         \n\t"
                "movd 124(%0, %3), %%mm0        \n\t"
                "movq %%mm7, %%mm6              \n\t"
                "psrlq $32, %%mm7               \n\t"
                "pxor %%mm6, %%mm7              \n\t"
                "movq %%mm7, %%mm6              \n\t"
                "psrlq $16, %%mm7               \n\t"
                "pxor %%mm6, %%mm7              \n\t"
                "pslld $31, %%mm7               \n\t"
                "psrlq $15, %%mm7               \n\t"
                "pxor %%mm7, %%mm0              \n\t"
                "movd %%mm0, 124(%0, %3)        \n\t"

                ::"r" (block+nCoeffs), "r"(quant_matrix+nCoeffs), "rm" (qscale), "r" (-2*nCoeffs)
                : "%"REG_a, "memory"
        );
}

static void denoise_dct_mmx(MpegEncContext *s, int16_t *block){
    const int intra= s->mb_intra;
    int *sum= s->dct_error_sum[intra];
    uint16_t *offset= s->dct_offset[intra];

    s->dct_count[intra]++;

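    /* Rough scalar equivalent of the loop below (illustrative sketch only;
     * FFMAX/abs are used informally):
     *
     *     for (i = 0; i < 64; i++) {
     *         int a = abs(block[i]);
     *         sum[i]  += a;                        // accumulate error statistics
     *         a        = FFMAX(a - offset[i], 0);  // saturating subtract
     *         block[i] = block[i] < 0 ? -a : a;    // restore the sign
     *     }
     */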
    __asm__ volatile(
        "pxor %%mm7, %%mm7                      \n\t"
        "1:                                     \n\t"
        "pxor %%mm0, %%mm0                      \n\t"
        "pxor %%mm1, %%mm1                      \n\t"
        "movq (%0), %%mm2                       \n\t"
        "movq 8(%0), %%mm3                      \n\t"
        "pcmpgtw %%mm2, %%mm0                   \n\t"
        "pcmpgtw %%mm3, %%mm1                   \n\t"
        "pxor %%mm0, %%mm2                      \n\t"
        "pxor %%mm1, %%mm3                      \n\t"
        "psubw %%mm0, %%mm2                     \n\t"
        "psubw %%mm1, %%mm3                     \n\t"
        "movq %%mm2, %%mm4                      \n\t"
        "movq %%mm3, %%mm5                      \n\t"
        "psubusw (%2), %%mm2                    \n\t"
        "psubusw 8(%2), %%mm3                   \n\t"
        "pxor %%mm0, %%mm2                      \n\t"
        "pxor %%mm1, %%mm3                      \n\t"
        "psubw %%mm0, %%mm2                     \n\t"
        "psubw %%mm1, %%mm3                     \n\t"
        "movq %%mm2, (%0)                       \n\t"
        "movq %%mm3, 8(%0)                      \n\t"
        "movq %%mm4, %%mm2                      \n\t"
        "movq %%mm5, %%mm3                      \n\t"
        "punpcklwd %%mm7, %%mm4                 \n\t"
        "punpckhwd %%mm7, %%mm2                 \n\t"
        "punpcklwd %%mm7, %%mm5                 \n\t"
        "punpckhwd %%mm7, %%mm3                 \n\t"
        "paddd (%1), %%mm4                      \n\t"
        "paddd 8(%1), %%mm2                     \n\t"
        "paddd 16(%1), %%mm5                    \n\t"
        "paddd 24(%1), %%mm3                    \n\t"
        "movq %%mm4, (%1)                       \n\t"
        "movq %%mm2, 8(%1)                      \n\t"
        "movq %%mm5, 16(%1)                     \n\t"
        "movq %%mm3, 24(%1)                     \n\t"
        "add $16, %0                            \n\t"
        "add $32, %1                            \n\t"
        "add $16, %2                            \n\t"
        "cmp %3, %0                             \n\t"
        " jb 1b                                 \n\t"
        : "+r" (block), "+r" (sum), "+r" (offset)
        : "r"(block+64)
    );
}

static void denoise_dct_sse2(MpegEncContext *s, int16_t *block){
    const int intra= s->mb_intra;
    int *sum= s->dct_error_sum[intra];
    uint16_t *offset= s->dct_offset[intra];

    s->dct_count[intra]++;

    __asm__ volatile(
        "pxor %%xmm7, %%xmm7                    \n\t"
        "1:                                     \n\t"
        "pxor %%xmm0, %%xmm0                    \n\t"
        "pxor %%xmm1, %%xmm1                    \n\t"
        "movdqa (%0), %%xmm2                    \n\t"
        "movdqa 16(%0), %%xmm3                  \n\t"
        "pcmpgtw %%xmm2, %%xmm0                 \n\t"
        "pcmpgtw %%xmm3, %%xmm1                 \n\t"
        "pxor %%xmm0, %%xmm2                    \n\t"
        "pxor %%xmm1, %%xmm3                    \n\t"
        "psubw %%xmm0, %%xmm2                   \n\t"
        "psubw %%xmm1, %%xmm3                   \n\t"
        "movdqa %%xmm2, %%xmm4                  \n\t"
        "movdqa %%xmm3, %%xmm5                  \n\t"
        "psubusw (%2), %%xmm2                   \n\t"
        "psubusw 16(%2), %%xmm3                 \n\t"
        "pxor %%xmm0, %%xmm2                    \n\t"
        "pxor %%xmm1, %%xmm3                    \n\t"
        "psubw %%xmm0, %%xmm2                   \n\t"
        "psubw %%xmm1, %%xmm3                   \n\t"
        "movdqa %%xmm2, (%0)                    \n\t"
        "movdqa %%xmm3, 16(%0)                  \n\t"
        "movdqa %%xmm4, %%xmm6                  \n\t"
        "movdqa %%xmm5, %%xmm0                  \n\t"
        "punpcklwd %%xmm7, %%xmm4               \n\t"
        "punpckhwd %%xmm7, %%xmm6               \n\t"
        "punpcklwd %%xmm7, %%xmm5               \n\t"
        "punpckhwd %%xmm7, %%xmm0               \n\t"
        "paddd (%1), %%xmm4                     \n\t"
        "paddd 16(%1), %%xmm6                   \n\t"
        "paddd 32(%1), %%xmm5                   \n\t"
        "paddd 48(%1), %%xmm0                   \n\t"
        "movdqa %%xmm4, (%1)                    \n\t"
        "movdqa %%xmm6, 16(%1)                  \n\t"
        "movdqa %%xmm5, 32(%1)                  \n\t"
        "movdqa %%xmm0, 48(%1)                  \n\t"
        "add $32, %0                            \n\t"
        "add $64, %1                            \n\t"
        "add $32, %2                            \n\t"
        "cmp %3, %0                             \n\t"
        " jb 1b                                 \n\t"
        : "+r" (block), "+r" (sum), "+r" (offset)
        : "r"(block+64)
          XMM_CLOBBERS_ONLY("%xmm0", "%xmm1", "%xmm2", "%xmm3",
                            "%xmm4", "%xmm5", "%xmm6", "%xmm7")
    );
}

#endif /* HAVE_INLINE_ASM */

void ff_MPV_common_init_x86(MpegEncContext *s)
{
#if HAVE_INLINE_ASM
    int mm_flags = av_get_cpu_flags();

    if (mm_flags & AV_CPU_FLAG_MMX) {
        s->dct_unquantize_h263_intra = dct_unquantize_h263_intra_mmx;
        s->dct_unquantize_h263_inter = dct_unquantize_h263_inter_mmx;
        s->dct_unquantize_mpeg1_intra = dct_unquantize_mpeg1_intra_mmx;
        s->dct_unquantize_mpeg1_inter = dct_unquantize_mpeg1_inter_mmx;
        if(!(s->flags & CODEC_FLAG_BITEXACT))
            s->dct_unquantize_mpeg2_intra = dct_unquantize_mpeg2_intra_mmx;
        s->dct_unquantize_mpeg2_inter = dct_unquantize_mpeg2_inter_mmx;

        if (mm_flags & AV_CPU_FLAG_SSE2) {
            s->denoise_dct= denoise_dct_sse2;
        } else {
            s->denoise_dct= denoise_dct_mmx;
        }
    }
#endif /* HAVE_INLINE_ASM */
}