2 * Copyright (c) 2004-2005 Michael Niedermayer, Loren Merritt
4 * This file is part of FFmpeg.
6 * FFmpeg is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2.1 of the License, or (at your option) any later version.
11 * FFmpeg is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with FFmpeg; if not, write to the Free Software
18 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
21 #include "dsputil_mmx.h"
23 /***********************************/
24 /* IDCT */
26 /* in/out: mma=mma+mmb, mmb=mmb-mma */
27 #define SUMSUB_BA( a, b ) \
28 "paddw "#b", "#a" \n\t"\
29 "paddw "#b", "#b" \n\t"\
30 "psubw "#a", "#b" \n\t"
32 #define SUMSUB_BADC( a, b, c, d ) \
33 "paddw "#b", "#a" \n\t"\
34 "paddw "#d", "#c" \n\t"\
35 "paddw "#b", "#b" \n\t"\
36 "paddw "#d", "#d" \n\t"\
37 "psubw "#a", "#b" \n\t"\
38 "psubw "#c", "#d" \n\t"
40 #define SUMSUBD2_AB( a, b, t ) \
41 "movq "#b", "#t" \n\t"\
42 "psraw $1 , "#b" \n\t"\
43 "paddw "#a", "#b" \n\t"\
44 "psraw $1 , "#a" \n\t"\
45 "psubw "#t", "#a" \n\t"
47 #define IDCT4_1D( s02, s13, d02, d13, t ) \
48 SUMSUB_BA ( s02, d02 )\
49 SUMSUBD2_AB( s13, d13, t )\
50 SUMSUB_BADC( d13, s02, s13, d02 )
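/* For reference: the butterfly macros above, as combined by IDCT4_1D, perform the
 * 1-D step of the H.264 4x4 inverse transform.  A scalar sketch of that step
 * (illustrative only, not compiled here; the helper name is made up):
 */
#if 0
static void idct4_1d_ref(int16_t d[4])
{
    const int e0 =  d[0]       +  d[2];        /* SUMSUB_BA on the even pair  */
    const int e1 =  d[0]       -  d[2];
    const int e2 = (d[1] >> 1) -  d[3];        /* SUMSUBD2_AB on the odd pair */
    const int e3 =  d[1]       + (d[3] >> 1);
    d[0] = e0 + e3;                            /* final SUMSUB_BADC butterfly */
    d[1] = e1 + e2;
    d[2] = e1 - e2;
    d[3] = e0 - e3;
}
#endif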
52 #define STORE_DIFF_4P( p, t, z ) \
53 "psraw $6, "#p" \n\t"\
54 "movd (%0), "#t" \n\t"\
55 "punpcklbw "#z", "#t" \n\t"\
56 "paddsw "#t", "#p" \n\t"\
57 "packuswb "#z", "#p" \n\t"\
58 "movd "#p", (%0) \n\t"
60 static void ff_h264_idct_add_mmx(uint8_t *dst, int16_t *block, int stride)
64 "movq (%0), %%mm0 \n\t"
65 "movq 8(%0), %%mm1 \n\t"
66 "movq 16(%0), %%mm2 \n\t"
67 "movq 24(%0), %%mm3 \n\t"
71 /* mm1=s02+s13 mm2=s02-s13 mm4=d02+d13 mm0=d02-d13 */
72 IDCT4_1D( %%mm2, %%mm1, %%mm0, %%mm3, %%mm4 )
75 /* in: 1,4,0,2 out: 1,2,3,0 */
76 TRANSPOSE4( %%mm3, %%mm1, %%mm0, %%mm2, %%mm4 )
78 "paddw %%mm6, %%mm3 \n\t"
80 /* mm2=s02+s13 mm3=s02-s13 mm4=d02+d13 mm1=d02-d13 */
81 IDCT4_1D( %%mm4, %%mm2, %%mm3, %%mm0, %%mm1 )
83 "pxor %%mm7, %%mm7 \n\t"
87 STORE_DIFF_4P( %%mm0, %%mm1, %%mm7)
89 STORE_DIFF_4P( %%mm2, %%mm1, %%mm7)
91 STORE_DIFF_4P( %%mm3, %%mm1, %%mm7)
93 STORE_DIFF_4P( %%mm4, %%mm1, %%mm7)
99 static inline void h264_idct8_1d(int16_t *block)
102 "movq 112(%0), %%mm7 \n\t"
103 "movq 80(%0), %%mm5 \n\t"
104 "movq 48(%0), %%mm3 \n\t"
105 "movq 16(%0), %%mm1 \n\t"
107 "movq %%mm7, %%mm4 \n\t"
108 "movq %%mm3, %%mm6 \n\t"
109 "movq %%mm5, %%mm0 \n\t"
110 "movq %%mm7, %%mm2 \n\t"
111 "psraw $1, %%mm4 \n\t"
112 "psraw $1, %%mm6 \n\t"
113 "psubw %%mm7, %%mm0 \n\t"
114 "psubw %%mm6, %%mm2 \n\t"
115 "psubw %%mm4, %%mm0 \n\t"
116 "psubw %%mm3, %%mm2 \n\t"
117 "psubw %%mm3, %%mm0 \n\t"
118 "paddw %%mm1, %%mm2 \n\t"
120 "movq %%mm5, %%mm4 \n\t"
121 "movq %%mm1, %%mm6 \n\t"
122 "psraw $1, %%mm4 \n\t"
123 "psraw $1, %%mm6 \n\t"
124 "paddw %%mm5, %%mm4 \n\t"
125 "paddw %%mm1, %%mm6 \n\t"
126 "paddw %%mm7, %%mm4 \n\t"
127 "paddw %%mm5, %%mm6 \n\t"
128 "psubw %%mm1, %%mm4 \n\t"
129 "paddw %%mm3, %%mm6 \n\t"
131 "movq %%mm0, %%mm1 \n\t"
132 "movq %%mm4, %%mm3 \n\t"
133 "movq %%mm2, %%mm5 \n\t"
134 "movq %%mm6, %%mm7 \n\t"
135 "psraw $2, %%mm6 \n\t"
136 "psraw $2, %%mm3 \n\t"
137 "psraw $2, %%mm5 \n\t"
138 "psraw $2, %%mm0 \n\t"
139 "paddw %%mm6, %%mm1 \n\t"
140 "paddw %%mm2, %%mm3 \n\t"
141 "psubw %%mm4, %%mm5 \n\t"
142 "psubw %%mm0, %%mm7 \n\t"
144 "movq 32(%0), %%mm2 \n\t"
145 "movq 96(%0), %%mm6 \n\t"
146 "movq %%mm2, %%mm4 \n\t"
147 "movq %%mm6, %%mm0 \n\t"
148 "psraw $1, %%mm4 \n\t"
149 "psraw $1, %%mm6 \n\t"
150 "psubw %%mm0, %%mm4 \n\t"
151 "paddw %%mm2, %%mm6 \n\t"
153 "movq (%0), %%mm2 \n\t"
154 "movq 64(%0), %%mm0 \n\t"
155 SUMSUB_BA( %%mm0, %%mm2 )
156 SUMSUB_BA( %%mm6, %%mm0 )
157 SUMSUB_BA( %%mm4, %%mm2 )
158 SUMSUB_BA( %%mm7, %%mm6 )
159 SUMSUB_BA( %%mm5, %%mm4 )
160 SUMSUB_BA( %%mm3, %%mm2 )
161 SUMSUB_BA( %%mm1, %%mm0 )
166 static void ff_h264_idct8_add_mmx(uint8_t *dst, int16_t *block, int stride)
169 int16_t __attribute__ ((aligned(8))) b2[64];
174 DECLARE_ALIGNED_8(uint64_t, tmp);
176 h264_idct8_1d(block+4*i);
179 "movq %%mm7, %0 \n\t"
180 TRANSPOSE4( %%mm0, %%mm2, %%mm4, %%mm6, %%mm7 )
181 "movq %%mm0, 8(%1) \n\t"
182 "movq %%mm6, 24(%1) \n\t"
183 "movq %%mm7, 40(%1) \n\t"
184 "movq %%mm4, 56(%1) \n\t"
185 "movq %0, %%mm7 \n\t"
186 TRANSPOSE4( %%mm7, %%mm5, %%mm3, %%mm1, %%mm0 )
187 "movq %%mm7, (%1) \n\t"
188 "movq %%mm1, 16(%1) \n\t"
189 "movq %%mm0, 32(%1) \n\t"
190 "movq %%mm3, 48(%1) \n\t"
198 h264_idct8_1d(b2+4*i);
201 "psraw $6, %%mm7 \n\t"
202 "psraw $6, %%mm6 \n\t"
203 "psraw $6, %%mm5 \n\t"
204 "psraw $6, %%mm4 \n\t"
205 "psraw $6, %%mm3 \n\t"
206 "psraw $6, %%mm2 \n\t"
207 "psraw $6, %%mm1 \n\t"
208 "psraw $6, %%mm0 \n\t"
210 "movq %%mm7, (%0) \n\t"
211 "movq %%mm5, 16(%0) \n\t"
212 "movq %%mm3, 32(%0) \n\t"
213 "movq %%mm1, 48(%0) \n\t"
214 "movq %%mm0, 64(%0) \n\t"
215 "movq %%mm2, 80(%0) \n\t"
216 "movq %%mm4, 96(%0) \n\t"
217 "movq %%mm6, 112(%0) \n\t"
223 add_pixels_clamped_mmx(b2, dst, stride);
226 static void ff_h264_idct_dc_add_mmx2(uint8_t *dst, int16_t *block, int stride)
228 int dc = (block[0] + 32) >> 6;
230 "movd %0, %%mm0 \n\t"
231 "pshufw $0, %%mm0, %%mm0 \n\t"
232 "pxor %%mm1, %%mm1 \n\t"
233 "psubw %%mm0, %%mm1 \n\t"
234 "packuswb %%mm0, %%mm0 \n\t"
235 "packuswb %%mm1, %%mm1 \n\t"
239 "movd %0, %%mm2 \n\t"
240 "movd %1, %%mm3 \n\t"
241 "movd %2, %%mm4 \n\t"
242 "movd %3, %%mm5 \n\t"
243 "paddusb %%mm0, %%mm2 \n\t"
244 "paddusb %%mm0, %%mm3 \n\t"
245 "paddusb %%mm0, %%mm4 \n\t"
246 "paddusb %%mm0, %%mm5 \n\t"
247 "psubusb %%mm1, %%mm2 \n\t"
248 "psubusb %%mm1, %%mm3 \n\t"
249 "psubusb %%mm1, %%mm4 \n\t"
250 "psubusb %%mm1, %%mm5 \n\t"
251 "movd %%mm2, %0 \n\t"
252 "movd %%mm3, %1 \n\t"
253 "movd %%mm4, %2 \n\t"
254 "movd %%mm5, %3 \n\t"
255 :"+m"(*(uint32_t*)(dst+0*stride)),
256 "+m"(*(uint32_t*)(dst+1*stride)),
257 "+m"(*(uint32_t*)(dst+2*stride)),
258 "+m"(*(uint32_t*)(dst+3*stride))
262 static void ff_h264_idct8_dc_add_mmx2(uint8_t *dst, int16_t *block, int stride)
264 int dc = (block[0] + 32) >> 6;
267 "movd %0, %%mm0 \n\t"
268 "pshufw $0, %%mm0, %%mm0 \n\t"
269 "pxor %%mm1, %%mm1 \n\t"
270 "psubw %%mm0, %%mm1 \n\t"
271 "packuswb %%mm0, %%mm0 \n\t"
272 "packuswb %%mm1, %%mm1 \n\t"
275 for(y=2; y--; dst += 4*stride){
277 "movq %0, %%mm2 \n\t"
278 "movq %1, %%mm3 \n\t"
279 "movq %2, %%mm4 \n\t"
280 "movq %3, %%mm5 \n\t"
281 "paddusb %%mm0, %%mm2 \n\t"
282 "paddusb %%mm0, %%mm3 \n\t"
283 "paddusb %%mm0, %%mm4 \n\t"
284 "paddusb %%mm0, %%mm5 \n\t"
285 "psubusb %%mm1, %%mm2 \n\t"
286 "psubusb %%mm1, %%mm3 \n\t"
287 "psubusb %%mm1, %%mm4 \n\t"
288 "psubusb %%mm1, %%mm5 \n\t"
289 "movq %%mm2, %0 \n\t"
290 "movq %%mm3, %1 \n\t"
291 "movq %%mm4, %2 \n\t"
292 "movq %%mm5, %3 \n\t"
293 :"+m"(*(uint64_t*)(dst+0*stride)),
294 "+m"(*(uint64_t*)(dst+1*stride)),
295 "+m"(*(uint64_t*)(dst+2*stride)),
296 "+m"(*(uint64_t*)(dst+3*stride))
302 /***********************************/
303 /* deblocking */
307 #define DIFF_GT_MMX(x,y,a,o,t)\
308 "movq "#y", "#t" \n\t"\
309 "movq "#x", "#o" \n\t"\
310 "psubusb "#x", "#t" \n\t"\
311 "psubusb "#y", "#o" \n\t"\
312 "por "#t", "#o" \n\t"\
313 "psubusb "#a", "#o" \n\t"
317 #define DIFF_GT2_MMX(x,y,a,o,t)\
318 "movq "#y", "#t" \n\t"\
319 "movq "#x", "#o" \n\t"\
320 "psubusb "#x", "#t" \n\t"\
321 "psubusb "#y", "#o" \n\t"\
322 "psubusb "#a", "#t" \n\t"\
323 "psubusb "#a", "#o" \n\t"\
324 "pcmpeqb "#t", "#o" \n\t"\
326 // in: mm0=p1 mm1=p0 mm2=q0 mm3=q1
327 // out: mm5=beta-1, mm7=mask
329 #define H264_DEBLOCK_MASK(alpha1, beta1) \
330 "pshufw $0, "#alpha1", %%mm4 \n\t"\
331 "pshufw $0, "#beta1 ", %%mm5 \n\t"\
332 "packuswb %%mm4, %%mm4 \n\t"\
333 "packuswb %%mm5, %%mm5 \n\t"\
334 DIFF_GT_MMX(%%mm1, %%mm2, %%mm4, %%mm7, %%mm6) /* |p0-q0| > alpha-1 */\
335 DIFF_GT_MMX(%%mm0, %%mm1, %%mm5, %%mm4, %%mm6) /* |p1-p0| > beta-1 */\
336 "por %%mm4, %%mm7 \n\t"\
337 DIFF_GT_MMX(%%mm3, %%mm2, %%mm5, %%mm4, %%mm6) /* |q1-q0| > beta-1 */\
338 "por %%mm4, %%mm7 \n\t"\
339 "pxor %%mm6, %%mm6 \n\t"\
340 "pcmpeqb %%mm6, %%mm7 \n\t"
342 // in: mm0=p1 mm1=p0 mm2=q0 mm3=q1 mm7=(tc&mask)
343 // out: mm1=p0' mm2=q0'
345 #define H264_DEBLOCK_P0_Q0(pb_01, pb_3f)\
346 "movq %%mm1 , %%mm5 \n\t"\
347 "pxor %%mm2 , %%mm5 \n\t" /* p0^q0*/\
348 "pand "#pb_01" , %%mm5 \n\t" /* (p0^q0)&1*/\
349 "pcmpeqb %%mm4 , %%mm4 \n\t"\
350 "pxor %%mm4 , %%mm3 \n\t"\
351 "pavgb %%mm0 , %%mm3 \n\t" /* (p1 - q1 + 256)>>1*/\
352 "pavgb "MANGLE(ff_pb_3)" , %%mm3 \n\t" /*(((p1 - q1 + 256)>>1)+4)>>1 = 64+2+(p1-q1)>>2*/\
353 "pxor %%mm1 , %%mm4 \n\t"\
354 "pavgb %%mm2 , %%mm4 \n\t" /* (q0 - p0 + 256)>>1*/\
355 "pavgb %%mm5 , %%mm3 \n\t"\
356 "paddusb %%mm4 , %%mm3 \n\t" /* d+128+33*/\
357 "movq "MANGLE(ff_pb_A1)" , %%mm6 \n\t"\
358 "psubusb %%mm3 , %%mm6 \n\t"\
359 "psubusb "MANGLE(ff_pb_A1)" , %%mm3 \n\t"\
360 "pminub %%mm7 , %%mm6 \n\t"\
361 "pminub %%mm7 , %%mm3 \n\t"\
362 "psubusb %%mm6 , %%mm1 \n\t"\
363 "psubusb %%mm3 , %%mm2 \n\t"\
364 "paddusb %%mm3 , %%mm1 \n\t"\
365 "paddusb %%mm6 , %%mm2 \n\t"
367 // in: mm0=p1 mm1=p0 mm2=q0 mm3=q1 mm7=(tc&mask) %8=ff_bone
368 // out: (q1addr) = av_clip( (q2+((p0+q0+1)>>1))>>1, q1-tc0, q1+tc0 )
369 // clobbers: q2, tmp, tc0
370 #define H264_DEBLOCK_Q1(p1, q2, q2addr, q1addr, tc0, tmp)\
371 "movq %%mm1, "#tmp" \n\t"\
372 "pavgb %%mm2, "#tmp" \n\t"\
373 "pavgb "#tmp", "#q2" \n\t" /* avg(p2,avg(p0,q0)) */\
374 "pxor "q2addr", "#tmp" \n\t"\
375 "pand %8, "#tmp" \n\t" /* (p2^avg(p0,q0))&1 */\
376 "psubusb "#tmp", "#q2" \n\t" /* (p2+((p0+q0+1)>>1))>>1 */\
377 "movq "#p1", "#tmp" \n\t"\
378 "psubusb "#tc0", "#tmp" \n\t"\
379 "paddusb "#p1", "#tc0" \n\t"\
380 "pmaxub "#tmp", "#q2" \n\t"\
381 "pminub "#tc0", "#q2" \n\t"\
382 "movq "#q2", "q1addr" \n\t"
384 static inline void h264_loop_filter_luma_mmx2(uint8_t *pix, int stride, int alpha1, int beta1, int8_t *tc0)
386 DECLARE_ALIGNED_8(uint64_t, tmp0[2]);
389 "movq (%1,%3), %%mm0 \n\t" //p1
390 "movq (%1,%3,2), %%mm1 \n\t" //p0
391 "movq (%2), %%mm2 \n\t" //q0
392 "movq (%2,%3), %%mm3 \n\t" //q1
393 H264_DEBLOCK_MASK(%6, %7)
395 "movd %5, %%mm4 \n\t"
396 "punpcklbw %%mm4, %%mm4 \n\t"
397 "punpcklwd %%mm4, %%mm4 \n\t"
398 "pcmpeqb %%mm3, %%mm3 \n\t"
399 "movq %%mm4, %%mm6 \n\t"
400 "pcmpgtb %%mm3, %%mm4 \n\t"
401 "movq %%mm6, 8+%0 \n\t"
402 "pand %%mm4, %%mm7 \n\t"
403 "movq %%mm7, %0 \n\t"
406 "movq (%1), %%mm3 \n\t" //p2
407 DIFF_GT2_MMX(%%mm1, %%mm3, %%mm5, %%mm6, %%mm4) // |p2-p0|>beta-1
408 "pand %%mm7, %%mm6 \n\t" // mask & |p2-p0|<beta
409 "pand 8+%0, %%mm7 \n\t" // mask & tc0
410 "movq %%mm7, %%mm4 \n\t"
411 "psubb %%mm6, %%mm7 \n\t"
412 "pand %%mm4, %%mm6 \n\t" // mask & |p2-p0|<beta & tc0
413 H264_DEBLOCK_Q1(%%mm0, %%mm3, "(%1)", "(%1,%3)", %%mm6, %%mm4)
416 "movq (%2,%3,2), %%mm4 \n\t" //q2
417 DIFF_GT2_MMX(%%mm2, %%mm4, %%mm5, %%mm6, %%mm3) // |q2-q0|>beta-1
418 "pand %0, %%mm6 \n\t"
419 "movq 8+%0, %%mm5 \n\t" // can be merged with the and below but is slower then
420 "pand %%mm6, %%mm5 \n\t"
421 "psubb %%mm6, %%mm7 \n\t"
422 "movq (%2,%3), %%mm3 \n\t"
423 H264_DEBLOCK_Q1(%%mm3, %%mm4, "(%2,%3,2)", "(%2,%3)", %%mm5, %%mm6)
426 H264_DEBLOCK_P0_Q0(%8, unused)
427 "movq %%mm1, (%1,%3,2) \n\t"
428 "movq %%mm2, (%2) \n\t"
431 : "r"(pix-3*stride), "r"(pix), "r"((long)stride),
432 "m"(*tmp0/*unused*/), "m"(*(uint32_t*)tc0), "m"(alpha1), "m"(beta1),
437 static void h264_v_loop_filter_luma_mmx2(uint8_t *pix, int stride, int alpha, int beta, int8_t *tc0)
439 if((tc0[0] & tc0[1]) >= 0)
440 h264_loop_filter_luma_mmx2(pix, stride, alpha-1, beta-1, tc0);
441 if((tc0[2] & tc0[3]) >= 0)
442 h264_loop_filter_luma_mmx2(pix+8, stride, alpha-1, beta-1, tc0+2);
444 static void h264_h_loop_filter_luma_mmx2(uint8_t *pix, int stride, int alpha, int beta, int8_t *tc0)
446 //FIXME: could cut some load/stores by merging transpose with filter
447 // also, it only needs to transpose 6x8
448 DECLARE_ALIGNED_8(uint8_t, trans[8*8]);
450 for(i=0; i<2; i++, pix+=8*stride, tc0+=2) {
451 if((tc0[0] & tc0[1]) < 0)
453 transpose4x4(trans, pix-4, 8, stride);
454 transpose4x4(trans +4*8, pix, 8, stride);
455 transpose4x4(trans+4, pix-4+4*stride, 8, stride);
456 transpose4x4(trans+4+4*8, pix +4*stride, 8, stride);
457 h264_loop_filter_luma_mmx2(trans+4*8, 8, alpha-1, beta-1, tc0);
458 transpose4x4(pix-2, trans +2*8, stride, 8);
459 transpose4x4(pix-2+4*stride, trans+4+2*8, stride, 8);
463 static inline void h264_loop_filter_chroma_mmx2(uint8_t *pix, int stride, int alpha1, int beta1, int8_t *tc0)
466 "movq (%0), %%mm0 \n\t" //p1
467 "movq (%0,%2), %%mm1 \n\t" //p0
468 "movq (%1), %%mm2 \n\t" //q0
469 "movq (%1,%2), %%mm3 \n\t" //q1
470 H264_DEBLOCK_MASK(%4, %5)
471 "movd %3, %%mm6 \n\t"
472 "punpcklbw %%mm6, %%mm6 \n\t"
473 "pand %%mm6, %%mm7 \n\t" // mm7 = tc&mask
474 H264_DEBLOCK_P0_Q0(%6, %7)
475 "movq %%mm1, (%0,%2) \n\t"
476 "movq %%mm2, (%1) \n\t"
478 :: "r"(pix-2*stride), "r"(pix), "r"((long)stride),
479 "r"(*(uint32_t*)tc0),
480 "m"(alpha1), "m"(beta1), "m"(ff_bone), "m"(ff_pb_3F)
484 static void h264_v_loop_filter_chroma_mmx2(uint8_t *pix, int stride, int alpha, int beta, int8_t *tc0)
486 h264_loop_filter_chroma_mmx2(pix, stride, alpha-1, beta-1, tc0);
489 static void h264_h_loop_filter_chroma_mmx2(uint8_t *pix, int stride, int alpha, int beta, int8_t *tc0)
491 //FIXME: could cut some load/stores by merging transpose with filter
492 DECLARE_ALIGNED_8(uint8_t, trans[8*4]);
493 transpose4x4(trans, pix-2, 8, stride);
494 transpose4x4(trans+4, pix-2+4*stride, 8, stride);
495 h264_loop_filter_chroma_mmx2(trans+2*8, 8, alpha-1, beta-1, tc0);
496 transpose4x4(pix-2, trans, stride, 8);
497 transpose4x4(pix-2+4*stride, trans+4, stride, 8);
500 // p0 = (p0 + q1 + 2*p1 + 2) >> 2
501 #define H264_FILTER_CHROMA4(p0, p1, q1, one) \
502 "movq "#p0", %%mm4 \n\t"\
503 "pxor "#q1", %%mm4 \n\t"\
504 "pand "#one", %%mm4 \n\t" /* mm4 = (p0^q1)&1 */\
505 "pavgb "#q1", "#p0" \n\t"\
506 "psubusb %%mm4, "#p0" \n\t"\
507 "pavgb "#p1", "#p0" \n\t" /* dst = avg(p1, avg(p0,q1) - ((p0^q1)&1)) */\
509 static inline void h264_loop_filter_chroma_intra_mmx2(uint8_t *pix, int stride, int alpha1, int beta1)
512 "movq (%0), %%mm0 \n\t"
513 "movq (%0,%2), %%mm1 \n\t"
514 "movq (%1), %%mm2 \n\t"
515 "movq (%1,%2), %%mm3 \n\t"
516 H264_DEBLOCK_MASK(%3, %4)
517 "movq %%mm1, %%mm5 \n\t"
518 "movq %%mm2, %%mm6 \n\t"
519 H264_FILTER_CHROMA4(%%mm1, %%mm0, %%mm3, %5) //p0'
520 H264_FILTER_CHROMA4(%%mm2, %%mm3, %%mm0, %5) //q0'
521 "psubb %%mm5, %%mm1 \n\t"
522 "psubb %%mm6, %%mm2 \n\t"
523 "pand %%mm7, %%mm1 \n\t"
524 "pand %%mm7, %%mm2 \n\t"
525 "paddb %%mm5, %%mm1 \n\t"
526 "paddb %%mm6, %%mm2 \n\t"
527 "movq %%mm1, (%0,%2) \n\t"
528 "movq %%mm2, (%1) \n\t"
529 :: "r"(pix-2*stride), "r"(pix), "r"((long)stride),
530 "m"(alpha1), "m"(beta1), "m"(ff_bone)
534 static void h264_v_loop_filter_chroma_intra_mmx2(uint8_t *pix, int stride, int alpha, int beta)
536 h264_loop_filter_chroma_intra_mmx2(pix, stride, alpha-1, beta-1);
539 static void h264_h_loop_filter_chroma_intra_mmx2(uint8_t *pix, int stride, int alpha, int beta)
541 //FIXME: could cut some load/stores by merging transpose with filter
542 DECLARE_ALIGNED_8(uint8_t, trans[8*4]);
543 transpose4x4(trans, pix-2, 8, stride);
544 transpose4x4(trans+4, pix-2+4*stride, 8, stride);
545 h264_loop_filter_chroma_intra_mmx2(trans+2*8, 8, alpha-1, beta-1);
546 transpose4x4(pix-2, trans, stride, 8);
547 transpose4x4(pix-2+4*stride, trans+4, stride, 8);
550 static void h264_loop_filter_strength_mmx2( int16_t bS[2][4][4], uint8_t nnz[40], int8_t ref[2][40], int16_t mv[2][40][2],
551 int bidir, int edges, int step, int mask_mv0, int mask_mv1 ) {
554 "pxor %%mm7, %%mm7 \n\t"
555 "movq %0, %%mm6 \n\t"
556 "movq %1, %%mm5 \n\t"
557 "movq %2, %%mm4 \n\t"
558 ::"m"(ff_pb_1), "m"(ff_pb_3), "m"(ff_pb_7)
560 // could do a special case for dir==0 && edges==1, but it only reduces the
561 // average filter time by 1.2%
562 for( dir=1; dir>=0; dir-- ) {
563 const int d_idx = dir ? -8 : -1;
564 const int mask_mv = dir ? mask_mv1 : mask_mv0;
565 DECLARE_ALIGNED_8(const uint64_t, mask_dir) = dir ? 0 : 0xffffffffffffffffULL;
567 for( b_idx=12, edge=0; edge<edges; edge+=step, b_idx+=8*step ) {
569 "pand %0, %%mm0 \n\t"
572 if(!(mask_mv & edge)) {
573 asm volatile("pxor %%mm0, %%mm0 \n\t":);
574 for( l = bidir; l >= 0; l-- ) {
576 "movd %0, %%mm1 \n\t"
577 "punpckldq %1, %%mm1 \n\t"
578 "movq %%mm1, %%mm2 \n\t"
579 "psrlw $7, %%mm2 \n\t"
580 "pand %%mm6, %%mm2 \n\t"
581 "por %%mm2, %%mm1 \n\t" // ref_cache with -2 mapped to -1
582 "punpckldq %%mm1, %%mm2 \n\t"
583 "pcmpeqb %%mm2, %%mm1 \n\t"
584 "paddb %%mm6, %%mm1 \n\t"
585 "punpckhbw %%mm7, %%mm1 \n\t" // ref[b] != ref[bn]
586 "por %%mm1, %%mm0 \n\t"
588 "movq %2, %%mm1 \n\t"
589 "movq %3, %%mm2 \n\t"
590 "psubw %4, %%mm1 \n\t"
591 "psubw %5, %%mm2 \n\t"
592 "packsswb %%mm2, %%mm1 \n\t"
593 "paddb %%mm5, %%mm1 \n\t"
594 "pminub %%mm4, %%mm1 \n\t"
595 "pcmpeqb %%mm4, %%mm1 \n\t" // abs(mv[b] - mv[bn]) >= limit
596 "por %%mm1, %%mm0 \n\t"
597 ::"m"(ref[l][b_idx]),
598 "m"(ref[l][b_idx+d_idx]),
599 "m"(mv[l][b_idx][0]),
600 "m"(mv[l][b_idx+2][0]),
601 "m"(mv[l][b_idx+d_idx][0]),
602 "m"(mv[l][b_idx+d_idx+2][0])
607 "movd %0, %%mm1 \n\t"
609 "punpcklbw %%mm7, %%mm1 \n\t"
610 "pcmpgtw %%mm7, %%mm1 \n\t" // nnz[b] || nnz[bn]
612 "m"(nnz[b_idx+d_idx])
615 "pcmpeqw %%mm7, %%mm0 \n\t"
616 "pcmpeqw %%mm7, %%mm0 \n\t"
617 "psrlw $15, %%mm0 \n\t" // nonzero -> 1
618 "psrlw $14, %%mm1 \n\t"
619 "movq %%mm0, %%mm2 \n\t"
620 "por %%mm1, %%mm2 \n\t"
621 "psrlw $1, %%mm1 \n\t"
622 "pandn %%mm2, %%mm1 \n\t"
623 "movq %%mm1, %0 \n\t"
624 :"=m"(*bS[dir][edge])
632 "movq (%0), %%mm0 \n\t"
633 "movq 8(%0), %%mm1 \n\t"
634 "movq 16(%0), %%mm2 \n\t"
635 "movq 24(%0), %%mm3 \n\t"
636 TRANSPOSE4(%%mm0, %%mm1, %%mm2, %%mm3, %%mm4)
637 "movq %%mm0, (%0) \n\t"
638 "movq %%mm3, 8(%0) \n\t"
639 "movq %%mm4, 16(%0) \n\t"
640 "movq %%mm2, 24(%0) \n\t"
646 /***********************************/
647 /* motion compensation */
649 #define QPEL_H264V(A,B,C,D,E,F,OP)\
650 "movd (%0), "#F" \n\t"\
651 "movq "#C", %%mm6 \n\t"\
652 "paddw "#D", %%mm6 \n\t"\
653 "psllw $2, %%mm6 \n\t"\
654 "psubw "#B", %%mm6 \n\t"\
655 "psubw "#E", %%mm6 \n\t"\
656 "pmullw %4, %%mm6 \n\t"\
658 "punpcklbw %%mm7, "#F" \n\t"\
659 "paddw %5, "#A" \n\t"\
660 "paddw "#F", "#A" \n\t"\
661 "paddw "#A", %%mm6 \n\t"\
662 "psraw $5, %%mm6 \n\t"\
663 "packuswb %%mm6, %%mm6 \n\t"\
664 OP(%%mm6, (%1), A, d)\
667 #define QPEL_H264HV(A,B,C,D,E,F,OF)\
668 "movd (%0), "#F" \n\t"\
669 "movq "#C", %%mm6 \n\t"\
670 "paddw "#D", %%mm6 \n\t"\
671 "psllw $2, %%mm6 \n\t"\
672 "psubw "#B", %%mm6 \n\t"\
673 "psubw "#E", %%mm6 \n\t"\
674 "pmullw %3, %%mm6 \n\t"\
676 "punpcklbw %%mm7, "#F" \n\t"\
677 "paddw "#F", "#A" \n\t"\
678 "paddw "#A", %%mm6 \n\t"\
679 "movq %%mm6, "#OF"(%1) \n\t"
681 #define QPEL_H264(OPNAME, OP, MMX)\
682 static av_noinline void OPNAME ## h264_qpel4_h_lowpass_ ## MMX(uint8_t *dst, uint8_t *src, int dstStride, int srcStride){\
686 "pxor %%mm7, %%mm7 \n\t"\
687 "movq %5, %%mm4 \n\t"\
688 "movq %6, %%mm5 \n\t"\
690 "movd -1(%0), %%mm1 \n\t"\
691 "movd (%0), %%mm2 \n\t"\
692 "movd 1(%0), %%mm3 \n\t"\
693 "movd 2(%0), %%mm0 \n\t"\
694 "punpcklbw %%mm7, %%mm1 \n\t"\
695 "punpcklbw %%mm7, %%mm2 \n\t"\
696 "punpcklbw %%mm7, %%mm3 \n\t"\
697 "punpcklbw %%mm7, %%mm0 \n\t"\
698 "paddw %%mm0, %%mm1 \n\t"\
699 "paddw %%mm3, %%mm2 \n\t"\
700 "movd -2(%0), %%mm0 \n\t"\
701 "movd 3(%0), %%mm3 \n\t"\
702 "punpcklbw %%mm7, %%mm0 \n\t"\
703 "punpcklbw %%mm7, %%mm3 \n\t"\
704 "paddw %%mm3, %%mm0 \n\t"\
705 "psllw $2, %%mm2 \n\t"\
706 "psubw %%mm1, %%mm2 \n\t"\
707 "pmullw %%mm4, %%mm2 \n\t"\
708 "paddw %%mm5, %%mm0 \n\t"\
709 "paddw %%mm2, %%mm0 \n\t"\
710 "psraw $5, %%mm0 \n\t"\
711 "packuswb %%mm0, %%mm0 \n\t"\
712 OP(%%mm0, (%1),%%mm6, d)\
717 : "+a"(src), "+c"(dst), "+m"(h)\
718 : "d"((long)srcStride), "S"((long)dstStride), "m"(ff_pw_5), "m"(ff_pw_16)\
722 static av_noinline void OPNAME ## h264_qpel4_h_lowpass_l2_ ## MMX(uint8_t *dst, uint8_t *src, uint8_t *src2, int dstStride, int src2Stride){\
725 "pxor %%mm7, %%mm7 \n\t"\
726 "movq %0, %%mm4 \n\t"\
727 "movq %1, %%mm5 \n\t"\
728 :: "m"(ff_pw_5), "m"(ff_pw_16)\
732 "movd -1(%0), %%mm1 \n\t"\
733 "movd (%0), %%mm2 \n\t"\
734 "movd 1(%0), %%mm3 \n\t"\
735 "movd 2(%0), %%mm0 \n\t"\
736 "punpcklbw %%mm7, %%mm1 \n\t"\
737 "punpcklbw %%mm7, %%mm2 \n\t"\
738 "punpcklbw %%mm7, %%mm3 \n\t"\
739 "punpcklbw %%mm7, %%mm0 \n\t"\
740 "paddw %%mm0, %%mm1 \n\t"\
741 "paddw %%mm3, %%mm2 \n\t"\
742 "movd -2(%0), %%mm0 \n\t"\
743 "movd 3(%0), %%mm3 \n\t"\
744 "punpcklbw %%mm7, %%mm0 \n\t"\
745 "punpcklbw %%mm7, %%mm3 \n\t"\
746 "paddw %%mm3, %%mm0 \n\t"\
747 "psllw $2, %%mm2 \n\t"\
748 "psubw %%mm1, %%mm2 \n\t"\
749 "pmullw %%mm4, %%mm2 \n\t"\
750 "paddw %%mm5, %%mm0 \n\t"\
751 "paddw %%mm2, %%mm0 \n\t"\
752 "movd (%2), %%mm3 \n\t"\
753 "psraw $5, %%mm0 \n\t"\
754 "packuswb %%mm0, %%mm0 \n\t"\
755 PAVGB" %%mm3, %%mm0 \n\t"\
756 OP(%%mm0, (%1),%%mm6, d)\
760 : "+a"(src), "+c"(dst), "+d"(src2)\
761 : "D"((long)src2Stride), "S"((long)dstStride)\
766 static av_noinline void OPNAME ## h264_qpel4_v_lowpass_ ## MMX(uint8_t *dst, uint8_t *src, int dstStride, int srcStride){\
769 "pxor %%mm7, %%mm7 \n\t"\
770 "movd (%0), %%mm0 \n\t"\
772 "movd (%0), %%mm1 \n\t"\
774 "movd (%0), %%mm2 \n\t"\
776 "movd (%0), %%mm3 \n\t"\
778 "movd (%0), %%mm4 \n\t"\
780 "punpcklbw %%mm7, %%mm0 \n\t"\
781 "punpcklbw %%mm7, %%mm1 \n\t"\
782 "punpcklbw %%mm7, %%mm2 \n\t"\
783 "punpcklbw %%mm7, %%mm3 \n\t"\
784 "punpcklbw %%mm7, %%mm4 \n\t"\
785 QPEL_H264V(%%mm0, %%mm1, %%mm2, %%mm3, %%mm4, %%mm5, OP)\
786 QPEL_H264V(%%mm1, %%mm2, %%mm3, %%mm4, %%mm5, %%mm0, OP)\
787 QPEL_H264V(%%mm2, %%mm3, %%mm4, %%mm5, %%mm0, %%mm1, OP)\
788 QPEL_H264V(%%mm3, %%mm4, %%mm5, %%mm0, %%mm1, %%mm2, OP)\
790 : "+a"(src), "+c"(dst)\
791 : "S"((long)srcStride), "D"((long)dstStride), "m"(ff_pw_5), "m"(ff_pw_16)\
795 static av_noinline void OPNAME ## h264_qpel4_hv_lowpass_ ## MMX(uint8_t *dst, int16_t *tmp, uint8_t *src, int dstStride, int tmpStride, int srcStride){\
798 src -= 2*srcStride+2;\
801 "pxor %%mm7, %%mm7 \n\t"\
802 "movd (%0), %%mm0 \n\t"\
804 "movd (%0), %%mm1 \n\t"\
806 "movd (%0), %%mm2 \n\t"\
808 "movd (%0), %%mm3 \n\t"\
810 "movd (%0), %%mm4 \n\t"\
812 "punpcklbw %%mm7, %%mm0 \n\t"\
813 "punpcklbw %%mm7, %%mm1 \n\t"\
814 "punpcklbw %%mm7, %%mm2 \n\t"\
815 "punpcklbw %%mm7, %%mm3 \n\t"\
816 "punpcklbw %%mm7, %%mm4 \n\t"\
817 QPEL_H264HV(%%mm0, %%mm1, %%mm2, %%mm3, %%mm4, %%mm5, 0*8*3)\
818 QPEL_H264HV(%%mm1, %%mm2, %%mm3, %%mm4, %%mm5, %%mm0, 1*8*3)\
819 QPEL_H264HV(%%mm2, %%mm3, %%mm4, %%mm5, %%mm0, %%mm1, 2*8*3)\
820 QPEL_H264HV(%%mm3, %%mm4, %%mm5, %%mm0, %%mm1, %%mm2, 3*8*3)\
823 : "c"(tmp), "S"((long)srcStride), "m"(ff_pw_5)\
827 src += 4 - 9*srcStride;\
831 "movq %4, %%mm6 \n\t"\
833 "movq (%0), %%mm0 \n\t"\
834 "paddw 10(%0), %%mm0 \n\t"\
835 "movq 2(%0), %%mm1 \n\t"\
836 "paddw 8(%0), %%mm1 \n\t"\
837 "movq 4(%0), %%mm2 \n\t"\
838 "paddw 6(%0), %%mm2 \n\t"\
839 "psubw %%mm1, %%mm0 \n\t"/*a-b (abccba)*/\
840 "psraw $2, %%mm0 \n\t"/*(a-b)/4 */\
841 "psubw %%mm1, %%mm0 \n\t"/*(a-b)/4-b */\
842 "paddsw %%mm2, %%mm0 \n\t"\
843 "psraw $2, %%mm0 \n\t"/*((a-b)/4-b+c)/4 */\
844 "paddw %%mm6, %%mm2 \n\t"\
845 "paddw %%mm2, %%mm0 \n\t"/*(a-5*b+20*c)/16 +32 */\
846 "psraw $6, %%mm0 \n\t"\
847 "packuswb %%mm0, %%mm0 \n\t"\
848 OP(%%mm0, (%1),%%mm7, d)\
853 : "+a"(tmp), "+c"(dst), "+m"(h)\
854 : "S"((long)dstStride), "m"(ff_pw_32)\
859 static av_noinline void OPNAME ## h264_qpel8_h_lowpass_ ## MMX(uint8_t *dst, uint8_t *src, int dstStride, int srcStride){\
862 "pxor %%mm7, %%mm7 \n\t"\
863 "movq %5, %%mm6 \n\t"\
865 "movq (%0), %%mm0 \n\t"\
866 "movq 1(%0), %%mm2 \n\t"\
867 "movq %%mm0, %%mm1 \n\t"\
868 "movq %%mm2, %%mm3 \n\t"\
869 "punpcklbw %%mm7, %%mm0 \n\t"\
870 "punpckhbw %%mm7, %%mm1 \n\t"\
871 "punpcklbw %%mm7, %%mm2 \n\t"\
872 "punpckhbw %%mm7, %%mm3 \n\t"\
873 "paddw %%mm2, %%mm0 \n\t"\
874 "paddw %%mm3, %%mm1 \n\t"\
875 "psllw $2, %%mm0 \n\t"\
876 "psllw $2, %%mm1 \n\t"\
877 "movq -1(%0), %%mm2 \n\t"\
878 "movq 2(%0), %%mm4 \n\t"\
879 "movq %%mm2, %%mm3 \n\t"\
880 "movq %%mm4, %%mm5 \n\t"\
881 "punpcklbw %%mm7, %%mm2 \n\t"\
882 "punpckhbw %%mm7, %%mm3 \n\t"\
883 "punpcklbw %%mm7, %%mm4 \n\t"\
884 "punpckhbw %%mm7, %%mm5 \n\t"\
885 "paddw %%mm4, %%mm2 \n\t"\
886 "paddw %%mm3, %%mm5 \n\t"\
887 "psubw %%mm2, %%mm0 \n\t"\
888 "psubw %%mm5, %%mm1 \n\t"\
889 "pmullw %%mm6, %%mm0 \n\t"\
890 "pmullw %%mm6, %%mm1 \n\t"\
891 "movd -2(%0), %%mm2 \n\t"\
892 "movd 7(%0), %%mm5 \n\t"\
893 "punpcklbw %%mm7, %%mm2 \n\t"\
894 "punpcklbw %%mm7, %%mm5 \n\t"\
895 "paddw %%mm3, %%mm2 \n\t"\
896 "paddw %%mm5, %%mm4 \n\t"\
897 "movq %6, %%mm5 \n\t"\
898 "paddw %%mm5, %%mm2 \n\t"\
899 "paddw %%mm5, %%mm4 \n\t"\
900 "paddw %%mm2, %%mm0 \n\t"\
901 "paddw %%mm4, %%mm1 \n\t"\
902 "psraw $5, %%mm0 \n\t"\
903 "psraw $5, %%mm1 \n\t"\
904 "packuswb %%mm1, %%mm0 \n\t"\
905 OP(%%mm0, (%1),%%mm5, q)\
910 : "+a"(src), "+c"(dst), "+m"(h)\
911 : "d"((long)srcStride), "S"((long)dstStride), "m"(ff_pw_5), "m"(ff_pw_16)\
916 static av_noinline void OPNAME ## h264_qpel8_h_lowpass_l2_ ## MMX(uint8_t *dst, uint8_t *src, uint8_t *src2, int dstStride, int src2Stride){\
919 "pxor %%mm7, %%mm7 \n\t"\
920 "movq %0, %%mm6 \n\t"\
925 "movq (%0), %%mm0 \n\t"\
926 "movq 1(%0), %%mm2 \n\t"\
927 "movq %%mm0, %%mm1 \n\t"\
928 "movq %%mm2, %%mm3 \n\t"\
929 "punpcklbw %%mm7, %%mm0 \n\t"\
930 "punpckhbw %%mm7, %%mm1 \n\t"\
931 "punpcklbw %%mm7, %%mm2 \n\t"\
932 "punpckhbw %%mm7, %%mm3 \n\t"\
933 "paddw %%mm2, %%mm0 \n\t"\
934 "paddw %%mm3, %%mm1 \n\t"\
935 "psllw $2, %%mm0 \n\t"\
936 "psllw $2, %%mm1 \n\t"\
937 "movq -1(%0), %%mm2 \n\t"\
938 "movq 2(%0), %%mm4 \n\t"\
939 "movq %%mm2, %%mm3 \n\t"\
940 "movq %%mm4, %%mm5 \n\t"\
941 "punpcklbw %%mm7, %%mm2 \n\t"\
942 "punpckhbw %%mm7, %%mm3 \n\t"\
943 "punpcklbw %%mm7, %%mm4 \n\t"\
944 "punpckhbw %%mm7, %%mm5 \n\t"\
945 "paddw %%mm4, %%mm2 \n\t"\
946 "paddw %%mm3, %%mm5 \n\t"\
947 "psubw %%mm2, %%mm0 \n\t"\
948 "psubw %%mm5, %%mm1 \n\t"\
949 "pmullw %%mm6, %%mm0 \n\t"\
950 "pmullw %%mm6, %%mm1 \n\t"\
951 "movd -2(%0), %%mm2 \n\t"\
952 "movd 7(%0), %%mm5 \n\t"\
953 "punpcklbw %%mm7, %%mm2 \n\t"\
954 "punpcklbw %%mm7, %%mm5 \n\t"\
955 "paddw %%mm3, %%mm2 \n\t"\
956 "paddw %%mm5, %%mm4 \n\t"\
957 "movq %5, %%mm5 \n\t"\
958 "paddw %%mm5, %%mm2 \n\t"\
959 "paddw %%mm5, %%mm4 \n\t"\
960 "paddw %%mm2, %%mm0 \n\t"\
961 "paddw %%mm4, %%mm1 \n\t"\
962 "psraw $5, %%mm0 \n\t"\
963 "psraw $5, %%mm1 \n\t"\
964 "movq (%2), %%mm4 \n\t"\
965 "packuswb %%mm1, %%mm0 \n\t"\
966 PAVGB" %%mm4, %%mm0 \n\t"\
967 OP(%%mm0, (%1),%%mm5, q)\
971 : "+a"(src), "+c"(dst), "+d"(src2)\
972 : "D"((long)src2Stride), "S"((long)dstStride),\
979 static av_noinline void OPNAME ## h264_qpel8or16_v_lowpass_ ## MMX(uint8_t *dst, uint8_t *src, int dstStride, int srcStride, int h){\
985 "pxor %%mm7, %%mm7 \n\t"\
986 "movd (%0), %%mm0 \n\t"\
988 "movd (%0), %%mm1 \n\t"\
990 "movd (%0), %%mm2 \n\t"\
992 "movd (%0), %%mm3 \n\t"\
994 "movd (%0), %%mm4 \n\t"\
996 "punpcklbw %%mm7, %%mm0 \n\t"\
997 "punpcklbw %%mm7, %%mm1 \n\t"\
998 "punpcklbw %%mm7, %%mm2 \n\t"\
999 "punpcklbw %%mm7, %%mm3 \n\t"\
1000 "punpcklbw %%mm7, %%mm4 \n\t"\
1001 QPEL_H264V(%%mm0, %%mm1, %%mm2, %%mm3, %%mm4, %%mm5, OP)\
1002 QPEL_H264V(%%mm1, %%mm2, %%mm3, %%mm4, %%mm5, %%mm0, OP)\
1003 QPEL_H264V(%%mm2, %%mm3, %%mm4, %%mm5, %%mm0, %%mm1, OP)\
1004 QPEL_H264V(%%mm3, %%mm4, %%mm5, %%mm0, %%mm1, %%mm2, OP)\
1005 QPEL_H264V(%%mm4, %%mm5, %%mm0, %%mm1, %%mm2, %%mm3, OP)\
1006 QPEL_H264V(%%mm5, %%mm0, %%mm1, %%mm2, %%mm3, %%mm4, OP)\
1007 QPEL_H264V(%%mm0, %%mm1, %%mm2, %%mm3, %%mm4, %%mm5, OP)\
1008 QPEL_H264V(%%mm1, %%mm2, %%mm3, %%mm4, %%mm5, %%mm0, OP)\
1010 : "+a"(src), "+c"(dst)\
1011 : "S"((long)srcStride), "D"((long)dstStride), "m"(ff_pw_5), "m"(ff_pw_16)\
1016 QPEL_H264V(%%mm2, %%mm3, %%mm4, %%mm5, %%mm0, %%mm1, OP)\
1017 QPEL_H264V(%%mm3, %%mm4, %%mm5, %%mm0, %%mm1, %%mm2, OP)\
1018 QPEL_H264V(%%mm4, %%mm5, %%mm0, %%mm1, %%mm2, %%mm3, OP)\
1019 QPEL_H264V(%%mm5, %%mm0, %%mm1, %%mm2, %%mm3, %%mm4, OP)\
1020 QPEL_H264V(%%mm0, %%mm1, %%mm2, %%mm3, %%mm4, %%mm5, OP)\
1021 QPEL_H264V(%%mm1, %%mm2, %%mm3, %%mm4, %%mm5, %%mm0, OP)\
1022 QPEL_H264V(%%mm2, %%mm3, %%mm4, %%mm5, %%mm0, %%mm1, OP)\
1023 QPEL_H264V(%%mm3, %%mm4, %%mm5, %%mm0, %%mm1, %%mm2, OP)\
1025 : "+a"(src), "+c"(dst)\
1026 : "S"((long)srcStride), "D"((long)dstStride), "m"(ff_pw_5), "m"(ff_pw_16)\
1030 src += 4-(h+5)*srcStride;\
1031 dst += 4-h*dstStride;\
1034 static av_noinline void OPNAME ## h264_qpel8or16_hv_lowpass_ ## MMX(uint8_t *dst, int16_t *tmp, uint8_t *src, int dstStride, int tmpStride, int srcStride, int size){\
1036 int w = (size+8)>>2;\
1037 src -= 2*srcStride+2;\
1040 "pxor %%mm7, %%mm7 \n\t"\
1041 "movd (%0), %%mm0 \n\t"\
1043 "movd (%0), %%mm1 \n\t"\
1045 "movd (%0), %%mm2 \n\t"\
1047 "movd (%0), %%mm3 \n\t"\
1049 "movd (%0), %%mm4 \n\t"\
1051 "punpcklbw %%mm7, %%mm0 \n\t"\
1052 "punpcklbw %%mm7, %%mm1 \n\t"\
1053 "punpcklbw %%mm7, %%mm2 \n\t"\
1054 "punpcklbw %%mm7, %%mm3 \n\t"\
1055 "punpcklbw %%mm7, %%mm4 \n\t"\
1056 QPEL_H264HV(%%mm0, %%mm1, %%mm2, %%mm3, %%mm4, %%mm5, 0*48)\
1057 QPEL_H264HV(%%mm1, %%mm2, %%mm3, %%mm4, %%mm5, %%mm0, 1*48)\
1058 QPEL_H264HV(%%mm2, %%mm3, %%mm4, %%mm5, %%mm0, %%mm1, 2*48)\
1059 QPEL_H264HV(%%mm3, %%mm4, %%mm5, %%mm0, %%mm1, %%mm2, 3*48)\
1060 QPEL_H264HV(%%mm4, %%mm5, %%mm0, %%mm1, %%mm2, %%mm3, 4*48)\
1061 QPEL_H264HV(%%mm5, %%mm0, %%mm1, %%mm2, %%mm3, %%mm4, 5*48)\
1062 QPEL_H264HV(%%mm0, %%mm1, %%mm2, %%mm3, %%mm4, %%mm5, 6*48)\
1063 QPEL_H264HV(%%mm1, %%mm2, %%mm3, %%mm4, %%mm5, %%mm0, 7*48)\
1065 : "c"(tmp), "S"((long)srcStride), "m"(ff_pw_5)\
1070 QPEL_H264HV(%%mm2, %%mm3, %%mm4, %%mm5, %%mm0, %%mm1, 8*48)\
1071 QPEL_H264HV(%%mm3, %%mm4, %%mm5, %%mm0, %%mm1, %%mm2, 9*48)\
1072 QPEL_H264HV(%%mm4, %%mm5, %%mm0, %%mm1, %%mm2, %%mm3, 10*48)\
1073 QPEL_H264HV(%%mm5, %%mm0, %%mm1, %%mm2, %%mm3, %%mm4, 11*48)\
1074 QPEL_H264HV(%%mm0, %%mm1, %%mm2, %%mm3, %%mm4, %%mm5, 12*48)\
1075 QPEL_H264HV(%%mm1, %%mm2, %%mm3, %%mm4, %%mm5, %%mm0, 13*48)\
1076 QPEL_H264HV(%%mm2, %%mm3, %%mm4, %%mm5, %%mm0, %%mm1, 14*48)\
1077 QPEL_H264HV(%%mm3, %%mm4, %%mm5, %%mm0, %%mm1, %%mm2, 15*48)\
1079 : "c"(tmp), "S"((long)srcStride), "m"(ff_pw_5)\
1084 src += 4 - (size+5)*srcStride;\
1091 "movq %4, %%mm6 \n\t"\
1093 "movq (%0), %%mm0 \n\t"\
1094 "movq 8(%0), %%mm3 \n\t"\
1095 "movq 2(%0), %%mm1 \n\t"\
1096 "movq 10(%0), %%mm4 \n\t"\
1097 "paddw %%mm4, %%mm0 \n\t"\
1098 "paddw %%mm3, %%mm1 \n\t"\
1099 "paddw 18(%0), %%mm3 \n\t"\
1100 "paddw 16(%0), %%mm4 \n\t"\
1101 "movq 4(%0), %%mm2 \n\t"\
1102 "movq 12(%0), %%mm5 \n\t"\
1103 "paddw 6(%0), %%mm2 \n\t"\
1104 "paddw 14(%0), %%mm5 \n\t"\
1105 "psubw %%mm1, %%mm0 \n\t"\
1106 "psubw %%mm4, %%mm3 \n\t"\
1107 "psraw $2, %%mm0 \n\t"\
1108 "psraw $2, %%mm3 \n\t"\
1109 "psubw %%mm1, %%mm0 \n\t"\
1110 "psubw %%mm4, %%mm3 \n\t"\
1111 "paddsw %%mm2, %%mm0 \n\t"\
1112 "paddsw %%mm5, %%mm3 \n\t"\
1113 "psraw $2, %%mm0 \n\t"\
1114 "psraw $2, %%mm3 \n\t"\
1115 "paddw %%mm6, %%mm2 \n\t"\
1116 "paddw %%mm6, %%mm5 \n\t"\
1117 "paddw %%mm2, %%mm0 \n\t"\
1118 "paddw %%mm5, %%mm3 \n\t"\
1119 "psraw $6, %%mm0 \n\t"\
1120 "psraw $6, %%mm3 \n\t"\
1121 "packuswb %%mm3, %%mm0 \n\t"\
1122 OP(%%mm0, (%1),%%mm7, q)\
1127 : "+a"(tmp), "+c"(dst), "+m"(h)\
1128 : "S"((long)dstStride), "m"(ff_pw_32)\
1131 tmp += 8 - size*24;\
1132 dst += 8 - size*dstStride;\
1136 static void OPNAME ## h264_qpel8_v_lowpass_ ## MMX(uint8_t *dst, uint8_t *src, int dstStride, int srcStride){\
1137 OPNAME ## h264_qpel8or16_v_lowpass_ ## MMX(dst , src , dstStride, srcStride, 8);\
1139 static av_noinline void OPNAME ## h264_qpel16_v_lowpass_ ## MMX(uint8_t *dst, uint8_t *src, int dstStride, int srcStride){\
1140 OPNAME ## h264_qpel8or16_v_lowpass_ ## MMX(dst , src , dstStride, srcStride, 16);\
1141 OPNAME ## h264_qpel8or16_v_lowpass_ ## MMX(dst+8, src+8, dstStride, srcStride, 16);\
1144 static av_noinline void OPNAME ## h264_qpel16_h_lowpass_ ## MMX(uint8_t *dst, uint8_t *src, int dstStride, int srcStride){\
1145 OPNAME ## h264_qpel8_h_lowpass_ ## MMX(dst , src , dstStride, srcStride);\
1146 OPNAME ## h264_qpel8_h_lowpass_ ## MMX(dst+8, src+8, dstStride, srcStride);\
1147 src += 8*srcStride;\
1148 dst += 8*dstStride;\
1149 OPNAME ## h264_qpel8_h_lowpass_ ## MMX(dst , src , dstStride, srcStride);\
1150 OPNAME ## h264_qpel8_h_lowpass_ ## MMX(dst+8, src+8, dstStride, srcStride);\
1153 static av_noinline void OPNAME ## h264_qpel16_h_lowpass_l2_ ## MMX(uint8_t *dst, uint8_t *src, uint8_t *src2, int dstStride, int src2Stride){\
1154 OPNAME ## h264_qpel8_h_lowpass_l2_ ## MMX(dst , src , src2 , dstStride, src2Stride);\
1155 OPNAME ## h264_qpel8_h_lowpass_l2_ ## MMX(dst+8, src+8, src2+8, dstStride, src2Stride);\
1156 src += 8*dstStride;\
1157 dst += 8*dstStride;\
1158 src2 += 8*src2Stride;\
1159 OPNAME ## h264_qpel8_h_lowpass_l2_ ## MMX(dst , src , src2 , dstStride, src2Stride);\
1160 OPNAME ## h264_qpel8_h_lowpass_l2_ ## MMX(dst+8, src+8, src2+8, dstStride, src2Stride);\
1163 static void OPNAME ## h264_qpel8_hv_lowpass_ ## MMX(uint8_t *dst, int16_t *tmp, uint8_t *src, int dstStride, int tmpStride, int srcStride){\
1164 OPNAME ## h264_qpel8or16_hv_lowpass_ ## MMX(dst , tmp , src , dstStride, tmpStride, srcStride, 8);\
1167 static void OPNAME ## h264_qpel16_hv_lowpass_ ## MMX(uint8_t *dst, int16_t *tmp, uint8_t *src, int dstStride, int tmpStride, int srcStride){\
1168 OPNAME ## h264_qpel8or16_hv_lowpass_ ## MMX(dst , tmp , src , dstStride, tmpStride, srcStride, 16);\
1171 static av_noinline void OPNAME ## pixels4_l2_shift5_ ## MMX(uint8_t *dst, int16_t *src16, uint8_t *src8, int dstStride, int src8Stride, int h)\
1174 "movq %5, %%mm6 \n\t"\
1175 "movq (%1), %%mm0 \n\t"\
1176 "movq 24(%1), %%mm1 \n\t"\
1177 "paddw %%mm6, %%mm0 \n\t"\
1178 "paddw %%mm6, %%mm1 \n\t"\
1179 "psraw $5, %%mm0 \n\t"\
1180 "psraw $5, %%mm1 \n\t"\
1181 "packuswb %%mm0, %%mm0 \n\t"\
1182 "packuswb %%mm1, %%mm1 \n\t"\
1183 PAVGB" (%0), %%mm0 \n\t"\
1184 PAVGB" (%0,%3), %%mm1 \n\t"\
1185 OP(%%mm0, (%2), %%mm4, d)\
1186 OP(%%mm1, (%2,%4), %%mm5, d)\
1187 "lea (%0,%3,2), %0 \n\t"\
1188 "lea (%2,%4,2), %2 \n\t"\
1189 "movq 48(%1), %%mm0 \n\t"\
1190 "movq 72(%1), %%mm1 \n\t"\
1191 "paddw %%mm6, %%mm0 \n\t"\
1192 "paddw %%mm6, %%mm1 \n\t"\
1193 "psraw $5, %%mm0 \n\t"\
1194 "psraw $5, %%mm1 \n\t"\
1195 "packuswb %%mm0, %%mm0 \n\t"\
1196 "packuswb %%mm1, %%mm1 \n\t"\
1197 PAVGB" (%0), %%mm0 \n\t"\
1198 PAVGB" (%0,%3), %%mm1 \n\t"\
1199 OP(%%mm0, (%2), %%mm4, d)\
1200 OP(%%mm1, (%2,%4), %%mm5, d)\
1201 :"+a"(src8), "+c"(src16), "+d"(dst)\
1202 :"S"((long)src8Stride), "D"((long)dstStride), "m"(ff_pw_16)\
1205 static av_noinline void OPNAME ## pixels8_l2_shift5_ ## MMX(uint8_t *dst, int16_t *src16, uint8_t *src8, int dstStride, int src8Stride, int h)\
1208 "movq %0, %%mm6 \n\t"\
1213 "movq (%1), %%mm0 \n\t"\
1214 "movq 8(%1), %%mm1 \n\t"\
1215 "paddw %%mm6, %%mm0 \n\t"\
1216 "paddw %%mm6, %%mm1 \n\t"\
1217 "psraw $5, %%mm0 \n\t"\
1218 "psraw $5, %%mm1 \n\t"\
1219 "packuswb %%mm1, %%mm0 \n\t"\
1220 PAVGB" (%0), %%mm0 \n\t"\
1221 OP(%%mm0, (%2), %%mm5, q)\
1222 ::"a"(src8), "c"(src16), "d"(dst)\
1224 src8 += src8Stride;\
1229 static void OPNAME ## pixels16_l2_shift5_ ## MMX(uint8_t *dst, int16_t *src16, uint8_t *src8, int dstStride, int src8Stride, int h)\
1231 OPNAME ## pixels8_l2_shift5_ ## MMX(dst , src16 , src8 , dstStride, src8Stride, h);\
1232 OPNAME ## pixels8_l2_shift5_ ## MMX(dst+8, src16+8, src8+8, dstStride, src8Stride, h);\
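/* pixels*_l2_shift5 in scalar form (put_ variant shown): the 16-bit
 * intermediate produced by the hv filter is rounded down to 8 bits and
 * averaged with the already-filtered 8-bit plane.  Sketch only, not compiled;
 * the helper name is made up.
 */
#if 0
static void pixels_l2_shift5_ref(uint8_t *dst, const int16_t *src16, const uint8_t *src8,
                                 int dstStride, int src16Stride, int src8Stride, int w, int h)
{
    int x, y;
    for (y = 0; y < h; y++)
        for (x = 0; x < w; x++) {
            const int a = av_clip_uint8((src16[y*src16Stride + x] + 16) >> 5);
            dst[y*dstStride + x] = (a + src8[y*src8Stride + x] + 1) >> 1;   /* pavgb */
        }
}
#endif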
1236 #define H264_MC(OPNAME, SIZE, MMX) \
1237 static void OPNAME ## h264_qpel ## SIZE ## _mc00_ ## MMX (uint8_t *dst, uint8_t *src, int stride){\
1238 OPNAME ## pixels ## SIZE ## _mmx(dst, src, stride, SIZE);\
1241 static void OPNAME ## h264_qpel ## SIZE ## _mc10_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
1242 OPNAME ## h264_qpel ## SIZE ## _h_lowpass_l2_ ## MMX(dst, src, src, stride, stride);\
1245 static void OPNAME ## h264_qpel ## SIZE ## _mc20_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
1246 OPNAME ## h264_qpel ## SIZE ## _h_lowpass_ ## MMX(dst, src, stride, stride);\
1249 static void OPNAME ## h264_qpel ## SIZE ## _mc30_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
1250 OPNAME ## h264_qpel ## SIZE ## _h_lowpass_l2_ ## MMX(dst, src, src+1, stride, stride);\
1253 static void OPNAME ## h264_qpel ## SIZE ## _mc01_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
1254 uint64_t temp[SIZE*SIZE/8];\
1255 uint8_t * const half= (uint8_t*)temp;\
1256 put_h264_qpel ## SIZE ## _v_lowpass_ ## MMX(half, src, SIZE, stride);\
1257 OPNAME ## pixels ## SIZE ## _l2_ ## MMX(dst, src, half, stride, stride, SIZE);\
1260 static void OPNAME ## h264_qpel ## SIZE ## _mc02_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
1261 OPNAME ## h264_qpel ## SIZE ## _v_lowpass_ ## MMX(dst, src, stride, stride);\
1264 static void OPNAME ## h264_qpel ## SIZE ## _mc03_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
1265 uint64_t temp[SIZE*SIZE/8];\
1266 uint8_t * const half= (uint8_t*)temp;\
1267 put_h264_qpel ## SIZE ## _v_lowpass_ ## MMX(half, src, SIZE, stride);\
1268 OPNAME ## pixels ## SIZE ## _l2_ ## MMX(dst, src+stride, half, stride, stride, SIZE);\
1271 static void OPNAME ## h264_qpel ## SIZE ## _mc11_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
1272 uint64_t temp[SIZE*SIZE/8];\
1273 uint8_t * const halfV= (uint8_t*)temp;\
1274 put_h264_qpel ## SIZE ## _v_lowpass_ ## MMX(halfV, src, SIZE, stride);\
1275 OPNAME ## h264_qpel ## SIZE ## _h_lowpass_l2_ ## MMX(dst, src, halfV, stride, SIZE);\
1278 static void OPNAME ## h264_qpel ## SIZE ## _mc31_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
1279 uint64_t temp[SIZE*SIZE/8];\
1280 uint8_t * const halfV= (uint8_t*)temp;\
1281 put_h264_qpel ## SIZE ## _v_lowpass_ ## MMX(halfV, src+1, SIZE, stride);\
1282 OPNAME ## h264_qpel ## SIZE ## _h_lowpass_l2_ ## MMX(dst, src, halfV, stride, SIZE);\
1285 static void OPNAME ## h264_qpel ## SIZE ## _mc13_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
1286 uint64_t temp[SIZE*SIZE/8];\
1287 uint8_t * const halfV= (uint8_t*)temp;\
1288 put_h264_qpel ## SIZE ## _v_lowpass_ ## MMX(halfV, src, SIZE, stride);\
1289 OPNAME ## h264_qpel ## SIZE ## _h_lowpass_l2_ ## MMX(dst, src+stride, halfV, stride, SIZE);\
1292 static void OPNAME ## h264_qpel ## SIZE ## _mc33_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
1293 uint64_t temp[SIZE*SIZE/8];\
1294 uint8_t * const halfV= (uint8_t*)temp;\
1295 put_h264_qpel ## SIZE ## _v_lowpass_ ## MMX(halfV, src+1, SIZE, stride);\
1296 OPNAME ## h264_qpel ## SIZE ## _h_lowpass_l2_ ## MMX(dst, src+stride, halfV, stride, SIZE);\
1299 static void OPNAME ## h264_qpel ## SIZE ## _mc22_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
1300 uint64_t temp[SIZE*(SIZE<8?12:24)/4];\
1301 int16_t * const tmp= (int16_t*)temp;\
1302 OPNAME ## h264_qpel ## SIZE ## _hv_lowpass_ ## MMX(dst, tmp, src, stride, SIZE, stride);\
1305 static void OPNAME ## h264_qpel ## SIZE ## _mc21_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
1306 uint64_t temp[SIZE*(SIZE<8?12:24)/4 + SIZE*SIZE/8];\
1307 uint8_t * const halfHV= (uint8_t*)temp;\
1308 int16_t * const tmp= ((int16_t*)temp) + SIZE*SIZE/2;\
1309 assert(((int)temp & 7) == 0);\
1310 put_h264_qpel ## SIZE ## _hv_lowpass_ ## MMX(halfHV, tmp, src, SIZE, SIZE, stride);\
1311 OPNAME ## h264_qpel ## SIZE ## _h_lowpass_l2_ ## MMX(dst, src, halfHV, stride, SIZE);\
1314 static void OPNAME ## h264_qpel ## SIZE ## _mc23_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
1315 uint64_t temp[SIZE*(SIZE<8?12:24)/4 + SIZE*SIZE/8];\
1316 uint8_t * const halfHV= (uint8_t*)temp;\
1317 int16_t * const tmp= ((int16_t*)temp) + SIZE*SIZE/2;\
1318 assert(((int)temp & 7) == 0);\
1319 put_h264_qpel ## SIZE ## _hv_lowpass_ ## MMX(halfHV, tmp, src, SIZE, SIZE, stride);\
1320 OPNAME ## h264_qpel ## SIZE ## _h_lowpass_l2_ ## MMX(dst, src+stride, halfHV, stride, SIZE);\
1323 static void OPNAME ## h264_qpel ## SIZE ## _mc12_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
1324 uint64_t temp[SIZE*(SIZE<8?12:24)/4 + SIZE*SIZE/8];\
1325 int16_t * const halfV= ((int16_t*)temp) + SIZE*SIZE/2;\
1326 uint8_t * const halfHV= ((uint8_t*)temp);\
1327 assert(((int)temp & 7) == 0);\
1328 put_h264_qpel ## SIZE ## _hv_lowpass_ ## MMX(halfHV, halfV, src, SIZE, SIZE, stride);\
1329 OPNAME ## pixels ## SIZE ## _l2_shift5_ ## MMX(dst, halfV+2, halfHV, stride, SIZE, SIZE);\
1332 static void OPNAME ## h264_qpel ## SIZE ## _mc32_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
1333 uint64_t temp[SIZE*(SIZE<8?12:24)/4 + SIZE*SIZE/8];\
1334 int16_t * const halfV= ((int16_t*)temp) + SIZE*SIZE/2;\
1335 uint8_t * const halfHV= ((uint8_t*)temp);\
1336 assert(((int)temp & 7) == 0);\
1337 put_h264_qpel ## SIZE ## _hv_lowpass_ ## MMX(halfHV, halfV, src, SIZE, SIZE, stride);\
1338 OPNAME ## pixels ## SIZE ## _l2_shift5_ ## MMX(dst, halfV+3, halfHV, stride, SIZE, SIZE);\
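/* The mc{xy} entry points above follow the usual quarter-pel naming: x and y
 * are the horizontal and vertical quarter-sample offsets.  mc00 is a plain
 * copy, mc20/mc02 are the 6-tap half-pel filters and mc22 the 2-D filter; the
 * remaining positions are rounded averages of two of those, for example
 *     mc10: dst = avg(src,     Hfilter(src))   (h_lowpass_l2 with src2 = src)
 *     mc30: dst = avg(src + 1, Hfilter(src))   (src2 = src + 1)
 *     mc12: dst = avg(Vfilter(src), HVfilter(src))  via pixels_l2_shift5
 * (summary only; the macro bodies above are what actually runs). */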
1342 #define AVG_3DNOW_OP(a,b,temp, size) \
1343 "mov" #size " " #b ", " #temp " \n\t"\
1344 "pavgusb " #temp ", " #a " \n\t"\
1345 "mov" #size " " #a ", " #b " \n\t"
1346 #define AVG_MMX2_OP(a,b,temp, size) \
1347 "mov" #size " " #b ", " #temp " \n\t"\
1348 "pavgb " #temp ", " #a " \n\t"\
1349 "mov" #size " " #a ", " #b " \n\t"
1351 #define PAVGB "pavgusb"
1352 QPEL_H264(put_, PUT_OP, 3dnow)
1353 QPEL_H264(avg_, AVG_3DNOW_OP, 3dnow)
1355 #define PAVGB "pavgb"
1356 QPEL_H264(put_, PUT_OP, mmx2)
1357 QPEL_H264(avg_, AVG_MMX2_OP, mmx2)
1360 H264_MC(put_, 4, 3dnow)
1361 H264_MC(put_, 8, 3dnow)
1362 H264_MC(put_, 16,3dnow)
1363 H264_MC(avg_, 4, 3dnow)
1364 H264_MC(avg_, 8, 3dnow)
1365 H264_MC(avg_, 16,3dnow)
1366 H264_MC(put_, 4, mmx2)
1367 H264_MC(put_, 8, mmx2)
1368 H264_MC(put_, 16,mmx2)
1369 H264_MC(avg_, 4, mmx2)
1370 H264_MC(avg_, 8, mmx2)
1371 H264_MC(avg_, 16,mmx2)
1374 #define H264_CHROMA_OP(S,D)
1375 #define H264_CHROMA_OP4(S,D,T)
1376 #define H264_CHROMA_MC8_TMPL put_h264_chroma_mc8_mmx
1377 #define H264_CHROMA_MC4_TMPL put_h264_chroma_mc4_mmx
1378 #define H264_CHROMA_MC2_TMPL put_h264_chroma_mc2_mmx2
1379 #define H264_CHROMA_MC8_MV0 put_pixels8_mmx
1380 #include "dsputil_h264_template_mmx.c"
1381 #undef H264_CHROMA_OP
1382 #undef H264_CHROMA_OP4
1383 #undef H264_CHROMA_MC8_TMPL
1384 #undef H264_CHROMA_MC4_TMPL
1385 #undef H264_CHROMA_MC2_TMPL
1386 #undef H264_CHROMA_MC8_MV0
1388 #define H264_CHROMA_OP(S,D) "pavgb " #S ", " #D " \n\t"
1389 #define H264_CHROMA_OP4(S,D,T) "movd " #S ", " #T " \n\t"\
1390 "pavgb " #T ", " #D " \n\t"
1391 #define H264_CHROMA_MC8_TMPL avg_h264_chroma_mc8_mmx2
1392 #define H264_CHROMA_MC4_TMPL avg_h264_chroma_mc4_mmx2
1393 #define H264_CHROMA_MC2_TMPL avg_h264_chroma_mc2_mmx2
1394 #define H264_CHROMA_MC8_MV0 avg_pixels8_mmx2
1395 #include "dsputil_h264_template_mmx.c"
1396 #undef H264_CHROMA_OP
1397 #undef H264_CHROMA_OP4
1398 #undef H264_CHROMA_MC8_TMPL
1399 #undef H264_CHROMA_MC4_TMPL
1400 #undef H264_CHROMA_MC2_TMPL
1401 #undef H264_CHROMA_MC8_MV0
1403 #define H264_CHROMA_OP(S,D) "pavgusb " #S ", " #D " \n\t"
1404 #define H264_CHROMA_OP4(S,D,T) "movd " #S ", " #T " \n\t"\
1405 "pavgusb " #T ", " #D " \n\t"
1406 #define H264_CHROMA_MC8_TMPL avg_h264_chroma_mc8_3dnow
1407 #define H264_CHROMA_MC4_TMPL avg_h264_chroma_mc4_3dnow
1408 #define H264_CHROMA_MC8_MV0 avg_pixels8_3dnow
1409 #include "dsputil_h264_template_mmx.c"
1410 #undef H264_CHROMA_OP
1411 #undef H264_CHROMA_OP4
1412 #undef H264_CHROMA_MC8_TMPL
1413 #undef H264_CHROMA_MC4_TMPL
1414 #undef H264_CHROMA_MC8_MV0
1416 /***********************************/
1417 /* weighted prediction */
1419 static inline void ff_h264_weight_WxH_mmx2(uint8_t *dst, int stride, int log2_denom, int weight, int offset, int w, int h)
1422 offset <<= log2_denom;
1423 offset += (1 << log2_denom) >> 1;
1425 "movd %0, %%mm4 \n\t"
1426 "movd %1, %%mm5 \n\t"
1427 "movd %2, %%mm6 \n\t"
1428 "pshufw $0, %%mm4, %%mm4 \n\t"
1429 "pshufw $0, %%mm5, %%mm5 \n\t"
1430 "pxor %%mm7, %%mm7 \n\t"
1431 :: "g"(weight), "g"(offset), "g"(log2_denom)
1433 for(y=0; y<h; y+=2){
1434 for(x=0; x<w; x+=4){
1436 "movd %0, %%mm0 \n\t"
1437 "movd %1, %%mm1 \n\t"
1438 "punpcklbw %%mm7, %%mm0 \n\t"
1439 "punpcklbw %%mm7, %%mm1 \n\t"
1440 "pmullw %%mm4, %%mm0 \n\t"
1441 "pmullw %%mm4, %%mm1 \n\t"
1442 "paddsw %%mm5, %%mm0 \n\t"
1443 "paddsw %%mm5, %%mm1 \n\t"
1444 "psraw %%mm6, %%mm0 \n\t"
1445 "psraw %%mm6, %%mm1 \n\t"
1446 "packuswb %%mm7, %%mm0 \n\t"
1447 "packuswb %%mm7, %%mm1 \n\t"
1448 "movd %%mm0, %0 \n\t"
1449 "movd %%mm1, %1 \n\t"
1450 : "+m"(*(uint32_t*)(dst+x)),
1451 "+m"(*(uint32_t*)(dst+x+stride))
1458 static inline void ff_h264_biweight_WxH_mmx2(uint8_t *dst, uint8_t *src, int stride, int log2_denom, int weightd, int weights, int offset, int w, int h)
1461 offset = ((offset + 1) | 1) << log2_denom;
1463 "movd %0, %%mm3 \n\t"
1464 "movd %1, %%mm4 \n\t"
1465 "movd %2, %%mm5 \n\t"
1466 "movd %3, %%mm6 \n\t"
1467 "pshufw $0, %%mm3, %%mm3 \n\t"
1468 "pshufw $0, %%mm4, %%mm4 \n\t"
1469 "pshufw $0, %%mm5, %%mm5 \n\t"
1470 "pxor %%mm7, %%mm7 \n\t"
1471 :: "g"(weightd), "g"(weights), "g"(offset), "g"(log2_denom+1)
1474 for(x=0; x<w; x+=4){
1476 "movd %0, %%mm0 \n\t"
1477 "movd %1, %%mm1 \n\t"
1478 "punpcklbw %%mm7, %%mm0 \n\t"
1479 "punpcklbw %%mm7, %%mm1 \n\t"
1480 "pmullw %%mm3, %%mm0 \n\t"
1481 "pmullw %%mm4, %%mm1 \n\t"
1482 "paddsw %%mm1, %%mm0 \n\t"
1483 "paddsw %%mm5, %%mm0 \n\t"
1484 "psraw %%mm6, %%mm0 \n\t"
1485 "packuswb %%mm0, %%mm0 \n\t"
1486 "movd %%mm0, %0 \n\t"
1487 : "+m"(*(uint32_t*)(dst+x))
1488 : "m"(*(uint32_t*)(src+x))
1496 #define H264_WEIGHT(W,H) \
1497 static void ff_h264_biweight_ ## W ## x ## H ## _mmx2(uint8_t *dst, uint8_t *src, int stride, int log2_denom, int weightd, int weights, int offset){ \
1498 ff_h264_biweight_WxH_mmx2(dst, src, stride, log2_denom, weightd, weights, offset, W, H); \
1500 static void ff_h264_weight_ ## W ## x ## H ## _mmx2(uint8_t *dst, int stride, int log2_denom, int weight, int offset){ \
1501 ff_h264_weight_WxH_mmx2(dst, stride, log2_denom, weight, offset, W, H); \