/*****************************************************************************
 * mc-c.c: x86 motion compensation
 *****************************************************************************
 * Copyright (C) 2003-2011 x264 project
 *
 * Authors: Laurent Aimar <fenrir@via.ecp.fr>
 *          Loren Merritt <lorenm@u.washington.edu>
 *          Fiona Glaser <fiona@x264.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02111, USA.
 *
 * This program is also available under a commercial proprietary license.
 * For more information, contact us at licensing@x264.com.
 *****************************************************************************/

#include <stdlib.h>
#include <stdio.h>
#include <string.h>

#include "common/common.h"
#include "mc.h"

#define DECL_SUF( func, args )\
    void func##_mmxext args;\
    void func##_sse2 args;\
    void func##_ssse3 args;

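/* For reference, each DECL_SUF( func, args ) line below expands to three prototypes,
 * e.g. DECL_SUF( x264_pixel_avg_16x16, (...) ) declares
 * x264_pixel_avg_16x16_mmxext, x264_pixel_avg_16x16_sse2 and x264_pixel_avg_16x16_ssse3. */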
DECL_SUF( x264_pixel_avg_16x16, ( pixel *, int, pixel *, int, pixel *, int, int ))
DECL_SUF( x264_pixel_avg_16x8,  ( pixel *, int, pixel *, int, pixel *, int, int ))
DECL_SUF( x264_pixel_avg_8x16,  ( pixel *, int, pixel *, int, pixel *, int, int ))
DECL_SUF( x264_pixel_avg_8x8,   ( pixel *, int, pixel *, int, pixel *, int, int ))
DECL_SUF( x264_pixel_avg_8x4,   ( pixel *, int, pixel *, int, pixel *, int, int ))
DECL_SUF( x264_pixel_avg_4x8,   ( pixel *, int, pixel *, int, pixel *, int, int ))
DECL_SUF( x264_pixel_avg_4x4,   ( pixel *, int, pixel *, int, pixel *, int, int ))
DECL_SUF( x264_pixel_avg_4x2,   ( pixel *, int, pixel *, int, pixel *, int, int ))

#define MC_WEIGHT(w,type) \
    void x264_mc_weight_w##w##_##type( pixel *,int, pixel *,int, const x264_weight_t *,int );

#define MC_WEIGHT_OFFSET(w,type) \
    void x264_mc_offsetadd_w##w##_##type( pixel *,int, pixel *,int, const x264_weight_t *,int ); \
    void x264_mc_offsetsub_w##w##_##type( pixel *,int, pixel *,int, const x264_weight_t *,int ); \
    MC_WEIGHT(w,type)

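/* 'w' is the block width in pixels handled by each kernel; the resulting functions are
 * looked up by width through the weight/offset wtabs defined further down. */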
MC_WEIGHT_OFFSET( 4, mmxext )
MC_WEIGHT_OFFSET( 8, mmxext )
MC_WEIGHT_OFFSET( 12, mmxext )
MC_WEIGHT_OFFSET( 16, mmxext )
MC_WEIGHT_OFFSET( 20, mmxext )
MC_WEIGHT_OFFSET( 12, sse2 )
MC_WEIGHT_OFFSET( 16, sse2 )
MC_WEIGHT_OFFSET( 20, sse2 )
#if HIGH_BIT_DEPTH
MC_WEIGHT_OFFSET( 8, sse2 )
#endif
MC_WEIGHT( 8, sse2  )
MC_WEIGHT( 4, ssse3 )
MC_WEIGHT( 8, ssse3 )
MC_WEIGHT( 12, ssse3 )
MC_WEIGHT( 16, ssse3 )
MC_WEIGHT( 20, ssse3 )
#undef MC_WEIGHT_OFFSET
#undef MC_WEIGHT

void x264_mc_copy_w4_mmx( pixel *, int, pixel *, int, int );
void x264_mc_copy_w8_mmx( pixel *, int, pixel *, int, int );
void x264_mc_copy_w8_sse2( pixel *, int, pixel *, int, int );
void x264_mc_copy_w8_aligned_sse2( pixel *, int, pixel *, int, int );
void x264_mc_copy_w16_mmx( pixel *, int, pixel *, int, int );
void x264_mc_copy_w16_sse2( pixel *, int, pixel *, int, int );
void x264_mc_copy_w16_sse3( uint8_t *, int, uint8_t *, int, int );
void x264_mc_copy_w16_aligned_sse2( pixel *, int, pixel *, int, int );
void x264_prefetch_fenc_mmxext( uint8_t *, int, uint8_t *, int, int );
void x264_prefetch_ref_mmxext( uint8_t *, int, int );
void x264_plane_copy_core_mmxext( pixel *, int, pixel *, int, int w, int h);
void x264_plane_copy_c( pixel *, int, pixel *, int, int w, int h );
void x264_plane_copy_interleave_core_mmxext( pixel *dst, int i_dst,
                                             pixel *srcu, int i_srcu,
                                             pixel *srcv, int i_srcv, int w, int h );
void x264_plane_copy_interleave_core_sse2( pixel *dst, int i_dst,
                                           pixel *srcu, int i_srcu,
                                           pixel *srcv, int i_srcv, int w, int h );
void x264_plane_copy_interleave_c( pixel *dst, int i_dst,
                                   pixel *srcu, int i_srcu,
                                   pixel *srcv, int i_srcv, int w, int h );
void x264_plane_copy_deinterleave_mmx( pixel *dstu, int i_dstu,
                                       pixel *dstv, int i_dstv,
                                       pixel *src, int i_src, int w, int h );
void x264_plane_copy_deinterleave_sse2( pixel *dstu, int i_dstu,
                                        pixel *dstv, int i_dstv,
                                        pixel *src, int i_src, int w, int h );
void x264_plane_copy_deinterleave_ssse3( uint8_t *dstu, int i_dstu,
                                         uint8_t *dstv, int i_dstv,
                                         uint8_t *src, int i_src, int w, int h );
void x264_store_interleave_8x8x2_mmxext( pixel *dst, int i_dst, pixel *srcu, pixel *srcv );
void x264_store_interleave_8x8x2_sse2( pixel *dst, int i_dst, pixel *srcu, pixel *srcv );
void x264_load_deinterleave_8x8x2_fenc_mmx( pixel *dst, pixel *src, int i_src );
void x264_load_deinterleave_8x8x2_fenc_sse2( pixel *dst, pixel *src, int i_src );
void x264_load_deinterleave_8x8x2_fenc_ssse3( uint8_t *dst, uint8_t *src, int i_src );
void x264_load_deinterleave_8x8x2_fdec_mmx( pixel *dst, pixel *src, int i_src );
void x264_load_deinterleave_8x8x2_fdec_sse2( pixel *dst, pixel *src, int i_src );
void x264_load_deinterleave_8x8x2_fdec_ssse3( uint8_t *dst, uint8_t *src, int i_src );
void *x264_memcpy_aligned_mmx( void * dst, const void * src, size_t n );
void *x264_memcpy_aligned_sse2( void * dst, const void * src, size_t n );
void x264_memzero_aligned_mmx( void * dst, int n );
void x264_memzero_aligned_sse2( void * dst, int n );
void x264_integral_init4h_sse4( uint16_t *sum, uint8_t *pix, int stride );
void x264_integral_init8h_sse4( uint16_t *sum, uint8_t *pix, int stride );
void x264_integral_init4v_mmx( uint16_t *sum8, uint16_t *sum4, int stride );
void x264_integral_init4v_sse2( uint16_t *sum8, uint16_t *sum4, int stride );
void x264_integral_init8v_mmx( uint16_t *sum8, int stride );
void x264_integral_init8v_sse2( uint16_t *sum8, int stride );
void x264_integral_init4v_ssse3( uint16_t *sum8, uint16_t *sum4, int stride );
void x264_mbtree_propagate_cost_sse2( int *dst, uint16_t *propagate_in, uint16_t *intra_costs,
                                      uint16_t *inter_costs, uint16_t *inv_qscales, float *fps_factor, int len );

#define MC_CHROMA(cpu)\
void x264_mc_chroma_##cpu( pixel *dstu, pixel *dstv, int i_dst,\
                           pixel *src, int i_src,\
                           int dx, int dy, int i_width, int i_height );
MC_CHROMA(mmxext)
MC_CHROMA(sse2)
MC_CHROMA(sse2_misalign)
MC_CHROMA(ssse3)
MC_CHROMA(ssse3_cache64)

#define LOWRES(cpu)\
void x264_frame_init_lowres_core_##cpu( pixel *src0, pixel *dst0, pixel *dsth, pixel *dstv, pixel *dstc,\
                                        int src_stride, int dst_stride, int width, int height );
LOWRES(mmxext)
LOWRES(cache32_mmxext)
LOWRES(sse2)
LOWRES(ssse3)

#define PIXEL_AVG_W(width,cpu)\
void x264_pixel_avg2_w##width##_##cpu( pixel *, int, pixel *, int, pixel *, int );
/* This declares some functions that don't exist, but that isn't a problem. */
#define PIXEL_AVG_WALL(cpu)\
PIXEL_AVG_W(4,cpu); PIXEL_AVG_W(8,cpu); PIXEL_AVG_W(10,cpu); PIXEL_AVG_W(12,cpu); PIXEL_AVG_W(16,cpu); PIXEL_AVG_W(18,cpu); PIXEL_AVG_W(20,cpu);

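/* Presumably the declared-but-missing variants are harmless because no wtab below ever
 * references them for a given configuration, so the linker never has to resolve them. */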
PIXEL_AVG_WALL(mmxext)
PIXEL_AVG_WALL(cache32_mmxext)
PIXEL_AVG_WALL(cache64_mmxext)
PIXEL_AVG_WALL(cache64_sse2)
PIXEL_AVG_WALL(sse2)
PIXEL_AVG_WALL(sse2_misalign)
PIXEL_AVG_WALL(cache64_ssse3)

#define PIXEL_AVG_WTAB(instr, name1, name2, name3, name4, name5)\
static void (* const x264_pixel_avg_wtab_##instr[6])( pixel *, int, pixel *, int, pixel *, int ) =\
{\
    NULL,\
    x264_pixel_avg2_w4_##name1,\
    x264_pixel_avg2_w8_##name2,\
    x264_pixel_avg2_w12_##name3,\
    x264_pixel_avg2_w16_##name4,\
    x264_pixel_avg2_w20_##name5,\
};

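/* The avg2 wtabs are indexed with i_width>>2 in mc_luma/get_ref below, so entries 1..5
 * correspond to block widths 4, 8, 12, 16 and 20 (entry 0 is unused). */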
#if HIGH_BIT_DEPTH
/* we can replace w12/w20 with w10/w18 as only 9/17 pixels in fact are important */
#define x264_pixel_avg2_w12_mmxext       x264_pixel_avg2_w10_mmxext
#define x264_pixel_avg2_w20_mmxext       x264_pixel_avg2_w18_mmxext
#define x264_pixel_avg2_w12_sse2         x264_pixel_avg2_w10_sse2
#define x264_pixel_avg2_w20_sse2         x264_pixel_avg2_w18_sse2
#else
/* w16 sse2 is faster than w12 mmx as long as the cacheline issue is resolved */
#define x264_pixel_avg2_w12_cache64_ssse3 x264_pixel_avg2_w16_cache64_ssse3
#define x264_pixel_avg2_w12_cache64_sse2 x264_pixel_avg2_w16_cache64_sse2
#define x264_pixel_avg2_w12_sse3         x264_pixel_avg2_w16_sse3
#define x264_pixel_avg2_w12_sse2         x264_pixel_avg2_w16_sse2
#endif // HIGH_BIT_DEPTH

PIXEL_AVG_WTAB(mmxext, mmxext, mmxext, mmxext, mmxext, mmxext)
#if HIGH_BIT_DEPTH
PIXEL_AVG_WTAB(sse2, mmxext, sse2, sse2, sse2, sse2)
#else // !HIGH_BIT_DEPTH
#if ARCH_X86
PIXEL_AVG_WTAB(cache32_mmxext, mmxext, cache32_mmxext, cache32_mmxext, cache32_mmxext, cache32_mmxext)
PIXEL_AVG_WTAB(cache64_mmxext, mmxext, cache64_mmxext, cache64_mmxext, cache64_mmxext, cache64_mmxext)
#endif
PIXEL_AVG_WTAB(sse2, mmxext, mmxext, sse2, sse2, sse2)
PIXEL_AVG_WTAB(sse2_misalign, mmxext, mmxext, sse2, sse2, sse2_misalign)
PIXEL_AVG_WTAB(cache64_sse2, mmxext, cache64_mmxext, cache64_sse2, cache64_sse2, cache64_sse2)
PIXEL_AVG_WTAB(cache64_ssse3, mmxext, cache64_mmxext, cache64_ssse3, cache64_ssse3, cache64_sse2)
#endif // HIGH_BIT_DEPTH

#define MC_COPY_WTAB(instr, name1, name2, name3)\
static void (* const x264_mc_copy_wtab_##instr[5])( pixel *, int, pixel *, int, int ) =\
{\
    NULL,\
    x264_mc_copy_w4_##name1,\
    x264_mc_copy_w8_##name2,\
    NULL,\
    x264_mc_copy_w16_##name3,\
};

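/* Same i_width>>2 indexing as the avg2 tables; only widths 4, 8 and 16 are valid copy sizes
 * here, hence the NULL entries at index 0 and index 3 (width 12). */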
MC_COPY_WTAB(mmx,mmx,mmx,mmx)
MC_COPY_WTAB(sse2,mmx,mmx,sse2)

#define MC_WEIGHT_WTAB(function, instr, name1, name2, w12version)\
    static void (* x264_mc_##function##_wtab_##instr[6])( pixel *, int, pixel *, int, const x264_weight_t *, int ) =\
{\
    x264_mc_##function##_w4_##name1,\
    x264_mc_##function##_w4_##name1,\
    x264_mc_##function##_w8_##name2,\
    x264_mc_##function##_w##w12version##_##instr,\
    x264_mc_##function##_w16_##instr,\
    x264_mc_##function##_w20_##instr,\
};

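/* Also indexed by width>>2: entries 0 and 1 both use the w4 kernel, and the w12version
 * argument picks whether width 12 gets a dedicated w12 kernel or falls back to w16. */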
#if HIGH_BIT_DEPTH
MC_WEIGHT_WTAB(weight,mmxext,mmxext,mmxext,12)
MC_WEIGHT_WTAB(offsetadd,mmxext,mmxext,mmxext,12)
MC_WEIGHT_WTAB(offsetsub,mmxext,mmxext,mmxext,12)
MC_WEIGHT_WTAB(weight,sse2,mmxext,sse2,12)
MC_WEIGHT_WTAB(offsetadd,sse2,mmxext,sse2,16)
MC_WEIGHT_WTAB(offsetsub,sse2,mmxext,sse2,16)

static void x264_weight_cache_mmxext( x264_t *h, x264_weight_t *w )
{
    if( w->i_scale == 1<<w->i_denom )
    {
        if( w->i_offset < 0 )
            w->weightfn = h->mc.offsetsub;
        else
            w->weightfn = h->mc.offsetadd;
        for( int i = 0; i < 8; i++ )
            w->cachea[i] = abs(w->i_offset<<(BIT_DEPTH-8));
        return;
    }
    w->weightfn = h->mc.weight;
    int den1 = 1<<w->i_denom;
    int den2 = w->i_scale<<1;
    int den3 = 1+(w->i_offset<<(BIT_DEPTH-8+1));
    for( int i = 0; i < 8; i++ )
    {
        w->cachea[i] = den1;
        w->cacheb[i] = i&1 ? den3 : den2;
    }
}
#else
MC_WEIGHT_WTAB(weight,mmxext,mmxext,mmxext,12)
MC_WEIGHT_WTAB(offsetadd,mmxext,mmxext,mmxext,12)
MC_WEIGHT_WTAB(offsetsub,mmxext,mmxext,mmxext,12)
MC_WEIGHT_WTAB(weight,sse2,mmxext,sse2,16)
MC_WEIGHT_WTAB(offsetadd,sse2,mmxext,mmxext,16)
MC_WEIGHT_WTAB(offsetsub,sse2,mmxext,mmxext,16)
MC_WEIGHT_WTAB(weight,ssse3,ssse3,ssse3,16)

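/* The weight_cache functions pre-pack per-kernel constants into w->cachea / w->cacheb so the
 * asm does not have to recompute them per call.  The explicit weighted-prediction formula is
 *     dst = ((src * i_scale + (1 << (i_denom-1))) >> i_denom) + i_offset,
 * which degenerates to a pure offset (offsetadd/offsetsub) when i_scale == 1<<i_denom.
 * In the mmxext path below, den1 folds the rounding term and the offset together, presumably
 * so the asm can apply both in a single add, since
 *     (x + ((1<<(i_denom-1)) | (i_offset<<i_denom))) >> i_denom
 *         == ((x + (1<<(i_denom-1))) >> i_denom) + i_offset. */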
static void x264_weight_cache_mmxext( x264_t *h, x264_weight_t *w )
{
    int i;
    int16_t den1;

    if( w->i_scale == 1<<w->i_denom )
    {
        if( w->i_offset < 0 )
            w->weightfn = h->mc.offsetsub;
        else
            w->weightfn = h->mc.offsetadd;
        memset( w->cachea, abs(w->i_offset), sizeof(w->cachea) );
        return;
    }
    w->weightfn = h->mc.weight;
    den1 = 1 << (w->i_denom - 1) | w->i_offset << w->i_denom;
    for( i = 0; i < 8; i++ )
    {
        w->cachea[i] = w->i_scale;
        w->cacheb[i] = den1;
    }
}

static void x264_weight_cache_ssse3( x264_t *h, x264_weight_t *w )
{
    int i, den1;
    if( w->i_scale == 1<<w->i_denom )
    {
        if( w->i_offset < 0 )
            w->weightfn = h->mc.offsetsub;
        else
            w->weightfn = h->mc.offsetadd;

        memset( w->cachea, abs( w->i_offset ), sizeof(w->cachea) );
        return;
    }
    w->weightfn = h->mc.weight;
    den1 = w->i_scale << (8 - w->i_denom);
    for( i = 0; i < 8; i++ )
    {
        w->cachea[i] = den1;
        w->cacheb[i] = w->i_offset;
    }
}
#endif // !HIGH_BIT_DEPTH

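/* For each quarter-pel MV fraction (qpel_idx = (mvy&3)*4 + (mvx&3)), these tables pick which
 * of the four planes in src[] (presumably {fullpel, half-H, half-V, half-HV}, as produced by
 * hpel_filter) to use as the first and second source for averaging. */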
static const uint8_t hpel_ref0[16] = {0,1,1,1,0,1,1,1,2,3,3,3,0,1,1,1};
static const uint8_t hpel_ref1[16] = {0,0,0,0,2,2,3,2,2,2,3,2,2,2,3,2};

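/* qpel_idx & 5 tests bit 0 (mvx&1) and bit 2 (mvy&1): if either MV component has an odd
 * quarter-pel fraction, two half-pel planes must be averaged; otherwise the block lies on a
 * half-pel (or full-pel) position and can be taken from a single plane. */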
#define MC_LUMA(name,instr1,instr2)\
static void mc_luma_##name( pixel *dst,    int i_dst_stride,\
                  pixel *src[4], int i_src_stride,\
                  int mvx, int mvy,\
                  int i_width, int i_height, const x264_weight_t *weight )\
{\
    int qpel_idx = ((mvy&3)<<2) + (mvx&3);\
    int offset = (mvy>>2)*i_src_stride + (mvx>>2);\
    pixel *src1 = src[hpel_ref0[qpel_idx]] + offset + ((mvy&3) == 3) * i_src_stride;\
    if( qpel_idx & 5 ) /* qpel interpolation needed */\
    {\
        pixel *src2 = src[hpel_ref1[qpel_idx]] + offset + ((mvx&3) == 3);\
        x264_pixel_avg_wtab_##instr1[i_width>>2](\
                dst, i_dst_stride, src1, i_src_stride,\
                src2, i_height );\
        if( weight->weightfn )\
            weight->weightfn[i_width>>2]( dst, i_dst_stride, dst, i_dst_stride, weight, i_height );\
    }\
    else if( weight->weightfn )\
        weight->weightfn[i_width>>2]( dst, i_dst_stride, src1, i_src_stride, weight, i_height );\
    else\
        x264_mc_copy_wtab_##instr2[i_width>>2](dst, i_dst_stride, src1, i_src_stride, i_height );\
}

MC_LUMA(mmxext,mmxext,mmx)
MC_LUMA(sse2,sse2,sse2)
#if !HIGH_BIT_DEPTH
#if ARCH_X86
MC_LUMA(cache32_mmxext,cache32_mmxext,mmx)
MC_LUMA(cache64_mmxext,cache64_mmxext,mmx)
#endif
MC_LUMA(cache64_sse2,cache64_sse2,sse2)
MC_LUMA(cache64_ssse3,cache64_ssse3,sse2)
#endif // !HIGH_BIT_DEPTH

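/* get_ref performs the same interpolation as mc_luma, except that when neither qpel averaging
 * nor weighting is needed it avoids the copy entirely: it returns a pointer straight into the
 * reference plane and reports the reference stride through *i_dst_stride. */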
#define GET_REF(name)\
static pixel *get_ref_##name( pixel *dst,   int *i_dst_stride,\
                         pixel *src[4], int i_src_stride,\
                         int mvx, int mvy,\
                         int i_width, int i_height, const x264_weight_t *weight )\
{\
    int qpel_idx = ((mvy&3)<<2) + (mvx&3);\
    int offset = (mvy>>2)*i_src_stride + (mvx>>2);\
    pixel *src1 = src[hpel_ref0[qpel_idx]] + offset + ((mvy&3) == 3) * i_src_stride;\
    if( qpel_idx & 5 ) /* qpel interpolation needed */\
    {\
        pixel *src2 = src[hpel_ref1[qpel_idx]] + offset + ((mvx&3) == 3);\
        x264_pixel_avg_wtab_##name[i_width>>2](\
                dst, *i_dst_stride, src1, i_src_stride,\
                src2, i_height );\
        if( weight->weightfn )\
            weight->weightfn[i_width>>2]( dst, *i_dst_stride, dst, *i_dst_stride, weight, i_height );\
        return dst;\
    }\
    else if( weight->weightfn )\
    {\
        weight->weightfn[i_width>>2]( dst, *i_dst_stride, src1, i_src_stride, weight, i_height );\
        return dst;\
    }\
    else\
    {\
        *i_dst_stride = i_src_stride;\
        return src1;\
    }\
}

GET_REF(mmxext)
GET_REF(sse2)
#if !HIGH_BIT_DEPTH
#if ARCH_X86
GET_REF(cache32_mmxext)
GET_REF(cache64_mmxext)
#endif
GET_REF(sse2_misalign)
GET_REF(cache64_sse2)
GET_REF(cache64_ssse3)
#endif // !HIGH_BIT_DEPTH

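/* The composed hpel_filter runs the vertical, center and horizontal half-pel filters one row
 * at a time, with buf holding the intermediate vertical-filter results consumed by the center
 * filter.  Pointers are rounded down to 'align' (and the width grown to match) so the asm
 * kernels always see aligned addresses, presumably relying on the frame's padding to absorb
 * the extra columns.  The trailing x264_sfence() is presumably needed because some of the
 * kernels use non-temporal stores. */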
#define HPEL(align, cpu, cpuv, cpuc, cpuh)\
void x264_hpel_filter_v_##cpuv( pixel *dst, pixel *src, int16_t *buf, int stride, int width);\
void x264_hpel_filter_c_##cpuc( pixel *dst, int16_t *buf, int width );\
void x264_hpel_filter_h_##cpuh( pixel *dst, pixel *src, int width );\
static void x264_hpel_filter_##cpu( pixel *dsth, pixel *dstv, pixel *dstc, pixel *src,\
                             int stride, int width, int height, int16_t *buf )\
{\
    int realign = (intptr_t)src & (align-1);\
    src -= realign;\
    dstv -= realign;\
    dstc -= realign;\
    dsth -= realign;\
    width += realign;\
    while( height-- )\
    {\
        x264_hpel_filter_v_##cpuv( dstv, src, buf+8, stride, width );\
        x264_hpel_filter_c_##cpuc( dstc, buf+8, width );\
        x264_hpel_filter_h_##cpuh( dsth, src, width );\
        dsth += stride;\
        dstv += stride;\
        dstc += stride;\
        src  += stride;\
    }\
    x264_sfence();\
}

HPEL(8, mmxext, mmxext, mmxext, mmxext)
#if HIGH_BIT_DEPTH
HPEL(16, sse2, sse2, sse2, sse2 )
#else // !HIGH_BIT_DEPTH
HPEL(16, sse2_amd, mmxext, mmxext, sse2)
#if ARCH_X86_64
void x264_hpel_filter_sse2( uint8_t *dsth, uint8_t *dstv, uint8_t *dstc, uint8_t *src, int stride, int width, int height, int16_t *buf );
void x264_hpel_filter_ssse3( uint8_t *dsth, uint8_t *dstv, uint8_t *dstc, uint8_t *src, int stride, int width, int height, int16_t *buf );
#else
HPEL(16, sse2, sse2, sse2, sse2)
HPEL(16, ssse3, ssse3, ssse3, ssse3)
#endif
HPEL(16, sse2_misalign, sse2, sse2_misalign, sse2)
#endif // HIGH_BIT_DEPTH

static void x264_plane_copy_mmxext( pixel *dst, int i_dst, pixel *src, int i_src, int w, int h )
{
    int c_w = 16/sizeof(pixel) - 1;
    if( w < 256 ) { // tiny resolutions don't want non-temporal hints. dunno the exact threshold.
        x264_plane_copy_c( dst, i_dst, src, i_src, w, h );
    } else if( !(w&c_w) ) {
        x264_plane_copy_core_mmxext( dst, i_dst, src, i_src, w, h );
    } else if( i_src > 0 ) {
        // have to use plain memcpy on the last line (in memory order) to avoid overreading src
        x264_plane_copy_core_mmxext( dst, i_dst, src, i_src, (w+c_w)&~c_w, h-1 );
        memcpy( dst+i_dst*(h-1), src+i_src*(h-1), w*sizeof(pixel) );
    } else {
        memcpy( dst, src, w*sizeof(pixel) );
        x264_plane_copy_core_mmxext( dst+i_dst, i_dst, src+i_src, i_src, (w+c_w)&~c_w, h-1 );
    }
}

#define PLANE_INTERLEAVE(cpu) \
static void x264_plane_copy_interleave_##cpu( pixel *dst, int i_dst,\
                                              pixel *srcu, int i_srcu,\
                                              pixel *srcv, int i_srcv, int w, int h )\
{\
    if( !(w&15) ) {\
        x264_plane_copy_interleave_core_##cpu( dst, i_dst, srcu, i_srcu, srcv, i_srcv, w, h );\
    } else if( w < 16 || (i_srcu ^ i_srcv) ) {\
        x264_plane_copy_interleave_c( dst, i_dst, srcu, i_srcu, srcv, i_srcv, w, h );\
    } else if( i_srcu > 0 ) {\
        x264_plane_copy_interleave_core_##cpu( dst, i_dst, srcu, i_srcu, srcv, i_srcv, (w+15)&~15, h-1 );\
        x264_plane_copy_interleave_c( dst+i_dst*(h-1), 0, srcu+i_srcu*(h-1), 0, srcv+i_srcv*(h-1), 0, w, 1 );\
    } else {\
        x264_plane_copy_interleave_c( dst, 0, srcu, 0, srcv, 0, w, 1 );\
        x264_plane_copy_interleave_core_##cpu( dst+i_dst, i_dst, srcu+i_srcu, i_srcu, srcv+i_srcv, i_srcv, (w+15)&~15, h-1 );\
    }\
}

PLANE_INTERLEAVE(mmxext)
PLANE_INTERLEAVE(sse2)

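/* Runtime dispatch: start from the plain MMX versions and progressively override entries with
 * MMXEXT/SSE2/SSSE3/SSE4 implementations, returning as soon as a required feature flag is
 * missing.  Flags such as CACHELINE_32/64, SSE2_IS_SLOW/FAST, SSE_MISALIGN and STACK_MOD4
 * select variants tuned around specific micro-architectural quirks. */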
void x264_mc_init_mmx( int cpu, x264_mc_functions_t *pf )
{
    if( !(cpu&X264_CPU_MMX) )
        return;

    pf->load_deinterleave_8x8x2_fenc = x264_load_deinterleave_8x8x2_fenc_mmx;
    pf->load_deinterleave_8x8x2_fdec = x264_load_deinterleave_8x8x2_fdec_mmx;

    pf->plane_copy_deinterleave = x264_plane_copy_deinterleave_mmx;

    pf->copy_16x16_unaligned = x264_mc_copy_w16_mmx;
    pf->copy[PIXEL_16x16] = x264_mc_copy_w16_mmx;
    pf->copy[PIXEL_8x8]   = x264_mc_copy_w8_mmx;
    pf->copy[PIXEL_4x4]   = x264_mc_copy_w4_mmx;
    pf->memcpy_aligned  = x264_memcpy_aligned_mmx;
    pf->memzero_aligned = x264_memzero_aligned_mmx;
    pf->integral_init4v = x264_integral_init4v_mmx;
    pf->integral_init8v = x264_integral_init8v_mmx;

    if( !(cpu&X264_CPU_MMXEXT) )
        return;

    pf->plane_copy = x264_plane_copy_mmxext;
    pf->plane_copy_interleave = x264_plane_copy_interleave_mmxext;
    pf->store_interleave_8x8x2 = x264_store_interleave_8x8x2_mmxext;

    pf->avg[PIXEL_16x16] = x264_pixel_avg_16x16_mmxext;
    pf->avg[PIXEL_16x8]  = x264_pixel_avg_16x8_mmxext;
    pf->avg[PIXEL_8x16]  = x264_pixel_avg_8x16_mmxext;
    pf->avg[PIXEL_8x8]   = x264_pixel_avg_8x8_mmxext;
    pf->avg[PIXEL_8x4]   = x264_pixel_avg_8x4_mmxext;
    pf->avg[PIXEL_4x8]   = x264_pixel_avg_4x8_mmxext;
    pf->avg[PIXEL_4x4]   = x264_pixel_avg_4x4_mmxext;
    pf->avg[PIXEL_4x2]   = x264_pixel_avg_4x2_mmxext;

    pf->mc_luma = mc_luma_mmxext;
    pf->get_ref = get_ref_mmxext;
    pf->mc_chroma = x264_mc_chroma_mmxext;
    pf->hpel_filter = x264_hpel_filter_mmxext;
    pf->weight = x264_mc_weight_wtab_mmxext;
    pf->weight_cache = x264_weight_cache_mmxext;
    pf->offsetadd = x264_mc_offsetadd_wtab_mmxext;
    pf->offsetsub = x264_mc_offsetsub_wtab_mmxext;

    pf->frame_init_lowres_core = x264_frame_init_lowres_core_mmxext;

#if HIGH_BIT_DEPTH
#if ARCH_X86 // all x86_64 cpus with cacheline split issues use sse2 instead
    if( cpu&(X264_CPU_CACHELINE_32|X264_CPU_CACHELINE_64) )
        pf->frame_init_lowres_core = x264_frame_init_lowres_core_cache32_mmxext;
#endif

    if( !(cpu&X264_CPU_SSE2) )
        return;

    pf->frame_init_lowres_core = x264_frame_init_lowres_core_sse2;

    pf->load_deinterleave_8x8x2_fenc = x264_load_deinterleave_8x8x2_fenc_sse2;
    pf->load_deinterleave_8x8x2_fdec = x264_load_deinterleave_8x8x2_fdec_sse2;

    pf->plane_copy_interleave   = x264_plane_copy_interleave_sse2;
    pf->plane_copy_deinterleave = x264_plane_copy_deinterleave_sse2;

    if( cpu&X264_CPU_SSE2_IS_FAST )
    {
        pf->get_ref = get_ref_sse2;
        pf->mc_luma = mc_luma_sse2;
        pf->hpel_filter = x264_hpel_filter_sse2;
    }

    pf->memcpy_aligned  = x264_memcpy_aligned_sse2;
    pf->memzero_aligned = x264_memzero_aligned_sse2;
    pf->integral_init4v = x264_integral_init4v_sse2;
    pf->integral_init8v = x264_integral_init8v_sse2;
    pf->mbtree_propagate_cost = x264_mbtree_propagate_cost_sse2;
    pf->store_interleave_8x8x2 = x264_store_interleave_8x8x2_sse2;
    pf->offsetadd = x264_mc_offsetadd_wtab_sse2;
    pf->offsetsub = x264_mc_offsetsub_wtab_sse2;

    if( cpu&X264_CPU_SSE2_IS_SLOW )
        return;

    pf->avg[PIXEL_16x16] = x264_pixel_avg_16x16_sse2;
    pf->avg[PIXEL_16x8]  = x264_pixel_avg_16x8_sse2;
    pf->avg[PIXEL_8x16]  = x264_pixel_avg_8x16_sse2;
    pf->avg[PIXEL_8x8]   = x264_pixel_avg_8x8_sse2;
    pf->avg[PIXEL_8x4]   = x264_pixel_avg_8x4_sse2;
    pf->avg[PIXEL_4x8]   = x264_pixel_avg_4x8_sse2;
    pf->avg[PIXEL_4x4]   = x264_pixel_avg_4x4_sse2;
    pf->avg[PIXEL_4x2]   = x264_pixel_avg_4x2_sse2;

    pf->copy[PIXEL_16x16] = x264_mc_copy_w16_aligned_sse2;
    pf->weight = x264_mc_weight_wtab_sse2;

    if( !(cpu&X264_CPU_STACK_MOD4) )
        pf->mc_chroma = x264_mc_chroma_sse2;

    if( !(cpu&X264_CPU_SSSE3) )
        return;

    pf->frame_init_lowres_core = x264_frame_init_lowres_core_ssse3;

    if( (cpu&X264_CPU_SHUFFLE_IS_FAST) && !(cpu&X264_CPU_SLOW_ATOM) )
        pf->integral_init4v = x264_integral_init4v_ssse3;
#else // !HIGH_BIT_DEPTH
    pf->prefetch_fenc = x264_prefetch_fenc_mmxext;
    pf->prefetch_ref  = x264_prefetch_ref_mmxext;

#if ARCH_X86 // all x86_64 cpus with cacheline split issues use sse2 instead
    if( cpu&X264_CPU_CACHELINE_32 )
    {
        pf->mc_luma = mc_luma_cache32_mmxext;
        pf->get_ref = get_ref_cache32_mmxext;
        pf->frame_init_lowres_core = x264_frame_init_lowres_core_cache32_mmxext;
    }
    else if( cpu&X264_CPU_CACHELINE_64 )
    {
        pf->mc_luma = mc_luma_cache64_mmxext;
        pf->get_ref = get_ref_cache64_mmxext;
        pf->frame_init_lowres_core = x264_frame_init_lowres_core_cache32_mmxext;
    }
#endif

    if( !(cpu&X264_CPU_SSE2) )
        return;

    pf->memcpy_aligned = x264_memcpy_aligned_sse2;
    pf->memzero_aligned = x264_memzero_aligned_sse2;
    pf->integral_init4v = x264_integral_init4v_sse2;
    pf->integral_init8v = x264_integral_init8v_sse2;
    pf->hpel_filter = x264_hpel_filter_sse2_amd;
    pf->mbtree_propagate_cost = x264_mbtree_propagate_cost_sse2;

    if( cpu&X264_CPU_SSE2_IS_SLOW )
        return;

    pf->weight = x264_mc_weight_wtab_sse2;
    if( !(cpu&X264_CPU_SLOW_ATOM) )
    {
        pf->offsetadd = x264_mc_offsetadd_wtab_sse2;
        pf->offsetsub = x264_mc_offsetsub_wtab_sse2;
    }

    pf->copy[PIXEL_16x16] = x264_mc_copy_w16_aligned_sse2;
    pf->avg[PIXEL_16x16] = x264_pixel_avg_16x16_sse2;
    pf->avg[PIXEL_16x8]  = x264_pixel_avg_16x8_sse2;
    pf->avg[PIXEL_8x16] = x264_pixel_avg_8x16_sse2;
    pf->avg[PIXEL_8x8]  = x264_pixel_avg_8x8_sse2;
    pf->avg[PIXEL_8x4]  = x264_pixel_avg_8x4_sse2;
    pf->hpel_filter = x264_hpel_filter_sse2;
    if( cpu&X264_CPU_SSE_MISALIGN )
        pf->hpel_filter = x264_hpel_filter_sse2_misalign;
    pf->frame_init_lowres_core = x264_frame_init_lowres_core_sse2;
    if( !(cpu&X264_CPU_STACK_MOD4) )
        pf->mc_chroma = x264_mc_chroma_sse2;

    if( cpu&X264_CPU_SSE2_IS_FAST )
    {
        pf->store_interleave_8x8x2  = x264_store_interleave_8x8x2_sse2; // FIXME sse2fast? sse2medium?
        pf->load_deinterleave_8x8x2_fenc = x264_load_deinterleave_8x8x2_fenc_sse2;
        pf->load_deinterleave_8x8x2_fdec = x264_load_deinterleave_8x8x2_fdec_sse2;
        pf->plane_copy_interleave   = x264_plane_copy_interleave_sse2;
        pf->plane_copy_deinterleave = x264_plane_copy_deinterleave_sse2;
        pf->mc_luma = mc_luma_sse2;
        pf->get_ref = get_ref_sse2;
        if( cpu&X264_CPU_CACHELINE_64 )
        {
            pf->mc_luma = mc_luma_cache64_sse2;
            pf->get_ref = get_ref_cache64_sse2;
        }
        if( cpu&X264_CPU_SSE_MISALIGN )
        {
            pf->get_ref = get_ref_sse2_misalign;
            if( !(cpu&X264_CPU_STACK_MOD4) )
                pf->mc_chroma = x264_mc_chroma_sse2_misalign;
        }
    }

    if( !(cpu&X264_CPU_SSSE3) )
        return;

    pf->avg[PIXEL_16x16] = x264_pixel_avg_16x16_ssse3;
    pf->avg[PIXEL_16x8]  = x264_pixel_avg_16x8_ssse3;
    pf->avg[PIXEL_8x16]  = x264_pixel_avg_8x16_ssse3;
    pf->avg[PIXEL_8x8]   = x264_pixel_avg_8x8_ssse3;
    pf->avg[PIXEL_8x4]   = x264_pixel_avg_8x4_ssse3;
    pf->avg[PIXEL_4x8]   = x264_pixel_avg_4x8_ssse3;
    pf->avg[PIXEL_4x4]   = x264_pixel_avg_4x4_ssse3;
    pf->avg[PIXEL_4x2]   = x264_pixel_avg_4x2_ssse3;

    pf->load_deinterleave_8x8x2_fenc = x264_load_deinterleave_8x8x2_fenc_ssse3;
    pf->load_deinterleave_8x8x2_fdec = x264_load_deinterleave_8x8x2_fdec_ssse3;
    pf->plane_copy_deinterleave = x264_plane_copy_deinterleave_ssse3;

    pf->hpel_filter = x264_hpel_filter_ssse3;
    pf->frame_init_lowres_core = x264_frame_init_lowres_core_ssse3;
    if( !(cpu&X264_CPU_STACK_MOD4) )
        pf->mc_chroma = x264_mc_chroma_ssse3;

    if( cpu&X264_CPU_CACHELINE_64 )
    {
        if( !(cpu&X264_CPU_STACK_MOD4) )
            pf->mc_chroma = x264_mc_chroma_ssse3_cache64;
        pf->mc_luma = mc_luma_cache64_ssse3;
        pf->get_ref = get_ref_cache64_ssse3;

        /* ssse3 weight is slower on Nehalem, so only assign here. */
        pf->weight_cache = x264_weight_cache_ssse3;
        pf->weight = x264_mc_weight_wtab_ssse3;
    }

    if( (cpu&X264_CPU_SHUFFLE_IS_FAST) && !(cpu&X264_CPU_SLOW_ATOM) )
        pf->integral_init4v = x264_integral_init4v_ssse3;

    if( !(cpu&X264_CPU_SSE4) )
        return;

    pf->integral_init4h = x264_integral_init4h_sse4;
    pf->integral_init8h = x264_integral_init8h_sse4;
#endif // HIGH_BIT_DEPTH
}