/*****************************************************************************
 * mc-c.c: h264 encoder library (Motion Compensation)
 *****************************************************************************
 * Copyright (C) 2003-2008 x264 project
 *
 * Authors: Laurent Aimar <fenrir@via.ecp.fr>
 *          Loren Merritt <lorenm@u.washington.edu>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111, USA.
 *****************************************************************************/

#include "common/common.h"
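
/* Motion-compensation primitives implemented in x86 assembly
 * (mc-a.asm / mc-a2.asm in this directory). */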
extern void x264_pixel_avg_16x16_sse2( uint8_t *, int, uint8_t *, int );
extern void x264_pixel_avg_16x8_sse2( uint8_t *, int, uint8_t *, int );
extern void x264_pixel_avg_16x16_mmxext( uint8_t *, int, uint8_t *, int );
extern void x264_pixel_avg_16x8_mmxext( uint8_t *, int, uint8_t *, int );
extern void x264_pixel_avg_8x16_mmxext( uint8_t *, int, uint8_t *, int );
extern void x264_pixel_avg_8x8_mmxext( uint8_t *, int, uint8_t *, int );
extern void x264_pixel_avg_8x4_mmxext( uint8_t *, int, uint8_t *, int );
extern void x264_pixel_avg_4x8_mmxext( uint8_t *, int, uint8_t *, int );
extern void x264_pixel_avg_4x4_mmxext( uint8_t *, int, uint8_t *, int );
extern void x264_pixel_avg_4x2_mmxext( uint8_t *, int, uint8_t *, int );
extern void x264_pixel_avg2_w4_mmxext( uint8_t *, int, uint8_t *, int, uint8_t *, int );
extern void x264_pixel_avg2_w8_mmxext( uint8_t *, int, uint8_t *, int, uint8_t *, int );
extern void x264_pixel_avg2_w12_mmxext( uint8_t *, int, uint8_t *, int, uint8_t *, int );
extern void x264_pixel_avg2_w16_mmxext( uint8_t *, int, uint8_t *, int, uint8_t *, int );
extern void x264_pixel_avg2_w20_mmxext( uint8_t *, int, uint8_t *, int, uint8_t *, int );
extern void x264_pixel_avg2_w16_sse2( uint8_t *, int, uint8_t *, int, uint8_t *, int );
extern void x264_pixel_avg2_w20_sse2( uint8_t *, int, uint8_t *, int, uint8_t *, int );
extern void x264_mc_copy_w4_mmx( uint8_t *, int, uint8_t *, int, int );
extern void x264_mc_copy_w8_mmx( uint8_t *, int, uint8_t *, int, int );
extern void x264_mc_copy_w16_mmx( uint8_t *, int, uint8_t *, int, int );
extern void x264_mc_copy_w16_sse2( uint8_t *, int, uint8_t *, int, int );
extern void x264_pixel_avg_weight_4x4_mmxext( uint8_t *, int, uint8_t *, int, int );
extern void x264_pixel_avg_weight_w8_mmxext( uint8_t *, int, uint8_t *, int, int, int );
extern void x264_pixel_avg_weight_w16_mmxext( uint8_t *, int, uint8_t *, int, int, int );
extern void x264_prefetch_fenc_mmxext( uint8_t *, int, uint8_t *, int, int );
extern void x264_prefetch_ref_mmxext( uint8_t *, int, int );
extern void x264_mc_chroma_mmxext( uint8_t *src, int i_src_stride,
                                   uint8_t *dst, int i_dst_stride,
                                   int dx, int dy, int i_width, int i_height );
extern void x264_plane_copy_mmxext( uint8_t *, int, uint8_t *, int, int w, int h );
extern void x264_hpel_filter_mmxext( uint8_t *dsth, uint8_t *dstv, uint8_t *dstc, uint8_t *src,
                                     int i_stride, int i_width, int i_height );
extern void *x264_memcpy_aligned_mmx( void * dst, const void * src, size_t n );
extern void *x264_memcpy_aligned_sse2( void * dst, const void * src, size_t n );
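
/* The avg_weight asm routines are specialized by block width and take the
 * height as a runtime parameter; the macro below generates fixed-size C
 * wrappers with the signature expected by pf->avg_weight[]. */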
#define AVG_WEIGHT(W,H) \
void x264_pixel_avg_weight_ ## W ## x ## H ## _mmxext( uint8_t *dst, int i_dst, uint8_t *src, int i_src, int i_weight_dst ) \
{ \
    x264_pixel_avg_weight_w ## W ## _mmxext( dst, i_dst, src, i_src, i_weight_dst, H ); \
}
AVG_WEIGHT(16,16)
AVG_WEIGHT(16,8)
AVG_WEIGHT(8,16)
AVG_WEIGHT(8,8)
AVG_WEIGHT(8,4)

static void (* const x264_pixel_avg_wtab_mmxext[6])( uint8_t *, int, uint8_t *, int, uint8_t *, int ) =
{
    NULL,
    x264_pixel_avg2_w4_mmxext,
    x264_pixel_avg2_w8_mmxext,
    x264_pixel_avg2_w12_mmxext,
    x264_pixel_avg2_w16_mmxext,
    x264_pixel_avg2_w20_mmxext,
};
static void (* const x264_mc_copy_wtab_mmx[5])( uint8_t *, int, uint8_t *, int, int ) =
{
    NULL,
    x264_mc_copy_w4_mmx,
    x264_mc_copy_w8_mmx,
    NULL,
    x264_mc_copy_w16_mmx,
};
static void (* const x264_pixel_avg_wtab_sse2[6])( uint8_t *, int, uint8_t *, int, uint8_t *, int ) =
{
    NULL,
    x264_pixel_avg2_w4_mmxext,
    x264_pixel_avg2_w8_mmxext,
    x264_pixel_avg2_w12_mmxext,
    x264_pixel_avg2_w16_sse2,
    x264_pixel_avg2_w20_sse2,
};
static void (* const x264_mc_copy_wtab_sse2[5])( uint8_t *, int, uint8_t *, int, int ) =
{
    NULL,
    x264_mc_copy_w4_mmx,
    x264_mc_copy_w8_mmx,
    NULL,
    x264_mc_copy_w16_sse2,
};
static const int hpel_ref0[16] = {0,1,1,1,0,1,1,1,2,3,3,3,0,1,1,1};
static const int hpel_ref1[16] = {0,0,0,0,2,2,3,2,2,2,3,2,2,2,3,2};
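
/* For each quarter-pel position (qpel_idx = 4*(mvy&3) + (mvx&3)), hpel_ref0
 * and hpel_ref1 select which of the four precomputed planes in src[] to read:
 * 0 = full-pel, 1 = horizontal half-pel, 2 = vertical half-pel,
 * 3 = centre (diagonal) half-pel.  Quarter-pel samples are formed by
 * averaging the two selected planes. */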
#define MC_LUMA(name,instr1,instr2)\
void mc_luma_##name( uint8_t *dst,    int i_dst_stride,\
                     uint8_t *src[4], int i_src_stride,\
                     int mvx, int mvy,\
                     int i_width, int i_height )\
{\
    int qpel_idx = ((mvy&3)<<2) + (mvx&3);\
    int offset = (mvy>>2)*i_src_stride + (mvx>>2);\
    uint8_t *src1 = src[hpel_ref0[qpel_idx]] + offset + ((mvy&3) == 3) * i_src_stride;\
    if( qpel_idx & 5 ) /* qpel interpolation needed */\
    {\
        uint8_t *src2 = src[hpel_ref1[qpel_idx]] + offset + ((mvx&3) == 3);\
        x264_pixel_avg_wtab_##instr1[i_width>>2](\
            dst, i_dst_stride, src1, i_src_stride,\
            src2, i_height );\
    }\
    else\
    {\
        x264_mc_copy_wtab_##instr2[i_width>>2](\
            dst, i_dst_stride, src1, i_src_stride, i_height );\
    }\
}

MC_LUMA(mmxext,mmxext,mmx)
MC_LUMA(sse2,sse2,sse2)
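
/* get_ref_* performs the same qpel selection as mc_luma_*, but when no
 * averaging is needed it returns a pointer into the chosen half-pel plane
 * (with the source stride) instead of copying the block. */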
#define GET_REF(name)\
uint8_t *get_ref_##name( uint8_t *dst,   int *i_dst_stride,\
                         uint8_t *src[4], int i_src_stride,\
                         int mvx, int mvy,\
                         int i_width, int i_height )\
{\
    int qpel_idx = ((mvy&3)<<2) + (mvx&3);\
    int offset = (mvy>>2)*i_src_stride + (mvx>>2);\
    uint8_t *src1 = src[hpel_ref0[qpel_idx]] + offset + ((mvy&3) == 3) * i_src_stride;\
    if( qpel_idx & 5 ) /* qpel interpolation needed */\
    {\
        uint8_t *src2 = src[hpel_ref1[qpel_idx]] + offset + ((mvx&3) == 3);\
        x264_pixel_avg_wtab_##name[i_width>>2](\
            dst, *i_dst_stride, src1, i_src_stride,\
            src2, i_height );\
        return dst;\
    }\
    else\
    {\
        *i_dst_stride = i_src_stride;\
        return src1;\
    }\
}

GET_REF(mmxext)
GET_REF(sse2)
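
/* Install the MMX/MMXEXT/SSE2 implementations into the function table.
 * Each block below requires all of the preceding CPU capabilities, so the
 * function returns as soon as a capability bit is missing. */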
void x264_mc_init_mmx( int cpu, x264_mc_functions_t *pf )
{
    if( !(cpu&X264_CPU_MMX) )
        return;

    pf->copy[PIXEL_16x16] = x264_mc_copy_w16_mmx;
    pf->copy[PIXEL_8x8]   = x264_mc_copy_w8_mmx;
    pf->copy[PIXEL_4x4]   = x264_mc_copy_w4_mmx;
    pf->memcpy_aligned = x264_memcpy_aligned_mmx;

    if( !(cpu&X264_CPU_MMXEXT) )
        return;

    pf->mc_luma = mc_luma_mmxext;
    pf->get_ref = get_ref_mmxext;
    pf->mc_chroma = x264_mc_chroma_mmxext;

    pf->avg[PIXEL_16x16] = x264_pixel_avg_16x16_mmxext;
    pf->avg[PIXEL_16x8]  = x264_pixel_avg_16x8_mmxext;
    pf->avg[PIXEL_8x16]  = x264_pixel_avg_8x16_mmxext;
    pf->avg[PIXEL_8x8]   = x264_pixel_avg_8x8_mmxext;
    pf->avg[PIXEL_8x4]   = x264_pixel_avg_8x4_mmxext;
    pf->avg[PIXEL_4x8]   = x264_pixel_avg_4x8_mmxext;
    pf->avg[PIXEL_4x4]   = x264_pixel_avg_4x4_mmxext;
    pf->avg[PIXEL_4x2]   = x264_pixel_avg_4x2_mmxext;

    pf->avg_weight[PIXEL_16x16] = x264_pixel_avg_weight_16x16_mmxext;
    pf->avg_weight[PIXEL_16x8]  = x264_pixel_avg_weight_16x8_mmxext;
    pf->avg_weight[PIXEL_8x16]  = x264_pixel_avg_weight_8x16_mmxext;
    pf->avg_weight[PIXEL_8x8]   = x264_pixel_avg_weight_8x8_mmxext;
    pf->avg_weight[PIXEL_8x4]   = x264_pixel_avg_weight_8x4_mmxext;
    pf->avg_weight[PIXEL_4x4]   = x264_pixel_avg_weight_4x4_mmxext;
    // avg_weight_4x8 is rare and 4x2 is not used

    pf->plane_copy = x264_plane_copy_mmxext;
    pf->hpel_filter = x264_hpel_filter_mmxext;

    pf->prefetch_fenc = x264_prefetch_fenc_mmxext;
    pf->prefetch_ref  = x264_prefetch_ref_mmxext;

    if( !(cpu&X264_CPU_SSE2) )
        return;

    pf->memcpy_aligned = x264_memcpy_aligned_sse2;

    // disable on AMD processors since it is slower
    if( cpu&X264_CPU_3DNOW )
        return;

    pf->mc_luma = mc_luma_sse2;
    pf->get_ref = get_ref_sse2;
    pf->avg[PIXEL_16x16] = x264_pixel_avg_16x16_sse2;
    pf->avg[PIXEL_16x8]  = x264_pixel_avg_16x8_sse2;
}