1 /*****************************************************************************
2 * mc.h: motion compensation
3 *****************************************************************************
4 * Copyright (C) 2004-2016 x264 project
6 * Authors: Loren Merritt <lorenm@u.washington.edu>
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published by
10 * the Free Software Foundation; either version 2 of the License, or
11 * (at your option) any later version.
13 * This program is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details.
18 * You should have received a copy of the GNU General Public License
19 * along with this program; if not, write to the Free Software
20 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02111, USA.
22 * This program is also available under a commercial proprietary license.
23 * For more information, contact us at licensing@x264.com.
24 *****************************************************************************/
/* Saturating accumulate: s += x, clipped to (1<<15)-1 (INT16_MAX) so that
 * mbtree cost accumulation into int16_t/uint16_t slots cannot overflow.
 * `s` is evaluated twice -- callers must pass a side-effect-free lvalue. */
#define MC_CLIP_ADD(s,x) (s) = X264_MIN((s)+(x),(1<<15)-1)
/* Saturating accumulate for two adjacent elements:
 * s[0] += x[0], s[1] += x[1], each clipped to (1<<15)-1 via MC_CLIP_ADD.
 * Wrapped in do { } while(0) so it expands as a single statement (safe in
 * unbraced if/else).
 * NOTE(review): the do/while(0) wrapper lines were missing from this
 * excerpt and have been restored from upstream x264. */
#define MC_CLIP_ADD2(s,x)\
do\
{\
    MC_CLIP_ADD((s)[0], (x)[0]);\
    MC_CLIP_ADD((s)[1], (x)[1]);\
} while(0)
/* PROPAGATE_LIST(cpu) instantiates the C tail of the mbtree "propagate
 * list" pass around the per-arch helper
 * x264_mbtree_propagate_list_internal_##cpu.  Judging from the pointer
 * arithmetic below, the helper emits, per batch of 8 macroblocks,
 * 16 int16_t of target MB coordinates (x,y pairs) followed by 32 int16_t
 * of bilinearly-weighted propagate amounts; this wrapper scatters those
 * amounts into ref_costs with saturation (MC_CLIP_ADD) and frame-edge
 * clipping.
 * NOTE(review): the structural lines (braces, continues, the zero-MV
 * shortcut branch and the else chain) were lost from this excerpt and have
 * been restored from upstream x264 -- verify against the canonical mc.h. */
#define PROPAGATE_LIST(cpu)\
void x264_mbtree_propagate_list_internal_##cpu( int16_t (*mvs)[2], int16_t *propagate_amount,\
                                                uint16_t *lowres_costs, int16_t *output,\
                                                int bipred_weight, int mb_y, int len );\
\
static void x264_mbtree_propagate_list_##cpu( x264_t *h, uint16_t *ref_costs, int16_t (*mvs)[2],\
                                              int16_t *propagate_amount, uint16_t *lowres_costs,\
                                              int bipred_weight, int mb_y, int len, int list )\
{\
    int16_t *current = h->scratch_buffer2;\
\
    x264_mbtree_propagate_list_internal_##cpu( mvs, propagate_amount, lowres_costs,\
                                               current, bipred_weight, mb_y, len );\
\
    unsigned stride = h->mb.i_mb_stride;\
    unsigned width = h->mb.i_mb_width;\
    unsigned height = h->mb.i_mb_height;\
\
    /* Batch layout from the helper: 16 int16_t of coords, then 32 int16_t\
     * of amounts, per group of 8 MBs (hence current += 32 after the 8\
     * current += 2 coordinate steps). */\
    for( unsigned i = 0; i < len; current += 32 )\
    {\
        int end = X264_MIN( i+8, len );\
        for( ; i < end; i++, current += 2 )\
        {\
            /* Skip MBs whose lowres cost doesn't select this list. */\
            if( !(lowres_costs[i] & (1 << (list+LOWRES_COST_SHIFT))) )\
                continue;\
\
            unsigned mbx = current[0];\
            unsigned mby = current[1];\
            unsigned idx0 = mbx + mby * stride;\
            unsigned idx2 = idx0 + stride;\
\
            /* Shortcut for the simple/common case of zero MV */\
            if( !M32( mvs[i] ) )\
            {\
                MC_CLIP_ADD( ref_costs[idx0], current[16] );\
                continue;\
            }\
\
            if( mbx < width-1 && mby < height-1 )\
            {\
                MC_CLIP_ADD2( ref_costs+idx0, current+16 );\
                MC_CLIP_ADD2( ref_costs+idx2, current+32 );\
            }\
            else\
            {\
                /* Note: this takes advantage of unsigned representation to\
                 * catch negative mbx/mby. */\
                if( mby < height )\
                {\
                    if( mbx < width )\
                        MC_CLIP_ADD( ref_costs[idx0+0], current[16] );\
                    if( mbx+1 < width )\
                        MC_CLIP_ADD( ref_costs[idx0+1], current[17] );\
                }\
                if( mby+1 < height )\
                {\
                    if( mbx < width )\
                        MC_CLIP_ADD( ref_costs[idx2+0], current[32] );\
                    if( mbx+1 < width )\
                        MC_CLIP_ADD( ref_costs[idx2+1], current[33] );\
                }\
            }\
        }\
    }\
}
/* Forward declaration: weight_fn_t's prototype refers to the struct before
 * its full definition below. */
struct x264_weight_t;
/* Weighted-prediction kernel signature: (dst, dst_stride, src, src_stride,
 * weights, height).
 * NOTE(review): presumably one kernel per supported block width, reached
 * through x264_weight_t.weightfn -- confirm against mc.c. */
typedef void (* weight_fn_t)( pixel *, intptr_t, pixel *,intptr_t, const struct x264_weight_t *, int );
105 typedef struct x264_weight_t
107 /* aligning the first member is a gcc hack to force the struct to be
108 * 16 byte aligned, as well as force sizeof(struct) to be a multiple of 16 */
109 ALIGNED_16( int16_t cachea[8] );
114 weight_fn_t *weightfn;
115 } ALIGNED_16( x264_weight_t );
/* Default "no weighting" table used when weighted prediction is disabled.
 * NOTE(review): presumably one entry per plane -- confirm in mc.c. */
extern const x264_weight_t x264_weight_none[3];
/* NOTE(review): lookup tables indexed by the 4-bit qpel MV fraction,
 * apparently selecting which hpel plane feeds the interpolation in
 * mc_luma/get_ref -- confirm usage in mc.c before relying on this. */
extern const uint8_t x264_hpel_ref0[16];
extern const uint8_t x264_hpel_ref1[16];
/* Initialize weight struct `w` with scale s, log2-denominator d and offset
 * o.  If `b` is true, precompute the per-arch kernel cache through
 * h->mc.weight_cache; otherwise mark the struct unused (weightfn = NULL).
 * Relies on an `h` (x264_t *) in the expansion scope.
 * NOTE(review): the braces and assignment/else lines were lost from this
 * excerpt and have been restored from upstream x264 -- verify. */
#define SET_WEIGHT( w, b, s, d, o )\
{\
    (w).i_scale = (s);\
    (w).i_denom = (d);\
    (w).i_offset = (o);\
    if( b )\
        h->mc.weight_cache( h, &w );\
    else\
        w.weightfn = NULL;\
}
133 * XXX: Only width = 4, 8 or 16 are valid
134 * width == 4 -> height == 4 or 8
135 * width == 8 -> height == 4 or 8 or 16
136 * width == 16-> height == 8 or 16
141 void (*mc_luma)( pixel *dst, intptr_t i_dst, pixel **src, intptr_t i_src,
142 int mvx, int mvy, int i_width, int i_height, const x264_weight_t *weight );
144 /* may round up the dimensions if they're not a power of 2 */
145 pixel* (*get_ref)( pixel *dst, intptr_t *i_dst, pixel **src, intptr_t i_src,
146 int mvx, int mvy, int i_width, int i_height, const x264_weight_t *weight );
148 /* mc_chroma may write up to 2 bytes of garbage to the right of dst,
149 * so it must be run from left to right. */
150 void (*mc_chroma)( pixel *dstu, pixel *dstv, intptr_t i_dst, pixel *src, intptr_t i_src,
151 int mvx, int mvy, int i_width, int i_height );
153 void (*avg[12])( pixel *dst, intptr_t dst_stride, pixel *src1, intptr_t src1_stride,
154 pixel *src2, intptr_t src2_stride, int i_weight );
156 /* only 16x16, 8x8, and 4x4 defined */
157 void (*copy[7])( pixel *dst, intptr_t dst_stride, pixel *src, intptr_t src_stride, int i_height );
158 void (*copy_16x16_unaligned)( pixel *dst, intptr_t dst_stride, pixel *src, intptr_t src_stride, int i_height );
160 void (*store_interleave_chroma)( pixel *dst, intptr_t i_dst, pixel *srcu, pixel *srcv, int height );
161 void (*load_deinterleave_chroma_fenc)( pixel *dst, pixel *src, intptr_t i_src, int height );
162 void (*load_deinterleave_chroma_fdec)( pixel *dst, pixel *src, intptr_t i_src, int height );
164 void (*plane_copy)( pixel *dst, intptr_t i_dst, pixel *src, intptr_t i_src, int w, int h );
165 void (*plane_copy_swap)( pixel *dst, intptr_t i_dst, pixel *src, intptr_t i_src, int w, int h );
166 void (*plane_copy_interleave)( pixel *dst, intptr_t i_dst, pixel *srcu, intptr_t i_srcu,
167 pixel *srcv, intptr_t i_srcv, int w, int h );
168 /* may write up to 15 pixels off the end of each plane */
169 void (*plane_copy_deinterleave)( pixel *dstu, intptr_t i_dstu, pixel *dstv, intptr_t i_dstv,
170 pixel *src, intptr_t i_src, int w, int h );
171 void (*plane_copy_deinterleave_rgb)( pixel *dsta, intptr_t i_dsta, pixel *dstb, intptr_t i_dstb,
172 pixel *dstc, intptr_t i_dstc, pixel *src, intptr_t i_src, int pw, int w, int h );
173 void (*plane_copy_deinterleave_v210)( pixel *dsty, intptr_t i_dsty,
174 pixel *dstc, intptr_t i_dstc,
175 uint32_t *src, intptr_t i_src, int w, int h );
176 void (*hpel_filter)( pixel *dsth, pixel *dstv, pixel *dstc, pixel *src,
177 intptr_t i_stride, int i_width, int i_height, int16_t *buf );
179 /* prefetch the next few macroblocks of fenc or fdec */
180 void (*prefetch_fenc) ( pixel *pix_y, intptr_t stride_y, pixel *pix_uv, intptr_t stride_uv, int mb_x );
181 void (*prefetch_fenc_420)( pixel *pix_y, intptr_t stride_y, pixel *pix_uv, intptr_t stride_uv, int mb_x );
182 void (*prefetch_fenc_422)( pixel *pix_y, intptr_t stride_y, pixel *pix_uv, intptr_t stride_uv, int mb_x );
183 /* prefetch the next few macroblocks of a hpel reference frame */
184 void (*prefetch_ref)( pixel *pix, intptr_t stride, int parity );
186 void *(*memcpy_aligned)( void *dst, const void *src, size_t n );
187 void (*memzero_aligned)( void *dst, size_t n );
189 /* successive elimination prefilter */
190 void (*integral_init4h)( uint16_t *sum, pixel *pix, intptr_t stride );
191 void (*integral_init8h)( uint16_t *sum, pixel *pix, intptr_t stride );
192 void (*integral_init4v)( uint16_t *sum8, uint16_t *sum4, intptr_t stride );
193 void (*integral_init8v)( uint16_t *sum8, intptr_t stride );
195 void (*frame_init_lowres_core)( pixel *src0, pixel *dst0, pixel *dsth, pixel *dstv, pixel *dstc,
196 intptr_t src_stride, intptr_t dst_stride, int width, int height );
198 weight_fn_t *offsetadd;
199 weight_fn_t *offsetsub;
200 void (*weight_cache)( x264_t *, x264_weight_t * );
202 void (*mbtree_propagate_cost)( int16_t *dst, uint16_t *propagate_in, uint16_t *intra_costs,
203 uint16_t *inter_costs, uint16_t *inv_qscales, float *fps_factor, int len );
204 void (*mbtree_propagate_list)( x264_t *h, uint16_t *ref_costs, int16_t (*mvs)[2],
205 int16_t *propagate_amount, uint16_t *lowres_costs,
206 int bipred_weight, int mb_y, int len, int list );
207 void (*mbtree_fix8_pack)( uint16_t *dst, float *src, int count );
208 void (*mbtree_fix8_unpack)( float *dst, uint16_t *src, int count );
209 } x264_mc_functions_t;
/* Fill *pf with the motion-compensation implementations selected by the
 * `cpu` capability flags.
 * NOTE(review): cpu_independent presumably forces bit-exact,
 * architecture-independent function choices -- confirm in mc.c. */
void x264_mc_init( int cpu, x264_mc_functions_t *pf, int cpu_independent );