return score;
}
+/* Choose frame vs. field coding for a macroblock pair by comparing vertical
+ * SAD (VSAD) of the two arrangements. fenc points at the top-left luma pixel
+ * of the pair; stride is the luma plane stride. Returns 1 when field
+ * (interlaced) coding scores lower, 0 otherwise.
+ * NOTE(review): assumes h->pixf.vsad covers 16 lines per call, so the two
+ * calls per score span the full 16x32 pair — confirm against pixf.vsad. */
+int x264_field_vsad( x264_t *h, pixel *fenc, int stride )
+{
+ int score_field, score_frame;
+ /* Frame cost: VSAD of the top and bottom 16-line halves at full stride. */
+ score_frame = h->pixf.vsad( fenc, stride );
+ score_frame += h->pixf.vsad( fenc+16*stride, stride );
+ /* Field cost: VSAD of each interleaved field (every other line, so the
+  * pointer is advanced by one line and the stride doubled). */
+ score_field = h->pixf.vsad( fenc, stride*2 );
+ score_field += h->pixf.vsad( fenc+stride, stride*2 );
+ return (score_field < score_frame);
+}
+
/****************************************************************************
* successive elimination
****************************************************************************/
void x264_pixel_ssd_nv12( x264_pixel_function_t *pf, pixel *pix1, int i_pix1, pixel *pix2, int i_pix2, int i_width, int i_height, uint64_t *ssd_u, uint64_t *ssd_v );
uint64_t x264_pixel_ssd_wxh( x264_pixel_function_t *pf, pixel *pix1, int i_pix1, pixel *pix2, int i_pix2, int i_width, int i_height );
float x264_pixel_ssim_wxh( x264_pixel_function_t *pf, pixel *pix1, int i_pix1, pixel *pix2, int i_pix2, int i_width, int i_height, void *buf );
+int x264_field_vsad( x264_t *h, pixel *fenc, int stride );
#endif
if( h->mb.b_adaptive_mbaff )
{
if( !(i_mb_y&1) )
- h->mb.b_interlaced = 1;
+ {
+ /* FIXME: VSAD is fast but fairly poor at choosing the best interlace type. */
+ /* The frame/field decision is made once per MB pair, on the top MB
+ * (even i_mb_y); the bottom MB inherits b_interlaced unchanged. */
+ int stride = h->fenc->i_stride[0];
+ /* Top-left luma pixel of the 16x32 MB pair: 16*i_mb_x across plus
+ * 16*i_mb_y rows down (16*(x + y*stride) == 16*x + 16*y*stride). */
+ pixel *fenc = h->fenc->plane[0] + 16 * (i_mb_x + i_mb_y * stride);
+ h->mb.b_interlaced = x264_field_vsad( h, fenc, stride );
+ }
x264_zigzag_init( h->param.cpu, &h->zigzagf, h->mb.b_interlaced );
}
h->mb.field[mb_xy] = h->mb.b_interlaced;