+ x264_pixel_cmp_t mbcmp[8]; /* either satd or sad for subpel refine and mode decision */
+ x264_pixel_cmp_t mbcmp_unaligned[8]; /* unaligned mbcmp for subpel */
+ x264_pixel_cmp_t fpelcmp[8]; /* either satd or sad for fullpel motion search */
+ x264_pixel_cmp_x3_t fpelcmp_x3[7]; /* fpelcmp of 3 candidate blocks against one source at once */
+ x264_pixel_cmp_x4_t fpelcmp_x4[7]; /* fpelcmp of 4 candidate blocks against one source at once */
+ x264_pixel_cmp_t sad_aligned[8]; /* Aligned SAD for mbcmp */
+ /* SAD between vertically adjacent rows within one plane.
+  * NOTE(review): presumably used for interlace/frame-field analysis -- confirm against callers. */
+ int (*vsad)( pixel *, intptr_t, int );
+ /* Absolute value of the summed (signed) differences over an 8-wide strip of `height` rows.
+  * Unlike SAD, the sign of each per-pixel difference is kept until the final abs. */
+ int (*asd8)( pixel *pix1, intptr_t stride1, pixel *pix2, intptr_t stride2, int height );
+ /* Computes sa8d and satd in a single pass; the two results are packed into one uint64_t.
+  * NOTE(review): which half holds which metric is defined by the implementations -- confirm. */
+ uint64_t (*sa8d_satd[1])( pixel *pix1, intptr_t stride1, pixel *pix2, intptr_t stride2 );
+
+ /* Block variance helpers; the uint64_t packs the partial sums (sum and sum-of-squares).
+  * NOTE(review): packing layout defined by the implementations -- confirm. */
+ uint64_t (*var[4])( pixel *pix, intptr_t stride );
+ /* Variance of the difference of two blocks; also writes the SSD through *ssd. */
+ int (*var2[4])( pixel *pix1, intptr_t stride1,
+ pixel *pix2, intptr_t stride2, int *ssd );
+ /* Sum of Hadamard-transformed AC coefficients (energy metric), packed into one uint64_t. */
+ uint64_t (*hadamard_ac[4])( pixel *pix, intptr_t stride );
+
+ /* SSD over interleaved NV12 chroma (UVUV...); U and V totals are returned separately. */
+ void (*ssd_nv12_core)( pixel *pixuv1, intptr_t stride1,
+ pixel *pixuv2, intptr_t stride2, int width, int height,
+ uint64_t *ssd_u, uint64_t *ssd_v );
+ /* SSIM partial sums for two 4x4 blocks at once, written into sums[2][4]. */
+ void (*ssim_4x4x2_core)( const pixel *pix1, intptr_t stride1,
+ const pixel *pix2, intptr_t stride2, int sums[2][4] );
+ /* Reduce per-block SSIM sums (from ssim_4x4x2_core) to a final SSIM contribution. */
+ float (*ssim_end4)( int sum0[5][4], int sum1[5][4], int width );
+
+ /* multiple parallel calls to cmp. */
+ x264_pixel_cmp_x3_t sad_x3[7];
+ x264_pixel_cmp_x4_t sad_x4[7];
+ x264_pixel_cmp_x3_t satd_x3[7];
+ x264_pixel_cmp_x4_t satd_x4[7];
+
+ /* abs-diff-sum for successive elimination.
+ * may round width up to a multiple of 16. */
+ int (*ads[7])( int enc_dc[4], uint16_t *sums, int delta,
+ uint16_t *cost_mvx, int16_t *mvs, int width, int thresh );
+
+ /* calculate satd or sad of V, H, and DC modes.
+  * NOTE(review): each intra_mbcmp_* appears to be set to its satd or sad sibling
+  * depending on encoder settings (mirroring mbcmp above) -- confirm at init. */
+ void (*intra_mbcmp_x3_16x16)( pixel *fenc, pixel *fdec, int res[3] );
+ void (*intra_satd_x3_16x16) ( pixel *fenc, pixel *fdec, int res[3] );
+ void (*intra_sad_x3_16x16) ( pixel *fenc, pixel *fdec, int res[3] );
+ void (*intra_mbcmp_x3_4x4) ( pixel *fenc, pixel *fdec, int res[3] );
+ void (*intra_satd_x3_4x4) ( pixel *fenc, pixel *fdec, int res[3] );
+ void (*intra_sad_x3_4x4) ( pixel *fenc, pixel *fdec, int res[3] );
+ void (*intra_mbcmp_x3_chroma)( pixel *fenc, pixel *fdec, int res[3] );
+ void (*intra_satd_x3_chroma) ( pixel *fenc, pixel *fdec, int res[3] );
+ void (*intra_sad_x3_chroma) ( pixel *fenc, pixel *fdec, int res[3] );
+ void (*intra_mbcmp_x3_8x16c) ( pixel *fenc, pixel *fdec, int res[3] );
+ void (*intra_satd_x3_8x16c) ( pixel *fenc, pixel *fdec, int res[3] );
+ void (*intra_sad_x3_8x16c) ( pixel *fenc, pixel *fdec, int res[3] );
+ void (*intra_mbcmp_x3_8x8c) ( pixel *fenc, pixel *fdec, int res[3] );
+ void (*intra_satd_x3_8x8c) ( pixel *fenc, pixel *fdec, int res[3] );
+ void (*intra_sad_x3_8x8c) ( pixel *fenc, pixel *fdec, int res[3] );
+ /* 8x8 variants take the filtered edge array instead of fdec. */
+ void (*intra_mbcmp_x3_8x8) ( pixel *fenc, pixel edge[36], int res[3] );
+ void (*intra_sa8d_x3_8x8) ( pixel *fenc, pixel edge[36], int res[3] );
+ void (*intra_sad_x3_8x8) ( pixel *fenc, pixel edge[36], int res[3] );
+ /* find minimum satd or sad of all modes, and set fdec.
+ * may be NULL, in which case just use pred+satd instead. */
+ int (*intra_mbcmp_x9_4x4)( pixel *fenc, pixel *fdec, uint16_t *bitcosts );
+ int (*intra_satd_x9_4x4) ( pixel *fenc, pixel *fdec, uint16_t *bitcosts );
+ int (*intra_sad_x9_4x4) ( pixel *fenc, pixel *fdec, uint16_t *bitcosts );
+ int (*intra_mbcmp_x9_8x8)( pixel *fenc, pixel *fdec, pixel edge[36], uint16_t *bitcosts, uint16_t *satds );
+ int (*intra_sa8d_x9_8x8) ( pixel *fenc, pixel *fdec, pixel edge[36], uint16_t *bitcosts, uint16_t *satds );
+ int (*intra_sad_x9_8x8) ( pixel *fenc, pixel *fdec, pixel edge[36], uint16_t *bitcosts, uint16_t *satds );