/**
 * @file
 * H.264 / AVC / MPEG-4 prediction functions.
 * @author Michael Niedermayer <michaelni@gmx.at>
 */
#define VERT_VP8_PRED 10 ///< for VP8, #VERT_PRED is the average of
                         ///< (left col+cur col x2+right col) / 4;
                         ///< this is the "unaveraged" one
#define HOR_VP8_PRED 11  ///< unaveraged version of #HOR_PRED, see
                         ///< #VERT_VP8_PRED for details
#define DC_127_PRED 12
#define DC_129_PRED 13
// 8x8 chroma prediction modes
#define TOP_DC_PRED8x8 5
#define DC_128_PRED8x8 6
// H.264/SVQ3 (8x8) specific
#define ALZHEIMER_DC_L0T_PRED8x8 7
#define ALZHEIMER_DC_0LT_PRED8x8 8
#define ALZHEIMER_DC_L00_PRED8x8 9
void(*pred16x16[4 + 3 + 2])(uint8_t *src, ptrdiff_t stride);
// Residual-add variants: add the (dequantized) coefficient block on top of
// the prediction already in pix.
// NOTE(review): block is non-const on the assumption that implementations
// may clear the coefficient block after adding it — confirm against the
// DSP implementations before re-adding const.
void(*pred4x4_add[2])(uint8_t *pix /*align 4*/,
                      int16_t *block /*align 16*/, ptrdiff_t stride);
void(*pred8x8l_add[2])(uint8_t *pix /*align 8*/,
                       int16_t *block /*align 16*/, ptrdiff_t stride);
void(*pred8x8_add[3])(uint8_t *pix /*align 8*/,
                      const int *block_offset,
                      int16_t *block /*align 16*/, ptrdiff_t stride);
void(*pred16x16_add[3])(uint8_t *pix /*align 16*/,
                        const int *block_offset,
                        int16_t *block /*align 16*/, ptrdiff_t stride);
} H264PredContext;
/**
 * Set the prediction function pointers in a H264PredContext for the given
 * codec, bit depth and chroma subsampling.
 */
void ff_h264_pred_init(H264PredContext *h, int codec_id,
                       const int bit_depth, const int chroma_format_idc);
// Per-architecture overrides, installed on top of the C defaults.
void ff_h264_pred_init_aarch64(H264PredContext *h, int codec_id,
                               const int bit_depth,
                               const int chroma_format_idc);
void ff_h264_pred_init_arm(H264PredContext *h, int codec_id,
                           const int bit_depth, const int chroma_format_idc);
void ff_h264_pred_init_x86(H264PredContext *h, int codec_id,