/* dct code */
typedef short DCTELEM;
-void fdct_ifast (DCTELEM *data);
-void fdct_ifast248 (DCTELEM *data);
+void ff_fdct_ifast (DCTELEM *data);
+void ff_fdct_ifast248 (DCTELEM *data);
void ff_jpeg_fdct_islow_8(DCTELEM *data);
void ff_jpeg_fdct_islow_10(DCTELEM *data);
void ff_fdct248_islow_8(DCTELEM *data);
void ff_fdct248_islow_10(DCTELEM *data);
-void j_rev_dct (DCTELEM *data);
-void j_rev_dct4 (DCTELEM *data);
-void j_rev_dct2 (DCTELEM *data);
-void j_rev_dct1 (DCTELEM *data);
+void ff_j_rev_dct (DCTELEM *data);
+void ff_j_rev_dct4 (DCTELEM *data);
+void ff_j_rev_dct2 (DCTELEM *data);
+void ff_j_rev_dct1 (DCTELEM *data);
void ff_wmv2_idct_c(DCTELEM *data);
void ff_fdct_mmx(DCTELEM *block);
-void ff_fdct_mmx2(DCTELEM *block);
+void ff_fdct_mmxext(DCTELEM *block);
void ff_fdct_sse2(DCTELEM *block);
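/* Usage sketch (illustrative, not part of the header): each fdct above
 * transforms a row-major 8x8 block of DCTELEM in place, e.g.:
 *
 *     DECLARE_ALIGNED(16, DCTELEM, block)[64];
 *     // ... fill block with pixel (difference) values ...
 *     ff_jpeg_fdct_islow_8(block); // forward 8x8 DCT, in place
 */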
#define H264_IDCT(depth) \
void ff_h264_idct_add16_ ## depth ## _c(uint8_t *dst, const int *blockoffset, DCTELEM *block, int stride, const uint8_t nnzc[6*8]);\
void ff_h264_idct_add16intra_ ## depth ## _c(uint8_t *dst, const int *blockoffset, DCTELEM *block, int stride, const uint8_t nnzc[6*8]);\
void ff_h264_idct8_add4_ ## depth ## _c(uint8_t *dst, const int *blockoffset, DCTELEM *block, int stride, const uint8_t nnzc[6*8]);\
+void ff_h264_idct_add8_422_ ## depth ## _c(uint8_t **dest, const int *blockoffset, DCTELEM *block, int stride, const uint8_t nnzc[6*8]);\
void ff_h264_idct_add8_ ## depth ## _c(uint8_t **dest, const int *blockoffset, DCTELEM *block, int stride, const uint8_t nnzc[6*8]);\
void ff_h264_luma_dc_dequant_idct_ ## depth ## _c(DCTELEM *output, DCTELEM *input, int qmul);\
+void ff_h264_chroma422_dc_dequant_idct_ ## depth ## _c(DCTELEM *block, int qmul);\
void ff_h264_chroma_dc_dequant_idct_ ## depth ## _c(DCTELEM *block, int qmul);
H264_IDCT( 8)
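/* For reference, the token pasting above means H264_IDCT(8) declares the
 * 8-bit variants; sketch of the first expanded prototype:
 *
 *     void ff_h264_idct_add16_8_c(uint8_t *dst, const int *blockoffset,
 *                                 DCTELEM *block, int stride,
 *                                 const uint8_t nnzc[6*8]);
 */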
#define ff_put_pixels16x16_c ff_put_pixels16x16_8_c
#define ff_avg_pixels16x16_c ff_avg_pixels16x16_8_c
-/* VP3 DSP functions */
-void ff_vp3_idct_c(DCTELEM *block/* align 16*/);
-void ff_vp3_idct_put_c(uint8_t *dest/*align 8*/, int line_size, DCTELEM *block/*align 16*/);
-void ff_vp3_idct_add_c(uint8_t *dest/*align 8*/, int line_size, DCTELEM *block/*align 16*/);
-void ff_vp3_idct_dc_add_c(uint8_t *dest/*align 8*/, int line_size, const DCTELEM *block/*align 16*/);
-
-void ff_vp3_v_loop_filter_c(uint8_t *src, int stride, int *bounding_values);
-void ff_vp3_h_loop_filter_c(uint8_t *src, int stride, int *bounding_values);
-
-/* EA functions */
-void ff_ea_idct_put_c(uint8_t *dest, int linesize, DCTELEM *block);
+/* RV40 functions */
+void ff_put_rv40_qpel16_mc33_c(uint8_t *dst, uint8_t *src, int stride);
+void ff_avg_rv40_qpel16_mc33_c(uint8_t *dst, uint8_t *src, int stride);
+void ff_put_rv40_qpel8_mc33_c(uint8_t *dst, uint8_t *src, int stride);
+void ff_avg_rv40_qpel8_mc33_c(uint8_t *dst, uint8_t *src, int stride);
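/* (naming note: mc33 denotes the motion-compensation function for
 * quarter-pel offset (3,3), per the usual qpel_mc_func convention) */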
/* 1/2^n downscaling functions from imgconvert.c */
void ff_shrink22(uint8_t *dst, int dst_wrap, const uint8_t *src, int src_wrap, int width, int height);
typedef struct ScanTable {
    const uint8_t *scantable;
    uint8_t permutated[64];
    uint8_t raster_end[64];
-#if ARCH_PPC
- /** Used by dct_quantize_altivec to find last-non-zero */
- DECLARE_ALIGNED(16, uint8_t, inverse)[64];
-#endif
} ScanTable;
void ff_init_scantable(uint8_t *, ScanTable *st, const uint8_t *src_scantable);
+void ff_init_scantable_permutation(uint8_t *idct_permutation,
+ int idct_permutation_type);
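/* Usage sketch (illustrative; assumes a filled-in DSPContext `dsp` and the
 * standard zigzag table ff_zigzag_direct):
 *
 *     ScanTable st;
 *     ff_init_scantable(dsp.idct_permutation, &st, ff_zigzag_direct);
 *     // st.permutated[i] now combines the zigzag scan order with the
 *     // IDCT-specific coefficient permutation.
 */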
#define EMULATED_EDGE(depth) \
void ff_emulated_edge_mc_ ## depth (uint8_t *buf, const uint8_t *src, int linesize,\
                                    int block_w, int block_h,\
                                    int src_x, int src_y, int w, int h);
EMULATED_EDGE(9)
EMULATED_EDGE(10)
-void ff_add_pixels_clamped_c(const DCTELEM *block, uint8_t *dest, int linesize);
-void ff_put_pixels_clamped_c(const DCTELEM *block, uint8_t *dest, int linesize);
-void ff_put_signed_pixels_clamped_c(const DCTELEM *block, uint8_t *dest, int linesize);
-
/**
* DSPContext.
 */
typedef struct DSPContext {
    /* huffyuv specific */
void (*add_bytes)(uint8_t *dst/*align 16*/, uint8_t *src/*align 16*/, int w);
- void (*add_bytes_l2)(uint8_t *dst/*align 16*/, uint8_t *src1/*align 16*/, uint8_t *src2/*align 16*/, int w);
void (*diff_bytes)(uint8_t *dst/*align 16*/, uint8_t *src1/*align 16*/, uint8_t *src2/*align 1*/,int w);
/**
 * subtract huffyuv's variant of median prediction
 */
void (*sub_hfyu_median_prediction)(uint8_t *dst, const uint8_t *src1, const uint8_t *src2, int w, int *left, int *left_top);
void (*add_hfyu_median_prediction)(uint8_t *dst, const uint8_t *top, const uint8_t *diff, int w, int *left, int *left_top);
int (*add_hfyu_left_prediction)(uint8_t *dst, const uint8_t *src, int w, int left);
void (*add_hfyu_left_prediction_bgr32)(uint8_t *dst, const uint8_t *src, int w, int *red, int *green, int *blue, int *alpha);
- /* this might write to dst[w] */
- void (*add_png_paeth_prediction)(uint8_t *dst, uint8_t *src, uint8_t *top, int w, int bpp);
void (*bswap_buf)(uint32_t *dst, const uint32_t *src, int w);
void (*bswap16_buf)(uint16_t *dst, const uint16_t *src, int len);
void (*h261_loop_filter)(uint8_t *src, int stride);
- void (*x8_v_loop_filter)(uint8_t *src, int stride, int qscale);
- void (*x8_h_loop_filter)(uint8_t *src, int stride, int qscale);
-
- void (*vp3_idct_dc_add)(uint8_t *dest/*align 8*/, int line_size, const DCTELEM *block/*align 16*/);
- void (*vp3_v_loop_filter)(uint8_t *src, int stride, int *bounding_values);
- void (*vp3_h_loop_filter)(uint8_t *src, int stride, int *bounding_values);
-
/* assume len is a multiple of 4, and arrays are 16-byte aligned */
void (*vorbis_inverse_coupling)(float *mag, float *ang, int blocksize);
- void (*ac3_downmix)(float (*samples)[256], float (*matrix)[2], int out_ch, int in_ch, int len);
- /* assume len is a multiple of 8, and arrays are 16-byte aligned */
- void (*vector_fmul)(float *dst, const float *src0, const float *src1, int len);
+ /* assume len is a multiple of 16, and arrays are 32-byte aligned */
void (*vector_fmul_reverse)(float *dst, const float *src0, const float *src1, int len);
/* assume len is a multiple of 8, and src arrays are 16-byte aligned */
void (*vector_fmul_add)(float *dst, const float *src0, const float *src1, const float *src2, int len);
/**
 * Calculate the sum and difference of two vectors of floats.
 * @param v1 first input vector, sum output, 16-byte aligned
 * @param v2 second input vector, difference output, 16-byte aligned
 * @param len length of vectors, multiple of 4
 */
void (*butterflies_float)(float *restrict v1, float *restrict v2, int len);
+ /**
+ * Calculate the sum and difference of two vectors of floats and interleave
+ * results into a separate output vector of floats, with each sum
+ * positioned before the corresponding difference.
+ *
+ * @param dst output vector
+ * constraints: 16-byte aligned
+ * @param src0 first input vector
+ * constraints: 32-byte aligned
+ * @param src1 second input vector
+ * constraints: 32-byte aligned
+ * @param len number of elements in the input
+ * constraints: multiple of 8
+ */
+ void (*butterflies_float_interleave)(float *dst, const float *src0,
+ const float *src1, int len);
+
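/* A minimal scalar reference for the semantics documented above (the actual
 * DSPContext entry will usually point at an optimized version):
 *
 *     static void butterflies_float_interleave_c(float *dst, const float *src0,
 *                                                const float *src1, int len)
 *     {
 *         int i;
 *         for (i = 0; i < len; i++) {
 *             float f1 = src0[i];
 *             float f2 = src1[i];
 *             dst[2 * i    ] = f1 + f2; // sum first ...
 *             dst[2 * i + 1] = f1 - f2; // ... then difference
 *         }
 *     }
 */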
/* (I)DCT */
void (*fdct)(DCTELEM *block/* align 16*/);
void (*fdct248)(DCTELEM *block/* align 16*/);
/**
 * idct input permutation.
 * several optimized IDCTs need a permutated input (relative to the normal
 * order of the reference idct).
 * this permutation must be performed before the idct_put/add; note, normally
 * this can be merged with the zigzag/alternate scan<br>
 * an example to avoid confusion:
* - (->decode coeffs -> zigzag reorder -> dequant -> reference idct ->...)
- * - (x -> referece dct -> reference idct -> x)
- * - (x -> referece dct -> simple_mmx_perm = idct_permutation -> simple_idct_mmx -> x)
+ * - (x -> reference dct -> reference idct -> x)
+ * - (x -> reference dct -> simple_mmx_perm = idct_permutation -> simple_idct_mmx -> x)
* - (->decode coeffs -> zigzag reorder -> simple_mmx_perm -> dequant -> simple_idct_mmx ->...)
*/
uint8_t idct_permutation[64];
void (*shrink[4])(uint8_t *dst, int dst_wrap, const uint8_t *src, int src_wrap, int width, int height);
- /* mlp/truehd functions */
- void (*mlp_filter_channel)(int32_t *state, const int32_t *coeff,
- int firorder, int iirorder,
- unsigned int filter_shift, int32_t mask, int blocksize,
- int32_t *sample_buffer);
-
- /* intrax8 functions */
- void (*x8_spatial_compensation[12])(uint8_t *src , uint8_t *dst, int linesize);
- void (*x8_setup_spatial_compensation)(uint8_t *src, uint8_t *dst, int linesize,
- int * range, int * sum, int edges);
-
/**
* Calculate scalar product of two vectors.
* @param len length of vectors, should be multiple of 16
- * @param shift number of bits to discard from product
*/
- int32_t (*scalarproduct_int16)(const int16_t *v1, const int16_t *v2/*align 16*/, int len, int shift);
+ int32_t (*scalarproduct_int16)(const int16_t *v1, const int16_t *v2/*align 16*/, int len);
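/* Scalar reference for the semantics above (real entries are usually SIMD):
 *
 *     static int32_t scalarproduct_int16_c(const int16_t *v1, const int16_t *v2,
 *                                          int len)
 *     {
 *         int32_t res = 0;
 *         while (len--)
 *             res += *v1++ * *v2++;
 *         return res;
 *     }
 */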
/* ape functions */
/**
 * Calculate scalar product of v1 and v2,
 * and v1[i] += v3[i] * mul
 * @param len length of vectors, should be multiple of 16
 */
int32_t (*scalarproduct_and_madd_int16)(int16_t *v1/*align 16*/, const int16_t *v2, const int16_t *v3, int len, int mul);

/**
 * Clip each element in an array of int32_t to a given minimum and maximum value.
 * @param dst destination array
 *            constraints: 16-byte aligned
* @param src source array
* constraints: 16-byte aligned
* @param min minimum value
- * constraints: must in the the range [-(1<<24), 1<<24]
+ * constraints: must be in the range [-(1 << 24), 1 << 24]
* @param max maximum value
- * constraints: must in the the range [-(1<<24), 1<<24]
+ * constraints: must be in the range [-(1 << 24), 1 << 24]
* @param len number of elements in the array
* constraints: multiple of 32 greater than zero
*/
void (*vector_clip_int32)(int32_t *dst, const int32_t *src, int32_t min,
int32_t max, unsigned int len);
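/* A minimal scalar sketch of the clipping semantics (optimized versions rely
 * on the documented alignment and length constraints; av_clip() is from
 * libavutil/common.h):
 *
 *     static void vector_clip_int32_c(int32_t *dst, const int32_t *src,
 *                                     int32_t min, int32_t max, unsigned int len)
 *     {
 *         do {
 *             *dst++ = av_clip(*src++, min, max);
 *         } while (--len);
 *     }
 */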
- /* rv30 functions */
- qpel_mc_func put_rv30_tpel_pixels_tab[4][16];
- qpel_mc_func avg_rv30_tpel_pixels_tab[4][16];
-
- /* rv40 functions */
- qpel_mc_func put_rv40_qpel_pixels_tab[4][16];
- qpel_mc_func avg_rv40_qpel_pixels_tab[4][16];
- h264_chroma_mc_func put_rv40_chroma_pixels_tab[3];
- h264_chroma_mc_func avg_rv40_chroma_pixels_tab[3];
-
op_fill_func fill_block_tab[2];
} DSPContext;
-void dsputil_static_init(void);
-void dsputil_init(DSPContext* p, AVCodecContext *avctx);
+void ff_dsputil_static_init(void);
+void ff_dsputil_init(DSPContext* p, AVCodecContext *avctx);
int ff_check_alignment(void);
+/**
+ * Return the scalar product of two vectors.
+ *
+ * @param v1 first input vector
+ * @param v2 second input vector
+ * @param len number of elements
+ *
+ * @return sum of elementwise products
+ */
+float ff_scalarproduct_float_c(const float *v1, const float *v2, int len);
+
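/* Reference implementation sketch matching the prototype above:
 *
 *     float ff_scalarproduct_float_c(const float *v1, const float *v2, int len)
 *     {
 *         float p = 0.0;
 *         int i;
 *         for (i = 0; i < len; i++)
 *             p += v1[i] * v2[i];
 *         return p;
 *     }
 */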
/**
 * permute block according to permutation.
 * @param last last non-zero element in scantable order
 */
void ff_block_permute(DCTELEM *block, uint8_t *permutation, const uint8_t *scantable, int last);
}
}
-void dsputil_init_alpha(DSPContext* c, AVCodecContext *avctx);
-void dsputil_init_arm(DSPContext* c, AVCodecContext *avctx);
-void dsputil_init_bfin(DSPContext* c, AVCodecContext *avctx);
-void dsputil_init_mlib(DSPContext* c, AVCodecContext *avctx);
-void dsputil_init_mmi(DSPContext* c, AVCodecContext *avctx);
-void dsputil_init_mmx(DSPContext* c, AVCodecContext *avctx);
-void dsputil_init_ppc(DSPContext* c, AVCodecContext *avctx);
-void dsputil_init_sh4(DSPContext* c, AVCodecContext *avctx);
-void dsputil_init_vis(DSPContext* c, AVCodecContext *avctx);
+void ff_dsputil_init_alpha(DSPContext* c, AVCodecContext *avctx);
+void ff_dsputil_init_arm(DSPContext* c, AVCodecContext *avctx);
+void ff_dsputil_init_bfin(DSPContext* c, AVCodecContext *avctx);
+void ff_dsputil_init_mmx(DSPContext* c, AVCodecContext *avctx);
+void ff_dsputil_init_ppc(DSPContext* c, AVCodecContext *avctx);
+void ff_dsputil_init_sh4(DSPContext* c, AVCodecContext *avctx);
+void ff_dsputil_init_vis(DSPContext* c, AVCodecContext *avctx);
void ff_dsputil_init_dwt(DSPContext *c);
-void ff_rv30dsp_init(DSPContext* c, AVCodecContext *avctx);
-void ff_rv40dsp_init(DSPContext* c, AVCodecContext *avctx);
-void ff_intrax8dsp_init(DSPContext* c, AVCodecContext *avctx);
-void ff_mlp_init(DSPContext* c, AVCodecContext *avctx);
-void ff_mlp_init_x86(DSPContext* c, AVCodecContext *avctx);
-#if ARCH_ARM
-
-#if HAVE_NEON
+#if (ARCH_ARM && HAVE_NEON) || ARCH_PPC || HAVE_MMX
# define STRIDE_ALIGN 16
-#endif
-
-#elif ARCH_PPC
-
-#define STRIDE_ALIGN 16
-
-#elif HAVE_MMI
-
-#define STRIDE_ALIGN 16
-
-#endif
-
-#ifndef STRIDE_ALIGN
+#else
# define STRIDE_ALIGN 8
#endif
+// Some broken preprocessors need a second expansion
+// to be forced to tokenize __VA_ARGS__
+#define E(x) x
+
#define LOCAL_ALIGNED_A(a, t, v, s, o, ...) \
uint8_t la_##v[sizeof(t s o) + (a)]; \
t (*v) o = (void *)FFALIGN((uintptr_t)la_##v, a)
#define LOCAL_ALIGNED_D(a, t, v, s, o, ...) DECLARE_ALIGNED(a, t, v) s o
-#define LOCAL_ALIGNED(a, t, v, ...) LOCAL_ALIGNED_A(a, t, v, __VA_ARGS__,,)
+#define LOCAL_ALIGNED(a, t, v, ...) E(LOCAL_ALIGNED_A(a, t, v, __VA_ARGS__,,))
#if HAVE_LOCAL_ALIGNED_8
-# define LOCAL_ALIGNED_8(t, v, ...) LOCAL_ALIGNED_D(8, t, v, __VA_ARGS__,,)
+# define LOCAL_ALIGNED_8(t, v, ...) E(LOCAL_ALIGNED_D(8, t, v, __VA_ARGS__,,))
#else
# define LOCAL_ALIGNED_8(t, v, ...) LOCAL_ALIGNED(8, t, v, __VA_ARGS__)
#endif
#if HAVE_LOCAL_ALIGNED_16
-# define LOCAL_ALIGNED_16(t, v, ...) LOCAL_ALIGNED_D(16, t, v, __VA_ARGS__,,)
+# define LOCAL_ALIGNED_16(t, v, ...) E(LOCAL_ALIGNED_D(16, t, v, __VA_ARGS__,,))
#else
# define LOCAL_ALIGNED_16(t, v, ...) LOCAL_ALIGNED(16, t, v, __VA_ARGS__)
#endif
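/* Usage sketch: LOCAL_ALIGNED_16(DCTELEM, block, [64]) gives a local
 * DCTELEM array of 64 elements whose storage is 16-byte aligned, either
 * natively via DECLARE_ALIGNED (LOCAL_ALIGNED_D) or by over-allocating a
 * byte buffer and rounding the pointer up with FFALIGN (LOCAL_ALIGNED_A). */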
int i;
for(i=0; i<h; i++)
{
- AV_WN16(dst , AV_RN16(src ));
+ AV_COPY16U(dst, src);
dst+=dstStride;
src+=srcStride;
}
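/* Note: the AV_COPYxxU macros used in these copy loops come from
 * libavutil/intreadwrite.h and copy 2/4/8/16 bytes between possibly
 * unaligned addresses, so e.g. AV_COPY64U(dst, src) replaces the former
 * AV_WN32(dst, AV_RN32(src)) / AV_WN32(dst+4, AV_RN32(src+4)) pair. */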
int i;
for(i=0; i<h; i++)
{
- AV_WN32(dst , AV_RN32(src ));
+ AV_COPY32U(dst, src);
dst+=dstStride;
src+=srcStride;
}
int i;
for(i=0; i<h; i++)
{
- AV_WN32(dst , AV_RN32(src ));
- AV_WN32(dst+4 , AV_RN32(src+4 ));
+ AV_COPY64U(dst, src);
dst+=dstStride;
src+=srcStride;
}
int i;
for(i=0; i<h; i++)
{
- AV_WN32(dst , AV_RN32(src ));
- AV_WN32(dst+4 , AV_RN32(src+4 ));
+ AV_COPY64U(dst, src);
dst[8]= src[8];
dst+=dstStride;
src+=srcStride;
int i;
for(i=0; i<h; i++)
{
- AV_WN32(dst , AV_RN32(src ));
- AV_WN32(dst+4 , AV_RN32(src+4 ));
- AV_WN32(dst+8 , AV_RN32(src+8 ));
- AV_WN32(dst+12, AV_RN32(src+12));
+ AV_COPY128U(dst, src);
dst+=dstStride;
src+=srcStride;
}
int i;
for(i=0; i<h; i++)
{
- AV_WN32(dst , AV_RN32(src ));
- AV_WN32(dst+4 , AV_RN32(src+4 ));
- AV_WN32(dst+8 , AV_RN32(src+8 ));
- AV_WN32(dst+12, AV_RN32(src+12));
+ AV_COPY128U(dst, src);
dst[16]= src[16];
dst+=dstStride;
src+=srcStride;