* Copyright (c) 2000, 2001 Fabrice Bellard.
* Copyright (c) 2002-2004 Michael Niedermayer <michaelni@gmx.at>
*
+ * gmc & q-pel & 32/64 bit based MC by Michael Niedermayer <michaelni@gmx.at>
+ *
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
- *
- * gmc & q-pel & 32/64 bit based MC by Michael Niedermayer <michaelni@gmx.at>
*/
/**
 * @file dsputil.c
 * DSP utils
 */
#include "mpegvideo.h"
#include "simple_idct.h"
#include "faandct.h"
+#include "h263.h"
#include "snow.h"
/* snow.c */
/* vorbis.c */
void vorbis_inverse_coupling(float *mag, float *ang, int blocksize);
+/* flacenc.c */
+void ff_flac_compute_autocorr(const int32_t *data, int len, int lag, double *autoc);
+
uint8_t ff_cropTbl[256 + 2 * MAX_NEG_CROP] = {0, };
uint32_t ff_squareTbl[512] = {0, };
}
}
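+/* Sum of the absolute values of the 64 coefficients of a transformed 8x8
+ * block. Split out into DSPContext (see dct_sad8x8_c below) so that
+ * architecture-specific implementations can override it. */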
+static int sum_abs_dctelem_c(DCTELEM *block)
+{
+ int sum=0, i;
+ for(i=0; i<64; i++)
+ sum+= FFABS(block[i]);
+ return sum;
+}
+
#if 0
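+/* Byte-parallel averaging without unpacking, using the identities
+ *   (a+b)>>1   == (a&b) + ((a^b)>>1)    (truncating average)
+ *   (a+b+1)>>1 == (a|b) - ((a^b)>>1)    (rounding average)
+ * The 0xFE...FE mask clears each lane's LSB before the shift so that no
+ * bit leaks into the neighbouring byte. */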
#define PIXOP2(OPNAME, OP) \
{\
int i;\
for(i=0; i<h; i++){\
- OP(*((uint64_t*)block), LD64(pixels));\
+ OP(*((uint64_t*)block), AV_RN64(pixels));\
pixels+=line_size;\
block +=line_size;\
}\
{\
int i;\
for(i=0; i<h; i++){\
- const uint64_t a= LD64(pixels );\
- const uint64_t b= LD64(pixels+1);\
+ const uint64_t a= AV_RN64(pixels );\
+ const uint64_t b= AV_RN64(pixels+1);\
OP(*((uint64_t*)block), (a&b) + (((a^b)&0xFEFEFEFEFEFEFEFEULL)>>1));\
pixels+=line_size;\
block +=line_size;\
{\
int i;\
for(i=0; i<h; i++){\
- const uint64_t a= LD64(pixels );\
- const uint64_t b= LD64(pixels+1);\
+ const uint64_t a= AV_RN64(pixels );\
+ const uint64_t b= AV_RN64(pixels+1);\
OP(*((uint64_t*)block), (a|b) - (((a^b)&0xFEFEFEFEFEFEFEFEULL)>>1));\
pixels+=line_size;\
block +=line_size;\
{\
int i;\
for(i=0; i<h; i++){\
- const uint64_t a= LD64(pixels );\
- const uint64_t b= LD64(pixels+line_size);\
+ const uint64_t a= AV_RN64(pixels );\
+ const uint64_t b= AV_RN64(pixels+line_size);\
OP(*((uint64_t*)block), (a&b) + (((a^b)&0xFEFEFEFEFEFEFEFEULL)>>1));\
pixels+=line_size;\
block +=line_size;\
{\
int i;\
for(i=0; i<h; i++){\
- const uint64_t a= LD64(pixels );\
- const uint64_t b= LD64(pixels+line_size);\
+ const uint64_t a= AV_RN64(pixels );\
+ const uint64_t b= AV_RN64(pixels+line_size);\
OP(*((uint64_t*)block), (a|b) - (((a^b)&0xFEFEFEFEFEFEFEFEULL)>>1));\
pixels+=line_size;\
block +=line_size;\
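+    /* xy2: bilinear average of four neighbours, (A+B+C+D+2)>>2 per byte.  \
+       Each input is split into its low 2 bits (l0/l1) and high 6 bits     \
+       (h0/h1) so the four-term sum cannot overflow a byte lane; the       \
+       0x02...02 constant is the rounding term, 0x01...01 the truncating   \
+       one used by the no_rnd variant. */\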
static void OPNAME ## _pixels_xy2_c(uint8_t *block, const uint8_t *pixels, int line_size, int h)\
{\
int i;\
- const uint64_t a= LD64(pixels );\
- const uint64_t b= LD64(pixels+1);\
+ const uint64_t a= AV_RN64(pixels );\
+ const uint64_t b= AV_RN64(pixels+1);\
uint64_t l0= (a&0x0303030303030303ULL)\
+ (b&0x0303030303030303ULL)\
+ 0x0202020202020202ULL;\
\
pixels+=line_size;\
for(i=0; i<h; i+=2){\
- uint64_t a= LD64(pixels );\
- uint64_t b= LD64(pixels+1);\
+ uint64_t a= AV_RN64(pixels );\
+ uint64_t b= AV_RN64(pixels+1);\
l1= (a&0x0303030303030303ULL)\
+ (b&0x0303030303030303ULL);\
h1= ((a&0xFCFCFCFCFCFCFCFCULL)>>2)\
OP(*((uint64_t*)block), h0+h1+(((l0+l1)>>2)&0x0F0F0F0F0F0F0F0FULL));\
pixels+=line_size;\
block +=line_size;\
- a= LD64(pixels );\
- b= LD64(pixels+1);\
+ a= AV_RN64(pixels );\
+ b= AV_RN64(pixels+1);\
l0= (a&0x0303030303030303ULL)\
+ (b&0x0303030303030303ULL)\
+ 0x0202020202020202ULL;\
static void OPNAME ## _no_rnd_pixels_xy2_c(uint8_t *block, const uint8_t *pixels, int line_size, int h)\
{\
int i;\
- const uint64_t a= LD64(pixels );\
- const uint64_t b= LD64(pixels+1);\
+ const uint64_t a= AV_RN64(pixels );\
+ const uint64_t b= AV_RN64(pixels+1);\
uint64_t l0= (a&0x0303030303030303ULL)\
+ (b&0x0303030303030303ULL)\
+ 0x0101010101010101ULL;\
\
pixels+=line_size;\
for(i=0; i<h; i+=2){\
- uint64_t a= LD64(pixels );\
- uint64_t b= LD64(pixels+1);\
+ uint64_t a= AV_RN64(pixels );\
+ uint64_t b= AV_RN64(pixels+1);\
l1= (a&0x0303030303030303ULL)\
+ (b&0x0303030303030303ULL);\
h1= ((a&0xFCFCFCFCFCFCFCFCULL)>>2)\
OP(*((uint64_t*)block), h0+h1+(((l0+l1)>>2)&0x0F0F0F0F0F0F0F0FULL));\
pixels+=line_size;\
block +=line_size;\
- a= LD64(pixels );\
- b= LD64(pixels+1);\
+ a= AV_RN64(pixels );\
+ b= AV_RN64(pixels+1);\
l0= (a&0x0303030303030303ULL)\
+ (b&0x0303030303030303ULL)\
+ 0x0101010101010101ULL;\
static void OPNAME ## _pixels2_c(uint8_t *block, const uint8_t *pixels, int line_size, int h){\
int i;\
for(i=0; i<h; i++){\
- OP(*((uint16_t*)(block )), LD16(pixels ));\
+ OP(*((uint16_t*)(block )), AV_RN16(pixels ));\
pixels+=line_size;\
block +=line_size;\
}\
static void OPNAME ## _pixels4_c(uint8_t *block, const uint8_t *pixels, int line_size, int h){\
int i;\
for(i=0; i<h; i++){\
- OP(*((uint32_t*)(block )), LD32(pixels ));\
+ OP(*((uint32_t*)(block )), AV_RN32(pixels ));\
pixels+=line_size;\
block +=line_size;\
}\
static void OPNAME ## _pixels8_c(uint8_t *block, const uint8_t *pixels, int line_size, int h){\
int i;\
for(i=0; i<h; i++){\
- OP(*((uint32_t*)(block )), LD32(pixels ));\
- OP(*((uint32_t*)(block+4)), LD32(pixels+4));\
+ OP(*((uint32_t*)(block )), AV_RN32(pixels ));\
+ OP(*((uint32_t*)(block+4)), AV_RN32(pixels+4));\
pixels+=line_size;\
block +=line_size;\
}\
int i;\
for(i=0; i<h; i++){\
uint32_t a,b;\
- a= LD32(&src1[i*src_stride1 ]);\
- b= LD32(&src2[i*src_stride2 ]);\
+ a= AV_RN32(&src1[i*src_stride1 ]);\
+ b= AV_RN32(&src2[i*src_stride2 ]);\
OP(*((uint32_t*)&dst[i*dst_stride ]), no_rnd_avg32(a, b));\
- a= LD32(&src1[i*src_stride1+4]);\
- b= LD32(&src2[i*src_stride2+4]);\
+ a= AV_RN32(&src1[i*src_stride1+4]);\
+ b= AV_RN32(&src2[i*src_stride2+4]);\
OP(*((uint32_t*)&dst[i*dst_stride+4]), no_rnd_avg32(a, b));\
}\
}\
int i;\
for(i=0; i<h; i++){\
uint32_t a,b;\
- a= LD32(&src1[i*src_stride1 ]);\
- b= LD32(&src2[i*src_stride2 ]);\
+ a= AV_RN32(&src1[i*src_stride1 ]);\
+ b= AV_RN32(&src2[i*src_stride2 ]);\
OP(*((uint32_t*)&dst[i*dst_stride ]), rnd_avg32(a, b));\
- a= LD32(&src1[i*src_stride1+4]);\
- b= LD32(&src2[i*src_stride2+4]);\
+ a= AV_RN32(&src1[i*src_stride1+4]);\
+ b= AV_RN32(&src2[i*src_stride2+4]);\
OP(*((uint32_t*)&dst[i*dst_stride+4]), rnd_avg32(a, b));\
}\
}\
int i;\
for(i=0; i<h; i++){\
uint32_t a,b;\
- a= LD32(&src1[i*src_stride1 ]);\
- b= LD32(&src2[i*src_stride2 ]);\
+ a= AV_RN32(&src1[i*src_stride1 ]);\
+ b= AV_RN32(&src2[i*src_stride2 ]);\
OP(*((uint32_t*)&dst[i*dst_stride ]), rnd_avg32(a, b));\
}\
}\
int i;\
for(i=0; i<h; i++){\
uint32_t a,b;\
- a= LD16(&src1[i*src_stride1 ]);\
- b= LD16(&src2[i*src_stride2 ]);\
+ a= AV_RN16(&src1[i*src_stride1 ]);\
+ b= AV_RN16(&src2[i*src_stride2 ]);\
OP(*((uint16_t*)&dst[i*dst_stride ]), rnd_avg32(a, b));\
}\
}\
int i;\
for(i=0; i<h; i++){\
uint32_t a, b, c, d, l0, l1, h0, h1;\
- a= LD32(&src1[i*src_stride1]);\
- b= LD32(&src2[i*src_stride2]);\
- c= LD32(&src3[i*src_stride3]);\
- d= LD32(&src4[i*src_stride4]);\
+ a= AV_RN32(&src1[i*src_stride1]);\
+ b= AV_RN32(&src2[i*src_stride2]);\
+ c= AV_RN32(&src3[i*src_stride3]);\
+ d= AV_RN32(&src4[i*src_stride4]);\
l0= (a&0x03030303UL)\
+ (b&0x03030303UL)\
+ 0x02020202UL;\
h1= ((c&0xFCFCFCFCUL)>>2)\
+ ((d&0xFCFCFCFCUL)>>2);\
OP(*((uint32_t*)&dst[i*dst_stride]), h0+h1+(((l0+l1)>>2)&0x0F0F0F0FUL));\
- a= LD32(&src1[i*src_stride1+4]);\
- b= LD32(&src2[i*src_stride2+4]);\
- c= LD32(&src3[i*src_stride3+4]);\
- d= LD32(&src4[i*src_stride4+4]);\
+ a= AV_RN32(&src1[i*src_stride1+4]);\
+ b= AV_RN32(&src2[i*src_stride2+4]);\
+ c= AV_RN32(&src3[i*src_stride3+4]);\
+ d= AV_RN32(&src4[i*src_stride4+4]);\
l0= (a&0x03030303UL)\
+ (b&0x03030303UL)\
+ 0x02020202UL;\
int i;\
for(i=0; i<h; i++){\
uint32_t a, b, c, d, l0, l1, h0, h1;\
- a= LD32(&src1[i*src_stride1]);\
- b= LD32(&src2[i*src_stride2]);\
- c= LD32(&src3[i*src_stride3]);\
- d= LD32(&src4[i*src_stride4]);\
+ a= AV_RN32(&src1[i*src_stride1]);\
+ b= AV_RN32(&src2[i*src_stride2]);\
+ c= AV_RN32(&src3[i*src_stride3]);\
+ d= AV_RN32(&src4[i*src_stride4]);\
l0= (a&0x03030303UL)\
+ (b&0x03030303UL)\
+ 0x01010101UL;\
h1= ((c&0xFCFCFCFCUL)>>2)\
+ ((d&0xFCFCFCFCUL)>>2);\
OP(*((uint32_t*)&dst[i*dst_stride]), h0+h1+(((l0+l1)>>2)&0x0F0F0F0FUL));\
- a= LD32(&src1[i*src_stride1+4]);\
- b= LD32(&src2[i*src_stride2+4]);\
- c= LD32(&src3[i*src_stride3+4]);\
- d= LD32(&src4[i*src_stride4+4]);\
+ a= AV_RN32(&src1[i*src_stride1+4]);\
+ b= AV_RN32(&src2[i*src_stride2+4]);\
+ c= AV_RN32(&src3[i*src_stride3+4]);\
+ d= AV_RN32(&src4[i*src_stride4+4]);\
l0= (a&0x03030303UL)\
+ (b&0x03030303UL)\
+ 0x01010101UL;\
static inline void OPNAME ## _pixels4_xy2_c(uint8_t *block, const uint8_t *pixels, int line_size, int h)\
{\
int i;\
- const uint32_t a= LD32(pixels );\
- const uint32_t b= LD32(pixels+1);\
+ const uint32_t a= AV_RN32(pixels );\
+ const uint32_t b= AV_RN32(pixels+1);\
uint32_t l0= (a&0x03030303UL)\
+ (b&0x03030303UL)\
+ 0x02020202UL;\
\
pixels+=line_size;\
for(i=0; i<h; i+=2){\
- uint32_t a= LD32(pixels );\
- uint32_t b= LD32(pixels+1);\
+ uint32_t a= AV_RN32(pixels );\
+ uint32_t b= AV_RN32(pixels+1);\
l1= (a&0x03030303UL)\
+ (b&0x03030303UL);\
h1= ((a&0xFCFCFCFCUL)>>2)\
OP(*((uint32_t*)block), h0+h1+(((l0+l1)>>2)&0x0F0F0F0FUL));\
pixels+=line_size;\
block +=line_size;\
- a= LD32(pixels );\
- b= LD32(pixels+1);\
+ a= AV_RN32(pixels );\
+ b= AV_RN32(pixels+1);\
l0= (a&0x03030303UL)\
+ (b&0x03030303UL)\
+ 0x02020202UL;\
int j;\
for(j=0; j<2; j++){\
int i;\
- const uint32_t a= LD32(pixels );\
- const uint32_t b= LD32(pixels+1);\
+ const uint32_t a= AV_RN32(pixels );\
+ const uint32_t b= AV_RN32(pixels+1);\
uint32_t l0= (a&0x03030303UL)\
+ (b&0x03030303UL)\
+ 0x02020202UL;\
\
pixels+=line_size;\
for(i=0; i<h; i+=2){\
- uint32_t a= LD32(pixels );\
- uint32_t b= LD32(pixels+1);\
+ uint32_t a= AV_RN32(pixels );\
+ uint32_t b= AV_RN32(pixels+1);\
l1= (a&0x03030303UL)\
+ (b&0x03030303UL);\
h1= ((a&0xFCFCFCFCUL)>>2)\
OP(*((uint32_t*)block), h0+h1+(((l0+l1)>>2)&0x0F0F0F0FUL));\
pixels+=line_size;\
block +=line_size;\
- a= LD32(pixels );\
- b= LD32(pixels+1);\
+ a= AV_RN32(pixels );\
+ b= AV_RN32(pixels+1);\
l0= (a&0x03030303UL)\
+ (b&0x03030303UL)\
+ 0x02020202UL;\
int j;\
for(j=0; j<2; j++){\
int i;\
- const uint32_t a= LD32(pixels );\
- const uint32_t b= LD32(pixels+1);\
+ const uint32_t a= AV_RN32(pixels );\
+ const uint32_t b= AV_RN32(pixels+1);\
uint32_t l0= (a&0x03030303UL)\
+ (b&0x03030303UL)\
+ 0x01010101UL;\
\
pixels+=line_size;\
for(i=0; i<h; i+=2){\
- uint32_t a= LD32(pixels );\
- uint32_t b= LD32(pixels+1);\
+ uint32_t a= AV_RN32(pixels );\
+ uint32_t b= AV_RN32(pixels+1);\
l1= (a&0x03030303UL)\
+ (b&0x03030303UL);\
h1= ((a&0xFCFCFCFCUL)>>2)\
OP(*((uint32_t*)block), h0+h1+(((l0+l1)>>2)&0x0F0F0F0FUL));\
pixels+=line_size;\
block +=line_size;\
- a= LD32(pixels );\
- b= LD32(pixels+1);\
+ a= AV_RN32(pixels );\
+ b= AV_RN32(pixels+1);\
l0= (a&0x03030303UL)\
+ (b&0x03030303UL)\
+ 0x01010101UL;\
+ src[index+stride+1]* frac_x )* frac_y
+ r)>>(shift*2);
}else{
- index= src_x + clip(src_y, 0, height)*stride;
+ index= src_x + av_clip(src_y, 0, height)*stride;
dst[y*stride + x]= ( ( src[index ]*(s-frac_x)
+ src[index +1]* frac_x )*s
+ r)>>(shift*2);
}
}else{
if((unsigned)src_y < height){
- index= clip(src_x, 0, width) + src_y*stride;
+ index= av_clip(src_x, 0, width) + src_y*stride;
dst[y*stride + x]= ( ( src[index ]*(s-frac_y)
+ src[index+stride ]* frac_y )*s
+ r)>>(shift*2);
}else{
- index= clip(src_x, 0, width) + clip(src_y, 0, height)*stride;
+ index= av_clip(src_x, 0, width) + av_clip(src_y, 0, height)*stride;
dst[y*stride + x]= src[index ];
}
}
}
}
-static inline void copy_block2(uint8_t *dst, uint8_t *src, int dstStride, int srcStride, int h)
-{
- int i;
- for(i=0; i<h; i++)
- {
- ST16(dst , LD16(src ));
- dst+=dstStride;
- src+=srcStride;
- }
-}
-
-static inline void copy_block4(uint8_t *dst, uint8_t *src, int dstStride, int srcStride, int h)
-{
- int i;
- for(i=0; i<h; i++)
- {
- ST32(dst , LD32(src ));
- dst+=dstStride;
- src+=srcStride;
- }
-}
-
-static inline void copy_block8(uint8_t *dst, uint8_t *src, int dstStride, int srcStride, int h)
-{
- int i;
- for(i=0; i<h; i++)
- {
- ST32(dst , LD32(src ));
- ST32(dst+4 , LD32(src+4 ));
- dst+=dstStride;
- src+=srcStride;
- }
-}
-
-static inline void copy_block16(uint8_t *dst, uint8_t *src, int dstStride, int srcStride, int h)
-{
- int i;
- for(i=0; i<h; i++)
- {
- ST32(dst , LD32(src ));
- ST32(dst+4 , LD32(src+4 ));
- ST32(dst+8 , LD32(src+8 ));
- ST32(dst+12, LD32(src+12));
- dst+=dstStride;
- src+=srcStride;
- }
-}
-
-static inline void copy_block17(uint8_t *dst, uint8_t *src, int dstStride, int srcStride, int h)
-{
- int i;
- for(i=0; i<h; i++)
- {
- ST32(dst , LD32(src ));
- ST32(dst+4 , LD32(src+4 ));
- ST32(dst+8 , LD32(src+8 ));
- ST32(dst+12, LD32(src+12));
- dst[16]= src[16];
- dst+=dstStride;
- src+=srcStride;
- }
-}
-
-static inline void copy_block9(uint8_t *dst, uint8_t *src, int dstStride, int srcStride, int h)
-{
- int i;
- for(i=0; i<h; i++)
- {
- ST32(dst , LD32(src ));
- ST32(dst+4 , LD32(src+4 ));
- dst[8]= src[8];
- dst+=dstStride;
- src+=srcStride;
- }
-}
-
-
#define QPEL_MC(r, OPNAME, RND, OP) \
static void OPNAME ## mpeg4_qpel8_h_lowpass(uint8_t *dst, uint8_t *src, int dstStride, int srcStride, int h){\
uint8_t *cm = ff_cropTbl + MAX_NEG_CROP;\
#if 1
#define H264_LOWPASS(OPNAME, OP, OP2) \
-static void OPNAME ## h264_qpel2_h_lowpass(uint8_t *dst, uint8_t *src, int dstStride, int srcStride){\
+static av_unused void OPNAME ## h264_qpel2_h_lowpass(uint8_t *dst, uint8_t *src, int dstStride, int srcStride){\
const int h=2;\
uint8_t *cm = ff_cropTbl + MAX_NEG_CROP;\
int i;\
}\
}\
\
-static void OPNAME ## h264_qpel2_v_lowpass(uint8_t *dst, uint8_t *src, int dstStride, int srcStride){\
+static av_unused void OPNAME ## h264_qpel2_v_lowpass(uint8_t *dst, uint8_t *src, int dstStride, int srcStride){\
const int w=2;\
uint8_t *cm = ff_cropTbl + MAX_NEG_CROP;\
int i;\
}\
}\
\
-static void OPNAME ## h264_qpel2_hv_lowpass(uint8_t *dst, int16_t *tmp, uint8_t *src, int dstStride, int tmpStride, int srcStride){\
+static av_unused void OPNAME ## h264_qpel2_hv_lowpass(uint8_t *dst, int16_t *tmp, uint8_t *src, int dstStride, int tmpStride, int srcStride){\
const int h=2;\
const int w=2;\
uint8_t *cm = ff_cropTbl + MAX_NEG_CROP;\
#undef op2_put
#endif
-#define op_scale1(x) block[x] = clip_uint8( (block[x]*weight + offset) >> log2_denom )
-#define op_scale2(x) dst[x] = clip_uint8( (src[x]*weights + dst[x]*weightd + offset) >> (log2_denom+1))
+#define op_scale1(x) block[x] = av_clip_uint8( (block[x]*weight + offset) >> log2_denom )
+#define op_scale2(x) dst[x] = av_clip_uint8( (src[x]*weights + dst[x]*weightd + offset) >> (log2_denom+1))
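+/* op_scale1 applies explicit weighted prediction to a single reference;
+ * op_scale2 blends two weighted references, keeping one extra bit of
+ * precision, hence the log2_denom+1 shift. */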
#define H264_WEIGHT(W,H) \
static void weight_h264_pixels ## W ## x ## H ## _c(uint8_t *block, int stride, int log2_denom, int weight, int offset){ \
int y; \
}
#endif /* CONFIG_VC1_DECODER||CONFIG_WMV3_DECODER */
+#if defined(CONFIG_H264_ENCODER)
+/* H264 specific */
+void ff_h264dspenc_init(DSPContext* c, AVCodecContext *avctx);
+#endif /* CONFIG_H264_ENCODER */
+
static void wmv2_mspel8_v_lowpass(uint8_t *dst, uint8_t *src, int dstStride, int srcStride, int w){
uint8_t *cm = ff_cropTbl + MAX_NEG_CROP;
int i;
}
static void h263_v_loop_filter_c(uint8_t *src, int stride, int qscale){
+ if(ENABLE_ANY_H263) {
int x;
const int strength= ff_h263_loop_filter_strength[qscale];
ad1= FFABS(d1)>>1;
- d2= clip((p0-p3)/4, -ad1, ad1);
+ d2= av_clip((p0-p3)/4, -ad1, ad1);
src[x-2*stride] = p0 - d2;
src[x+ stride] = p3 + d2;
}
+ }
}
static void h263_h_loop_filter_c(uint8_t *src, int stride, int qscale){
+ if(ENABLE_ANY_H263) {
int y;
const int strength= ff_h263_loop_filter_strength[qscale];
ad1= FFABS(d1)>>1;
- d2= clip((p0-p3)/4, -ad1, ad1);
+ d2= av_clip((p0-p3)/4, -ad1, ad1);
src[y*stride-2] = p0 - d2;
src[y*stride+1] = p3 + d2;
}
+ }
}
static void h261_loop_filter_c(uint8_t *src, int stride){
int i_delta;
if( FFABS( p2 - p0 ) < beta ) {
- pix[-2*xstride] = p1 + clip( (( p2 + ( ( p0 + q0 + 1 ) >> 1 ) ) >> 1) - p1, -tc0[i], tc0[i] );
+ pix[-2*xstride] = p1 + av_clip( (( p2 + ( ( p0 + q0 + 1 ) >> 1 ) ) >> 1) - p1, -tc0[i], tc0[i] );
tc++;
}
if( FFABS( q2 - q0 ) < beta ) {
- pix[ xstride] = q1 + clip( (( q2 + ( ( p0 + q0 + 1 ) >> 1 ) ) >> 1) - q1, -tc0[i], tc0[i] );
+ pix[ xstride] = q1 + av_clip( (( q2 + ( ( p0 + q0 + 1 ) >> 1 ) ) >> 1) - q1, -tc0[i], tc0[i] );
tc++;
}
- i_delta = clip( (((q0 - p0 ) << 2) + (p1 - q1) + 4) >> 3, -tc, tc );
- pix[-xstride] = clip_uint8( p0 + i_delta ); /* p0' */
- pix[0] = clip_uint8( q0 - i_delta ); /* q0' */
+ i_delta = av_clip( (((q0 - p0 ) << 2) + (p1 - q1) + 4) >> 3, -tc, tc );
+ pix[-xstride] = av_clip_uint8( p0 + i_delta ); /* p0' */
+ pix[0] = av_clip_uint8( q0 - i_delta ); /* q0' */
}
pix += ystride;
}
FFABS( p1 - p0 ) < beta &&
FFABS( q1 - q0 ) < beta ) {
- int delta = clip( (((q0 - p0 ) << 2) + (p1 - q1) + 4) >> 3, -tc, tc );
+ int delta = av_clip( (((q0 - p0 ) << 2) + (p1 - q1) + 4) >> 3, -tc, tc );
- pix[-xstride] = clip_uint8( p0 + delta ); /* p0' */
- pix[0] = clip_uint8( q0 - delta ); /* q0' */
+ pix[-xstride] = av_clip_uint8( p0 + delta ); /* p0' */
+ pix[0] = av_clip_uint8( q0 - delta ); /* q0' */
}
pix += ystride;
}
DCTELEM temp[64];
if(last<=0) return;
- //if(permutation[1]==1) return; //FIXME its ok but not clean and might fail for some perms
+ //if(permutation[1]==1) return; //FIXME it is ok but not clean and might fail for some permutations
for(i=0; i<=last; i++){
const int j= scantable[i];
static int dct_sad8x8_c(/*MpegEncContext*/ void *c, uint8_t *src1, uint8_t *src2, int stride, int h){
MpegEncContext * const s= (MpegEncContext *)c;
- DECLARE_ALIGNED_8(uint64_t, aligned_temp[sizeof(DCTELEM)*64/8]);
+ DECLARE_ALIGNED_16(uint64_t, aligned_temp[sizeof(DCTELEM)*64/8]);
DCTELEM * const temp= (DCTELEM*)aligned_temp;
- int sum=0, i;
assert(h==8);
s->dsp.diff_pixels(temp, src1, src2, stride);
s->dsp.fdct(temp);
-
- for(i=0; i<64; i++)
- sum+= FFABS(temp[i]);
-
- return sum;
+ return s->dsp.sum_abs_dctelem(temp);
}
#ifdef CONFIG_GPL
static int dct264_sad8x8_c(/*MpegEncContext*/ void *c, uint8_t *src1, uint8_t *src2, int stride, int h){
MpegEncContext * const s= (MpegEncContext *)c;
- int16_t dct[8][8];
+ DCTELEM dct[8][8];
int i;
int sum=0;
- s->dsp.diff_pixels(dct, src1, src2, stride);
+ s->dsp.diff_pixels(dct[0], src1, src2, stride);
#define SRC(x) dct[i][x]
#define DST(x,v) dct[i][x]= v
return score;
}
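+/* Sum of squared differences between an int8_t and an int16_t vector,
+ * exported through DSPContext so that optimized versions can replace it. */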
+static int ssd_int8_vs_int16_c(const int8_t *pix1, const int16_t *pix2,
+ int size){
+ int score=0;
+ int i;
+ for(i=0; i<size; i++)
+ score += (pix1[i]-pix2[i])*(pix1[i]-pix2[i]);
+ return score;
+}
+
WARPER8_16_SQ(hadamard8_diff8x8_c, hadamard8_diff16_c)
WARPER8_16_SQ(hadamard8_intra8x8_c, hadamard8_intra16_c)
WARPER8_16_SQ(dct_sad8x8_c, dct_sad16_c)
dest[0] = cm[dest[0] + ((block[0] + 4)>>3)];
}
-static void just_return() { return; }
+static void just_return(void *mem av_unused, int stride av_unused, int h av_unused) { return; }
/* init static data */
void dsputil_static_init(void)
for(i=0; i<64; i++) inv_zigzag_direct16[ff_zigzag_direct[i]]= i+1;
}
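+/**
+ * Check that the compiler really honours the alignment of stack variables:
+ * declare one 16-byte aligned and test the low bits of its address.
+ * SIMD (SSE/AltiVec) code relies on this; warn once and return -1 on failure.
+ */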
+int ff_check_alignment(void){
+ static int did_fail=0;
+ DECLARE_ALIGNED_16(int, aligned);
+
+ if((long)&aligned & 15){
+ if(!did_fail){
+#if defined(HAVE_MMX) || defined(HAVE_ALTIVEC)
+ av_log(NULL, AV_LOG_ERROR,
+ "Compiler did not align stack variables. Libavcodec has been miscompiled\n"
+ "and may be very slow or crash. This is not a bug in libavcodec,\n"
+ "but in the compiler. You may try recompiling using gcc >= 4.2.\n"
+ "Do not report crashes to FFmpeg developers.\n");
+#endif
+ did_fail=1;
+ }
+ return -1;
+ }
+ return 0;
+}
void dsputil_init(DSPContext* c, AVCodecContext *avctx)
{
int i;
+ ff_check_alignment();
+
#ifdef CONFIG_ENCODERS
if(avctx->dct_algo==FF_DCT_FASTINT) {
c->fdct = fdct_ifast;
#endif //CONFIG_ENCODERS
if(avctx->lowres==1){
- if(avctx->idct_algo==FF_IDCT_INT || avctx->idct_algo==FF_IDCT_AUTO){
+ if(avctx->idct_algo==FF_IDCT_INT || avctx->idct_algo==FF_IDCT_AUTO || !ENABLE_H264_DECODER){
c->idct_put= ff_jref_idct4_put;
c->idct_add= ff_jref_idct4_add;
}else{
c->idct_add= ff_jref_idct_add;
c->idct = j_rev_dct;
c->idct_permutation_type= FF_LIBMPEG2_IDCT_PERM;
- }else if(avctx->idct_algo==FF_IDCT_VP3){
+ }else if((ENABLE_VP3_DECODER || ENABLE_VP5_DECODER || ENABLE_VP6_DECODER || ENABLE_THEORA_DECODER ) &&
+ avctx->idct_algo==FF_IDCT_VP3){
c->idct_put= ff_vp3_idct_put_c;
c->idct_add= ff_vp3_idct_add_c;
c->idct = ff_vp3_idct_c;
}
}
- c->h264_idct_add= ff_h264_idct_add_c;
- c->h264_idct8_add= ff_h264_idct8_add_c;
- c->h264_idct_dc_add= ff_h264_idct_dc_add_c;
- c->h264_idct8_dc_add= ff_h264_idct8_dc_add_c;
+ if (ENABLE_H264_DECODER) {
+ c->h264_idct_add= ff_h264_idct_add_c;
+ c->h264_idct8_add= ff_h264_idct8_add_c;
+ c->h264_idct_dc_add= ff_h264_idct_dc_add_c;
+ c->h264_idct8_dc_add= ff_h264_idct8_dc_add_c;
+ }
c->get_pixels = get_pixels_c;
c->diff_pixels = diff_pixels_c;
c->add_pixels_clamped = add_pixels_clamped_c;
c->add_pixels8 = add_pixels8_c;
c->add_pixels4 = add_pixels4_c;
+ c->sum_abs_dctelem = sum_abs_dctelem_c;
c->gmc1 = gmc1_c;
c->gmc = ff_gmc_c;
c->clear_blocks = clear_blocks_c;
#if defined(CONFIG_VC1_DECODER) || defined(CONFIG_WMV3_DECODER)
ff_vc1dsp_init(c,avctx);
#endif
+#if defined(CONFIG_H264_ENCODER)
+ ff_h264dspenc_init(c,avctx);
+#endif
c->put_mspel_pixels_tab[0]= put_mspel8_mc00_c;
c->put_mspel_pixels_tab[1]= put_mspel8_mc10_c;
c->w97[1]= w97_8_c;
#endif
+ c->ssd_int8_vs_int16 = ssd_int8_vs_int16_c;
+
c->add_bytes= add_bytes_c;
c->diff_bytes= diff_bytes_c;
c->sub_hfyu_median_prediction= sub_hfyu_median_prediction_c;
c->h264_h_loop_filter_chroma_intra= h264_h_loop_filter_chroma_intra_c;
c->h264_loop_filter_strength= NULL;
- c->h263_h_loop_filter= h263_h_loop_filter_c;
- c->h263_v_loop_filter= h263_v_loop_filter_c;
+ if (ENABLE_ANY_H263) {
+ c->h263_h_loop_filter= h263_h_loop_filter_c;
+ c->h263_v_loop_filter= h263_v_loop_filter_c;
+ }
c->h261_loop_filter= h261_loop_filter_c;
c->try_8x8basis= try_8x8basis_c;
c->add_8x8basis= add_8x8basis_c;
-#ifdef CONFIG_SNOW_ENCODER
+#ifdef CONFIG_SNOW_DECODER
c->vertical_compose97i = ff_snow_vertical_compose97i;
c->horizontal_compose97i = ff_snow_horizontal_compose97i;
c->inner_add_yblock = ff_snow_inner_add_yblock;
#ifdef CONFIG_VORBIS_DECODER
c->vorbis_inverse_coupling = vorbis_inverse_coupling;
+#endif
+#ifdef CONFIG_FLAC_ENCODER
+ c->flac_compute_autocorr = ff_flac_compute_autocorr;
#endif
c->vector_fmul = vector_fmul_c;
c->vector_fmul_reverse = vector_fmul_reverse_c;
memset(c->put_2tap_qpel_pixels_tab, 0, sizeof(c->put_2tap_qpel_pixels_tab));
memset(c->avg_2tap_qpel_pixels_tab, 0, sizeof(c->avg_2tap_qpel_pixels_tab));
-#ifdef HAVE_MMX
- dsputil_init_mmx(c, avctx);
-#endif
-#ifdef ARCH_ARMV4L
- dsputil_init_armv4l(c, avctx);
-#endif
-#ifdef HAVE_MLIB
- dsputil_init_mlib(c, avctx);
-#endif
-#ifdef ARCH_SPARC
- dsputil_init_vis(c,avctx);
-#endif
-#ifdef ARCH_ALPHA
- dsputil_init_alpha(c, avctx);
-#endif
-#ifdef ARCH_POWERPC
- dsputil_init_ppc(c, avctx);
-#endif
-#ifdef HAVE_MMI
- dsputil_init_mmi(c, avctx);
-#endif
-#ifdef ARCH_SH4
- dsputil_init_sh4(c,avctx);
-#endif
-#ifdef ARCH_BFIN
- dsputil_init_bfin(c,avctx);
-#endif
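+    /* The ENABLE_* symbols are 0/1 compile-time constants, so the calls for
+       architectures that are not compiled in are removed by dead-code
+       elimination while still being syntax-checked. */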
+ if (ENABLE_MMX) dsputil_init_mmx (c, avctx);
+ if (ENABLE_ARMV4L) dsputil_init_armv4l(c, avctx);
+ if (ENABLE_MLIB) dsputil_init_mlib (c, avctx);
+ if (ENABLE_VIS) dsputil_init_vis (c, avctx);
+ if (ENABLE_ALPHA) dsputil_init_alpha (c, avctx);
+ if (ENABLE_POWERPC) dsputil_init_ppc (c, avctx);
+ if (ENABLE_MMI) dsputil_init_mmi (c, avctx);
+ if (ENABLE_SH4) dsputil_init_sh4 (c, avctx);
+ if (ENABLE_BFIN) dsputil_init_bfin (c, avctx);
for(i=0; i<64; i++){
if(!c->put_2tap_qpel_pixels_tab[0][i])