* Copyright (c) 2000, 2001 Fabrice Bellard.
* Copyright (c) 2002-2004 Michael Niedermayer <michaelni@gmx.at>
*
- * This library is free software; you can redistribute it and/or
+ * gmc & q-pel & 32/64 bit based MC by Michael Niedermayer <michaelni@gmx.at>
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
- * version 2 of the License, or (at your option) any later version.
+ * version 2.1 of the License, or (at your option) any later version.
*
- * This library is distributed in the hope that it will be useful,
+ * FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
- * License along with this library; if not, write to the Free Software
+ * License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
- *
- * gmc & q-pel & 32/64 bit based MC by Michael Niedermayer <michaelni@gmx.at>
*/
#include "avcodec.h"
#include "dsputil.h"
-#include "mpegvideo.h"
#include "simple_idct.h"
#include "faandct.h"
+#include "faanidct.h"
+#include "h263.h"
#include "snow.h"
/* snow.c */
/* vorbis.c */
void vorbis_inverse_coupling(float *mag, float *ang, int blocksize);
-uint8_t cropTbl[256 + 2 * MAX_NEG_CROP] = {0, };
-uint32_t squareTbl[512] = {0, };
+/* flacenc.c */
+void ff_flac_compute_autocorr(const int32_t *data, int len, int lag, double *autoc);
+
+/* pngdec.c */
+void ff_add_png_paeth_prediction(uint8_t *dst, uint8_t *src, uint8_t *top, int w, int bpp);
+
+uint8_t ff_cropTbl[256 + 2 * MAX_NEG_CROP] = {0, };
+uint32_t ff_squareTbl[512] = {0, };
+
+// 0x7f7f7f7f or 0x7f7f7f7f7f7f7f7f, depending on the CPU's native word size
+#define pb_7f (~0UL/255 * 0x7f)
+#define pb_80 (~0UL/255 * 0x80)
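/* The two masks above enable SIMD-within-a-register (SWAR) byte arithmetic on a
 * plain machine word: the low 7 bits of every byte are added separately and the
 * top bits are xored back in, so no carry leaks into the neighbouring byte.
 * A minimal illustrative sketch of the identity used by add_bytes_c() and
 * add_bytes_l2_c() further down: */
#if 0
static inline long swar_add_bytes_example(long a, long b)
{
    /* per-byte sum: low 7 bits added carry-free, MSB of each byte fixed up via xor */
    return ((a & pb_7f) + (b & pb_7f)) ^ ((a ^ b) & pb_80);
}
#endif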
const uint8_t ff_zigzag_direct[64] = {
0, 1, 8, 16, 9, 2, 3, 10,
};
/* a*inverse[b]>>32 == a/b for all 0<=a<=65536 && 2<=b<=255 */
-const uint32_t inverse[256]={
+const uint32_t ff_inverse[256]={
0, 4294967295U,2147483648U,1431655766, 1073741824, 858993460, 715827883, 613566757,
536870912, 477218589, 429496730, 390451573, 357913942, 330382100, 306783379, 286331154,
268435456, 252645136, 238609295, 226050911, 214748365, 204522253, 195225787, 186737709,
0x32, 0x3A, 0x36, 0x3B, 0x33, 0x3E, 0x37, 0x3F,
};
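/* A minimal illustrative sketch of how the table replaces an integer division:
 * for 0<=a<=65536 and 2<=b<=255, (a*ff_inverse[b])>>32 equals a/b, so a division
 * in an inner loop can be turned into a multiply and a shift. */
#if 0
static inline int div_by_table_example(int a, int b)
{
    return (int)(((uint64_t)a * ff_inverse[b]) >> 32); /* == a/b within the bounds above */
}
#endif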
+static const uint8_t idct_sse2_row_perm[8] = {0, 4, 1, 5, 2, 6, 3, 7};
+
+void ff_init_scantable(uint8_t *permutation, ScanTable *st, const uint8_t *src_scantable){
+ int i;
+ int end;
+
+ st->scantable= src_scantable;
+
+ for(i=0; i<64; i++){
+ int j;
+ j = src_scantable[i];
+ st->permutated[i] = permutation[j];
+#ifdef ARCH_POWERPC
+ st->inverse[j] = i;
+#endif
+ }
+
+ end=-1;
+ for(i=0; i<64; i++){
+ int j;
+ j = st->permutated[i];
+ if(j>end) end=j;
+ st->raster_end[i]= end;
+ }
+}
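/* A minimal usage sketch (hypothetical, using an identity permutation as for
 * FF_NO_IDCT_PERM): st.permutated[] then mirrors the source scan order, and
 * st.raster_end[i] holds the highest permuted index seen up to position i. */
#if 0
static void init_scantable_example(void)
{
    ScanTable st;
    uint8_t identity[64];
    int i;
    for (i = 0; i < 64; i++)
        identity[i] = i;
    ff_init_scantable(identity, &st, ff_zigzag_direct);
}
#endif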
+
static int pix_sum_c(uint8_t * pix, int line_size)
{
int s, i, j;
static int pix_norm1_c(uint8_t * pix, int line_size)
{
int s, i, j;
- uint32_t *sq = squareTbl + 256;
+ uint32_t *sq = ff_squareTbl + 256;
s = 0;
for (i = 0; i < 16; i++) {
return s;
}
-static void bswap_buf(uint32_t *dst, uint32_t *src, int w){
+static void bswap_buf(uint32_t *dst, const uint32_t *src, int w){
int i;
for(i=0; i+8<=w; i+=8){
static int sse4_c(void *v, uint8_t * pix1, uint8_t * pix2, int line_size, int h)
{
int s, i;
- uint32_t *sq = squareTbl + 256;
+ uint32_t *sq = ff_squareTbl + 256;
s = 0;
for (i = 0; i < h; i++) {
static int sse8_c(void *v, uint8_t * pix1, uint8_t * pix2, int line_size, int h)
{
int s, i;
- uint32_t *sq = squareTbl + 256;
+ uint32_t *sq = ff_squareTbl + 256;
s = 0;
for (i = 0; i < h; i++) {
static int sse16_c(void *v, uint8_t *pix1, uint8_t *pix2, int line_size, int h)
{
int s, i;
- uint32_t *sq = squareTbl + 256;
+ uint32_t *sq = ff_squareTbl + 256;
s = 0;
for (i = 0; i < h; i++) {
for(i=0; i<size; i++){
for(j=0; j<size; j++){
int v= tmp[sx + sy + i*stride + j] * scale[type][dec_count-3][level][ori];
- s += ABS(v);
+ s += FFABS(v);
}
}
}
}
#endif
+/* draw the edges of width 'w' of an image of size width, height */
+//FIXME check that this is ok for mpeg4 interlaced
+static void draw_edges_c(uint8_t *buf, int wrap, int width, int height, int w)
+{
+ uint8_t *ptr, *last_line;
+ int i;
+
+ last_line = buf + (height - 1) * wrap;
+ for(i=0;i<w;i++) {
+ /* top and bottom */
+ memcpy(buf - (i + 1) * wrap, buf, width);
+ memcpy(last_line + (i + 1) * wrap, last_line, width);
+ }
+ /* left and right */
+ ptr = buf;
+ for(i=0;i<height;i++) {
+ memset(ptr - w, ptr[0], w);
+ memset(ptr + width, ptr[width-1], w);
+ ptr += wrap;
+ }
+ /* corners */
+ for(i=0;i<w;i++) {
+ memset(buf - (i + 1) * wrap - w, buf[0], w); /* top left */
+ memset(buf - (i + 1) * wrap + width, buf[width-1], w); /* top right */
+ memset(last_line + (i + 1) * wrap - w, last_line[0], w); /* bottom left */
+ memset(last_line + (i + 1) * wrap + width, last_line[width-1], w); /* bottom right */
+ }
+}
+
+/**
+ * Copies a rectangular area of samples to a temporary buffer and replicates the border samples.
+ * @param buf destination buffer
+ * @param src source buffer
+ * @param linesize number of bytes between 2 vertically adjacent samples in both the source and destination buffers
+ * @param block_w width of block
+ * @param block_h height of block
+ * @param src_x x coordinate of the top left sample of the block in the source buffer
+ * @param src_y y coordinate of the top left sample of the block in the source buffer
+ * @param w width of the source buffer
+ * @param h height of the source buffer
+ */
+void ff_emulated_edge_mc(uint8_t *buf, uint8_t *src, int linesize, int block_w, int block_h,
+ int src_x, int src_y, int w, int h){
+ int x, y;
+ int start_y, start_x, end_y, end_x;
+
+ if(src_y>= h){
+ src+= (h-1-src_y)*linesize;
+ src_y=h-1;
+ }else if(src_y<=-block_h){
+ src+= (1-block_h-src_y)*linesize;
+ src_y=1-block_h;
+ }
+ if(src_x>= w){
+ src+= (w-1-src_x);
+ src_x=w-1;
+ }else if(src_x<=-block_w){
+ src+= (1-block_w-src_x);
+ src_x=1-block_w;
+ }
+
+ start_y= FFMAX(0, -src_y);
+ start_x= FFMAX(0, -src_x);
+ end_y= FFMIN(block_h, h-src_y);
+ end_x= FFMIN(block_w, w-src_x);
+
+ // copy existing part
+ for(y=start_y; y<end_y; y++){
+ for(x=start_x; x<end_x; x++){
+ buf[x + y*linesize]= src[x + y*linesize];
+ }
+ }
+
+ //top
+ for(y=0; y<start_y; y++){
+ for(x=start_x; x<end_x; x++){
+ buf[x + y*linesize]= buf[x + start_y*linesize];
+ }
+ }
+
+ //bottom
+ for(y=end_y; y<block_h; y++){
+ for(x=start_x; x<end_x; x++){
+ buf[x + y*linesize]= buf[x + (end_y-1)*linesize];
+ }
+ }
+
+ for(y=0; y<block_h; y++){
+ //left
+ for(x=0; x<start_x; x++){
+ buf[x + y*linesize]= buf[start_x + y*linesize];
+ }
+
+ //right
+ for(x=end_x; x<block_w; x++){
+ buf[x + y*linesize]= buf[end_x - 1 + y*linesize];
+ }
+ }
+}
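/* A minimal usage sketch (hypothetical names): when a motion-compensated block,
 * here 17x17 for halfpel interpolation, would read past the picture border, the
 * samples are first copied into an edge buffer with the border replicated, and
 * the interpolation then reads from that buffer instead of the picture. */
#if 0
static void emulated_edge_mc_example(uint8_t *edge_buf, uint8_t *picture,
                                     int linesize, int src_x, int src_y,
                                     int pic_width, int pic_height)
{
    ff_emulated_edge_mc(edge_buf, picture + src_y*linesize + src_x, linesize,
                        17, 17, src_x, src_y, pic_width, pic_height);
    /* subsequent MC code reads from edge_buf with the same linesize */
}
#endif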
+
static void get_pixels_c(DCTELEM *restrict block, const uint8_t *pixels, int line_size)
{
int i;
int line_size)
{
int i;
- uint8_t *cm = cropTbl + MAX_NEG_CROP;
+ uint8_t *cm = ff_cropTbl + MAX_NEG_CROP;
/* read the pixels */
for(i=0;i<8;i++) {
int line_size)
{
int i;
- uint8_t *cm = cropTbl + MAX_NEG_CROP;
+ uint8_t *cm = ff_cropTbl + MAX_NEG_CROP;
/* read the pixels */
for(i=0;i<4;i++) {
int line_size)
{
int i;
- uint8_t *cm = cropTbl + MAX_NEG_CROP;
+ uint8_t *cm = ff_cropTbl + MAX_NEG_CROP;
/* read the pixels */
for(i=0;i<2;i++) {
int line_size)
{
int i;
- uint8_t *cm = cropTbl + MAX_NEG_CROP;
+ uint8_t *cm = ff_cropTbl + MAX_NEG_CROP;
/* read the pixels */
for(i=0;i<8;i++) {
int line_size)
{
int i;
- uint8_t *cm = cropTbl + MAX_NEG_CROP;
+ uint8_t *cm = ff_cropTbl + MAX_NEG_CROP;
/* read the pixels */
for(i=0;i<4;i++) {
int line_size)
{
int i;
- uint8_t *cm = cropTbl + MAX_NEG_CROP;
+ uint8_t *cm = ff_cropTbl + MAX_NEG_CROP;
/* read the pixels */
for(i=0;i<2;i++) {
}
}
+static int sum_abs_dctelem_c(DCTELEM *block)
+{
+ int sum=0, i;
+ for(i=0; i<64; i++)
+ sum+= FFABS(block[i]);
+ return sum;
+}
+
#if 0
#define PIXOP2(OPNAME, OP) \
{\
int i;\
for(i=0; i<h; i++){\
- OP(*((uint64_t*)block), LD64(pixels));\
+ OP(*((uint64_t*)block), AV_RN64(pixels));\
pixels+=line_size;\
block +=line_size;\
}\
{\
int i;\
for(i=0; i<h; i++){\
- const uint64_t a= LD64(pixels );\
- const uint64_t b= LD64(pixels+1);\
+ const uint64_t a= AV_RN64(pixels );\
+ const uint64_t b= AV_RN64(pixels+1);\
OP(*((uint64_t*)block), (a&b) + (((a^b)&0xFEFEFEFEFEFEFEFEULL)>>1));\
pixels+=line_size;\
block +=line_size;\
{\
int i;\
for(i=0; i<h; i++){\
- const uint64_t a= LD64(pixels );\
- const uint64_t b= LD64(pixels+1);\
+ const uint64_t a= AV_RN64(pixels );\
+ const uint64_t b= AV_RN64(pixels+1);\
OP(*((uint64_t*)block), (a|b) - (((a^b)&0xFEFEFEFEFEFEFEFEULL)>>1));\
pixels+=line_size;\
block +=line_size;\
{\
int i;\
for(i=0; i<h; i++){\
- const uint64_t a= LD64(pixels );\
- const uint64_t b= LD64(pixels+line_size);\
+ const uint64_t a= AV_RN64(pixels );\
+ const uint64_t b= AV_RN64(pixels+line_size);\
OP(*((uint64_t*)block), (a&b) + (((a^b)&0xFEFEFEFEFEFEFEFEULL)>>1));\
pixels+=line_size;\
block +=line_size;\
{\
int i;\
for(i=0; i<h; i++){\
- const uint64_t a= LD64(pixels );\
- const uint64_t b= LD64(pixels+line_size);\
+ const uint64_t a= AV_RN64(pixels );\
+ const uint64_t b= AV_RN64(pixels+line_size);\
OP(*((uint64_t*)block), (a|b) - (((a^b)&0xFEFEFEFEFEFEFEFEULL)>>1));\
pixels+=line_size;\
block +=line_size;\
static void OPNAME ## _pixels_xy2_c(uint8_t *block, const uint8_t *pixels, int line_size, int h)\
{\
int i;\
- const uint64_t a= LD64(pixels );\
- const uint64_t b= LD64(pixels+1);\
+ const uint64_t a= AV_RN64(pixels );\
+ const uint64_t b= AV_RN64(pixels+1);\
uint64_t l0= (a&0x0303030303030303ULL)\
+ (b&0x0303030303030303ULL)\
+ 0x0202020202020202ULL;\
\
pixels+=line_size;\
for(i=0; i<h; i+=2){\
- uint64_t a= LD64(pixels );\
- uint64_t b= LD64(pixels+1);\
+ uint64_t a= AV_RN64(pixels );\
+ uint64_t b= AV_RN64(pixels+1);\
l1= (a&0x0303030303030303ULL)\
+ (b&0x0303030303030303ULL);\
h1= ((a&0xFCFCFCFCFCFCFCFCULL)>>2)\
OP(*((uint64_t*)block), h0+h1+(((l0+l1)>>2)&0x0F0F0F0F0F0F0F0FULL));\
pixels+=line_size;\
block +=line_size;\
- a= LD64(pixels );\
- b= LD64(pixels+1);\
+ a= AV_RN64(pixels );\
+ b= AV_RN64(pixels+1);\
l0= (a&0x0303030303030303ULL)\
+ (b&0x0303030303030303ULL)\
+ 0x0202020202020202ULL;\
static void OPNAME ## _no_rnd_pixels_xy2_c(uint8_t *block, const uint8_t *pixels, int line_size, int h)\
{\
int i;\
- const uint64_t a= LD64(pixels );\
- const uint64_t b= LD64(pixels+1);\
+ const uint64_t a= AV_RN64(pixels );\
+ const uint64_t b= AV_RN64(pixels+1);\
uint64_t l0= (a&0x0303030303030303ULL)\
+ (b&0x0303030303030303ULL)\
+ 0x0101010101010101ULL;\
\
pixels+=line_size;\
for(i=0; i<h; i+=2){\
- uint64_t a= LD64(pixels );\
- uint64_t b= LD64(pixels+1);\
+ uint64_t a= AV_RN64(pixels );\
+ uint64_t b= AV_RN64(pixels+1);\
l1= (a&0x0303030303030303ULL)\
+ (b&0x0303030303030303ULL);\
h1= ((a&0xFCFCFCFCFCFCFCFCULL)>>2)\
OP(*((uint64_t*)block), h0+h1+(((l0+l1)>>2)&0x0F0F0F0F0F0F0F0FULL));\
pixels+=line_size;\
block +=line_size;\
- a= LD64(pixels );\
- b= LD64(pixels+1);\
+ a= AV_RN64(pixels );\
+ b= AV_RN64(pixels+1);\
l0= (a&0x0303030303030303ULL)\
+ (b&0x0303030303030303ULL)\
+ 0x0101010101010101ULL;\
static void OPNAME ## _pixels2_c(uint8_t *block, const uint8_t *pixels, int line_size, int h){\
int i;\
for(i=0; i<h; i++){\
- OP(*((uint16_t*)(block )), LD16(pixels ));\
+ OP(*((uint16_t*)(block )), AV_RN16(pixels ));\
pixels+=line_size;\
block +=line_size;\
}\
static void OPNAME ## _pixels4_c(uint8_t *block, const uint8_t *pixels, int line_size, int h){\
int i;\
for(i=0; i<h; i++){\
- OP(*((uint32_t*)(block )), LD32(pixels ));\
+ OP(*((uint32_t*)(block )), AV_RN32(pixels ));\
pixels+=line_size;\
block +=line_size;\
}\
static void OPNAME ## _pixels8_c(uint8_t *block, const uint8_t *pixels, int line_size, int h){\
int i;\
for(i=0; i<h; i++){\
- OP(*((uint32_t*)(block )), LD32(pixels ));\
- OP(*((uint32_t*)(block+4)), LD32(pixels+4));\
+ OP(*((uint32_t*)(block )), AV_RN32(pixels ));\
+ OP(*((uint32_t*)(block+4)), AV_RN32(pixels+4));\
pixels+=line_size;\
block +=line_size;\
}\
int i;\
for(i=0; i<h; i++){\
uint32_t a,b;\
- a= LD32(&src1[i*src_stride1 ]);\
- b= LD32(&src2[i*src_stride2 ]);\
+ a= AV_RN32(&src1[i*src_stride1 ]);\
+ b= AV_RN32(&src2[i*src_stride2 ]);\
OP(*((uint32_t*)&dst[i*dst_stride ]), no_rnd_avg32(a, b));\
- a= LD32(&src1[i*src_stride1+4]);\
- b= LD32(&src2[i*src_stride2+4]);\
+ a= AV_RN32(&src1[i*src_stride1+4]);\
+ b= AV_RN32(&src2[i*src_stride2+4]);\
OP(*((uint32_t*)&dst[i*dst_stride+4]), no_rnd_avg32(a, b));\
}\
}\
int i;\
for(i=0; i<h; i++){\
uint32_t a,b;\
- a= LD32(&src1[i*src_stride1 ]);\
- b= LD32(&src2[i*src_stride2 ]);\
+ a= AV_RN32(&src1[i*src_stride1 ]);\
+ b= AV_RN32(&src2[i*src_stride2 ]);\
OP(*((uint32_t*)&dst[i*dst_stride ]), rnd_avg32(a, b));\
- a= LD32(&src1[i*src_stride1+4]);\
- b= LD32(&src2[i*src_stride2+4]);\
+ a= AV_RN32(&src1[i*src_stride1+4]);\
+ b= AV_RN32(&src2[i*src_stride2+4]);\
OP(*((uint32_t*)&dst[i*dst_stride+4]), rnd_avg32(a, b));\
}\
}\
int i;\
for(i=0; i<h; i++){\
uint32_t a,b;\
- a= LD32(&src1[i*src_stride1 ]);\
- b= LD32(&src2[i*src_stride2 ]);\
+ a= AV_RN32(&src1[i*src_stride1 ]);\
+ b= AV_RN32(&src2[i*src_stride2 ]);\
OP(*((uint32_t*)&dst[i*dst_stride ]), rnd_avg32(a, b));\
}\
}\
int i;\
for(i=0; i<h; i++){\
uint32_t a,b;\
- a= LD16(&src1[i*src_stride1 ]);\
- b= LD16(&src2[i*src_stride2 ]);\
+ a= AV_RN16(&src1[i*src_stride1 ]);\
+ b= AV_RN16(&src2[i*src_stride2 ]);\
OP(*((uint16_t*)&dst[i*dst_stride ]), rnd_avg32(a, b));\
}\
}\
int i;\
for(i=0; i<h; i++){\
uint32_t a, b, c, d, l0, l1, h0, h1;\
- a= LD32(&src1[i*src_stride1]);\
- b= LD32(&src2[i*src_stride2]);\
- c= LD32(&src3[i*src_stride3]);\
- d= LD32(&src4[i*src_stride4]);\
+ a= AV_RN32(&src1[i*src_stride1]);\
+ b= AV_RN32(&src2[i*src_stride2]);\
+ c= AV_RN32(&src3[i*src_stride3]);\
+ d= AV_RN32(&src4[i*src_stride4]);\
l0= (a&0x03030303UL)\
+ (b&0x03030303UL)\
+ 0x02020202UL;\
h1= ((c&0xFCFCFCFCUL)>>2)\
+ ((d&0xFCFCFCFCUL)>>2);\
OP(*((uint32_t*)&dst[i*dst_stride]), h0+h1+(((l0+l1)>>2)&0x0F0F0F0FUL));\
- a= LD32(&src1[i*src_stride1+4]);\
- b= LD32(&src2[i*src_stride2+4]);\
- c= LD32(&src3[i*src_stride3+4]);\
- d= LD32(&src4[i*src_stride4+4]);\
+ a= AV_RN32(&src1[i*src_stride1+4]);\
+ b= AV_RN32(&src2[i*src_stride2+4]);\
+ c= AV_RN32(&src3[i*src_stride3+4]);\
+ d= AV_RN32(&src4[i*src_stride4+4]);\
l0= (a&0x03030303UL)\
+ (b&0x03030303UL)\
+ 0x02020202UL;\
int i;\
for(i=0; i<h; i++){\
uint32_t a, b, c, d, l0, l1, h0, h1;\
- a= LD32(&src1[i*src_stride1]);\
- b= LD32(&src2[i*src_stride2]);\
- c= LD32(&src3[i*src_stride3]);\
- d= LD32(&src4[i*src_stride4]);\
+ a= AV_RN32(&src1[i*src_stride1]);\
+ b= AV_RN32(&src2[i*src_stride2]);\
+ c= AV_RN32(&src3[i*src_stride3]);\
+ d= AV_RN32(&src4[i*src_stride4]);\
l0= (a&0x03030303UL)\
+ (b&0x03030303UL)\
+ 0x01010101UL;\
h1= ((c&0xFCFCFCFCUL)>>2)\
+ ((d&0xFCFCFCFCUL)>>2);\
OP(*((uint32_t*)&dst[i*dst_stride]), h0+h1+(((l0+l1)>>2)&0x0F0F0F0FUL));\
- a= LD32(&src1[i*src_stride1+4]);\
- b= LD32(&src2[i*src_stride2+4]);\
- c= LD32(&src3[i*src_stride3+4]);\
- d= LD32(&src4[i*src_stride4+4]);\
+ a= AV_RN32(&src1[i*src_stride1+4]);\
+ b= AV_RN32(&src2[i*src_stride2+4]);\
+ c= AV_RN32(&src3[i*src_stride3+4]);\
+ d= AV_RN32(&src4[i*src_stride4+4]);\
l0= (a&0x03030303UL)\
+ (b&0x03030303UL)\
+ 0x01010101UL;\
static inline void OPNAME ## _pixels4_xy2_c(uint8_t *block, const uint8_t *pixels, int line_size, int h)\
{\
int i;\
- const uint32_t a= LD32(pixels );\
- const uint32_t b= LD32(pixels+1);\
+ const uint32_t a= AV_RN32(pixels );\
+ const uint32_t b= AV_RN32(pixels+1);\
uint32_t l0= (a&0x03030303UL)\
+ (b&0x03030303UL)\
+ 0x02020202UL;\
\
pixels+=line_size;\
for(i=0; i<h; i+=2){\
- uint32_t a= LD32(pixels );\
- uint32_t b= LD32(pixels+1);\
+ uint32_t a= AV_RN32(pixels );\
+ uint32_t b= AV_RN32(pixels+1);\
l1= (a&0x03030303UL)\
+ (b&0x03030303UL);\
h1= ((a&0xFCFCFCFCUL)>>2)\
OP(*((uint32_t*)block), h0+h1+(((l0+l1)>>2)&0x0F0F0F0FUL));\
pixels+=line_size;\
block +=line_size;\
- a= LD32(pixels );\
- b= LD32(pixels+1);\
+ a= AV_RN32(pixels );\
+ b= AV_RN32(pixels+1);\
l0= (a&0x03030303UL)\
+ (b&0x03030303UL)\
+ 0x02020202UL;\
int j;\
for(j=0; j<2; j++){\
int i;\
- const uint32_t a= LD32(pixels );\
- const uint32_t b= LD32(pixels+1);\
+ const uint32_t a= AV_RN32(pixels );\
+ const uint32_t b= AV_RN32(pixels+1);\
uint32_t l0= (a&0x03030303UL)\
+ (b&0x03030303UL)\
+ 0x02020202UL;\
\
pixels+=line_size;\
for(i=0; i<h; i+=2){\
- uint32_t a= LD32(pixels );\
- uint32_t b= LD32(pixels+1);\
+ uint32_t a= AV_RN32(pixels );\
+ uint32_t b= AV_RN32(pixels+1);\
l1= (a&0x03030303UL)\
+ (b&0x03030303UL);\
h1= ((a&0xFCFCFCFCUL)>>2)\
OP(*((uint32_t*)block), h0+h1+(((l0+l1)>>2)&0x0F0F0F0FUL));\
pixels+=line_size;\
block +=line_size;\
- a= LD32(pixels );\
- b= LD32(pixels+1);\
+ a= AV_RN32(pixels );\
+ b= AV_RN32(pixels+1);\
l0= (a&0x03030303UL)\
+ (b&0x03030303UL)\
+ 0x02020202UL;\
int j;\
for(j=0; j<2; j++){\
int i;\
- const uint32_t a= LD32(pixels );\
- const uint32_t b= LD32(pixels+1);\
+ const uint32_t a= AV_RN32(pixels );\
+ const uint32_t b= AV_RN32(pixels+1);\
uint32_t l0= (a&0x03030303UL)\
+ (b&0x03030303UL)\
+ 0x01010101UL;\
\
pixels+=line_size;\
for(i=0; i<h; i+=2){\
- uint32_t a= LD32(pixels );\
- uint32_t b= LD32(pixels+1);\
+ uint32_t a= AV_RN32(pixels );\
+ uint32_t b= AV_RN32(pixels+1);\
l1= (a&0x03030303UL)\
+ (b&0x03030303UL);\
h1= ((a&0xFCFCFCFCUL)>>2)\
OP(*((uint32_t*)block), h0+h1+(((l0+l1)>>2)&0x0F0F0F0FUL));\
pixels+=line_size;\
block +=line_size;\
- a= LD32(pixels );\
- b= LD32(pixels+1);\
+ a= AV_RN32(pixels );\
+ b= AV_RN32(pixels+1);\
l0= (a&0x03030303UL)\
+ (b&0x03030303UL)\
+ 0x01010101UL;\
+ src[index+stride+1]* frac_x )* frac_y
+ r)>>(shift*2);
}else{
- index= src_x + clip(src_y, 0, height)*stride;
+ index= src_x + av_clip(src_y, 0, height)*stride;
dst[y*stride + x]= ( ( src[index ]*(s-frac_x)
+ src[index +1]* frac_x )*s
+ r)>>(shift*2);
}
}else{
if((unsigned)src_y < height){
- index= clip(src_x, 0, width) + src_y*stride;
+ index= av_clip(src_x, 0, width) + src_y*stride;
dst[y*stride + x]= ( ( src[index ]*(s-frac_y)
+ src[index+stride ]* frac_y )*s
+ r)>>(shift*2);
}else{
- index= clip(src_x, 0, width) + clip(src_y, 0, height)*stride;
+ index= av_clip(src_x, 0, width) + av_clip(src_y, 0, height)*stride;
dst[y*stride + x]= src[index ];
}
}
\
assert(x<8 && y<8 && x>=0 && y>=0);\
\
- for(i=0; i<h; i++)\
- {\
- OP(dst[0], (A*src[0] + B*src[1] + C*src[stride+0] + D*src[stride+1]));\
- OP(dst[1], (A*src[1] + B*src[2] + C*src[stride+1] + D*src[stride+2]));\
- dst+= stride;\
- src+= stride;\
+ if(D){\
+ for(i=0; i<h; i++){\
+ OP(dst[0], (A*src[0] + B*src[1] + C*src[stride+0] + D*src[stride+1]));\
+ OP(dst[1], (A*src[1] + B*src[2] + C*src[stride+1] + D*src[stride+2]));\
+ dst+= stride;\
+ src+= stride;\
+ }\
+ }else{\
+ const int E= B+C;\
+ const int step= C ? stride : 1;\
+ for(i=0; i<h; i++){\
+ OP(dst[0], (A*src[0] + E*src[step+0]));\
+ OP(dst[1], (A*src[1] + E*src[step+1]));\
+ dst+= stride;\
+ src+= stride;\
+ }\
}\
}\
\
\
assert(x<8 && y<8 && x>=0 && y>=0);\
\
- for(i=0; i<h; i++)\
- {\
- OP(dst[0], (A*src[0] + B*src[1] + C*src[stride+0] + D*src[stride+1]));\
- OP(dst[1], (A*src[1] + B*src[2] + C*src[stride+1] + D*src[stride+2]));\
- OP(dst[2], (A*src[2] + B*src[3] + C*src[stride+2] + D*src[stride+3]));\
- OP(dst[3], (A*src[3] + B*src[4] + C*src[stride+3] + D*src[stride+4]));\
- dst+= stride;\
- src+= stride;\
+ if(D){\
+ for(i=0; i<h; i++){\
+ OP(dst[0], (A*src[0] + B*src[1] + C*src[stride+0] + D*src[stride+1]));\
+ OP(dst[1], (A*src[1] + B*src[2] + C*src[stride+1] + D*src[stride+2]));\
+ OP(dst[2], (A*src[2] + B*src[3] + C*src[stride+2] + D*src[stride+3]));\
+ OP(dst[3], (A*src[3] + B*src[4] + C*src[stride+3] + D*src[stride+4]));\
+ dst+= stride;\
+ src+= stride;\
+ }\
+ }else{\
+ const int E= B+C;\
+ const int step= C ? stride : 1;\
+ for(i=0; i<h; i++){\
+ OP(dst[0], (A*src[0] + E*src[step+0]));\
+ OP(dst[1], (A*src[1] + E*src[step+1]));\
+ OP(dst[2], (A*src[2] + E*src[step+2]));\
+ OP(dst[3], (A*src[3] + E*src[step+3]));\
+ dst+= stride;\
+ src+= stride;\
+ }\
}\
}\
\
\
assert(x<8 && y<8 && x>=0 && y>=0);\
\
- for(i=0; i<h; i++)\
- {\
- OP(dst[0], (A*src[0] + B*src[1] + C*src[stride+0] + D*src[stride+1]));\
- OP(dst[1], (A*src[1] + B*src[2] + C*src[stride+1] + D*src[stride+2]));\
- OP(dst[2], (A*src[2] + B*src[3] + C*src[stride+2] + D*src[stride+3]));\
- OP(dst[3], (A*src[3] + B*src[4] + C*src[stride+3] + D*src[stride+4]));\
- OP(dst[4], (A*src[4] + B*src[5] + C*src[stride+4] + D*src[stride+5]));\
- OP(dst[5], (A*src[5] + B*src[6] + C*src[stride+5] + D*src[stride+6]));\
- OP(dst[6], (A*src[6] + B*src[7] + C*src[stride+6] + D*src[stride+7]));\
- OP(dst[7], (A*src[7] + B*src[8] + C*src[stride+7] + D*src[stride+8]));\
- dst+= stride;\
- src+= stride;\
+ if(D){\
+ for(i=0; i<h; i++){\
+ OP(dst[0], (A*src[0] + B*src[1] + C*src[stride+0] + D*src[stride+1]));\
+ OP(dst[1], (A*src[1] + B*src[2] + C*src[stride+1] + D*src[stride+2]));\
+ OP(dst[2], (A*src[2] + B*src[3] + C*src[stride+2] + D*src[stride+3]));\
+ OP(dst[3], (A*src[3] + B*src[4] + C*src[stride+3] + D*src[stride+4]));\
+ OP(dst[4], (A*src[4] + B*src[5] + C*src[stride+4] + D*src[stride+5]));\
+ OP(dst[5], (A*src[5] + B*src[6] + C*src[stride+5] + D*src[stride+6]));\
+ OP(dst[6], (A*src[6] + B*src[7] + C*src[stride+6] + D*src[stride+7]));\
+ OP(dst[7], (A*src[7] + B*src[8] + C*src[stride+7] + D*src[stride+8]));\
+ dst+= stride;\
+ src+= stride;\
+ }\
+ }else{\
+ const int E= B+C;\
+ const int step= C ? stride : 1;\
+ for(i=0; i<h; i++){\
+ OP(dst[0], (A*src[0] + E*src[step+0]));\
+ OP(dst[1], (A*src[1] + E*src[step+1]));\
+ OP(dst[2], (A*src[2] + E*src[step+2]));\
+ OP(dst[3], (A*src[3] + E*src[step+3]));\
+ OP(dst[4], (A*src[4] + E*src[step+4]));\
+ OP(dst[5], (A*src[5] + E*src[step+5]));\
+ OP(dst[6], (A*src[6] + E*src[step+6]));\
+ OP(dst[7], (A*src[7] + E*src[step+7]));\
+ dst+= stride;\
+ src+= stride;\
+ }\
}\
}
#undef op_avg
#undef op_put
-static inline void copy_block2(uint8_t *dst, uint8_t *src, int dstStride, int srcStride, int h)
-{
- int i;
- for(i=0; i<h; i++)
- {
- ST16(dst , LD16(src ));
- dst+=dstStride;
- src+=srcStride;
- }
-}
-
-static inline void copy_block4(uint8_t *dst, uint8_t *src, int dstStride, int srcStride, int h)
-{
+static void put_no_rnd_h264_chroma_mc8_c(uint8_t *dst/*align 8*/, uint8_t *src/*align 1*/, int stride, int h, int x, int y){
+ const int A=(8-x)*(8-y);
+ const int B=( x)*(8-y);
+ const int C=(8-x)*( y);
+ const int D=( x)*( y);
int i;
- for(i=0; i<h; i++)
- {
- ST32(dst , LD32(src ));
- dst+=dstStride;
- src+=srcStride;
- }
-}
-static inline void copy_block8(uint8_t *dst, uint8_t *src, int dstStride, int srcStride, int h)
-{
- int i;
- for(i=0; i<h; i++)
- {
- ST32(dst , LD32(src ));
- ST32(dst+4 , LD32(src+4 ));
- dst+=dstStride;
- src+=srcStride;
- }
-}
+ assert(x<8 && y<8 && x>=0 && y>=0);
-static inline void copy_block16(uint8_t *dst, uint8_t *src, int dstStride, int srcStride, int h)
-{
- int i;
for(i=0; i<h; i++)
{
- ST32(dst , LD32(src ));
- ST32(dst+4 , LD32(src+4 ));
- ST32(dst+8 , LD32(src+8 ));
- ST32(dst+12, LD32(src+12));
- dst+=dstStride;
- src+=srcStride;
- }
-}
-
-static inline void copy_block17(uint8_t *dst, uint8_t *src, int dstStride, int srcStride, int h)
-{
- int i;
- for(i=0; i<h; i++)
- {
- ST32(dst , LD32(src ));
- ST32(dst+4 , LD32(src+4 ));
- ST32(dst+8 , LD32(src+8 ));
- ST32(dst+12, LD32(src+12));
- dst[16]= src[16];
- dst+=dstStride;
- src+=srcStride;
- }
-}
-
-static inline void copy_block9(uint8_t *dst, uint8_t *src, int dstStride, int srcStride, int h)
-{
- int i;
- for(i=0; i<h; i++)
- {
- ST32(dst , LD32(src ));
- ST32(dst+4 , LD32(src+4 ));
- dst[8]= src[8];
- dst+=dstStride;
- src+=srcStride;
+ dst[0] = (A*src[0] + B*src[1] + C*src[stride+0] + D*src[stride+1] + 32 - 4) >> 6;
+ dst[1] = (A*src[1] + B*src[2] + C*src[stride+1] + D*src[stride+2] + 32 - 4) >> 6;
+ dst[2] = (A*src[2] + B*src[3] + C*src[stride+2] + D*src[stride+3] + 32 - 4) >> 6;
+ dst[3] = (A*src[3] + B*src[4] + C*src[stride+3] + D*src[stride+4] + 32 - 4) >> 6;
+ dst[4] = (A*src[4] + B*src[5] + C*src[stride+4] + D*src[stride+5] + 32 - 4) >> 6;
+ dst[5] = (A*src[5] + B*src[6] + C*src[stride+5] + D*src[stride+6] + 32 - 4) >> 6;
+ dst[6] = (A*src[6] + B*src[7] + C*src[stride+6] + D*src[stride+7] + 32 - 4) >> 6;
+ dst[7] = (A*src[7] + B*src[8] + C*src[stride+7] + D*src[stride+8] + 32 - 4) >> 6;
+ dst+= stride;
+ src+= stride;
}
}
-
#define QPEL_MC(r, OPNAME, RND, OP) \
static void OPNAME ## mpeg4_qpel8_h_lowpass(uint8_t *dst, uint8_t *src, int dstStride, int srcStride, int h){\
- uint8_t *cm = cropTbl + MAX_NEG_CROP;\
+ uint8_t *cm = ff_cropTbl + MAX_NEG_CROP;\
int i;\
for(i=0; i<h; i++)\
{\
\
static void OPNAME ## mpeg4_qpel8_v_lowpass(uint8_t *dst, uint8_t *src, int dstStride, int srcStride){\
const int w=8;\
- uint8_t *cm = cropTbl + MAX_NEG_CROP;\
+ uint8_t *cm = ff_cropTbl + MAX_NEG_CROP;\
int i;\
for(i=0; i<w; i++)\
{\
}\
\
static void OPNAME ## mpeg4_qpel16_h_lowpass(uint8_t *dst, uint8_t *src, int dstStride, int srcStride, int h){\
- uint8_t *cm = cropTbl + MAX_NEG_CROP;\
+ uint8_t *cm = ff_cropTbl + MAX_NEG_CROP;\
int i;\
\
for(i=0; i<h; i++)\
}\
\
static void OPNAME ## mpeg4_qpel16_v_lowpass(uint8_t *dst, uint8_t *src, int dstStride, int srcStride){\
- uint8_t *cm = cropTbl + MAX_NEG_CROP;\
+ uint8_t *cm = ff_cropTbl + MAX_NEG_CROP;\
int i;\
const int w=16;\
for(i=0; i<w; i++)\
#if 1
#define H264_LOWPASS(OPNAME, OP, OP2) \
-static void OPNAME ## h264_qpel2_h_lowpass(uint8_t *dst, uint8_t *src, int dstStride, int srcStride){\
+static av_unused void OPNAME ## h264_qpel2_h_lowpass(uint8_t *dst, uint8_t *src, int dstStride, int srcStride){\
const int h=2;\
- uint8_t *cm = cropTbl + MAX_NEG_CROP;\
+ uint8_t *cm = ff_cropTbl + MAX_NEG_CROP;\
int i;\
for(i=0; i<h; i++)\
{\
}\
}\
\
-static void OPNAME ## h264_qpel2_v_lowpass(uint8_t *dst, uint8_t *src, int dstStride, int srcStride){\
+static av_unused void OPNAME ## h264_qpel2_v_lowpass(uint8_t *dst, uint8_t *src, int dstStride, int srcStride){\
const int w=2;\
- uint8_t *cm = cropTbl + MAX_NEG_CROP;\
+ uint8_t *cm = ff_cropTbl + MAX_NEG_CROP;\
int i;\
for(i=0; i<w; i++)\
{\
}\
}\
\
-static void OPNAME ## h264_qpel2_hv_lowpass(uint8_t *dst, int16_t *tmp, uint8_t *src, int dstStride, int tmpStride, int srcStride){\
+static av_unused void OPNAME ## h264_qpel2_hv_lowpass(uint8_t *dst, int16_t *tmp, uint8_t *src, int dstStride, int tmpStride, int srcStride){\
const int h=2;\
const int w=2;\
- uint8_t *cm = cropTbl + MAX_NEG_CROP;\
+ uint8_t *cm = ff_cropTbl + MAX_NEG_CROP;\
int i;\
src -= 2*srcStride;\
for(i=0; i<h+5; i++)\
}\
static void OPNAME ## h264_qpel4_h_lowpass(uint8_t *dst, uint8_t *src, int dstStride, int srcStride){\
const int h=4;\
- uint8_t *cm = cropTbl + MAX_NEG_CROP;\
+ uint8_t *cm = ff_cropTbl + MAX_NEG_CROP;\
int i;\
for(i=0; i<h; i++)\
{\
\
static void OPNAME ## h264_qpel4_v_lowpass(uint8_t *dst, uint8_t *src, int dstStride, int srcStride){\
const int w=4;\
- uint8_t *cm = cropTbl + MAX_NEG_CROP;\
+ uint8_t *cm = ff_cropTbl + MAX_NEG_CROP;\
int i;\
for(i=0; i<w; i++)\
{\
static void OPNAME ## h264_qpel4_hv_lowpass(uint8_t *dst, int16_t *tmp, uint8_t *src, int dstStride, int tmpStride, int srcStride){\
const int h=4;\
const int w=4;\
- uint8_t *cm = cropTbl + MAX_NEG_CROP;\
+ uint8_t *cm = ff_cropTbl + MAX_NEG_CROP;\
int i;\
src -= 2*srcStride;\
for(i=0; i<h+5; i++)\
\
static void OPNAME ## h264_qpel8_h_lowpass(uint8_t *dst, uint8_t *src, int dstStride, int srcStride){\
const int h=8;\
- uint8_t *cm = cropTbl + MAX_NEG_CROP;\
+ uint8_t *cm = ff_cropTbl + MAX_NEG_CROP;\
int i;\
for(i=0; i<h; i++)\
{\
\
static void OPNAME ## h264_qpel8_v_lowpass(uint8_t *dst, uint8_t *src, int dstStride, int srcStride){\
const int w=8;\
- uint8_t *cm = cropTbl + MAX_NEG_CROP;\
+ uint8_t *cm = ff_cropTbl + MAX_NEG_CROP;\
int i;\
for(i=0; i<w; i++)\
{\
static void OPNAME ## h264_qpel8_hv_lowpass(uint8_t *dst, int16_t *tmp, uint8_t *src, int dstStride, int tmpStride, int srcStride){\
const int h=8;\
const int w=8;\
- uint8_t *cm = cropTbl + MAX_NEG_CROP;\
+ uint8_t *cm = ff_cropTbl + MAX_NEG_CROP;\
int i;\
src -= 2*srcStride;\
for(i=0; i<h+5; i++)\
#undef op2_put
#endif
-#define op_scale1(x) block[x] = clip_uint8( (block[x]*weight + offset) >> log2_denom )
-#define op_scale2(x) dst[x] = clip_uint8( (src[x]*weights + dst[x]*weightd + offset) >> (log2_denom+1))
+#define op_scale1(x) block[x] = av_clip_uint8( (block[x]*weight + offset) >> log2_denom )
+#define op_scale2(x) dst[x] = av_clip_uint8( (src[x]*weights + dst[x]*weightd + offset) >> (log2_denom+1))
#define H264_WEIGHT(W,H) \
static void weight_h264_pixels ## W ## x ## H ## _c(uint8_t *block, int stride, int log2_denom, int weight, int offset){ \
int y; \
#undef H264_WEIGHT
static void wmv2_mspel8_h_lowpass(uint8_t *dst, uint8_t *src, int dstStride, int srcStride, int h){
- uint8_t *cm = cropTbl + MAX_NEG_CROP;
+ uint8_t *cm = ff_cropTbl + MAX_NEG_CROP;
int i;
for(i=0; i<h; i++){
}
#endif /* CONFIG_VC1_DECODER||CONFIG_WMV3_DECODER */
+void ff_intrax8dsp_init(DSPContext* c, AVCodecContext *avctx);
+
+/* H264 specific */
+void ff_h264dspenc_init(DSPContext* c, AVCodecContext *avctx);
+
static void wmv2_mspel8_v_lowpass(uint8_t *dst, uint8_t *src, int dstStride, int srcStride, int w){
- uint8_t *cm = cropTbl + MAX_NEG_CROP;
+ uint8_t *cm = ff_cropTbl + MAX_NEG_CROP;
int i;
for(i=0; i<w; i++){
}
static void h263_v_loop_filter_c(uint8_t *src, int stride, int qscale){
+ if(ENABLE_ANY_H263) {
int x;
const int strength= ff_h263_loop_filter_strength[qscale];
src[x-1*stride] = p1;
src[x+0*stride] = p2;
- ad1= ABS(d1)>>1;
+ ad1= FFABS(d1)>>1;
- d2= clip((p0-p3)/4, -ad1, ad1);
+ d2= av_clip((p0-p3)/4, -ad1, ad1);
src[x-2*stride] = p0 - d2;
src[x+ stride] = p3 + d2;
}
+ }
}
static void h263_h_loop_filter_c(uint8_t *src, int stride, int qscale){
+ if(ENABLE_ANY_H263) {
int y;
const int strength= ff_h263_loop_filter_strength[qscale];
src[y*stride-1] = p1;
src[y*stride+0] = p2;
- ad1= ABS(d1)>>1;
+ ad1= FFABS(d1)>>1;
- d2= clip((p0-p3)/4, -ad1, ad1);
+ d2= av_clip((p0-p3)/4, -ad1, ad1);
src[y*stride-2] = p0 - d2;
src[y*stride+1] = p3 + d2;
}
+ }
}
static void h261_loop_filter_c(uint8_t *src, int stride){
const int q1 = pix[1*xstride];
const int q2 = pix[2*xstride];
- if( ABS( p0 - q0 ) < alpha &&
- ABS( p1 - p0 ) < beta &&
- ABS( q1 - q0 ) < beta ) {
+ if( FFABS( p0 - q0 ) < alpha &&
+ FFABS( p1 - p0 ) < beta &&
+ FFABS( q1 - q0 ) < beta ) {
int tc = tc0[i];
int i_delta;
- if( ABS( p2 - p0 ) < beta ) {
- pix[-2*xstride] = p1 + clip( (( p2 + ( ( p0 + q0 + 1 ) >> 1 ) ) >> 1) - p1, -tc0[i], tc0[i] );
+ if( FFABS( p2 - p0 ) < beta ) {
+ pix[-2*xstride] = p1 + av_clip( (( p2 + ( ( p0 + q0 + 1 ) >> 1 ) ) >> 1) - p1, -tc0[i], tc0[i] );
tc++;
}
- if( ABS( q2 - q0 ) < beta ) {
- pix[ xstride] = q1 + clip( (( q2 + ( ( p0 + q0 + 1 ) >> 1 ) ) >> 1) - q1, -tc0[i], tc0[i] );
+ if( FFABS( q2 - q0 ) < beta ) {
+ pix[ xstride] = q1 + av_clip( (( q2 + ( ( p0 + q0 + 1 ) >> 1 ) ) >> 1) - q1, -tc0[i], tc0[i] );
tc++;
}
- i_delta = clip( (((q0 - p0 ) << 2) + (p1 - q1) + 4) >> 3, -tc, tc );
- pix[-xstride] = clip_uint8( p0 + i_delta ); /* p0' */
- pix[0] = clip_uint8( q0 - i_delta ); /* q0' */
+ i_delta = av_clip( (((q0 - p0 ) << 2) + (p1 - q1) + 4) >> 3, -tc, tc );
+ pix[-xstride] = av_clip_uint8( p0 + i_delta ); /* p0' */
+ pix[0] = av_clip_uint8( q0 - i_delta ); /* q0' */
}
pix += ystride;
}
const int q0 = pix[0];
const int q1 = pix[1*xstride];
- if( ABS( p0 - q0 ) < alpha &&
- ABS( p1 - p0 ) < beta &&
- ABS( q1 - q0 ) < beta ) {
+ if( FFABS( p0 - q0 ) < alpha &&
+ FFABS( p1 - p0 ) < beta &&
+ FFABS( q1 - q0 ) < beta ) {
- int delta = clip( (((q0 - p0 ) << 2) + (p1 - q1) + 4) >> 3, -tc, tc );
+ int delta = av_clip( (((q0 - p0 ) << 2) + (p1 - q1) + 4) >> 3, -tc, tc );
- pix[-xstride] = clip_uint8( p0 + delta ); /* p0' */
- pix[0] = clip_uint8( q0 - delta ); /* q0' */
+ pix[-xstride] = av_clip_uint8( p0 + delta ); /* p0' */
+ pix[0] = av_clip_uint8( q0 - delta ); /* q0' */
}
pix += ystride;
}
const int q0 = pix[0];
const int q1 = pix[1*xstride];
- if( ABS( p0 - q0 ) < alpha &&
- ABS( p1 - p0 ) < beta &&
- ABS( q1 - q0 ) < beta ) {
+ if( FFABS( p0 - q0 ) < alpha &&
+ FFABS( p1 - p0 ) < beta &&
+ FFABS( q1 - q0 ) < beta ) {
pix[-xstride] = ( 2*p1 + p0 + q1 + 2 ) >> 2; /* p0' */
pix[0] = ( 2*q1 + q0 + p1 + 2 ) >> 2; /* q0' */
}
if(y+1<h){
for(x=0; x<15; x++){
- score2+= ABS( s1[x ] - s1[x +stride]
+ score2+= FFABS( s1[x ] - s1[x +stride]
- s1[x+1] + s1[x+1+stride])
- -ABS( s2[x ] - s2[x +stride]
+ -FFABS( s2[x ] - s2[x +stride]
- s2[x+1] + s2[x+1+stride]);
}
}
s2+= stride;
}
- if(c) return score1 + ABS(score2)*c->avctx->nsse_weight;
- else return score1 + ABS(score2)*8;
+ if(c) return score1 + FFABS(score2)*c->avctx->nsse_weight;
+ else return score1 + FFABS(score2)*8;
}
static int nsse8_c(void *v, uint8_t *s1, uint8_t *s2, int stride, int h){
}
if(y+1<h){
for(x=0; x<7; x++){
- score2+= ABS( s1[x ] - s1[x +stride]
+ score2+= FFABS( s1[x ] - s1[x +stride]
- s1[x+1] + s1[x+1+stride])
- -ABS( s2[x ] - s2[x +stride]
+ -FFABS( s2[x ] - s2[x +stride]
- s2[x+1] + s2[x+1+stride]);
}
}
s2+= stride;
}
- if(c) return score1 + ABS(score2)*c->avctx->nsse_weight;
- else return score1 + ABS(score2)*8;
+ if(c) return score1 + FFABS(score2)*c->avctx->nsse_weight;
+ else return score1 + FFABS(score2)*8;
}
static int try_8x8basis_c(int16_t rem[64], int16_t weight[64], int16_t basis[64], int scale){
DCTELEM temp[64];
if(last<=0) return;
- //if(permutation[1]==1) return; //FIXME its ok but not clean and might fail for some perms
+ //if(permutation[1]==1) return; //FIXME it is ok but not clean and might fail for some permutations
for(i=0; i<=last; i++){
const int j= scantable[i];
}
static void add_bytes_c(uint8_t *dst, uint8_t *src, int w){
- int i;
- for(i=0; i+7<w; i+=8){
- dst[i+0] += src[i+0];
- dst[i+1] += src[i+1];
- dst[i+2] += src[i+2];
- dst[i+3] += src[i+3];
- dst[i+4] += src[i+4];
- dst[i+5] += src[i+5];
- dst[i+6] += src[i+6];
- dst[i+7] += src[i+7];
+ long i;
+ for(i=0; i<=w-sizeof(long); i+=sizeof(long)){
+ long a = *(long*)(src+i);
+ long b = *(long*)(dst+i);
+ *(long*)(dst+i) = ((a&pb_7f) + (b&pb_7f)) ^ ((a^b)&pb_80);
}
for(; i<w; i++)
dst[i+0] += src[i+0];
}
+static void add_bytes_l2_c(uint8_t *dst, uint8_t *src1, uint8_t *src2, int w){
+ long i;
+ for(i=0; i<=w-sizeof(long); i+=sizeof(long)){
+ long a = *(long*)(src1+i);
+ long b = *(long*)(src2+i);
+ *(long*)(dst+i) = ((a&pb_7f) + (b&pb_7f)) ^ ((a^b)&pb_80);
+ }
+ for(; i<w; i++)
+ dst[i] = src1[i]+src2[i];
+}
+
static void diff_bytes_c(uint8_t *dst, uint8_t *src1, uint8_t *src2, int w){
- int i;
- for(i=0; i+7<w; i+=8){
- dst[i+0] = src1[i+0]-src2[i+0];
- dst[i+1] = src1[i+1]-src2[i+1];
- dst[i+2] = src1[i+2]-src2[i+2];
- dst[i+3] = src1[i+3]-src2[i+3];
- dst[i+4] = src1[i+4]-src2[i+4];
- dst[i+5] = src1[i+5]-src2[i+5];
- dst[i+6] = src1[i+6]-src2[i+6];
- dst[i+7] = src1[i+7]-src2[i+7];
+ long i;
+#ifndef HAVE_FAST_UNALIGNED
+ if((long)src2 & (sizeof(long)-1)){
+ for(i=0; i+7<w; i+=8){
+ dst[i+0] = src1[i+0]-src2[i+0];
+ dst[i+1] = src1[i+1]-src2[i+1];
+ dst[i+2] = src1[i+2]-src2[i+2];
+ dst[i+3] = src1[i+3]-src2[i+3];
+ dst[i+4] = src1[i+4]-src2[i+4];
+ dst[i+5] = src1[i+5]-src2[i+5];
+ dst[i+6] = src1[i+6]-src2[i+6];
+ dst[i+7] = src1[i+7]-src2[i+7];
+ }
+ }else
+#endif
+ for(i=0; i<=w-sizeof(long); i+=sizeof(long)){
+ long a = *(long*)(src1+i);
+ long b = *(long*)(src2+i);
+ *(long*)(dst+i) = ((a|pb_80) - (b&pb_7f)) ^ ((a^b^pb_80)&pb_80);
}
for(; i<w; i++)
dst[i+0] = src1[i+0]-src2[i+0];
y= a-b;\
}
-#define BUTTERFLYA(x,y) (ABS((x)+(y)) + ABS((x)-(y)))
+#define BUTTERFLYA(x,y) (FFABS((x)+(y)) + FFABS((x)-(y)))
static int hadamard8_diff8x8_c(/*MpegEncContext*/ void *s, uint8_t *dst, uint8_t *src, int stride, int h){
int i;
+BUTTERFLYA(temp[8*3+i], temp[8*7+i]);
}
- sum -= ABS(temp[8*0] + temp[8*4]); // -mean
+ sum -= FFABS(temp[8*0] + temp[8*4]); // -mean
return sum;
}
static int dct_sad8x8_c(/*MpegEncContext*/ void *c, uint8_t *src1, uint8_t *src2, int stride, int h){
MpegEncContext * const s= (MpegEncContext *)c;
- DECLARE_ALIGNED_8(uint64_t, aligned_temp[sizeof(DCTELEM)*64/8]);
+ DECLARE_ALIGNED_16(uint64_t, aligned_temp[sizeof(DCTELEM)*64/8]);
DCTELEM * const temp= (DCTELEM*)aligned_temp;
- int sum=0, i;
assert(h==8);
s->dsp.diff_pixels(temp, src1, src2, stride);
s->dsp.fdct(temp);
-
- for(i=0; i<64; i++)
- sum+= ABS(temp[i]);
-
- return sum;
+ return s->dsp.sum_abs_dctelem(temp);
}
#ifdef CONFIG_GPL
static int dct264_sad8x8_c(/*MpegEncContext*/ void *c, uint8_t *src1, uint8_t *src2, int stride, int h){
MpegEncContext * const s= (MpegEncContext *)c;
- int16_t dct[8][8];
+ DCTELEM dct[8][8];
int i;
int sum=0;
- s->dsp.diff_pixels(dct, src1, src2, stride);
+ s->dsp.diff_pixels(dct[0], src1, src2, stride);
#define SRC(x) dct[i][x]
#define DST(x,v) dct[i][x]= v
#undef DST
#define SRC(x) dct[x][i]
-#define DST(x,v) sum += ABS(v)
+#define DST(x,v) sum += FFABS(v)
for( i = 0; i < 8; i++ )
DCT8_1D
#undef SRC
s->dsp.fdct(temp);
for(i=0; i<64; i++)
- sum= FFMAX(sum, ABS(temp[i]));
+ sum= FFMAX(sum, FFABS(temp[i]));
return sum;
}
-void simple_idct(DCTELEM *block); //FIXME
-
static int quant_psnr8x8_c(/*MpegEncContext*/ void *c, uint8_t *src1, uint8_t *src2, int stride, int h){
MpegEncContext * const s= (MpegEncContext *)c;
DECLARE_ALIGNED_8 (uint64_t, aligned_temp[sizeof(DCTELEM)*64*2/8]);
s->block_last_index[0/*FIXME*/]= s->fast_dct_quantize(s, temp, 0/*FIXME*/, s->qscale, &i);
s->dct_unquantize_inter(s, temp, 0, s->qscale);
- simple_idct(temp); //FIXME
+ ff_simple_idct(temp); //FIXME
for(i=0; i<64; i++)
sum+= (temp[i]-bak[i])*(temp[i]-bak[i]);
DECLARE_ALIGNED_8 (uint64_t, aligned_bak[stride]);
DCTELEM * const temp= (DCTELEM*)aligned_temp;
uint8_t * const bak= (uint8_t*)aligned_bak;
- int i, last, run, bits, level, distoration, start_i;
+ int i, last, run, bits, level, distortion, start_i;
const int esc_length= s->ac_esc_length;
uint8_t * length;
uint8_t * last_length;
s->dsp.idct_add(bak, stride, temp);
- distoration= s->dsp.sse[1](NULL, bak, src1, stride, 8);
+ distortion= s->dsp.sse[1](NULL, bak, src1, stride, 8);
- return distoration + ((bits*s->qscale*s->qscale*109 + 64)>>7);
+ return distortion + ((bits*s->qscale*s->qscale*109 + 64)>>7);
}
static int bit8x8_c(/*MpegEncContext*/ void *c, uint8_t *src1, uint8_t *src2, int stride, int h){
for(y=1; y<h; y++){
for(x=0; x<16; x+=4){
- score+= ABS(s[x ] - s[x +stride]) + ABS(s[x+1] - s[x+1+stride])
- +ABS(s[x+2] - s[x+2+stride]) + ABS(s[x+3] - s[x+3+stride]);
+ score+= FFABS(s[x ] - s[x +stride]) + FFABS(s[x+1] - s[x+1+stride])
+ +FFABS(s[x+2] - s[x+2+stride]) + FFABS(s[x+3] - s[x+3+stride]);
}
s+= stride;
}
for(y=1; y<h; y++){
for(x=0; x<16; x++){
- score+= ABS(s1[x ] - s2[x ] - s1[x +stride] + s2[x +stride]);
+ score+= FFABS(s1[x ] - s2[x ] - s1[x +stride] + s2[x +stride]);
}
s1+= stride;
s2+= stride;
return score;
}
-WARPER8_16_SQ(hadamard8_diff8x8_c, hadamard8_diff16_c)
-WARPER8_16_SQ(hadamard8_intra8x8_c, hadamard8_intra16_c)
-WARPER8_16_SQ(dct_sad8x8_c, dct_sad16_c)
+static int ssd_int8_vs_int16_c(const int8_t *pix1, const int16_t *pix2,
+ int size){
+ int score=0;
+ int i;
+ for(i=0; i<size; i++)
+ score += (pix1[i]-pix2[i])*(pix1[i]-pix2[i]);
+ return score;
+}
+
+WRAPPER8_16_SQ(hadamard8_diff8x8_c, hadamard8_diff16_c)
+WRAPPER8_16_SQ(hadamard8_intra8x8_c, hadamard8_intra16_c)
+WRAPPER8_16_SQ(dct_sad8x8_c, dct_sad16_c)
#ifdef CONFIG_GPL
-WARPER8_16_SQ(dct264_sad8x8_c, dct264_sad16_c)
+WRAPPER8_16_SQ(dct264_sad8x8_c, dct264_sad16_c)
#endif
-WARPER8_16_SQ(dct_max8x8_c, dct_max16_c)
-WARPER8_16_SQ(quant_psnr8x8_c, quant_psnr16_c)
-WARPER8_16_SQ(rd8x8_c, rd16_c)
-WARPER8_16_SQ(bit8x8_c, bit16_c)
+WRAPPER8_16_SQ(dct_max8x8_c, dct_max16_c)
+WRAPPER8_16_SQ(quant_psnr8x8_c, quant_psnr16_c)
+WRAPPER8_16_SQ(rd8x8_c, rd16_c)
+WRAPPER8_16_SQ(bit8x8_c, bit16_c)
static void vector_fmul_c(float *dst, const float *src, int len){
int i;
void ff_float_to_int16_c(int16_t *dst, const float *src, int len){
int i;
for(i=0; i<len; i++) {
- int_fast32_t tmp = ((int32_t*)src)[i];
+ int_fast32_t tmp = ((const int32_t*)src)[i];
if(tmp & 0xf0000){
tmp = (0x43c0ffff - tmp)>>31;
// is this faster on some gcc/cpu combinations?
}
}
+#define W0 2048
+#define W1 2841 /* 2048*sqrt (2)*cos (1*pi/16) */
+#define W2 2676 /* 2048*sqrt (2)*cos (2*pi/16) */
+#define W3 2408 /* 2048*sqrt (2)*cos (3*pi/16) */
+#define W4 2048 /* 2048*sqrt (2)*cos (4*pi/16) */
+#define W5 1609 /* 2048*sqrt (2)*cos (5*pi/16) */
+#define W6 1108 /* 2048*sqrt (2)*cos (6*pi/16) */
+#define W7 565 /* 2048*sqrt (2)*cos (7*pi/16) */
+
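/* W1..W7 are 11-bit fixed point cosine factors, W_k = round(2048*sqrt(2)*cos(k*pi/16)),
 * with W0 = W4 = 2048, so each 1-D pass below evaluates, up to scaling and rounding,
 *     x[n] = sum_{k=0..7} c_k * X[k] * cos((2n+1)*k*pi/16)
 * using even/odd butterflies; the 2-D IDCT is then computed separably, rows first
 * (wmv2_idct_row) and columns second (wmv2_idct_col), with the rounding and shifts
 * split between the two passes. */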
+static void wmv2_idct_row(short * b)
+{
+ int s1,s2;
+ int a0,a1,a2,a3,a4,a5,a6,a7;
+ /*step 1*/
+ a1 = W1*b[1]+W7*b[7];
+ a7 = W7*b[1]-W1*b[7];
+ a5 = W5*b[5]+W3*b[3];
+ a3 = W3*b[5]-W5*b[3];
+ a2 = W2*b[2]+W6*b[6];
+ a6 = W6*b[2]-W2*b[6];
+ a0 = W0*b[0]+W0*b[4];
+ a4 = W0*b[0]-W0*b[4];
+ /*step 2*/
+ s1 = (181*(a1-a5+a7-a3)+128)>>8;//1,3,5,7,
+ s2 = (181*(a1-a5-a7+a3)+128)>>8;
+ /*step 3*/
+ b[0] = (a0+a2+a1+a5 + (1<<7))>>8;
+ b[1] = (a4+a6 +s1 + (1<<7))>>8;
+ b[2] = (a4-a6 +s2 + (1<<7))>>8;
+ b[3] = (a0-a2+a7+a3 + (1<<7))>>8;
+ b[4] = (a0-a2-a7-a3 + (1<<7))>>8;
+ b[5] = (a4-a6 -s2 + (1<<7))>>8;
+ b[6] = (a4+a6 -s1 + (1<<7))>>8;
+ b[7] = (a0+a2-a1-a5 + (1<<7))>>8;
+}
+static void wmv2_idct_col(short * b)
+{
+ int s1,s2;
+ int a0,a1,a2,a3,a4,a5,a6,a7;
+ /*step 1, with extended precision*/
+ a1 = (W1*b[8*1]+W7*b[8*7] + 4)>>3;
+ a7 = (W7*b[8*1]-W1*b[8*7] + 4)>>3;
+ a5 = (W5*b[8*5]+W3*b[8*3] + 4)>>3;
+ a3 = (W3*b[8*5]-W5*b[8*3] + 4)>>3;
+ a2 = (W2*b[8*2]+W6*b[8*6] + 4)>>3;
+ a6 = (W6*b[8*2]-W2*b[8*6] + 4)>>3;
+ a0 = (W0*b[8*0]+W0*b[8*4] )>>3;
+ a4 = (W0*b[8*0]-W0*b[8*4] )>>3;
+ /*step 2*/
+ s1 = (181*(a1-a5+a7-a3)+128)>>8;
+ s2 = (181*(a1-a5-a7+a3)+128)>>8;
+ /*step 3*/
+ b[8*0] = (a0+a2+a1+a5 + (1<<13))>>14;
+ b[8*1] = (a4+a6 +s1 + (1<<13))>>14;
+ b[8*2] = (a4-a6 +s2 + (1<<13))>>14;
+ b[8*3] = (a0-a2+a7+a3 + (1<<13))>>14;
+
+ b[8*4] = (a0-a2-a7-a3 + (1<<13))>>14;
+ b[8*5] = (a4-a6 -s2 + (1<<13))>>14;
+ b[8*6] = (a4+a6 -s1 + (1<<13))>>14;
+ b[8*7] = (a0+a2-a1-a5 + (1<<13))>>14;
+}
+void ff_wmv2_idct_c(short * block){
+ int i;
+
+ for(i=0;i<64;i+=8){
+ wmv2_idct_row(block+i);
+ }
+ for(i=0;i<8;i++){
+ wmv2_idct_col(block+i);
+ }
+}
/* XXX: those functions should be suppressed ASAP when all IDCTs are
converted */
+static void ff_wmv2_idct_put_c(uint8_t *dest, int line_size, DCTELEM *block)
+{
+ ff_wmv2_idct_c(block);
+ put_pixels_clamped_c(block, dest, line_size);
+}
+static void ff_wmv2_idct_add_c(uint8_t *dest, int line_size, DCTELEM *block)
+{
+ ff_wmv2_idct_c(block);
+ add_pixels_clamped_c(block, dest, line_size);
+}
static void ff_jref_idct_put(uint8_t *dest, int line_size, DCTELEM *block)
{
j_rev_dct (block);
static void ff_jref_idct1_put(uint8_t *dest, int line_size, DCTELEM *block)
{
- uint8_t *cm = cropTbl + MAX_NEG_CROP;
+ uint8_t *cm = ff_cropTbl + MAX_NEG_CROP;
dest[0] = cm[(block[0] + 4)>>3];
}
static void ff_jref_idct1_add(uint8_t *dest, int line_size, DCTELEM *block)
{
- uint8_t *cm = cropTbl + MAX_NEG_CROP;
+ uint8_t *cm = ff_cropTbl + MAX_NEG_CROP;
dest[0] = cm[dest[0] + ((block[0] + 4)>>3)];
}
-static void just_return() { return; }
+static void just_return(void *mem av_unused, int stride av_unused, int h av_unused) { return; }
/* init static data */
void dsputil_static_init(void)
{
int i;
- for(i=0;i<256;i++) cropTbl[i + MAX_NEG_CROP] = i;
+ for(i=0;i<256;i++) ff_cropTbl[i + MAX_NEG_CROP] = i;
for(i=0;i<MAX_NEG_CROP;i++) {
- cropTbl[i] = 0;
- cropTbl[i + MAX_NEG_CROP + 256] = 255;
+ ff_cropTbl[i] = 0;
+ ff_cropTbl[i + MAX_NEG_CROP + 256] = 255;
}
for(i=0;i<512;i++) {
- squareTbl[i] = (i - 256) * (i - 256);
+ ff_squareTbl[i] = (i - 256) * (i - 256);
}
for(i=0; i<64; i++) inv_zigzag_direct16[ff_zigzag_direct[i]]= i+1;
}
+int ff_check_alignment(void){
+ static int did_fail=0;
+ DECLARE_ALIGNED_16(int, aligned);
+
+ if((long)&aligned & 15){
+ if(!did_fail){
+#if defined(HAVE_MMX) || defined(HAVE_ALTIVEC)
+ av_log(NULL, AV_LOG_ERROR,
+ "Compiler did not align stack variables. Libavcodec has been miscompiled\n"
+ "and may be very slow or crash. This is not a bug in libavcodec,\n"
+ "but in the compiler. You may try recompiling using gcc >= 4.2.\n"
+ "Do not report crashes to FFmpeg developers.\n");
+#endif
+ did_fail=1;
+ }
+ return -1;
+ }
+ return 0;
+}
void dsputil_init(DSPContext* c, AVCodecContext *avctx)
{
int i;
+ ff_check_alignment();
+
#ifdef CONFIG_ENCODERS
if(avctx->dct_algo==FF_DCT_FASTINT) {
c->fdct = fdct_ifast;
#endif //CONFIG_ENCODERS
if(avctx->lowres==1){
- if(avctx->idct_algo==FF_IDCT_INT || avctx->idct_algo==FF_IDCT_AUTO){
+ if(avctx->idct_algo==FF_IDCT_INT || avctx->idct_algo==FF_IDCT_AUTO || !ENABLE_H264_DECODER){
c->idct_put= ff_jref_idct4_put;
c->idct_add= ff_jref_idct4_add;
}else{
c->idct_add= ff_jref_idct_add;
c->idct = j_rev_dct;
c->idct_permutation_type= FF_LIBMPEG2_IDCT_PERM;
- }else if(avctx->idct_algo==FF_IDCT_VP3){
+ }else if((ENABLE_VP3_DECODER || ENABLE_VP5_DECODER || ENABLE_VP6_DECODER || ENABLE_THEORA_DECODER ) &&
+ avctx->idct_algo==FF_IDCT_VP3){
c->idct_put= ff_vp3_idct_put_c;
c->idct_add= ff_vp3_idct_add_c;
c->idct = ff_vp3_idct_c;
c->idct_permutation_type= FF_NO_IDCT_PERM;
+ }else if(avctx->idct_algo==FF_IDCT_WMV2){
+ c->idct_put= ff_wmv2_idct_put_c;
+ c->idct_add= ff_wmv2_idct_add_c;
+ c->idct = ff_wmv2_idct_c;
+ c->idct_permutation_type= FF_NO_IDCT_PERM;
+ }else if(avctx->idct_algo==FF_IDCT_FAAN){
+ c->idct_put= ff_faanidct_put;
+ c->idct_add= ff_faanidct_add;
+ c->idct = ff_faanidct;
+ c->idct_permutation_type= FF_NO_IDCT_PERM;
}else{ //accurate/default
- c->idct_put= simple_idct_put;
- c->idct_add= simple_idct_add;
- c->idct = simple_idct;
+ c->idct_put= ff_simple_idct_put;
+ c->idct_add= ff_simple_idct_add;
+ c->idct = ff_simple_idct;
c->idct_permutation_type= FF_NO_IDCT_PERM;
}
}
- c->h264_idct_add= ff_h264_idct_add_c;
- c->h264_idct8_add= ff_h264_idct8_add_c;
- c->h264_idct_dc_add= ff_h264_idct_dc_add_c;
- c->h264_idct8_dc_add= ff_h264_idct8_dc_add_c;
+ if (ENABLE_H264_DECODER) {
+ c->h264_idct_add= ff_h264_idct_add_c;
+ c->h264_idct8_add= ff_h264_idct8_add_c;
+ c->h264_idct_dc_add= ff_h264_idct_dc_add_c;
+ c->h264_idct8_dc_add= ff_h264_idct8_dc_add_c;
+ }
c->get_pixels = get_pixels_c;
c->diff_pixels = diff_pixels_c;
c->add_pixels_clamped = add_pixels_clamped_c;
c->add_pixels8 = add_pixels8_c;
c->add_pixels4 = add_pixels4_c;
+ c->sum_abs_dctelem = sum_abs_dctelem_c;
c->gmc1 = gmc1_c;
c->gmc = ff_gmc_c;
c->clear_blocks = clear_blocks_c;
c->avg_h264_chroma_pixels_tab[0]= avg_h264_chroma_mc8_c;
c->avg_h264_chroma_pixels_tab[1]= avg_h264_chroma_mc4_c;
c->avg_h264_chroma_pixels_tab[2]= avg_h264_chroma_mc2_c;
+ c->put_no_rnd_h264_chroma_pixels_tab[0]= put_no_rnd_h264_chroma_mc8_c;
c->weight_h264_pixels_tab[0]= weight_h264_pixels16x16_c;
c->weight_h264_pixels_tab[1]= weight_h264_pixels16x8_c;
c->biweight_h264_pixels_tab[8]= biweight_h264_pixels2x4_c;
c->biweight_h264_pixels_tab[9]= biweight_h264_pixels2x2_c;
+ c->draw_edges = draw_edges_c;
+
#ifdef CONFIG_CAVS_DECODER
ff_cavsdsp_init(c,avctx);
#endif
#if defined(CONFIG_VC1_DECODER) || defined(CONFIG_WMV3_DECODER)
ff_vc1dsp_init(c,avctx);
#endif
+#if defined(CONFIG_WMV2_DECODER) || defined(CONFIG_VC1_DECODER) || defined(CONFIG_WMV3_DECODER)
+ ff_intrax8dsp_init(c,avctx);
+#endif
+#if defined(CONFIG_H264_ENCODER)
+ ff_h264dspenc_init(c,avctx);
+#endif
c->put_mspel_pixels_tab[0]= put_mspel8_mc00_c;
c->put_mspel_pixels_tab[1]= put_mspel8_mc10_c;
c->w97[1]= w97_8_c;
#endif
+ c->ssd_int8_vs_int16 = ssd_int8_vs_int16_c;
+
c->add_bytes= add_bytes_c;
+ c->add_bytes_l2= add_bytes_l2_c;
c->diff_bytes= diff_bytes_c;
c->sub_hfyu_median_prediction= sub_hfyu_median_prediction_c;
c->bswap_buf= bswap_buf;
+#ifdef CONFIG_PNG_DECODER
+ c->add_png_paeth_prediction= ff_add_png_paeth_prediction;
+#endif
c->h264_v_loop_filter_luma= h264_v_loop_filter_luma_c;
c->h264_h_loop_filter_luma= h264_h_loop_filter_luma_c;
c->h264_h_loop_filter_chroma= h264_h_loop_filter_chroma_c;
c->h264_v_loop_filter_chroma_intra= h264_v_loop_filter_chroma_intra_c;
c->h264_h_loop_filter_chroma_intra= h264_h_loop_filter_chroma_intra_c;
+ c->h264_loop_filter_strength= NULL;
- c->h263_h_loop_filter= h263_h_loop_filter_c;
- c->h263_v_loop_filter= h263_v_loop_filter_c;
+ if (ENABLE_ANY_H263) {
+ c->h263_h_loop_filter= h263_h_loop_filter_c;
+ c->h263_v_loop_filter= h263_v_loop_filter_c;
+ }
c->h261_loop_filter= h261_loop_filter_c;
c->try_8x8basis= try_8x8basis_c;
c->add_8x8basis= add_8x8basis_c;
-#ifdef CONFIG_SNOW_ENCODER
+#ifdef CONFIG_SNOW_DECODER
c->vertical_compose97i = ff_snow_vertical_compose97i;
c->horizontal_compose97i = ff_snow_horizontal_compose97i;
c->inner_add_yblock = ff_snow_inner_add_yblock;
#ifdef CONFIG_VORBIS_DECODER
c->vorbis_inverse_coupling = vorbis_inverse_coupling;
+#endif
+#ifdef CONFIG_FLAC_ENCODER
+ c->flac_compute_autocorr = ff_flac_compute_autocorr;
#endif
c->vector_fmul = vector_fmul_c;
c->vector_fmul_reverse = vector_fmul_reverse_c;
c->prefetch= just_return;
-#ifdef HAVE_MMX
- dsputil_init_mmx(c, avctx);
-#endif
-#ifdef ARCH_ARMV4L
- dsputil_init_armv4l(c, avctx);
-#endif
-#ifdef HAVE_MLIB
- dsputil_init_mlib(c, avctx);
-#endif
-#ifdef ARCH_SPARC
- dsputil_init_vis(c,avctx);
-#endif
-#ifdef ARCH_ALPHA
- dsputil_init_alpha(c, avctx);
-#endif
-#ifdef ARCH_POWERPC
- dsputil_init_ppc(c, avctx);
-#endif
-#ifdef HAVE_MMI
- dsputil_init_mmi(c, avctx);
-#endif
-#ifdef ARCH_SH4
- dsputil_init_sh4(c,avctx);
-#endif
+ memset(c->put_2tap_qpel_pixels_tab, 0, sizeof(c->put_2tap_qpel_pixels_tab));
+ memset(c->avg_2tap_qpel_pixels_tab, 0, sizeof(c->avg_2tap_qpel_pixels_tab));
+
+ if (ENABLE_MMX) dsputil_init_mmx (c, avctx);
+ if (ENABLE_ARMV4L) dsputil_init_armv4l(c, avctx);
+ if (ENABLE_MLIB) dsputil_init_mlib (c, avctx);
+ if (ENABLE_VIS) dsputil_init_vis (c, avctx);
+ if (ENABLE_ALPHA) dsputil_init_alpha (c, avctx);
+ if (ENABLE_POWERPC) dsputil_init_ppc (c, avctx);
+ if (ENABLE_MMI) dsputil_init_mmi (c, avctx);
+ if (ENABLE_SH4) dsputil_init_sh4 (c, avctx);
+ if (ENABLE_BFIN) dsputil_init_bfin (c, avctx);
+
+ for(i=0; i<64; i++){
+ if(!c->put_2tap_qpel_pixels_tab[0][i])
+ c->put_2tap_qpel_pixels_tab[0][i]= c->put_h264_qpel_pixels_tab[0][i];
+ if(!c->avg_2tap_qpel_pixels_tab[0][i])
+ c->avg_2tap_qpel_pixels_tab[0][i]= c->avg_h264_qpel_pixels_tab[0][i];
+ }
switch(c->idct_permutation_type){
case FF_NO_IDCT_PERM:
for(i=0; i<64; i++)
c->idct_permutation[i]= (i&0x24) | ((i&3)<<3) | ((i>>3)&3);
break;
+ case FF_SSE2_IDCT_PERM:
+ for(i=0; i<64; i++)
+ c->idct_permutation[i]= (i&0x38) | idct_sse2_row_perm[i&7];
+ break;
default:
av_log(avctx, AV_LOG_ERROR, "Internal error, IDCT permutation not set\n");
}