* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
+#include "libavutil/cpu.h"
#include "libavcodec/dsputil.h"
#include "libavcodec/h264data.h"
+#include "libavcodec/h264dsp.h"
-#include "gcc_fixes.h"
-
-#include "dsputil_ppc.h"
#include "dsputil_altivec.h"
#include "util_altivec.h"
#include "types_altivec.h"
#define OP_U8_ALTIVEC PUT_OP_U8_ALTIVEC
#define PREFIX_h264_chroma_mc8_altivec put_h264_chroma_mc8_altivec
+#define PREFIX_no_rnd_vc1_chroma_mc8_altivec put_no_rnd_vc1_chroma_mc8_altivec
#define PREFIX_h264_chroma_mc8_num altivec_put_h264_chroma_mc8_num
#define PREFIX_h264_qpel16_h_lowpass_altivec put_h264_qpel16_h_lowpass_altivec
#define PREFIX_h264_qpel16_h_lowpass_num altivec_put_h264_qpel16_h_lowpass_num
#include "h264_template_altivec.c"
#undef OP_U8_ALTIVEC
#undef PREFIX_h264_chroma_mc8_altivec
+#undef PREFIX_no_rnd_vc1_chroma_mc8_altivec
#undef PREFIX_h264_chroma_mc8_num
#undef PREFIX_h264_qpel16_h_lowpass_altivec
#undef PREFIX_h264_qpel16_h_lowpass_num
#define OP_U8_ALTIVEC AVG_OP_U8_ALTIVEC
#define PREFIX_h264_chroma_mc8_altivec avg_h264_chroma_mc8_altivec
+#define PREFIX_no_rnd_vc1_chroma_mc8_altivec avg_no_rnd_vc1_chroma_mc8_altivec
#define PREFIX_h264_chroma_mc8_num altivec_avg_h264_chroma_mc8_num
#define PREFIX_h264_qpel16_h_lowpass_altivec avg_h264_qpel16_h_lowpass_altivec
#define PREFIX_h264_qpel16_h_lowpass_num altivec_avg_h264_qpel16_h_lowpass_num
#include "h264_template_altivec.c"
#undef OP_U8_ALTIVEC
#undef PREFIX_h264_chroma_mc8_altivec
+#undef PREFIX_no_rnd_vc1_chroma_mc8_altivec
#undef PREFIX_h264_chroma_mc8_num
#undef PREFIX_h264_qpel16_h_lowpass_altivec
#undef PREFIX_h264_qpel16_h_lowpass_num
}\
\
static void OPNAME ## h264_qpel ## SIZE ## _mc10_ ## CODETYPE(uint8_t *dst, uint8_t *src, int stride){ \
- DECLARE_ALIGNED_16(uint8_t, half[SIZE*SIZE]);\
+ DECLARE_ALIGNED(16, uint8_t, half)[SIZE*SIZE];\
put_h264_qpel ## SIZE ## _h_lowpass_ ## CODETYPE(half, src, SIZE, stride);\
OPNAME ## pixels ## SIZE ## _l2_ ## CODETYPE(dst, src, half, stride, stride, SIZE);\
}\
}\
\
static void OPNAME ## h264_qpel ## SIZE ## _mc30_ ## CODETYPE(uint8_t *dst, uint8_t *src, int stride){\
- DECLARE_ALIGNED_16(uint8_t, half[SIZE*SIZE]);\
+ DECLARE_ALIGNED(16, uint8_t, half)[SIZE*SIZE];\
put_h264_qpel ## SIZE ## _h_lowpass_ ## CODETYPE(half, src, SIZE, stride);\
OPNAME ## pixels ## SIZE ## _l2_ ## CODETYPE(dst, src+1, half, stride, stride, SIZE);\
}\
\
static void OPNAME ## h264_qpel ## SIZE ## _mc01_ ## CODETYPE(uint8_t *dst, uint8_t *src, int stride){\
- DECLARE_ALIGNED_16(uint8_t, half[SIZE*SIZE]);\
+ DECLARE_ALIGNED(16, uint8_t, half)[SIZE*SIZE];\
put_h264_qpel ## SIZE ## _v_lowpass_ ## CODETYPE(half, src, SIZE, stride);\
OPNAME ## pixels ## SIZE ## _l2_ ## CODETYPE(dst, src, half, stride, stride, SIZE);\
}\
}\
\
static void OPNAME ## h264_qpel ## SIZE ## _mc03_ ## CODETYPE(uint8_t *dst, uint8_t *src, int stride){\
- DECLARE_ALIGNED_16(uint8_t, half[SIZE*SIZE]);\
+ DECLARE_ALIGNED(16, uint8_t, half)[SIZE*SIZE];\
put_h264_qpel ## SIZE ## _v_lowpass_ ## CODETYPE(half, src, SIZE, stride);\
OPNAME ## pixels ## SIZE ## _l2_ ## CODETYPE(dst, src+stride, half, stride, stride, SIZE);\
}\
\
static void OPNAME ## h264_qpel ## SIZE ## _mc11_ ## CODETYPE(uint8_t *dst, uint8_t *src, int stride){\
- DECLARE_ALIGNED_16(uint8_t, halfH[SIZE*SIZE]);\
- DECLARE_ALIGNED_16(uint8_t, halfV[SIZE*SIZE]);\
+ DECLARE_ALIGNED(16, uint8_t, halfH)[SIZE*SIZE];\
+ DECLARE_ALIGNED(16, uint8_t, halfV)[SIZE*SIZE];\
put_h264_qpel ## SIZE ## _h_lowpass_ ## CODETYPE(halfH, src, SIZE, stride);\
put_h264_qpel ## SIZE ## _v_lowpass_ ## CODETYPE(halfV, src, SIZE, stride);\
OPNAME ## pixels ## SIZE ## _l2_ ## CODETYPE(dst, halfH, halfV, stride, SIZE, SIZE);\
}\
\
static void OPNAME ## h264_qpel ## SIZE ## _mc31_ ## CODETYPE(uint8_t *dst, uint8_t *src, int stride){\
- DECLARE_ALIGNED_16(uint8_t, halfH[SIZE*SIZE]);\
- DECLARE_ALIGNED_16(uint8_t, halfV[SIZE*SIZE]);\
+ DECLARE_ALIGNED(16, uint8_t, halfH)[SIZE*SIZE];\
+ DECLARE_ALIGNED(16, uint8_t, halfV)[SIZE*SIZE];\
put_h264_qpel ## SIZE ## _h_lowpass_ ## CODETYPE(halfH, src, SIZE, stride);\
put_h264_qpel ## SIZE ## _v_lowpass_ ## CODETYPE(halfV, src+1, SIZE, stride);\
OPNAME ## pixels ## SIZE ## _l2_ ## CODETYPE(dst, halfH, halfV, stride, SIZE, SIZE);\
}\
\
static void OPNAME ## h264_qpel ## SIZE ## _mc13_ ## CODETYPE(uint8_t *dst, uint8_t *src, int stride){\
- DECLARE_ALIGNED_16(uint8_t, halfH[SIZE*SIZE]);\
- DECLARE_ALIGNED_16(uint8_t, halfV[SIZE*SIZE]);\
+ DECLARE_ALIGNED(16, uint8_t, halfH)[SIZE*SIZE];\
+ DECLARE_ALIGNED(16, uint8_t, halfV)[SIZE*SIZE];\
put_h264_qpel ## SIZE ## _h_lowpass_ ## CODETYPE(halfH, src + stride, SIZE, stride);\
put_h264_qpel ## SIZE ## _v_lowpass_ ## CODETYPE(halfV, src, SIZE, stride);\
OPNAME ## pixels ## SIZE ## _l2_ ## CODETYPE(dst, halfH, halfV, stride, SIZE, SIZE);\
}\
\
static void OPNAME ## h264_qpel ## SIZE ## _mc33_ ## CODETYPE(uint8_t *dst, uint8_t *src, int stride){\
- DECLARE_ALIGNED_16(uint8_t, halfH[SIZE*SIZE]);\
- DECLARE_ALIGNED_16(uint8_t, halfV[SIZE*SIZE]);\
+ DECLARE_ALIGNED(16, uint8_t, halfH)[SIZE*SIZE];\
+ DECLARE_ALIGNED(16, uint8_t, halfV)[SIZE*SIZE];\
put_h264_qpel ## SIZE ## _h_lowpass_ ## CODETYPE(halfH, src + stride, SIZE, stride);\
put_h264_qpel ## SIZE ## _v_lowpass_ ## CODETYPE(halfV, src+1, SIZE, stride);\
OPNAME ## pixels ## SIZE ## _l2_ ## CODETYPE(dst, halfH, halfV, stride, SIZE, SIZE);\
}\
\
static void OPNAME ## h264_qpel ## SIZE ## _mc22_ ## CODETYPE(uint8_t *dst, uint8_t *src, int stride){\
- DECLARE_ALIGNED_16(int16_t, tmp[SIZE*(SIZE+8)]);\
+ DECLARE_ALIGNED(16, int16_t, tmp)[SIZE*(SIZE+8)];\
OPNAME ## h264_qpel ## SIZE ## _hv_lowpass_ ## CODETYPE(dst, tmp, src, stride, SIZE, stride);\
}\
\
static void OPNAME ## h264_qpel ## SIZE ## _mc21_ ## CODETYPE(uint8_t *dst, uint8_t *src, int stride){\
- DECLARE_ALIGNED_16(uint8_t, halfH[SIZE*SIZE]);\
- DECLARE_ALIGNED_16(uint8_t, halfHV[SIZE*SIZE]);\
- DECLARE_ALIGNED_16(int16_t, tmp[SIZE*(SIZE+8)]);\
+ DECLARE_ALIGNED(16, uint8_t, halfH)[SIZE*SIZE];\
+ DECLARE_ALIGNED(16, uint8_t, halfHV)[SIZE*SIZE];\
+ DECLARE_ALIGNED(16, int16_t, tmp)[SIZE*(SIZE+8)];\
put_h264_qpel ## SIZE ## _h_lowpass_ ## CODETYPE(halfH, src, SIZE, stride);\
put_h264_qpel ## SIZE ## _hv_lowpass_ ## CODETYPE(halfHV, tmp, src, SIZE, SIZE, stride);\
OPNAME ## pixels ## SIZE ## _l2_ ## CODETYPE(dst, halfH, halfHV, stride, SIZE, SIZE);\
}\
\
static void OPNAME ## h264_qpel ## SIZE ## _mc23_ ## CODETYPE(uint8_t *dst, uint8_t *src, int stride){\
- DECLARE_ALIGNED_16(uint8_t, halfH[SIZE*SIZE]);\
- DECLARE_ALIGNED_16(uint8_t, halfHV[SIZE*SIZE]);\
- DECLARE_ALIGNED_16(int16_t, tmp[SIZE*(SIZE+8)]);\
+ DECLARE_ALIGNED(16, uint8_t, halfH)[SIZE*SIZE];\
+ DECLARE_ALIGNED(16, uint8_t, halfHV)[SIZE*SIZE];\
+ DECLARE_ALIGNED(16, int16_t, tmp)[SIZE*(SIZE+8)];\
put_h264_qpel ## SIZE ## _h_lowpass_ ## CODETYPE(halfH, src + stride, SIZE, stride);\
put_h264_qpel ## SIZE ## _hv_lowpass_ ## CODETYPE(halfHV, tmp, src, SIZE, SIZE, stride);\
OPNAME ## pixels ## SIZE ## _l2_ ## CODETYPE(dst, halfH, halfHV, stride, SIZE, SIZE);\
}\
\
static void OPNAME ## h264_qpel ## SIZE ## _mc12_ ## CODETYPE(uint8_t *dst, uint8_t *src, int stride){\
- DECLARE_ALIGNED_16(uint8_t, halfV[SIZE*SIZE]);\
- DECLARE_ALIGNED_16(uint8_t, halfHV[SIZE*SIZE]);\
- DECLARE_ALIGNED_16(int16_t, tmp[SIZE*(SIZE+8)]);\
+ DECLARE_ALIGNED(16, uint8_t, halfV)[SIZE*SIZE];\
+ DECLARE_ALIGNED(16, uint8_t, halfHV)[SIZE*SIZE];\
+ DECLARE_ALIGNED(16, int16_t, tmp)[SIZE*(SIZE+8)];\
put_h264_qpel ## SIZE ## _v_lowpass_ ## CODETYPE(halfV, src, SIZE, stride);\
put_h264_qpel ## SIZE ## _hv_lowpass_ ## CODETYPE(halfHV, tmp, src, SIZE, SIZE, stride);\
OPNAME ## pixels ## SIZE ## _l2_ ## CODETYPE(dst, halfV, halfHV, stride, SIZE, SIZE);\
}\
\
static void OPNAME ## h264_qpel ## SIZE ## _mc32_ ## CODETYPE(uint8_t *dst, uint8_t *src, int stride){\
- DECLARE_ALIGNED_16(uint8_t, halfV[SIZE*SIZE]);\
- DECLARE_ALIGNED_16(uint8_t, halfHV[SIZE*SIZE]);\
- DECLARE_ALIGNED_16(int16_t, tmp[SIZE*(SIZE+8)]);\
+ DECLARE_ALIGNED(16, uint8_t, halfV)[SIZE*SIZE];\
+ DECLARE_ALIGNED(16, uint8_t, halfHV)[SIZE*SIZE];\
+ DECLARE_ALIGNED(16, int16_t, tmp)[SIZE*(SIZE+8)];\
put_h264_qpel ## SIZE ## _v_lowpass_ ## CODETYPE(halfV, src+1, SIZE, stride);\
put_h264_qpel ## SIZE ## _hv_lowpass_ ## CODETYPE(halfHV, tmp, src, SIZE, SIZE, stride);\
OPNAME ## pixels ## SIZE ## _l2_ ## CODETYPE(dst, halfV, halfHV, stride, SIZE, SIZE);\
}\
-/* this code assume that stride % 16 == 0 */
-void put_no_rnd_h264_chroma_mc8_altivec(uint8_t * dst, uint8_t * src, int stride, int h, int x, int y) {
- DECLARE_ALIGNED_16(signed int, ABCD[4]) =
- {((8 - x) * (8 - y)),
- ((x) * (8 - y)),
- ((8 - x) * (y)),
- ((x) * (y))};
- register int i;
- vec_u8 fperm;
- const vec_s32 vABCD = vec_ld(0, ABCD);
- const vec_s16 vA = vec_splat((vec_s16)vABCD, 1);
- const vec_s16 vB = vec_splat((vec_s16)vABCD, 3);
- const vec_s16 vC = vec_splat((vec_s16)vABCD, 5);
- const vec_s16 vD = vec_splat((vec_s16)vABCD, 7);
- LOAD_ZERO;
- const vec_s16 v28ss = vec_sub(vec_sl(vec_splat_s16(1),vec_splat_u16(5)),vec_splat_s16(4));
- const vec_u16 v6us = vec_splat_u16(6);
- register int loadSecond = (((unsigned long)src) % 16) <= 7 ? 0 : 1;
- register int reallyBadAlign = (((unsigned long)src) % 16) == 15 ? 1 : 0;
-
- vec_u8 vsrcAuc, vsrcBuc, vsrcperm0, vsrcperm1;
- vec_u8 vsrc0uc, vsrc1uc;
- vec_s16 vsrc0ssH, vsrc1ssH;
- vec_u8 vsrcCuc, vsrc2uc, vsrc3uc;
- vec_s16 vsrc2ssH, vsrc3ssH, psum;
- vec_u8 vdst, ppsum, fsum;
-
- if (((unsigned long)dst) % 16 == 0) {
- fperm = (vec_u8){0x10, 0x11, 0x12, 0x13,
- 0x14, 0x15, 0x16, 0x17,
- 0x08, 0x09, 0x0A, 0x0B,
- 0x0C, 0x0D, 0x0E, 0x0F};
- } else {
- fperm = (vec_u8){0x00, 0x01, 0x02, 0x03,
- 0x04, 0x05, 0x06, 0x07,
- 0x18, 0x19, 0x1A, 0x1B,
- 0x1C, 0x1D, 0x1E, 0x1F};
- }
-
- vsrcAuc = vec_ld(0, src);
-
- if (loadSecond)
- vsrcBuc = vec_ld(16, src);
- vsrcperm0 = vec_lvsl(0, src);
- vsrcperm1 = vec_lvsl(1, src);
-
- vsrc0uc = vec_perm(vsrcAuc, vsrcBuc, vsrcperm0);
- if (reallyBadAlign)
- vsrc1uc = vsrcBuc;
- else
- vsrc1uc = vec_perm(vsrcAuc, vsrcBuc, vsrcperm1);
-
- vsrc0ssH = (vec_s16)vec_mergeh(zero_u8v, (vec_u8)vsrc0uc);
- vsrc1ssH = (vec_s16)vec_mergeh(zero_u8v, (vec_u8)vsrc1uc);
-
- if (!loadSecond) {// -> !reallyBadAlign
- for (i = 0 ; i < h ; i++) {
-
-
- vsrcCuc = vec_ld(stride + 0, src);
-
- vsrc2uc = vec_perm(vsrcCuc, vsrcCuc, vsrcperm0);
- vsrc3uc = vec_perm(vsrcCuc, vsrcCuc, vsrcperm1);
-
- vsrc2ssH = (vec_s16)vec_mergeh(zero_u8v, (vec_u8)vsrc2uc);
- vsrc3ssH = (vec_s16)vec_mergeh(zero_u8v, (vec_u8)vsrc3uc);
-
- psum = vec_mladd(vA, vsrc0ssH, vec_splat_s16(0));
- psum = vec_mladd(vB, vsrc1ssH, psum);
- psum = vec_mladd(vC, vsrc2ssH, psum);
- psum = vec_mladd(vD, vsrc3ssH, psum);
- psum = vec_add(v28ss, psum);
- psum = vec_sra(psum, v6us);
-
- vdst = vec_ld(0, dst);
- ppsum = (vec_u8)vec_packsu(psum, psum);
- fsum = vec_perm(vdst, ppsum, fperm);
-
- vec_st(fsum, 0, dst);
-
- vsrc0ssH = vsrc2ssH;
- vsrc1ssH = vsrc3ssH;
-
- dst += stride;
- src += stride;
- }
- } else {
- vec_u8 vsrcDuc;
- for (i = 0 ; i < h ; i++) {
- vsrcCuc = vec_ld(stride + 0, src);
- vsrcDuc = vec_ld(stride + 16, src);
-
- vsrc2uc = vec_perm(vsrcCuc, vsrcDuc, vsrcperm0);
- if (reallyBadAlign)
- vsrc3uc = vsrcDuc;
- else
- vsrc3uc = vec_perm(vsrcCuc, vsrcDuc, vsrcperm1);
-
- vsrc2ssH = (vec_s16)vec_mergeh(zero_u8v, (vec_u8)vsrc2uc);
- vsrc3ssH = (vec_s16)vec_mergeh(zero_u8v, (vec_u8)vsrc3uc);
-
- psum = vec_mladd(vA, vsrc0ssH, vec_splat_s16(0));
- psum = vec_mladd(vB, vsrc1ssH, psum);
- psum = vec_mladd(vC, vsrc2ssH, psum);
- psum = vec_mladd(vD, vsrc3ssH, psum);
- psum = vec_add(v28ss, psum);
- psum = vec_sr(psum, v6us);
-
- vdst = vec_ld(0, dst);
- ppsum = (vec_u8)vec_pack(psum, psum);
- fsum = vec_perm(vdst, ppsum, fperm);
-
- vec_st(fsum, 0, dst);
-
- vsrc0ssH = vsrc2ssH;
- vsrc1ssH = vsrc3ssH;
-
- dst += stride;
- src += stride;
- }
- }
-}
-
static inline void put_pixels16_l2_altivec( uint8_t * dst, const uint8_t * src1,
const uint8_t * src2, int dst_stride,
int src_stride1, int h)
vec_st( hv, 0, dest ); \
}
-void ff_h264_idct8_add_altivec( uint8_t *dst, DCTELEM *dct, int stride ) {
+static void ff_h264_idct8_add_altivec( uint8_t *dst, DCTELEM *dct, int stride ) {
vec_s16 s0, s1, s2, s3, s4, s5, s6, s7;
vec_s16 d0, d1, d2, d3, d4, d5, d6, d7;
vec_s16 idct0, idct1, idct2, idct3, idct4, idct5, idct6, idct7;
ALTIVEC_STORE_SUM_CLIP(&dst[7*stride], idct7, perm_ldv, perm_stv, sel);
}
-// TODO: implement this in AltiVec
-static void ff_h264_idct8_dc_add_altivec(uint8_t *dst, DCTELEM *block, int stride) {
- int i, j;
- uint8_t *cm = ff_cropTbl + MAX_NEG_CROP;
- int dc = (block[0] + 32) >> 6;
- for( j = 0; j < 8; j++ )
- {
- for( i = 0; i < 8; i++ )
- dst[i] = cm[ dst[i] + dc ];
- dst += stride;
- }
-}
+static av_always_inline void h264_idct_dc_add_internal(uint8_t *dst, DCTELEM *block, int stride, int size)
+{
+ vec_s16 dc16;
+ vec_u8 dcplus, dcminus, v0, v1, v2, v3, aligner;
+ LOAD_ZERO;
+ DECLARE_ALIGNED(16, int, dc);
+ int i;
+
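+    /* block[0] is rounded and stored to a 16-byte-aligned int, so vec_lde
+     * places it in the first word; splatting halfword 1 picks up its low
+     * 16 bits (big-endian). dcplus/dcminus hold the saturated +dc and -dc,
+     * letting a signed DC be applied to unsigned pixels with vec_adds
+     * followed by vec_subs. For 4x4 blocks only the first four bytes carry
+     * dc, so neighbouring pixels pass through unchanged. */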
+ dc = (block[0] + 32) >> 6;
+ dc16 = vec_splat((vec_s16) vec_lde(0, &dc), 1);
+
+ if (size == 4)
+ dc16 = vec_sld(dc16, zero_s16v, 8);
+ dcplus = vec_packsu(dc16, zero_s16v);
+ dcminus = vec_packsu(vec_sub(zero_s16v, dc16), zero_s16v);
+
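+    /* vec_ld/vec_st round the address down to 16 bytes, so rotate the dc
+     * bytes by dst's misalignment to land on the lanes the block occupies
+     * inside the aligned vector. */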
+ aligner = vec_lvsr(0, dst);
+ dcplus = vec_perm(dcplus, dcplus, aligner);
+ dcminus = vec_perm(dcminus, dcminus, aligner);
+
+ for (i = 0; i < size; i += 4) {
+ v0 = vec_ld(0, dst+0*stride);
+ v1 = vec_ld(0, dst+1*stride);
+ v2 = vec_ld(0, dst+2*stride);
+ v3 = vec_ld(0, dst+3*stride);
+
+ v0 = vec_adds(v0, dcplus);
+ v1 = vec_adds(v1, dcplus);
+ v2 = vec_adds(v2, dcplus);
+ v3 = vec_adds(v3, dcplus);
+
+ v0 = vec_subs(v0, dcminus);
+ v1 = vec_subs(v1, dcminus);
+ v2 = vec_subs(v2, dcminus);
+ v3 = vec_subs(v3, dcminus);
+
+ vec_st(v0, 0, dst+0*stride);
+ vec_st(v1, 0, dst+1*stride);
+ vec_st(v2, 0, dst+2*stride);
+ vec_st(v3, 0, dst+3*stride);
+
+ dst += 4*stride;
+ }
+}
+
+static void h264_idct_dc_add_altivec(uint8_t *dst, DCTELEM *block, int stride)
+{
+ h264_idct_dc_add_internal(dst, block, stride, 4);
+}
+
+static void ff_h264_idct8_dc_add_altivec(uint8_t *dst, DCTELEM *block, int stride)
+{
+ h264_idct_dc_add_internal(dst, block, stride, 8);
+}
+
+static void ff_h264_idct_add16_altivec(uint8_t *dst, const int *block_offset, DCTELEM *block, int stride, const uint8_t nnzc[6*8]){
+ int i;
+ for(i=0; i<16; i++){
+ int nnz = nnzc[ scan8[i] ];
+ if(nnz){
+ if(nnz==1 && block[i*16]) h264_idct_dc_add_altivec(dst + block_offset[i], block + i*16, stride);
+ else ff_h264_idct_add_altivec(dst + block_offset[i], block + i*16, stride);
+ }
+ }
+}
+
+static void ff_h264_idct_add16intra_altivec(uint8_t *dst, const int *block_offset, DCTELEM *block, int stride, const uint8_t nnzc[6*8]){
+ int i;
+ for(i=0; i<16; i++){
+ if(nnzc[ scan8[i] ]) ff_h264_idct_add_altivec(dst + block_offset[i], block + i*16, stride);
+ else if(block[i*16]) h264_idct_dc_add_altivec(dst + block_offset[i], block + i*16, stride);
+    }
+}
+
+static void ff_h264_idct_add8_altivec(uint8_t **dest, const int *block_offset, DCTELEM *block, int stride, const uint8_t nnzc[6*8]){
+ int i;
+ for(i=16; i<16+8; i++){
+ if(nnzc[ scan8[i] ])
+ ff_h264_idct_add_altivec(dest[(i&4)>>2] + block_offset[i], block + i*16, stride);
+ else if(block[i*16])
+ h264_idct_dc_add_altivec(dest[(i&4)>>2] + block_offset[i], block + i*16, stride);
+ }
+}
+
#define transpose4x16(r0, r1, r2, r3) { \
register vec_u8 r4; \
register vec_u8 r5; \
static inline void write16x4(uint8_t *dst, int dst_stride,
register vec_u8 r0, register vec_u8 r1,
register vec_u8 r2, register vec_u8 r3) {
- DECLARE_ALIGNED_16(unsigned char, result[64]);
+ DECLARE_ALIGNED(16, unsigned char, result)[64];
uint32_t *src_int = (uint32_t *)result, *dst_int = (uint32_t *)dst;
int int_dst_stride = dst_stride/4;
}
#define h264_loop_filter_luma_altivec(p2, p1, p0, q0, q1, q2, alpha, beta, tc0) { \
- DECLARE_ALIGNED_16(unsigned char, temp[16]); \
+ DECLARE_ALIGNED(16, unsigned char, temp)[16]; \
register vec_u8 alphavec; \
register vec_u8 betavec; \
register vec_u8 mask; \
write16x4(pix-2, stride, line1, line2, line3, line4);
}
+static av_always_inline
+void weight_h264_WxH_altivec(uint8_t *block, int stride, int log2_denom, int weight, int offset, int w, int h)
+{
+ int y, aligned;
+ vec_u8 vblock;
+ vec_s16 vtemp, vweight, voffset, v0, v1;
+ vec_u16 vlog2_denom;
+ DECLARE_ALIGNED(16, int32_t, temp)[4];
+ LOAD_ZERO;
+
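+    /* Pre-shift the offset and fold in the rounding constant for the final
+     * arithmetic shift by log2_denom. */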
+ offset <<= log2_denom;
+ if(log2_denom) offset += 1<<(log2_denom-1);
+ temp[0] = log2_denom;
+ temp[1] = weight;
+ temp[2] = offset;
+
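+    /* Stage the scalar parameters in an aligned array, load them as one
+     * vector and splat the low (odd-index, big-endian) halfword of each
+     * 32-bit slot. */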
+ vtemp = (vec_s16)vec_ld(0, temp);
+ vlog2_denom = (vec_u16)vec_splat(vtemp, 1);
+ vweight = vec_splat(vtemp, 3);
+ voffset = vec_splat(vtemp, 5);
+ aligned = !((unsigned long)block & 0xf);
+
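+    /* For w==8 the row occupies only half of the 16-byte vector: the high
+     * half (v0) when block is 16-byte aligned, the low half (v1) otherwise.
+     * Only that half is weighted; vec_packsu returns the other half's
+     * pixels unchanged. */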
+ for (y=0; y<h; y++) {
+ vblock = vec_ld(0, block);
+
+ v0 = (vec_s16)vec_mergeh(zero_u8v, vblock);
+ v1 = (vec_s16)vec_mergel(zero_u8v, vblock);
+
+ if (w == 16 || aligned) {
+ v0 = vec_mladd(v0, vweight, zero_s16v);
+ v0 = vec_adds(v0, voffset);
+ v0 = vec_sra(v0, vlog2_denom);
+ }
+ if (w == 16 || !aligned) {
+ v1 = vec_mladd(v1, vweight, zero_s16v);
+ v1 = vec_adds(v1, voffset);
+ v1 = vec_sra(v1, vlog2_denom);
+ }
+ vblock = vec_packsu(v0, v1);
+ vec_st(vblock, 0, block);
+
+ block += stride;
+ }
+}
+
+static av_always_inline
+void biweight_h264_WxH_altivec(uint8_t *dst, uint8_t *src, int stride, int log2_denom,
+ int weightd, int weights, int offset, int w, int h)
+{
+ int y, dst_aligned, src_aligned;
+ vec_u8 vsrc, vdst;
+ vec_s16 vtemp, vweights, vweightd, voffset, v0, v1, v2, v3;
+ vec_u16 vlog2_denom;
+ DECLARE_ALIGNED(16, int32_t, temp)[4];
+ LOAD_ZERO;
+
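+    /* Fold rounding into the offset (made odd and pre-shifted); the shift
+     * below uses log2_denom+1 to cover the bidirectional average. */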
+ offset = ((offset + 1) | 1) << log2_denom;
+ temp[0] = log2_denom+1;
+ temp[1] = weights;
+ temp[2] = weightd;
+ temp[3] = offset;
+
+ vtemp = (vec_s16)vec_ld(0, temp);
+ vlog2_denom = (vec_u16)vec_splat(vtemp, 1);
+ vweights = vec_splat(vtemp, 3);
+ vweightd = vec_splat(vtemp, 5);
+ voffset = vec_splat(vtemp, 7);
+ dst_aligned = !((unsigned long)dst & 0xf);
+ src_aligned = !((unsigned long)src & 0xf);
+
+ for (y=0; y<h; y++) {
+ vdst = vec_ld(0, dst);
+ vsrc = vec_ld(0, src);
+
+ v0 = (vec_s16)vec_mergeh(zero_u8v, vdst);
+ v1 = (vec_s16)vec_mergel(zero_u8v, vdst);
+ v2 = (vec_s16)vec_mergeh(zero_u8v, vsrc);
+ v3 = (vec_s16)vec_mergel(zero_u8v, vsrc);
+
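+        /* With w==8, src and dst may disagree on alignment: duplicate the
+         * half of src that holds the row so it pairs with whichever half
+         * of dst gets processed below. */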
+ if (w == 8) {
+ if (src_aligned)
+ v3 = v2;
+ else
+ v2 = v3;
+ }
+
+ if (w == 16 || dst_aligned) {
+ v0 = vec_mladd(v0, vweightd, zero_s16v);
+ v2 = vec_mladd(v2, vweights, zero_s16v);
+
+ v0 = vec_adds(v0, voffset);
+ v0 = vec_adds(v0, v2);
+ v0 = vec_sra(v0, vlog2_denom);
+ }
+ if (w == 16 || !dst_aligned) {
+ v1 = vec_mladd(v1, vweightd, zero_s16v);
+ v3 = vec_mladd(v3, vweights, zero_s16v);
+
+ v1 = vec_adds(v1, voffset);
+ v1 = vec_adds(v1, v3);
+ v1 = vec_sra(v1, vlog2_denom);
+ }
+ vdst = vec_packsu(v0, v1);
+ vec_st(vdst, 0, dst);
+
+ dst += stride;
+ src += stride;
+ }
+}
+
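+/* Instantiate the fixed-size wrappers the weight/biweight function tables
+ * expect. */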
+#define H264_WEIGHT(W,H) \
+static void ff_weight_h264_pixels ## W ## x ## H ## _altivec(uint8_t *block, int stride, int log2_denom, int weight, int offset){ \
+ weight_h264_WxH_altivec(block, stride, log2_denom, weight, offset, W, H); \
+}\
+static void ff_biweight_h264_pixels ## W ## x ## H ## _altivec(uint8_t *dst, uint8_t *src, int stride, int log2_denom, int weightd, int weights, int offset){ \
+ biweight_h264_WxH_altivec(dst, src, stride, log2_denom, weightd, weights, offset, W, H); \
+}
+
+H264_WEIGHT(16,16)
+H264_WEIGHT(16, 8)
+H264_WEIGHT( 8,16)
+H264_WEIGHT( 8, 8)
+H264_WEIGHT( 8, 4)
+
void dsputil_h264_init_ppc(DSPContext* c, AVCodecContext *avctx) {
- if (has_altivec()) {
+ if (av_get_cpu_flags() & AV_CPU_FLAG_ALTIVEC) {
c->put_h264_chroma_pixels_tab[0] = put_h264_chroma_mc8_altivec;
- c->put_no_rnd_h264_chroma_pixels_tab[0] = put_no_rnd_h264_chroma_mc8_altivec;
c->avg_h264_chroma_pixels_tab[0] = avg_h264_chroma_mc8_altivec;
-/* ff_h264_idct_add_altivec may be re-enabled once AltiVec versions of
- h264_idct_add16, h264_idct_add16intra, h264_idct_add8 are implemented
- c->h264_idct_add = ff_h264_idct_add_altivec;
-*/
- c->h264_idct8_add = ff_h264_idct8_add_altivec;
- c->h264_idct8_add4 = ff_h264_idct8_add4_altivec;
- c->h264_v_loop_filter_luma= h264_v_loop_filter_luma_altivec;
- c->h264_h_loop_filter_luma= h264_h_loop_filter_luma_altivec;
+ c->put_no_rnd_vc1_chroma_pixels_tab[0] = put_no_rnd_vc1_chroma_mc8_altivec;
+ c->avg_no_rnd_vc1_chroma_pixels_tab[0] = avg_no_rnd_vc1_chroma_mc8_altivec;
#define dspfunc(PFX, IDX, NUM) \
c->PFX ## _pixels_tab[IDX][ 0] = PFX ## NUM ## _mc00_altivec; \
#undef dspfunc
}
}
+
+void ff_h264dsp_init_ppc(H264DSPContext *c)
+{
+ if (av_get_cpu_flags() & AV_CPU_FLAG_ALTIVEC) {
+ c->h264_idct_add = ff_h264_idct_add_altivec;
+ c->h264_idct_add8 = ff_h264_idct_add8_altivec;
+ c->h264_idct_add16 = ff_h264_idct_add16_altivec;
+ c->h264_idct_add16intra = ff_h264_idct_add16intra_altivec;
+ c->h264_idct_dc_add= h264_idct_dc_add_altivec;
+ c->h264_idct8_dc_add = ff_h264_idct8_dc_add_altivec;
+ c->h264_idct8_add = ff_h264_idct8_add_altivec;
+ c->h264_idct8_add4 = ff_h264_idct8_add4_altivec;
+ c->h264_v_loop_filter_luma= h264_v_loop_filter_luma_altivec;
+ c->h264_h_loop_filter_luma= h264_h_loop_filter_luma_altivec;
+
+ c->weight_h264_pixels_tab[0] = ff_weight_h264_pixels16x16_altivec;
+ c->weight_h264_pixels_tab[1] = ff_weight_h264_pixels16x8_altivec;
+ c->weight_h264_pixels_tab[2] = ff_weight_h264_pixels8x16_altivec;
+ c->weight_h264_pixels_tab[3] = ff_weight_h264_pixels8x8_altivec;
+ c->weight_h264_pixels_tab[4] = ff_weight_h264_pixels8x4_altivec;
+ c->biweight_h264_pixels_tab[0] = ff_biweight_h264_pixels16x16_altivec;
+ c->biweight_h264_pixels_tab[1] = ff_biweight_h264_pixels16x8_altivec;
+ c->biweight_h264_pixels_tab[2] = ff_biweight_h264_pixels8x16_altivec;
+ c->biweight_h264_pixels_tab[3] = ff_biweight_h264_pixels8x8_altivec;
+ c->biweight_h264_pixels_tab[4] = ff_biweight_h264_pixels8x4_altivec;
+ }
+}