#include "dsputil_mmx.h"
DECLARE_ALIGNED(8, static const uint64_t, ff_pb_3_1 ) = 0x0103010301030103ULL;
-DECLARE_ALIGNED(8, static const uint64_t, ff_pb_7_3 ) = 0x0307030703070307ULL;
/***********************************/
/* IDCT */
/***********************************/
/* deblocking */
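+/* One pass of h264_loop_filter_strength over all edges in a given
+ * direction.  b_idx advances in bytes (8 per edge), so the caller passes
+ * edges and step pre-scaled by 8, while mask_mv is scaled below; d_idx is
+ * the constant offset from a block to its neighbour, which lets the
+ * addressing be folded into compile-time immediates ("i" constraints). */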
+#define h264_loop_filter_strength_iteration_mmx2(bS, nnz, ref, mv, bidir, edges, step, mask_mv, dir, d_idx, mask_dir) \
+ do { \
+ x86_reg b_idx; \
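+ /* b_idx counts in units of 8 bytes, so scale the edge mask to match */ \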
+ mask_mv <<= 3; \
+ for( b_idx=0; b_idx<edges; b_idx+=step ) { \
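+ /* dir==1 (mask_dir==0) clears the previous edge's ref/mv result; \
+ * dir==0 keeps mm0, matching the pand with mask_dir it replaces */ \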
+ if (!mask_dir) \
+ __asm__ volatile( \
+ "pxor %%mm0, %%mm0 \n" \
+ :: \
+ ); \
+ if(!(mask_mv & b_idx)) { \
+ if(bidir) { \
+ __asm__ volatile( \
+ "movd %a3(%0,%2), %%mm2 \n" \
+ "punpckldq %a4(%0,%2), %%mm2 \n" /* { ref0[bn], ref1[bn] } */ \
+ "pshufw $0x44, 12(%0,%2), %%mm0 \n" /* { ref0[b], ref0[b] } */ \
+ "pshufw $0x44, 52(%0,%2), %%mm1 \n" /* { ref1[b], ref1[b] } */ \
+ "pshufw $0x4E, %%mm2, %%mm3 \n" \
+ "psubb %%mm2, %%mm0 \n" /* { ref0[b]!=ref0[bn], ref0[b]!=ref1[bn] } */ \
+ "psubb %%mm3, %%mm1 \n" /* { ref1[b]!=ref1[bn], ref1[b]!=ref0[bn] } */ \
+ \
+ "por %%mm1, %%mm0 \n" \
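+ /* bn's list0 mv vs b's list0 and list1 mvs */ \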
+ "movq %a5(%1,%2,4), %%mm1 \n" \
+ "movq %a6(%1,%2,4), %%mm2 \n" \
+ "movq %%mm1, %%mm3 \n" \
+ "movq %%mm2, %%mm4 \n" \
+ "psubw 48(%1,%2,4), %%mm1 \n" \
+ "psubw 56(%1,%2,4), %%mm2 \n" \
+ "psubw 208(%1,%2,4), %%mm3 \n" \
+ "psubw 216(%1,%2,4), %%mm4 \n" \
+ "packsswb %%mm2, %%mm1 \n" \
+ "packsswb %%mm4, %%mm3 \n" \
+ "paddb %%mm6, %%mm1 \n" \
+ "paddb %%mm6, %%mm3 \n" \
+ "psubusb %%mm5, %%mm1 \n" /* abs(mv[b] - mv[bn]) >= limit */ \
+ "psubusb %%mm5, %%mm3 \n" \
+ "packsswb %%mm3, %%mm1 \n" \
+ \
+ "por %%mm1, %%mm0 \n" \
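+ /* bn's list1 mv vs b's list0 and list1 mvs */ \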
+ "movq %a7(%1,%2,4), %%mm1 \n" \
+ "movq %a8(%1,%2,4), %%mm2 \n" \
+ "movq %%mm1, %%mm3 \n" \
+ "movq %%mm2, %%mm4 \n" \
+ "psubw 48(%1,%2,4), %%mm1 \n" \
+ "psubw 56(%1,%2,4), %%mm2 \n" \
+ "psubw 208(%1,%2,4), %%mm3 \n" \
+ "psubw 216(%1,%2,4), %%mm4 \n" \
+ "packsswb %%mm2, %%mm1 \n" \
+ "packsswb %%mm4, %%mm3 \n" \
+ "paddb %%mm6, %%mm1 \n" \
+ "paddb %%mm6, %%mm3 \n" \
+ "psubusb %%mm5, %%mm1 \n" /* abs(mv[b] - mv[bn]) >= limit */ \
+ "psubusb %%mm5, %%mm3 \n" \
+ "packsswb %%mm3, %%mm1 \n" \
+ \
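+ /* fold in the list1 result, then take the min over the straight \
+ * and crossed ref pairings: bidir blocks match if either pairing does */ \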
+ "pshufw $0x4E, %%mm1, %%mm1 \n" \
+ "por %%mm1, %%mm0 \n" \
+ "pshufw $0x4E, %%mm0, %%mm1 \n" \
+ "pminub %%mm1, %%mm0 \n" \
+ ::"r"(ref), \
+ "r"(mv), \
+ "r"(b_idx), \
+ "i"(d_idx+12), \
+ "i"(d_idx+52), \
+ "i"(d_idx*4+48), \
+ "i"(d_idx*4+56), \
+ "i"(d_idx*4+208), \
+ "i"(d_idx*4+216) \
+ ); \
+ } else { \
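+ /* unidirectional: a single reference and mv pair per block */ \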
+ __asm__ volatile( \
+ "movd 12(%0,%2), %%mm0 \n" \
+ "psubb %a3(%0,%2), %%mm0 \n" /* ref[b] != ref[bn] */ \
+ "movq 48(%1,%2,4), %%mm1 \n" \
+ "movq 56(%1,%2,4), %%mm2 \n" \
+ "psubw %a4(%1,%2,4), %%mm1 \n" \
+ "psubw %a5(%1,%2,4), %%mm2 \n" \
+ "packsswb %%mm2, %%mm1 \n" \
+ "paddb %%mm6, %%mm1 \n" \
+ "psubusb %%mm5, %%mm1 \n" /* abs(mv[b] - mv[bn]) >= limit */ \
+ "packsswb %%mm1, %%mm1 \n" \
+ "por %%mm1, %%mm0 \n" \
+ ::"r"(ref), \
+ "r"(mv), \
+ "r"(b_idx), \
+ "i"(d_idx+12), \
+ "i"(d_idx*4+48), \
+ "i"(d_idx*4+56) \
+ ); \
+ } \
+ } \
+ __asm__ volatile( \
+ "movd 12(%0,%1), %%mm1 \n" \
+ "por %a2(%0,%1), %%mm1 \n" /* nnz[b] || nnz[bn] */ \
+ ::"r"(nnz), \
+ "r"(b_idx), \
+ "i"(d_idx+12) \
+ ); \
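+ /* bS = 2 if either block has nonzero coefficients, \
+ * else 1 on a ref/mv mismatch, else 0 */ \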
+ __asm__ volatile( \
+ "pminub %%mm7, %%mm1 \n" \
+ "pminub %%mm7, %%mm0 \n" \
+ "psllw $1, %%mm1 \n" \
+ "pxor %%mm2, %%mm2 \n" \
+ "pmaxub %%mm0, %%mm1 \n" \
+ "punpcklbw %%mm2, %%mm1 \n" \
+ "movq %%mm1, %a1(%0,%2) \n" \
+ ::"r"(bS), \
+ "i"(32*dir), \
+ "r"(b_idx) \
+ :"memory" \
+ ); \
+ } \
+ } while (0)
+
static void h264_loop_filter_strength_mmx2( int16_t bS[2][4][4], uint8_t nnz[40], int8_t ref[2][40], int16_t mv[2][40][2],
int bidir, int edges, int step, int mask_mv0, int mask_mv1, int field ) {
- int dir;
__asm__ volatile(
"movq %0, %%mm7 \n"
"movq %1, %%mm6 \n"
// could do a special case for dir==0 && edges==1, but it only reduces the
// average filter time by 1.2%
- for( dir=1; dir>=0; dir-- ) {
- const x86_reg d_idx = dir ? -8 : -1;
- const int mask_mv = dir ? mask_mv1 : mask_mv0;
- DECLARE_ALIGNED(8, const uint64_t, mask_dir) = dir ? 0 : 0xffffffffffffffffULL;
- int b_idx, edge;
- for( b_idx=12, edge=0; edge<edges; edge+=step, b_idx+=8*step ) {
- __asm__ volatile(
- "pand %0, %%mm0 \n\t"
- ::"m"(mask_dir)
- );
- if(!(mask_mv & edge)) {
- if(bidir) {
- __asm__ volatile(
- "movd (%1,%0), %%mm2 \n"
- "punpckldq 40(%1,%0), %%mm2 \n" // { ref0[bn], ref1[bn] }
- "pshufw $0x44, (%1), %%mm0 \n" // { ref0[b], ref0[b] }
- "pshufw $0x44, 40(%1), %%mm1 \n" // { ref1[b], ref1[b] }
- "pshufw $0x4E, %%mm2, %%mm3 \n"
- "psubb %%mm2, %%mm0 \n" // { ref0[b]!=ref0[bn], ref0[b]!=ref1[bn] }
- "psubb %%mm3, %%mm1 \n" // { ref1[b]!=ref1[bn], ref1[b]!=ref0[bn] }
- "1: \n"
- "por %%mm1, %%mm0 \n"
- "movq (%2,%0,4), %%mm1 \n"
- "movq 8(%2,%0,4), %%mm2 \n"
- "movq %%mm1, %%mm3 \n"
- "movq %%mm2, %%mm4 \n"
- "psubw (%2), %%mm1 \n"
- "psubw 8(%2), %%mm2 \n"
- "psubw 160(%2), %%mm3 \n"
- "psubw 168(%2), %%mm4 \n"
- "packsswb %%mm2, %%mm1 \n"
- "packsswb %%mm4, %%mm3 \n"
- "paddb %%mm6, %%mm1 \n"
- "paddb %%mm6, %%mm3 \n"
- "psubusb %%mm5, %%mm1 \n" // abs(mv[b] - mv[bn]) >= limit
- "psubusb %%mm5, %%mm3 \n"
- "packsswb %%mm3, %%mm1 \n"
- "add $40, %0 \n"
- "cmp $40, %0 \n"
- "jl 1b \n"
- "sub $80, %0 \n"
- "pshufw $0x4E, %%mm1, %%mm1 \n"
- "por %%mm1, %%mm0 \n"
- "pshufw $0x4E, %%mm0, %%mm1 \n"
- "pminub %%mm1, %%mm0 \n"
- ::"r"(d_idx),
- "r"(ref[0]+b_idx),
- "r"(mv[0]+b_idx)
- );
- } else {
- __asm__ volatile(
- "movd (%1), %%mm0 \n"
- "psubb (%1,%0), %%mm0 \n" // ref[b] != ref[bn]
- "movq (%2), %%mm1 \n"
- "movq 8(%2), %%mm2 \n"
- "psubw (%2,%0,4), %%mm1 \n"
- "psubw 8(%2,%0,4), %%mm2 \n"
- "packsswb %%mm2, %%mm1 \n"
- "paddb %%mm6, %%mm1 \n"
- "psubusb %%mm5, %%mm1 \n" // abs(mv[b] - mv[bn]) >= limit
- "packsswb %%mm1, %%mm1 \n"
- "por %%mm1, %%mm0 \n"
- ::"r"(d_idx),
- "r"(ref[0]+b_idx),
- "r"(mv[0]+b_idx)
- );
- }
- }
- __asm__ volatile(
- "movd %0, %%mm1 \n"
- "por %1, %%mm1 \n" // nnz[b] || nnz[bn]
- ::"m"(nnz[b_idx]),
- "m"(nnz[b_idx+d_idx])
- );
- __asm__ volatile(
- "pminub %%mm7, %%mm1 \n"
- "pminub %%mm7, %%mm0 \n"
- "psllw $1, %%mm1 \n"
- "pxor %%mm2, %%mm2 \n"
- "pmaxub %%mm0, %%mm1 \n"
- "punpcklbw %%mm2, %%mm1 \n"
- "movq %%mm1, %0 \n"
- :"=m"(*bS[dir][edge])
- ::"memory"
- );
- }
- edges = 4;
- step = 1;
- }
+ step <<= 3;
+ edges <<= 3;
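+ /* dir==1 first (d_idx=-8) with the caller's edges/step, then dir==0
+ (d_idx=-1) over 4 edges of step 1, i.e. 32 and 8 once pre-scaled */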
+ h264_loop_filter_strength_iteration_mmx2(bS, nnz, ref, mv, bidir, edges, step, mask_mv1, 1, -8, 0);
+ h264_loop_filter_strength_iteration_mmx2(bS, nnz, ref, mv, bidir, 32, 8, mask_mv0, 0, -1, -1);
+
__asm__ volatile(
"movq (%0), %%mm0 \n\t"
"movq 8(%0), %%mm1 \n\t"
c->biweight_h264_pixels_tab[3]= ff_h264_biweight_8x8_sse2;
c->biweight_h264_pixels_tab[4]= ff_h264_biweight_8x4_sse2;
+#if HAVE_ALIGNED_STACK
c->h264_v_loop_filter_luma = ff_x264_deblock_v_luma_sse2;
c->h264_h_loop_filter_luma = ff_x264_deblock_h_luma_sse2;
c->h264_v_loop_filter_luma_intra = ff_x264_deblock_v_luma_intra_sse2;
c->h264_h_loop_filter_luma_intra = ff_x264_deblock_h_luma_intra_sse2;
+#endif
c->h264_idct_add16 = ff_h264_idct_add16_sse2;
c->h264_idct_add8 = ff_h264_idct_add8_sse2;