git.sesse.net Git - ffmpeg/blobdiff - libavcodec/h264.c
refill cabac variables in 16bit steps, 3% faster get_cabac()
index 962d0b6325a5cf64f8f3c34715662a696c6e5626..6a308a6a8fa441e01aa05306b84f8998b9a4cf0e 100644 (file)
@@ -2,18 +2,20 @@
  * H.26L/H.264/AVC/JVT/14496-10/... encoder/decoder
  * Copyright (c) 2003 Michael Niedermayer <michaelni@gmx.at>
  *
- * This library is free software; you can redistribute it and/or
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
  * modify it under the terms of the GNU Lesser General Public
  * License as published by the Free Software Foundation; either
- * version 2 of the License, or (at your option) any later version.
+ * version 2.1 of the License, or (at your option) any later version.
  *
- * This library is distributed in the hope that it will be useful,
+ * FFmpeg is distributed in the hope that it will be useful,
  * but WITHOUT ANY WARRANTY; without even the implied warranty of
  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
  * Lesser General Public License for more details.
  *
  * You should have received a copy of the GNU Lesser General Public
- * License along with this library; if not, write to the Free Software
+ * License along with FFmpeg; if not, write to the Free Software
  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
  *
  */
@@ -33,7 +35,7 @@
 
 #include "cabac.h"
 
-#undef NDEBUG
+//#undef NDEBUG
 #include <assert.h>
 
 #define interlaced_dct interlaced_dct_is_a_bad_name
 
 #define MAX_MMCO_COUNT 66
 
+/* Compiling in interlaced support reduces the speed
+ * of progressive decoding by about 2%. */
+#define ALLOW_INTERLACE
+
+#ifdef ALLOW_INTERLACE
+#define MB_MBAFF h->mb_mbaff
+#define MB_FIELD h->mb_field_decoding_flag
+#define FRAME_MBAFF h->mb_aff_frame
+#else
+#define MB_MBAFF 0
+#define MB_FIELD 0
+#define FRAME_MBAFF 0
+#undef  IS_INTERLACED
+#define IS_INTERLACED(mb_type) 0
+#endif
+
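A minimal standalone sketch (not part of the patch; the file name and flag variable are illustrative) of why the ALLOW_INTERLACE switch matters: with the macro undefined, MB_FIELD and FRAME_MBAFF collapse to the constant 0, so every interlaced-only branch is removed by constant folding, which is where the roughly 2% progressive-decoding speedup comes from.

    /* interlace_demo.c - compile with and without -DALLOW_INTERLACE */
    #include <stdio.h>

    #ifdef ALLOW_INTERLACE
    #define FRAME_MBAFF frame_mbaff   /* runtime flag, as in the real decoder */
    #else
    #define FRAME_MBAFF 0             /* the branch below becomes dead code   */
    #endif

    int main(void){
        int frame_mbaff = 0;
        (void)frame_mbaff;
        if(FRAME_MBAFF)
            printf("interlaced path compiled in\n");
        else
            printf("progressive-only path\n");
        return 0;
    }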
 /**
  * Sequence parameter set
  */
@@ -173,7 +191,8 @@ typedef struct H264Context{
 
     int chroma_qp; //QPc
 
-    int prev_mb_skipped; //FIXME remove (IMHO not used)
+    int prev_mb_skipped;
+    int next_mb_skipped;
 
     //prediction stuff
     int chroma_pred_mode;
@@ -231,6 +250,12 @@ typedef struct H264Context{
     int b_stride; //FIXME use s->b4_stride
     int b8_stride;
 
+    int mb_linesize;   ///< may be equal to s->linesize or s->linesize*2, for mbaff
+    int mb_uvlinesize;
+
+    int emu_edge_width;
+    int emu_edge_height;
+
     int halfpel_flag;
     int thirdpel_flag;
 
@@ -254,13 +279,14 @@ typedef struct H264Context{
 
     int slice_num;
     uint8_t *slice_table_base;
-    uint8_t *slice_table;      ///< slice_table_base + mb_stride + 1
+    uint8_t *slice_table;      ///< slice_table_base + 2*mb_stride + 1
     int slice_type;
     int slice_type_fixed;
 
     //interlacing specific flags
     int mb_aff_frame;
     int mb_field_decoding_flag;
+    int mb_mbaff;              ///< mb_aff_frame && mb_field_decoding_flag
 
     int sub_mb_type[4];
 
@@ -291,11 +317,11 @@ typedef struct H264Context{
     int use_weight_chroma;
     int luma_log2_weight_denom;
     int chroma_log2_weight_denom;
-    int luma_weight[2][16];
-    int luma_offset[2][16];
-    int chroma_weight[2][16][2];
-    int chroma_offset[2][16][2];
-    int implicit_weight[16][16];
+    int luma_weight[2][48];
+    int luma_offset[2][48];
+    int chroma_weight[2][48][2];
+    int chroma_offset[2][48][2];
+    int implicit_weight[48][48];
 
     //deblock
     int deblocking_filter;         ///< disable_deblocking_filter_idc with 1<->0
@@ -306,17 +332,18 @@ typedef struct H264Context{
 
     int direct_spatial_mv_pred;
     int dist_scale_factor[16];
+    int dist_scale_factor_field[32];
     int map_col_to_list0[2][16];
+    int map_col_to_list0_field[2][32];
 
     /**
      * num_ref_idx_l0/1_active_minus1 + 1
      */
-    int ref_count[2];// FIXME split for AFF
+    int ref_count[2];            ///< counts frames or fields, depending on current mb mode
     Picture *short_ref[32];
     Picture *long_ref[32];
     Picture default_ref_list[2][32];
-    Picture ref_list[2][32]; //FIXME size?
-    Picture field_ref_list[2][32]; //FIXME size?
+    Picture ref_list[2][48];     ///< 0..15: frame refs, 16..47: mbaff field refs
     Picture *delayed_pic[16]; //FIXME size?
     Picture *delayed_output_pic;
 
@@ -346,6 +373,7 @@ typedef struct H264Context{
 
     /* 0x100 -> non null luma_dc, 0x80/0x40 -> non null chroma_dc (cb/cr), 0x?0 -> chroma_cbp(0,1,2), 0x0? luma_cbp */
     uint16_t     *cbp_table;
+    int cbp;
     int top_cbp;
     int left_cbp;
     /* chroma_pred_mode for i4x4 or i16x16, else 0 */
@@ -357,9 +385,17 @@ typedef struct H264Context{
     uint8_t     direct_cache[5*8];
 
     uint8_t zigzag_scan[16];
+    uint8_t zigzag_scan8x8[64];
+    uint8_t zigzag_scan8x8_cavlc[64];
     uint8_t field_scan[16];
+    uint8_t field_scan8x8[64];
+    uint8_t field_scan8x8_cavlc[64];
     const uint8_t *zigzag_scan_q0;
+    const uint8_t *zigzag_scan8x8_q0;
+    const uint8_t *zigzag_scan8x8_cavlc_q0;
     const uint8_t *field_scan_q0;
+    const uint8_t *field_scan8x8_q0;
+    const uint8_t *field_scan8x8_cavlc_q0;
 
     int x264_build;
 }H264Context;
@@ -376,6 +412,7 @@ static VLC run7_vlc;
 static void svq3_luma_dc_dequant_idct_c(DCTELEM *block, int qp);
 static void svq3_add_idct_c(uint8_t *dst, DCTELEM *block, int stride, int qp, int dc);
 static void filter_mb( H264Context *h, int mb_x, int mb_y, uint8_t *img_y, uint8_t *img_cb, uint8_t *img_cr, unsigned int linesize, unsigned int uvlinesize);
+static void filter_mb_fast( H264Context *h, int mb_x, int mb_y, uint8_t *img_y, uint8_t *img_cb, uint8_t *img_cr, unsigned int linesize, unsigned int uvlinesize);
 
 static always_inline uint32_t pack16to32(int a, int b){
 #ifdef WORDS_BIGENDIAN
@@ -394,60 +431,83 @@ static always_inline uint32_t pack16to32(int a, int b){
 static always_inline void fill_rectangle(void *vp, int w, int h, int stride, uint32_t val, int size){
     uint8_t *p= (uint8_t*)vp;
     assert(size==1 || size==4);
+    assert(w<=4);
 
     w      *= size;
     stride *= size;
 
     assert((((long)vp)&(FFMIN(w, STRIDE_ALIGN)-1)) == 0);
     assert((stride&(w-1))==0);
-//FIXME check what gcc generates for 64 bit on x86 and possibly write a 32 bit ver of it
-    if(w==2 && h==2){
-        *(uint16_t*)(p + 0)=
-        *(uint16_t*)(p + stride)= size==4 ? val : val*0x0101;
-    }else if(w==2 && h==4){
-        *(uint16_t*)(p + 0*stride)=
-        *(uint16_t*)(p + 1*stride)=
+    if(w==2){
+        const uint16_t v= size==4 ? val : val*0x0101;
+        *(uint16_t*)(p + 0*stride)= v;
+        if(h==1) return;
+        *(uint16_t*)(p + 1*stride)= v;
+        if(h==2) return;
         *(uint16_t*)(p + 2*stride)=
-        *(uint16_t*)(p + 3*stride)= size==4 ? val : val*0x0101;
-    }else if(w==4 && h==1){
-        *(uint32_t*)(p + 0*stride)= size==4 ? val : val*0x01010101;
-    }else if(w==4 && h==2){
-        *(uint32_t*)(p + 0*stride)=
-        *(uint32_t*)(p + 1*stride)= size==4 ? val : val*0x01010101;
-    }else if(w==4 && h==4){
-        *(uint32_t*)(p + 0*stride)=
-        *(uint32_t*)(p + 1*stride)=
+        *(uint16_t*)(p + 3*stride)= v;
+    }else if(w==4){
+        const uint32_t v= size==4 ? val : val*0x01010101;
+        *(uint32_t*)(p + 0*stride)= v;
+        if(h==1) return;
+        *(uint32_t*)(p + 1*stride)= v;
+        if(h==2) return;
         *(uint32_t*)(p + 2*stride)=
-        *(uint32_t*)(p + 3*stride)= size==4 ? val : val*0x01010101;
-    }else if(w==8 && h==1){
-        *(uint32_t*)(p + 0)=
-        *(uint32_t*)(p + 4)= size==4 ? val : val*0x01010101;
-    }else if(w==8 && h==2){
-        *(uint32_t*)(p + 0 + 0*stride)=
-        *(uint32_t*)(p + 4 + 0*stride)=
-        *(uint32_t*)(p + 0 + 1*stride)=
-        *(uint32_t*)(p + 4 + 1*stride)=  size==4 ? val : val*0x01010101;
-    }else if(w==8 && h==4){
-        *(uint64_t*)(p + 0*stride)=
-        *(uint64_t*)(p + 1*stride)=
+        *(uint32_t*)(p + 3*stride)= v;
+    }else if(w==8){
+    //gcc can't optimize 64bit math on x86_32
+#if defined(ARCH_X86_64) || (defined(MP_WORDSIZE) && MP_WORDSIZE >= 64)
+        const uint64_t v= val*0x0100000001ULL;
+        *(uint64_t*)(p + 0*stride)= v;
+        if(h==1) return;
+        *(uint64_t*)(p + 1*stride)= v;
+        if(h==2) return;
         *(uint64_t*)(p + 2*stride)=
-        *(uint64_t*)(p + 3*stride)= size==4 ? val*0x0100000001ULL : val*0x0101010101010101ULL;
-    }else if(w==16 && h==2){
-        *(uint64_t*)(p + 0+0*stride)=
-        *(uint64_t*)(p + 8+0*stride)=
-        *(uint64_t*)(p + 0+1*stride)=
-        *(uint64_t*)(p + 8+1*stride)= size==4 ? val*0x0100000001ULL : val*0x0101010101010101ULL;
-    }else if(w==16 && h==4){
+        *(uint64_t*)(p + 3*stride)= v;
+    }else if(w==16){
+        const uint64_t v= val*0x0100000001ULL;
         *(uint64_t*)(p + 0+0*stride)=
         *(uint64_t*)(p + 8+0*stride)=
         *(uint64_t*)(p + 0+1*stride)=
-        *(uint64_t*)(p + 8+1*stride)=
+        *(uint64_t*)(p + 8+1*stride)= v;
+        if(h==2) return;
         *(uint64_t*)(p + 0+2*stride)=
         *(uint64_t*)(p + 8+2*stride)=
         *(uint64_t*)(p + 0+3*stride)=
-        *(uint64_t*)(p + 8+3*stride)= size==4 ? val*0x0100000001ULL : val*0x0101010101010101ULL;
+        *(uint64_t*)(p + 8+3*stride)= v;
+#else
+        *(uint32_t*)(p + 0+0*stride)=
+        *(uint32_t*)(p + 4+0*stride)= val;
+        if(h==1) return;
+        *(uint32_t*)(p + 0+1*stride)=
+        *(uint32_t*)(p + 4+1*stride)= val;
+        if(h==2) return;
+        *(uint32_t*)(p + 0+2*stride)=
+        *(uint32_t*)(p + 4+2*stride)=
+        *(uint32_t*)(p + 0+3*stride)=
+        *(uint32_t*)(p + 4+3*stride)= val;
+    }else if(w==16){
+        *(uint32_t*)(p + 0+0*stride)=
+        *(uint32_t*)(p + 4+0*stride)=
+        *(uint32_t*)(p + 8+0*stride)=
+        *(uint32_t*)(p +12+0*stride)=
+        *(uint32_t*)(p + 0+1*stride)=
+        *(uint32_t*)(p + 4+1*stride)=
+        *(uint32_t*)(p + 8+1*stride)=
+        *(uint32_t*)(p +12+1*stride)= val;
+        if(h==2) return;
+        *(uint32_t*)(p + 0+2*stride)=
+        *(uint32_t*)(p + 4+2*stride)=
+        *(uint32_t*)(p + 8+2*stride)=
+        *(uint32_t*)(p +12+2*stride)=
+        *(uint32_t*)(p + 0+3*stride)=
+        *(uint32_t*)(p + 4+3*stride)=
+        *(uint32_t*)(p + 8+3*stride)=
+        *(uint32_t*)(p +12+3*stride)= val;
+#endif
     }else
         assert(0);
+    assert(h==4);
 }
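A hedged usage sketch (a simplified re-implementation, not the real helper): fill_rectangle splats one value into a w x h block of a cache whose rows are `stride` elements apart, with w counted in elements of `size` bytes; the unrolled branches above are fast paths for the handful of shapes the decoder uses, e.g. fill_rectangle(&h->ref_cache[0][scan8[i8*4]], 2, 2, 8, ref0, 1) later in this file.

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    /* simplified, size==1 only; the real code also handles 4-byte elements (packed mvs) */
    static void fill_rect8(uint8_t *p, int w, int h, int stride, uint8_t val){
        int y;
        for(y=0; y<h; y++)
            memset(p + y*stride, val, w);
    }

    int main(void){
        uint8_t ref_cache[5*8];
        int x, y;
        memset(ref_cache, 0xFF, sizeof(ref_cache));     /* "not available" fill        */
        fill_rect8(&ref_cache[1*8 + 1], 2, 2, 8, 3);    /* one 8x8 partition, ref idx 3 */
        for(y=0; y<5; y++){
            for(x=0; x<8; x++)
                printf("%4d", (int8_t)ref_cache[y*8 + x]);
            printf("\n");
        }
        return 0;
    }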
 
 static void fill_caches(H264Context *h, int mb_type, int for_deblock){
@@ -458,10 +518,8 @@ static void fill_caches(H264Context *h, int mb_type, int for_deblock){
     int left_block[8];
     int i;
 
-    //FIXME deblocking can skip fill_caches much of the time with multiple slices too.
-    // the actual condition is whether we're on the edge of a slice,
-    // and even then the intra and nnz parts are unnecessary.
-    if(for_deblock && h->slice_num == 1)
+    //FIXME deblocking could skip the intra and nnz parts.
+    if(for_deblock && (h->slice_num == 1 || h->slice_table[mb_xy] == h->slice_table[mb_xy-s->mb_stride]) && !FRAME_MBAFF)
         return;
 
     //wow what a mess, why didn't they simplify the interlacing&intra stuff, i can't imagine that these complex rules are worth it
@@ -478,7 +536,7 @@ static void fill_caches(H264Context *h, int mb_type, int for_deblock){
     left_block[5]= 10;
     left_block[6]= 8;
     left_block[7]= 11;
-    if(h->mb_aff_frame){
+    if(FRAME_MBAFF){
         const int pair_xy          = s->mb_x     + (s->mb_y & ~1)*s->mb_stride;
         const int top_pair_xy      = pair_xy     - s->mb_stride;
         const int topleft_pair_xy  = top_pair_xy - 1;
@@ -548,11 +606,39 @@ static void fill_caches(H264Context *h, int mb_type, int for_deblock){
     h->left_mb_xy[0] = left_xy[0];
     h->left_mb_xy[1] = left_xy[1];
     if(for_deblock){
-        topleft_type = h->slice_table[topleft_xy ] < 255 ? s->current_picture.mb_type[topleft_xy] : 0;
+        topleft_type = 0;
+        topright_type = 0;
         top_type     = h->slice_table[top_xy     ] < 255 ? s->current_picture.mb_type[top_xy]     : 0;
-        topright_type= h->slice_table[topright_xy] < 255 ? s->current_picture.mb_type[topright_xy]: 0;
         left_type[0] = h->slice_table[left_xy[0] ] < 255 ? s->current_picture.mb_type[left_xy[0]] : 0;
         left_type[1] = h->slice_table[left_xy[1] ] < 255 ? s->current_picture.mb_type[left_xy[1]] : 0;
+
+        if(FRAME_MBAFF && !IS_INTRA(mb_type)){
+            int list;
+            int v = *(uint16_t*)&h->non_zero_count[mb_xy][14];
+            for(i=0; i<16; i++)
+                h->non_zero_count_cache[scan8[i]] = (v>>i)&1;
+            for(list=0; list<1+(h->slice_type==B_TYPE); list++){
+                if(USES_LIST(mb_type,list)){
+                    uint32_t *src = (uint32_t*)s->current_picture.motion_val[list][h->mb2b_xy[mb_xy]];
+                    uint32_t *dst = (uint32_t*)h->mv_cache[list][scan8[0]];
+                    int8_t *ref = &s->current_picture.ref_index[list][h->mb2b8_xy[mb_xy]];
+                    for(i=0; i<4; i++, dst+=8, src+=h->b_stride){
+                        dst[0] = src[0];
+                        dst[1] = src[1];
+                        dst[2] = src[2];
+                        dst[3] = src[3];
+                    }
+                    *(uint32_t*)&h->ref_cache[list][scan8[ 0]] =
+                    *(uint32_t*)&h->ref_cache[list][scan8[ 2]] = pack16to32(ref[0],ref[1])*0x0101;
+                    ref += h->b8_stride;
+                    *(uint32_t*)&h->ref_cache[list][scan8[ 8]] =
+                    *(uint32_t*)&h->ref_cache[list][scan8[10]] = pack16to32(ref[0],ref[1])*0x0101;
+                }else{
+                    fill_rectangle(&h-> mv_cache[list][scan8[ 0]], 4, 4, 8, 0, 4);
+                    fill_rectangle(&h->ref_cache[list][scan8[ 0]], 4, 4, 8, (uint8_t)LIST_NOT_USED, 1);
+                }
+            }
+        }
     }else{
         topleft_type = h->slice_table[topleft_xy ] == h->slice_num ? s->current_picture.mb_type[topleft_xy] : 0;
         top_type     = h->slice_table[top_xy     ] == h->slice_num ? s->current_picture.mb_type[top_xy]     : 0;
@@ -697,7 +783,6 @@ static void fill_caches(H264Context *h, int mb_type, int for_deblock){
     }
 
 #if 1
-    //FIXME direct mb can skip much of this
     if(IS_INTER(mb_type) || IS_DIRECT(mb_type)){
         int list;
         for(list=0; list<1+(h->slice_type==B_TYPE); list++){
@@ -711,7 +796,7 @@ static void fill_caches(H264Context *h, int mb_type, int for_deblock){
             }
             h->mv_cache_clean[list]= 0;
 
-            if(IS_INTER(top_type)){
+            if(USES_LIST(top_type, list)){
                 const int b_xy= h->mb2b_xy[top_xy] + 3*h->b_stride;
                 const int b8_xy= h->mb2b8_xy[top_xy] + h->b8_stride;
                 *(uint32_t*)h->mv_cache[list][scan8[0] + 0 - 1*8]= *(uint32_t*)s->current_picture.motion_val[list][b_xy + 0];
@@ -731,13 +816,13 @@ static void fill_caches(H264Context *h, int mb_type, int for_deblock){
             }
 
             //FIXME unify cleanup or sth
-            if(IS_INTER(left_type[0])){
+            if(USES_LIST(left_type[0], list)){
                 const int b_xy= h->mb2b_xy[left_xy[0]] + 3;
                 const int b8_xy= h->mb2b8_xy[left_xy[0]] + 1;
                 *(uint32_t*)h->mv_cache[list][scan8[0] - 1 + 0*8]= *(uint32_t*)s->current_picture.motion_val[list][b_xy + h->b_stride*left_block[0]];
                 *(uint32_t*)h->mv_cache[list][scan8[0] - 1 + 1*8]= *(uint32_t*)s->current_picture.motion_val[list][b_xy + h->b_stride*left_block[1]];
-                h->ref_cache[list][scan8[0] - 1 + 0*8]=
-                h->ref_cache[list][scan8[0] - 1 + 1*8]= s->current_picture.ref_index[list][b8_xy + h->b8_stride*(left_block[0]>>1)];
+                h->ref_cache[list][scan8[0] - 1 + 0*8]= s->current_picture.ref_index[list][b8_xy + h->b8_stride*(left_block[0]>>1)];
+                h->ref_cache[list][scan8[0] - 1 + 1*8]= s->current_picture.ref_index[list][b8_xy + h->b8_stride*(left_block[1]>>1)];
             }else{
                 *(uint32_t*)h->mv_cache [list][scan8[0] - 1 + 0*8]=
                 *(uint32_t*)h->mv_cache [list][scan8[0] - 1 + 1*8]= 0;
@@ -745,13 +830,13 @@ static void fill_caches(H264Context *h, int mb_type, int for_deblock){
                 h->ref_cache[list][scan8[0] - 1 + 1*8]= left_type[0] ? LIST_NOT_USED : PART_NOT_AVAILABLE;
             }
 
-            if(IS_INTER(left_type[1])){
+            if(USES_LIST(left_type[1], list)){
                 const int b_xy= h->mb2b_xy[left_xy[1]] + 3;
                 const int b8_xy= h->mb2b8_xy[left_xy[1]] + 1;
                 *(uint32_t*)h->mv_cache[list][scan8[0] - 1 + 2*8]= *(uint32_t*)s->current_picture.motion_val[list][b_xy + h->b_stride*left_block[2]];
                 *(uint32_t*)h->mv_cache[list][scan8[0] - 1 + 3*8]= *(uint32_t*)s->current_picture.motion_val[list][b_xy + h->b_stride*left_block[3]];
-                h->ref_cache[list][scan8[0] - 1 + 2*8]=
-                h->ref_cache[list][scan8[0] - 1 + 3*8]= s->current_picture.ref_index[list][b8_xy + h->b8_stride*(left_block[2]>>1)];
+                h->ref_cache[list][scan8[0] - 1 + 2*8]= s->current_picture.ref_index[list][b8_xy + h->b8_stride*(left_block[2]>>1)];
+                h->ref_cache[list][scan8[0] - 1 + 3*8]= s->current_picture.ref_index[list][b8_xy + h->b8_stride*(left_block[3]>>1)];
             }else{
                 *(uint32_t*)h->mv_cache [list][scan8[0] - 1 + 2*8]=
                 *(uint32_t*)h->mv_cache [list][scan8[0] - 1 + 3*8]= 0;
@@ -760,10 +845,10 @@ static void fill_caches(H264Context *h, int mb_type, int for_deblock){
                 assert((!left_type[0]) == (!left_type[1]));
             }
 
-            if(for_deblock || (IS_DIRECT(mb_type) && !h->direct_spatial_mv_pred))
+            if((for_deblock || (IS_DIRECT(mb_type) && !h->direct_spatial_mv_pred)) && !FRAME_MBAFF)
                 continue;
 
-            if(IS_INTER(topleft_type)){
+            if(USES_LIST(topleft_type, list)){
                 const int b_xy = h->mb2b_xy[topleft_xy] + 3 + 3*h->b_stride;
                 const int b8_xy= h->mb2b8_xy[topleft_xy] + 1 + h->b8_stride;
                 *(uint32_t*)h->mv_cache[list][scan8[0] - 1 - 1*8]= *(uint32_t*)s->current_picture.motion_val[list][b_xy];
@@ -773,7 +858,7 @@ static void fill_caches(H264Context *h, int mb_type, int for_deblock){
                 h->ref_cache[list][scan8[0] - 1 - 1*8]= topleft_type ? LIST_NOT_USED : PART_NOT_AVAILABLE;
             }
 
-            if(IS_INTER(topright_type)){
+            if(USES_LIST(topright_type, list)){
                 const int b_xy= h->mb2b_xy[topright_xy] + 3*h->b_stride;
                 const int b8_xy= h->mb2b8_xy[topright_xy] + h->b8_stride;
                 *(uint32_t*)h->mv_cache[list][scan8[0] + 4 - 1*8]= *(uint32_t*)s->current_picture.motion_val[list][b_xy];
@@ -783,6 +868,8 @@ static void fill_caches(H264Context *h, int mb_type, int for_deblock){
                 h->ref_cache[list][scan8[0] + 4 - 1*8]= topright_type ? LIST_NOT_USED : PART_NOT_AVAILABLE;
             }
 
+            if((IS_SKIP(mb_type) || IS_DIRECT(mb_type)) && !FRAME_MBAFF)
+                continue;
 
             h->ref_cache[list][scan8[5 ]+1] =
             h->ref_cache[list][scan8[7 ]+1] =
@@ -797,14 +884,7 @@ static void fill_caches(H264Context *h, int mb_type, int for_deblock){
 
             if( h->pps.cabac ) {
                 /* XXX beurk, Load mvd */
-                if(IS_INTER(topleft_type)){
-                    const int b_xy = h->mb2b_xy[topleft_xy] + 3 + 3*h->b_stride;
-                    *(uint32_t*)h->mvd_cache[list][scan8[0] - 1 - 1*8]= *(uint32_t*)h->mvd_table[list][b_xy];
-                }else{
-                    *(uint32_t*)h->mvd_cache[list][scan8[0] - 1 - 1*8]= 0;
-                }
-
-                if(IS_INTER(top_type)){
+                if(USES_LIST(top_type, list)){
                     const int b_xy= h->mb2b_xy[top_xy] + 3*h->b_stride;
                     *(uint32_t*)h->mvd_cache[list][scan8[0] + 0 - 1*8]= *(uint32_t*)h->mvd_table[list][b_xy + 0];
                     *(uint32_t*)h->mvd_cache[list][scan8[0] + 1 - 1*8]= *(uint32_t*)h->mvd_table[list][b_xy + 1];
@@ -816,7 +896,7 @@ static void fill_caches(H264Context *h, int mb_type, int for_deblock){
                     *(uint32_t*)h->mvd_cache [list][scan8[0] + 2 - 1*8]=
                     *(uint32_t*)h->mvd_cache [list][scan8[0] + 3 - 1*8]= 0;
                 }
-                if(IS_INTER(left_type[0])){
+                if(USES_LIST(left_type[0], list)){
                     const int b_xy= h->mb2b_xy[left_xy[0]] + 3;
                     *(uint32_t*)h->mvd_cache[list][scan8[0] - 1 + 0*8]= *(uint32_t*)h->mvd_table[list][b_xy + h->b_stride*left_block[0]];
                     *(uint32_t*)h->mvd_cache[list][scan8[0] - 1 + 1*8]= *(uint32_t*)h->mvd_table[list][b_xy + h->b_stride*left_block[1]];
@@ -824,7 +904,7 @@ static void fill_caches(H264Context *h, int mb_type, int for_deblock){
                     *(uint32_t*)h->mvd_cache [list][scan8[0] - 1 + 0*8]=
                     *(uint32_t*)h->mvd_cache [list][scan8[0] - 1 + 1*8]= 0;
                 }
-                if(IS_INTER(left_type[1])){
+                if(USES_LIST(left_type[1], list)){
                     const int b_xy= h->mb2b_xy[left_xy[1]] + 3;
                     *(uint32_t*)h->mvd_cache[list][scan8[0] - 1 + 2*8]= *(uint32_t*)h->mvd_table[list][b_xy + h->b_stride*left_block[2]];
                     *(uint32_t*)h->mvd_cache[list][scan8[0] - 1 + 3*8]= *(uint32_t*)h->mvd_table[list][b_xy + h->b_stride*left_block[3]];
@@ -851,18 +931,52 @@ static void fill_caches(H264Context *h, int mb_type, int for_deblock){
                         *(uint32_t*)&h->direct_cache[scan8[0] - 1*8]= 0;
                     }
 
-                    //FIXME interlacing
-                    if(IS_DIRECT(left_type[0])){
-                        h->direct_cache[scan8[0] - 1 + 0*8]=
+                    if(IS_DIRECT(left_type[0]))
+                        h->direct_cache[scan8[0] - 1 + 0*8]= 1;
+                    else if(IS_8X8(left_type[0]))
+                        h->direct_cache[scan8[0] - 1 + 0*8]= h->direct_table[h->mb2b8_xy[left_xy[0]] + 1 + h->b8_stride*(left_block[0]>>1)];
+                    else
+                        h->direct_cache[scan8[0] - 1 + 0*8]= 0;
+
+                    if(IS_DIRECT(left_type[1]))
                         h->direct_cache[scan8[0] - 1 + 2*8]= 1;
-                    }else if(IS_8X8(left_type[0])){
-                        int b8_xy = h->mb2b8_xy[left_xy[0]] + 1;
-                        h->direct_cache[scan8[0] - 1 + 0*8]= h->direct_table[b8_xy];
-                        h->direct_cache[scan8[0] - 1 + 2*8]= h->direct_table[b8_xy + h->b8_stride];
-                    }else{
-                        h->direct_cache[scan8[0] - 1 + 0*8]=
+                    else if(IS_8X8(left_type[1]))
+                        h->direct_cache[scan8[0] - 1 + 2*8]= h->direct_table[h->mb2b8_xy[left_xy[1]] + 1 + h->b8_stride*(left_block[2]>>1)];
+                    else
                         h->direct_cache[scan8[0] - 1 + 2*8]= 0;
+                }
+            }
+
+            if(FRAME_MBAFF){
+#define MAP_MVS\
+                    MAP_F2F(scan8[0] - 1 - 1*8, topleft_type)\
+                    MAP_F2F(scan8[0] + 0 - 1*8, top_type)\
+                    MAP_F2F(scan8[0] + 1 - 1*8, top_type)\
+                    MAP_F2F(scan8[0] + 2 - 1*8, top_type)\
+                    MAP_F2F(scan8[0] + 3 - 1*8, top_type)\
+                    MAP_F2F(scan8[0] + 4 - 1*8, topright_type)\
+                    MAP_F2F(scan8[0] - 1 + 0*8, left_type[0])\
+                    MAP_F2F(scan8[0] - 1 + 1*8, left_type[0])\
+                    MAP_F2F(scan8[0] - 1 + 2*8, left_type[1])\
+                    MAP_F2F(scan8[0] - 1 + 3*8, left_type[1])
+                if(MB_FIELD){
+#define MAP_F2F(idx, mb_type)\
+                    if(!IS_INTERLACED(mb_type) && h->ref_cache[list][idx] >= 0){\
+                        h->ref_cache[list][idx] <<= 1;\
+                        h->mv_cache[list][idx][1] /= 2;\
+                        h->mvd_cache[list][idx][1] /= 2;\
                     }
+                    MAP_MVS
+#undef MAP_F2F
+                }else{
+#define MAP_F2F(idx, mb_type)\
+                    if(IS_INTERLACED(mb_type) && h->ref_cache[list][idx] >= 0){\
+                        h->ref_cache[list][idx] >>= 1;\
+                        h->mv_cache[list][idx][1] <<= 1;\
+                        h->mvd_cache[list][idx][1] <<= 1;\
+                    }
+                    MAP_MVS
+#undef MAP_F2F
                 }
             }
         }
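A hedged worked example of the MAP_F2F mapping above (standalone arithmetic, not decoder code): when the current macroblock is field-coded and a neighbour was frame-coded, the neighbour's reference index is doubled (each frame reference corresponds to two field references) and its vertical motion vector is halved (field rows are twice as far apart); the opposite mapping halves the reference index and doubles the vertical component.

    #include <stdio.h>

    int main(void){
        int ref = 1, mvy = 10;        /* neighbour stored in frame units */

        /* frame -> field, as in the MB_FIELD branch of MAP_F2F */
        int ref_field = ref << 1;
        int mvy_field = mvy / 2;
        printf("frame (ref=%d, mvy=%3d) -> field (ref=%d, mvy=%3d)\n",
               ref, mvy, ref_field, mvy_field);

        /* field -> frame, as in the progressive branch */
        printf("field (ref=%d, mvy=%3d) -> frame (ref=%d, mvy=%3d)\n",
               ref_field, mvy_field, ref_field >> 1, mvy_field << 1);
        return 0;
    }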
@@ -987,6 +1101,14 @@ static inline void write_back_non_zero_count(H264Context *h){
     h->non_zero_count[mb_xy][12]=h->non_zero_count_cache[1+8*5];
     h->non_zero_count[mb_xy][11]=h->non_zero_count_cache[2+8*5];
     h->non_zero_count[mb_xy][10]=h->non_zero_count_cache[2+8*4];
+
+    if(FRAME_MBAFF){
+        // store all luma nnzs, for deblocking
+        int v = 0, i;
+        for(i=0; i<16; i++)
+            v += (!!h->non_zero_count_cache[scan8[i]]) << i;
+        *(uint16_t*)&h->non_zero_count[mb_xy][14] = v;
+    }
 }
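A hedged sketch of the bit-packing introduced above: the 16 per-4x4 luma "has nonzero coefficients" flags are packed into a single uint16_t (stored at bytes 14..15 of the per-macroblock non_zero_count array) so the MBAFF deblocking pass can reload them cheaply, and fill_caches unpacks them again with (v>>i)&1.

    #include <stdint.h>
    #include <stdio.h>

    int main(void){
        uint8_t nnz_cache[16] = {3,0,1,0, 0,0,0,2, 1,1,0,0, 0,0,0,5}; /* example values */
        uint16_t v = 0;
        int i;

        for(i=0; i<16; i++)
            v += (!!nnz_cache[i]) << i;       /* pack: one bit per 4x4 block    */
        printf("packed = 0x%04x\nunpacked:", v);

        for(i=0; i<16; i++)
            printf(" %d", (v>>i)&1);          /* same unpacking as fill_caches  */
        printf("\n");
        return 0;
    }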
 
 /**
@@ -1009,6 +1131,49 @@ static inline int pred_non_zero_count(H264Context *h, int n){
 static inline int fetch_diagonal_mv(H264Context *h, const int16_t **C, int i, int list, int part_width){
     const int topright_ref= h->ref_cache[list][ i - 8 + part_width ];
 
+    /* there is no consistent mapping of mvs to neighboring locations that will
+     * make mbaff happy, so we can't move all this logic to fill_caches */
+    if(FRAME_MBAFF){
+        MpegEncContext *s = &h->s;
+        const uint32_t *mb_types = s->current_picture_ptr->mb_type;
+        const int16_t *mv;
+        *(uint32_t*)h->mv_cache[list][scan8[0]-2] = 0;
+        *C = h->mv_cache[list][scan8[0]-2];
+
+        if(!MB_FIELD
+           && (s->mb_y&1) && i < scan8[0]+8 && topright_ref != PART_NOT_AVAILABLE){
+            int topright_xy = s->mb_x + (s->mb_y-1)*s->mb_stride + (i == scan8[0]+3);
+            if(IS_INTERLACED(mb_types[topright_xy])){
+#define SET_DIAG_MV(MV_OP, REF_OP, X4, Y4)\
+                const int x4 = X4, y4 = Y4;\
+                const int mb_type = mb_types[(x4>>2)+(y4>>2)*s->mb_stride];\
+                if(!USES_LIST(mb_type,list) && !IS_8X8(mb_type))\
+                    return LIST_NOT_USED;\
+                mv = s->current_picture_ptr->motion_val[list][x4 + y4*h->b_stride];\
+                h->mv_cache[list][scan8[0]-2][0] = mv[0];\
+                h->mv_cache[list][scan8[0]-2][1] = mv[1] MV_OP;\
+                return s->current_picture_ptr->ref_index[list][(x4>>1) + (y4>>1)*h->b8_stride] REF_OP;
+
+                SET_DIAG_MV(*2, >>1, s->mb_x*4+(i&7)-4+part_width, s->mb_y*4-1);
+            }
+        }
+        if(topright_ref == PART_NOT_AVAILABLE
+           && ((s->mb_y&1) || i >= scan8[0]+8) && (i&7)==4
+           && h->ref_cache[list][scan8[0]-1] != PART_NOT_AVAILABLE){
+            if(!MB_FIELD
+               && IS_INTERLACED(mb_types[h->left_mb_xy[0]])){
+                SET_DIAG_MV(*2, >>1, s->mb_x*4-1, (s->mb_y|1)*4+(s->mb_y&1)*2+(i>>4)-1);
+            }
+            if(MB_FIELD
+               && !IS_INTERLACED(mb_types[h->left_mb_xy[0]])
+               && i >= scan8[0]+8){
+                // leftshift will turn LIST_NOT_USED into PART_NOT_AVAILABLE, but that's ok.
+                SET_DIAG_MV(>>1, <<1, s->mb_x*4-1, (s->mb_y&~1)*4 - 1 + ((i-scan8[0])>>3)*2);
+            }
+        }
+#undef SET_DIAG_MV
+    }
+
     if(topright_ref != PART_NOT_AVAILABLE){
         *C= h->mv_cache[list][ i - 8 + part_width ];
         return topright_ref;
@@ -1182,6 +1347,12 @@ static inline void direct_dist_scale_factor(H264Context * const h){
             h->dist_scale_factor[i] = clip((tb*tx + 32) >> 6, -1024, 1023);
         }
     }
+    if(FRAME_MBAFF){
+        for(i=0; i<h->ref_count[0]; i++){
+            h->dist_scale_factor_field[2*i] =
+            h->dist_scale_factor_field[2*i+1] = h->dist_scale_factor[i];
+        }
+    }
 }
 static inline void direct_ref_list_init(H264Context * const h){
     MpegEncContext * const s = &h->s;
@@ -1210,6 +1381,15 @@ static inline void direct_ref_list_init(H264Context * const h){
                 }
         }
     }
+    if(FRAME_MBAFF){
+        for(list=0; list<2; list++){
+            for(i=0; i<ref1->ref_count[list]; i++){
+                j = h->map_col_to_list0[list][i];
+                h->map_col_to_list0_field[list][2*i] = 2*j;
+                h->map_col_to_list0_field[list][2*i+1] = 2*j+1;
+            }
+        }
+    }
 }
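A hedged illustration of the field-reference expansion above (standalone, example numbers only): in MBAFF temporal direct mode every frame reference i is seen as two field references 2*i and 2*i+1, so both per-reference tables are simply duplicated pairwise.

    #include <stdio.h>

    int main(void){
        int map_col_to_list0[4]      = {0, 2, 1, 3};   /* example frame-ref mapping */
        int map_col_to_list0_field[8];
        int i;

        for(i=0; i<4; i++){
            int j = map_col_to_list0[i];
            map_col_to_list0_field[2*i]   = 2*j;
            map_col_to_list0_field[2*i+1] = 2*j + 1;
        }
        for(i=0; i<8; i++)
            printf("colocated field ref %d -> list0 field ref %d\n",
                   i, map_col_to_list0_field[i]);
        return 0;
    }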
 
 static inline void pred_direct_motion(H264Context * const h, int *mb_type){
@@ -1226,12 +1406,13 @@ static inline void pred_direct_motion(H264Context * const h, int *mb_type){
     int sub_mb_type;
     int i8, i4;
 
+#define MB_TYPE_16x16_OR_INTRA (MB_TYPE_16x16|MB_TYPE_INTRA4x4|MB_TYPE_INTRA16x16|MB_TYPE_INTRA_PCM)
     if(IS_8X8(mb_type_col) && !h->sps.direct_8x8_inference_flag){
         /* FIXME save sub mb types from previous frames (or derive from MVs)
          * so we know exactly what block size to use */
         sub_mb_type = MB_TYPE_8x8|MB_TYPE_P0L0|MB_TYPE_P0L1|MB_TYPE_DIRECT2; /* B_SUB_4x4 */
         *mb_type =    MB_TYPE_8x8|MB_TYPE_L0L1;
-    }else if(!is_b8x8 && (IS_16X16(mb_type_col) || IS_INTRA(mb_type_col))){
+    }else if(!is_b8x8 && (mb_type_col & MB_TYPE_16x16_OR_INTRA)){
         sub_mb_type = MB_TYPE_16x16|MB_TYPE_P0L0|MB_TYPE_P0L1|MB_TYPE_DIRECT2; /* B_SUB_8x8 */
         *mb_type =    MB_TYPE_16x16|MB_TYPE_P0L0|MB_TYPE_P0L1|MB_TYPE_DIRECT2; /* B_16x16 */
     }else{
@@ -1240,6 +1421,8 @@ static inline void pred_direct_motion(H264Context * const h, int *mb_type){
     }
     if(!is_b8x8)
         *mb_type |= MB_TYPE_DIRECT2;
+    if(MB_FIELD)
+        *mb_type |= MB_TYPE_INTERLACED;
 
     tprintf("mb_type = %08x, sub_mb_type = %08x, is_b8x8 = %d, mb_type_col = %08x\n", *mb_type, sub_mb_type, is_b8x8, mb_type_col);
 
@@ -1248,6 +1431,8 @@ static inline void pred_direct_motion(H264Context * const h, int *mb_type){
         int mv[2][2];
         int list;
 
+        /* FIXME interlacing + spatial direct uses wrong colocated block positions */
+
         /* ref = min(neighbors) */
         for(list=0; list<2; list++){
             int refa = h->ref_cache[list][scan8[0] - 1];
@@ -1345,6 +1530,107 @@ static inline void pred_direct_motion(H264Context * const h, int *mb_type){
             }
         }
     }else{ /* direct temporal mv pred */
+        const int *map_col_to_list0[2] = {h->map_col_to_list0[0], h->map_col_to_list0[1]};
+        const int *dist_scale_factor = h->dist_scale_factor;
+
+        if(FRAME_MBAFF){
+            if(IS_INTERLACED(*mb_type)){
+                map_col_to_list0[0] = h->map_col_to_list0_field[0];
+                map_col_to_list0[1] = h->map_col_to_list0_field[1];
+                dist_scale_factor = h->dist_scale_factor_field;
+            }
+            if(IS_INTERLACED(*mb_type) != IS_INTERLACED(mb_type_col)){
+                /* FIXME assumes direct_8x8_inference == 1 */
+                const int pair_xy = s->mb_x + (s->mb_y&~1)*s->mb_stride;
+                int mb_types_col[2];
+                int y_shift;
+
+                *mb_type = MB_TYPE_8x8|MB_TYPE_L0L1
+                         | (is_b8x8 ? 0 : MB_TYPE_DIRECT2)
+                         | (*mb_type & MB_TYPE_INTERLACED);
+                sub_mb_type = MB_TYPE_P0L0|MB_TYPE_P0L1|MB_TYPE_DIRECT2|MB_TYPE_16x16;
+
+                if(IS_INTERLACED(*mb_type)){
+                    /* frame to field scaling */
+                    mb_types_col[0] = h->ref_list[1][0].mb_type[pair_xy];
+                    mb_types_col[1] = h->ref_list[1][0].mb_type[pair_xy+s->mb_stride];
+                    if(s->mb_y&1){
+                        l1ref0 -= 2*h->b8_stride;
+                        l1ref1 -= 2*h->b8_stride;
+                        l1mv0 -= 4*h->b_stride;
+                        l1mv1 -= 4*h->b_stride;
+                    }
+                    y_shift = 0;
+
+                    if(   (mb_types_col[0] & MB_TYPE_16x16_OR_INTRA)
+                       && (mb_types_col[1] & MB_TYPE_16x16_OR_INTRA)
+                       && !is_b8x8)
+                        *mb_type |= MB_TYPE_16x8;
+                    else
+                        *mb_type |= MB_TYPE_8x8;
+                }else{
+                    /* field to frame scaling */
+                    /* col_mb_y = (mb_y&~1) + (topAbsDiffPOC < bottomAbsDiffPOC ? 0 : 1)
+                     * but in MBAFF, top and bottom POC are equal */
+                    int dy = (s->mb_y&1) ? 1 : 2;
+                    mb_types_col[0] =
+                    mb_types_col[1] = h->ref_list[1][0].mb_type[pair_xy+s->mb_stride];
+                    l1ref0 += dy*h->b8_stride;
+                    l1ref1 += dy*h->b8_stride;
+                    l1mv0 += 2*dy*h->b_stride;
+                    l1mv1 += 2*dy*h->b_stride;
+                    y_shift = 2;
+
+                    if((mb_types_col[0] & (MB_TYPE_16x16_OR_INTRA|MB_TYPE_16x8))
+                       && !is_b8x8)
+                        *mb_type |= MB_TYPE_16x16;
+                    else
+                        *mb_type |= MB_TYPE_8x8;
+                }
+
+                for(i8=0; i8<4; i8++){
+                    const int x8 = i8&1;
+                    const int y8 = i8>>1;
+                    int ref0, scale;
+                    const int16_t (*l1mv)[2]= l1mv0;
+
+                    if(is_b8x8 && !IS_DIRECT(h->sub_mb_type[i8]))
+                        continue;
+                    h->sub_mb_type[i8] = sub_mb_type;
+
+                    fill_rectangle(&h->ref_cache[1][scan8[i8*4]], 2, 2, 8, 0, 1);
+                    if(IS_INTRA(mb_types_col[y8])){
+                        fill_rectangle(&h->ref_cache[0][scan8[i8*4]], 2, 2, 8, 0, 1);
+                        fill_rectangle(&h-> mv_cache[0][scan8[i8*4]], 2, 2, 8, 0, 4);
+                        fill_rectangle(&h-> mv_cache[1][scan8[i8*4]], 2, 2, 8, 0, 4);
+                        continue;
+                    }
+
+                    ref0 = l1ref0[x8 + (y8*2>>y_shift)*h->b8_stride];
+                    if(ref0 >= 0)
+                        ref0 = map_col_to_list0[0][ref0*2>>y_shift];
+                    else{
+                        ref0 = map_col_to_list0[1][l1ref1[x8 + (y8*2>>y_shift)*h->b8_stride]*2>>y_shift];
+                        l1mv= l1mv1;
+                    }
+                    scale = dist_scale_factor[ref0];
+                    fill_rectangle(&h->ref_cache[0][scan8[i8*4]], 2, 2, 8, ref0, 1);
+
+                    {
+                        const int16_t *mv_col = l1mv[x8*3 + (y8*6>>y_shift)*h->b_stride];
+                        int my_col = (mv_col[1]<<y_shift)/2;
+                        int mx = (scale * mv_col[0] + 128) >> 8;
+                        int my = (scale * my_col + 128) >> 8;
+                        fill_rectangle(&h->mv_cache[0][scan8[i8*4]], 2, 2, 8, pack16to32(mx,my), 4);
+                        fill_rectangle(&h->mv_cache[1][scan8[i8*4]], 2, 2, 8, pack16to32(mx-mv_col[0],my-my_col), 4);
+                    }
+                }
+                return;
+            }
+        }
+
+        /* one-to-one mv scaling */
+
         if(IS_16X16(*mb_type)){
             fill_rectangle(&h->ref_cache[1][scan8[0]], 4, 4, 8, 0, 1);
             if(IS_INTRA(mb_type_col)){
@@ -1352,13 +1638,13 @@ static inline void pred_direct_motion(H264Context * const h, int *mb_type){
                 fill_rectangle(&h-> mv_cache[0][scan8[0]], 4, 4, 8, 0, 4);
                 fill_rectangle(&h-> mv_cache[1][scan8[0]], 4, 4, 8, 0, 4);
             }else{
-                const int ref0 = l1ref0[0] >= 0 ? h->map_col_to_list0[0][l1ref0[0]]
-                                                : h->map_col_to_list0[1][l1ref1[0]];
-                const int dist_scale_factor = h->dist_scale_factor[ref0];
+                const int ref0 = l1ref0[0] >= 0 ? map_col_to_list0[0][l1ref0[0]]
+                                                : map_col_to_list0[1][l1ref1[0]];
+                const int scale = dist_scale_factor[ref0];
                 const int16_t *mv_col = l1ref0[0] >= 0 ? l1mv0[0] : l1mv1[0];
                 int mv_l0[2];
-                mv_l0[0] = (dist_scale_factor * mv_col[0] + 128) >> 8;
-                mv_l0[1] = (dist_scale_factor * mv_col[1] + 128) >> 8;
+                mv_l0[0] = (scale * mv_col[0] + 128) >> 8;
+                mv_l0[1] = (scale * mv_col[1] + 128) >> 8;
                 fill_rectangle(&h->ref_cache[0][scan8[0]], 4, 4, 8, ref0, 1);
                 fill_rectangle(&h-> mv_cache[0][scan8[0]], 4, 4, 8, pack16to32(mv_l0[0],mv_l0[1]), 4);
                 fill_rectangle(&h-> mv_cache[1][scan8[0]], 4, 4, 8, pack16to32(mv_l0[0]-mv_col[0],mv_l0[1]-mv_col[1]), 4);
@@ -1367,15 +1653,15 @@ static inline void pred_direct_motion(H264Context * const h, int *mb_type){
             for(i8=0; i8<4; i8++){
                 const int x8 = i8&1;
                 const int y8 = i8>>1;
-                int ref0, dist_scale_factor;
+                int ref0, scale;
                 const int16_t (*l1mv)[2]= l1mv0;
 
                 if(is_b8x8 && !IS_DIRECT(h->sub_mb_type[i8]))
                     continue;
                 h->sub_mb_type[i8] = sub_mb_type;
+                fill_rectangle(&h->ref_cache[1][scan8[i8*4]], 2, 2, 8, 0, 1);
                 if(IS_INTRA(mb_type_col)){
                     fill_rectangle(&h->ref_cache[0][scan8[i8*4]], 2, 2, 8, 0, 1);
-                    fill_rectangle(&h->ref_cache[1][scan8[i8*4]], 2, 2, 8, 0, 1);
                     fill_rectangle(&h-> mv_cache[0][scan8[i8*4]], 2, 2, 8, 0, 4);
                     fill_rectangle(&h-> mv_cache[1][scan8[i8*4]], 2, 2, 8, 0, 4);
                     continue;
@@ -1383,27 +1669,26 @@ static inline void pred_direct_motion(H264Context * const h, int *mb_type){
 
                 ref0 = l1ref0[x8 + y8*h->b8_stride];
                 if(ref0 >= 0)
-                    ref0 = h->map_col_to_list0[0][ref0];
+                    ref0 = map_col_to_list0[0][ref0];
                 else{
-                    ref0 = h->map_col_to_list0[1][l1ref1[x8 + y8*h->b8_stride]];
+                    ref0 = map_col_to_list0[1][l1ref1[x8 + y8*h->b8_stride]];
                     l1mv= l1mv1;
                 }
-                dist_scale_factor = h->dist_scale_factor[ref0];
+                scale = dist_scale_factor[ref0];
 
                 fill_rectangle(&h->ref_cache[0][scan8[i8*4]], 2, 2, 8, ref0, 1);
-                fill_rectangle(&h->ref_cache[1][scan8[i8*4]], 2, 2, 8, 0, 1);
                 if(IS_SUB_8X8(sub_mb_type)){
                     const int16_t *mv_col = l1mv[x8*3 + y8*3*h->b_stride];
-                    int mx = (dist_scale_factor * mv_col[0] + 128) >> 8;
-                    int my = (dist_scale_factor * mv_col[1] + 128) >> 8;
+                    int mx = (scale * mv_col[0] + 128) >> 8;
+                    int my = (scale * mv_col[1] + 128) >> 8;
                     fill_rectangle(&h->mv_cache[0][scan8[i8*4]], 2, 2, 8, pack16to32(mx,my), 4);
                     fill_rectangle(&h->mv_cache[1][scan8[i8*4]], 2, 2, 8, pack16to32(mx-mv_col[0],my-mv_col[1]), 4);
                 }else
                 for(i4=0; i4<4; i4++){
                     const int16_t *mv_col = l1mv[x8*2 + (i4&1) + (y8*2 + (i4>>1))*h->b_stride];
                     int16_t *mv_l0 = h->mv_cache[0][scan8[i8*4+i4]];
-                    mv_l0[0] = (dist_scale_factor * mv_col[0] + 128) >> 8;
-                    mv_l0[1] = (dist_scale_factor * mv_col[1] + 128) >> 8;
+                    mv_l0[0] = (scale * mv_col[0] + 128) >> 8;
+                    mv_l0[1] = (scale * mv_col[1] + 128) >> 8;
                     *(uint32_t*)h->mv_cache[1][scan8[i8*4+i4]] =
                         pack16to32(mv_l0[0]-mv_col[0],mv_l0[1]-mv_col[1]);
                 }
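A hedged worked example of the temporal-direct scaling used in this function: the list-0 motion vector is the colocated vector scaled by dist_scale_factor (an 8-bit fixed-point factor, where 256 means equal temporal distance), and the list-1 vector is the remainder mv_l0 - mv_col.

    #include <stdio.h>

    int main(void){
        int scale     = 128;            /* e.g. current picture halfway between its references */
        int mv_col[2] = {14, -6};       /* colocated mv in quarter-pel units                   */
        int mv_l0[2], mv_l1[2];
        int i;

        for(i=0; i<2; i++){
            mv_l0[i] = (scale * mv_col[i] + 128) >> 8;   /* same rounding as above */
            mv_l1[i] = mv_l0[i] - mv_col[i];
        }
        printf("mv_col=(%d,%d) -> mv_l0=(%d,%d), mv_l1=(%d,%d)\n",
               mv_col[0], mv_col[1], mv_l0[0], mv_l0[1], mv_l1[0], mv_l1[1]);
        return 0;
    }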
@@ -1418,50 +1703,43 @@ static inline void write_back_motion(H264Context *h, int mb_type){
     const int b8_xy= 2*s->mb_x + 2*s->mb_y*h->b8_stride;
     int list;
 
+    if(!USES_LIST(mb_type, 0))
+        fill_rectangle(&s->current_picture.ref_index[0][b8_xy], 2, 2, h->b8_stride, (uint8_t)LIST_NOT_USED, 1);
+
     for(list=0; list<2; list++){
         int y;
-        if(!USES_LIST(mb_type, list)){
-            if(1){ //FIXME skip or never read if mb_type doesn't use it
-                for(y=0; y<4; y++){
-                    *(uint64_t*)s->current_picture.motion_val[list][b_xy + 0 + y*h->b_stride]=
-                    *(uint64_t*)s->current_picture.motion_val[list][b_xy + 2 + y*h->b_stride]= 0;
-                }
-                if( h->pps.cabac ) {
-                    /* FIXME needed ? */
-                    for(y=0; y<4; y++){
-                        *(uint64_t*)h->mvd_table[list][b_xy + 0 + y*h->b_stride]=
-                        *(uint64_t*)h->mvd_table[list][b_xy + 2 + y*h->b_stride]= 0;
-                    }
-                }
-                for(y=0; y<2; y++){
-                    s->current_picture.ref_index[list][b8_xy + 0 + y*h->b8_stride]=
-                    s->current_picture.ref_index[list][b8_xy + 1 + y*h->b8_stride]= LIST_NOT_USED;
-                }
-            }
+        if(!USES_LIST(mb_type, list))
             continue;
-        }
 
         for(y=0; y<4; y++){
             *(uint64_t*)s->current_picture.motion_val[list][b_xy + 0 + y*h->b_stride]= *(uint64_t*)h->mv_cache[list][scan8[0]+0 + 8*y];
             *(uint64_t*)s->current_picture.motion_val[list][b_xy + 2 + y*h->b_stride]= *(uint64_t*)h->mv_cache[list][scan8[0]+2 + 8*y];
         }
         if( h->pps.cabac ) {
+            if(IS_SKIP(mb_type))
+                fill_rectangle(h->mvd_table[list][b_xy], 4, 4, h->b_stride, 0, 4);
+            else
             for(y=0; y<4; y++){
                 *(uint64_t*)h->mvd_table[list][b_xy + 0 + y*h->b_stride]= *(uint64_t*)h->mvd_cache[list][scan8[0]+0 + 8*y];
                 *(uint64_t*)h->mvd_table[list][b_xy + 2 + y*h->b_stride]= *(uint64_t*)h->mvd_cache[list][scan8[0]+2 + 8*y];
             }
         }
-        for(y=0; y<2; y++){
-            s->current_picture.ref_index[list][b8_xy + 0 + y*h->b8_stride]= h->ref_cache[list][scan8[0]+0 + 16*y];
-            s->current_picture.ref_index[list][b8_xy + 1 + y*h->b8_stride]= h->ref_cache[list][scan8[0]+2 + 16*y];
+
+        {
+            int8_t *ref_index = &s->current_picture.ref_index[list][b8_xy];
+            ref_index[0+0*h->b8_stride]= h->ref_cache[list][scan8[0]];
+            ref_index[1+0*h->b8_stride]= h->ref_cache[list][scan8[4]];
+            ref_index[0+1*h->b8_stride]= h->ref_cache[list][scan8[8]];
+            ref_index[1+1*h->b8_stride]= h->ref_cache[list][scan8[12]];
         }
     }
 
     if(h->slice_type == B_TYPE && h->pps.cabac){
         if(IS_8X8(mb_type)){
-            h->direct_table[b8_xy+1+0*h->b8_stride] = IS_DIRECT(h->sub_mb_type[1]) ? 1 : 0;
-            h->direct_table[b8_xy+0+1*h->b8_stride] = IS_DIRECT(h->sub_mb_type[2]) ? 1 : 0;
-            h->direct_table[b8_xy+1+1*h->b8_stride] = IS_DIRECT(h->sub_mb_type[3]) ? 1 : 0;
+            uint8_t *direct_table = &h->direct_table[b8_xy];
+            direct_table[1+0*h->b8_stride] = IS_DIRECT(h->sub_mb_type[1]) ? 1 : 0;
+            direct_table[0+1*h->b8_stride] = IS_DIRECT(h->sub_mb_type[2]) ? 1 : 0;
+            direct_table[1+1*h->b8_stride] = IS_DIRECT(h->sub_mb_type[3]) ? 1 : 0;
         }
     }
 }
@@ -2594,18 +2872,17 @@ static inline void mc_dir_part(H264Context *h, Picture *pic, int n, int square,
                            qpel_mc_func *qpix_op, h264_chroma_mc_func chroma_op){
     MpegEncContext * const s = &h->s;
     const int mx= h->mv_cache[list][ scan8[n] ][0] + src_x_offset*8;
-    const int my= h->mv_cache[list][ scan8[n] ][1] + src_y_offset*8;
+    int my=       h->mv_cache[list][ scan8[n] ][1] + src_y_offset*8;
     const int luma_xy= (mx&3) + ((my&3)<<2);
-    uint8_t * src_y = pic->data[0] + (mx>>2) + (my>>2)*s->linesize;
-    uint8_t * src_cb= pic->data[1] + (mx>>3) + (my>>3)*s->uvlinesize;
-    uint8_t * src_cr= pic->data[2] + (mx>>3) + (my>>3)*s->uvlinesize;
-    int extra_width= (s->flags&CODEC_FLAG_EMU_EDGE) ? 0 : 16; //FIXME increase edge?, IMHO not worth it
-    int extra_height= extra_width;
+    uint8_t * src_y = pic->data[0] + (mx>>2) + (my>>2)*h->mb_linesize;
+    uint8_t * src_cb, * src_cr;
+    int extra_width= h->emu_edge_width;
+    int extra_height= h->emu_edge_height;
     int emu=0;
     const int full_mx= mx>>2;
     const int full_my= my>>2;
     const int pic_width  = 16*s->mb_width;
-    const int pic_height = 16*s->mb_height;
+    const int pic_height = 16*s->mb_height >> MB_MBAFF;
 
     if(!pic->data[0])
         return;
@@ -2617,29 +2894,37 @@ static inline void mc_dir_part(H264Context *h, Picture *pic, int n, int square,
        || full_my < 0-extra_height
        || full_mx + 16/*FIXME*/ > pic_width + extra_width
        || full_my + 16/*FIXME*/ > pic_height + extra_height){
-        ff_emulated_edge_mc(s->edge_emu_buffer, src_y - 2 - 2*s->linesize, s->linesize, 16+5, 16+5/*FIXME*/, full_mx-2, full_my-2, pic_width, pic_height);
-            src_y= s->edge_emu_buffer + 2 + 2*s->linesize;
+        ff_emulated_edge_mc(s->edge_emu_buffer, src_y - 2 - 2*h->mb_linesize, h->mb_linesize, 16+5, 16+5/*FIXME*/, full_mx-2, full_my-2, pic_width, pic_height);
+            src_y= s->edge_emu_buffer + 2 + 2*h->mb_linesize;
         emu=1;
     }
 
-    qpix_op[luma_xy](dest_y, src_y, s->linesize); //FIXME try variable height perhaps?
+    qpix_op[luma_xy](dest_y, src_y, h->mb_linesize); //FIXME try variable height perhaps?
     if(!square){
-        qpix_op[luma_xy](dest_y + delta, src_y + delta, s->linesize);
+        qpix_op[luma_xy](dest_y + delta, src_y + delta, h->mb_linesize);
     }
 
     if(s->flags&CODEC_FLAG_GRAY) return;
 
+    if(MB_MBAFF){
+        // chroma offset when predicting from a field of opposite parity
+        my += 2 * ((s->mb_y & 1) - (h->ref_cache[list][scan8[n]] & 1));
+        emu |= (my>>3) < 0 || (my>>3) + 8 >= (pic_height>>1);
+    }
+    src_cb= pic->data[1] + (mx>>3) + (my>>3)*h->mb_uvlinesize;
+    src_cr= pic->data[2] + (mx>>3) + (my>>3)*h->mb_uvlinesize;
+
     if(emu){
-        ff_emulated_edge_mc(s->edge_emu_buffer, src_cb, s->uvlinesize, 9, 9/*FIXME*/, (mx>>3), (my>>3), pic_width>>1, pic_height>>1);
+        ff_emulated_edge_mc(s->edge_emu_buffer, src_cb, h->mb_uvlinesize, 9, 9/*FIXME*/, (mx>>3), (my>>3), pic_width>>1, pic_height>>1);
             src_cb= s->edge_emu_buffer;
     }
-    chroma_op(dest_cb, src_cb, s->uvlinesize, chroma_height, mx&7, my&7);
+    chroma_op(dest_cb, src_cb, h->mb_uvlinesize, chroma_height, mx&7, my&7);
 
     if(emu){
-        ff_emulated_edge_mc(s->edge_emu_buffer, src_cr, s->uvlinesize, 9, 9/*FIXME*/, (mx>>3), (my>>3), pic_width>>1, pic_height>>1);
+        ff_emulated_edge_mc(s->edge_emu_buffer, src_cr, h->mb_uvlinesize, 9, 9/*FIXME*/, (mx>>3), (my>>3), pic_width>>1, pic_height>>1);
             src_cr= s->edge_emu_buffer;
     }
-    chroma_op(dest_cr, src_cr, s->uvlinesize, chroma_height, mx&7, my&7);
+    chroma_op(dest_cr, src_cr, h->mb_uvlinesize, chroma_height, mx&7, my&7);
 }
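A hedged standalone rendering of the parity correction added above: when an MBAFF field macroblock predicts from a field of the opposite parity, my is adjusted by +2, 0, or -2 (the sign picked by the parities of the current macroblock row and of the reference index), so the chroma fetch shifts toward the correct field.

    #include <stdio.h>

    static int chroma_parity_offset(int mb_y, int ref_index){
        return 2 * ((mb_y & 1) - (ref_index & 1));   /* same expression as in mc_dir_part */
    }

    int main(void){
        int mb_y, ref;
        for(mb_y=0; mb_y<2; mb_y++)
            for(ref=0; ref<2; ref++)
                printf("mb row parity %d, ref parity %d -> my offset %+d\n",
                       mb_y, ref, chroma_parity_offset(mb_y, ref));
        return 0;
    }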
 
 static inline void mc_part_std(H264Context *h, int n, int square, int chroma_height, int delta,
@@ -2652,11 +2937,11 @@ static inline void mc_part_std(H264Context *h, int n, int square, int chroma_hei
     qpel_mc_func *qpix_op=  qpix_put;
     h264_chroma_mc_func chroma_op= chroma_put;
 
-    dest_y  += 2*x_offset + 2*y_offset*s->  linesize;
-    dest_cb +=   x_offset +   y_offset*s->uvlinesize;
-    dest_cr +=   x_offset +   y_offset*s->uvlinesize;
+    dest_y  += 2*x_offset + 2*y_offset*h->  mb_linesize;
+    dest_cb +=   x_offset +   y_offset*h->mb_uvlinesize;
+    dest_cr +=   x_offset +   y_offset*h->mb_uvlinesize;
     x_offset += 8*s->mb_x;
-    y_offset += 8*s->mb_y;
+    y_offset += 8*(s->mb_y >> MB_MBAFF);
 
     if(list0){
         Picture *ref= &h->ref_list[0][ h->ref_cache[0][ scan8[n] ] ];
@@ -2685,18 +2970,18 @@ static inline void mc_part_weighted(H264Context *h, int n, int square, int chrom
                            int list0, int list1){
     MpegEncContext * const s = &h->s;
 
-    dest_y  += 2*x_offset + 2*y_offset*s->  linesize;
-    dest_cb +=   x_offset +   y_offset*s->uvlinesize;
-    dest_cr +=   x_offset +   y_offset*s->uvlinesize;
+    dest_y  += 2*x_offset + 2*y_offset*h->  mb_linesize;
+    dest_cb +=   x_offset +   y_offset*h->mb_uvlinesize;
+    dest_cr +=   x_offset +   y_offset*h->mb_uvlinesize;
     x_offset += 8*s->mb_x;
-    y_offset += 8*s->mb_y;
+    y_offset += 8*(s->mb_y >> MB_MBAFF);
 
     if(list0 && list1){
         /* don't optimize for luma-only case, since B-frames usually
          * use implicit weights => chroma too. */
         uint8_t *tmp_cb = s->obmc_scratchpad;
-        uint8_t *tmp_cr = tmp_cb + 8*s->uvlinesize;
-        uint8_t *tmp_y  = tmp_cr + 8*s->uvlinesize;
+        uint8_t *tmp_cr = s->obmc_scratchpad + 8;
+        uint8_t *tmp_y  = s->obmc_scratchpad + 8*h->mb_uvlinesize;
         int refn0 = h->ref_cache[0][ scan8[n] ];
         int refn1 = h->ref_cache[1][ scan8[n] ];
 
@@ -2710,17 +2995,17 @@ static inline void mc_part_weighted(H264Context *h, int n, int square, int chrom
         if(h->use_weight == 2){
             int weight0 = h->implicit_weight[refn0][refn1];
             int weight1 = 64 - weight0;
-            luma_weight_avg(  dest_y,  tmp_y,  s->  linesize, 5, weight0, weight1, 0);
-            chroma_weight_avg(dest_cb, tmp_cb, s->uvlinesize, 5, weight0, weight1, 0);
-            chroma_weight_avg(dest_cr, tmp_cr, s->uvlinesize, 5, weight0, weight1, 0);
+            luma_weight_avg(  dest_y,  tmp_y,  h->  mb_linesize, 5, weight0, weight1, 0);
+            chroma_weight_avg(dest_cb, tmp_cb, h->mb_uvlinesize, 5, weight0, weight1, 0);
+            chroma_weight_avg(dest_cr, tmp_cr, h->mb_uvlinesize, 5, weight0, weight1, 0);
         }else{
-            luma_weight_avg(dest_y, tmp_y, s->linesize, h->luma_log2_weight_denom,
+            luma_weight_avg(dest_y, tmp_y, h->mb_linesize, h->luma_log2_weight_denom,
                             h->luma_weight[0][refn0], h->luma_weight[1][refn1],
                             h->luma_offset[0][refn0] + h->luma_offset[1][refn1]);
-            chroma_weight_avg(dest_cb, tmp_cb, s->uvlinesize, h->chroma_log2_weight_denom,
+            chroma_weight_avg(dest_cb, tmp_cb, h->mb_uvlinesize, h->chroma_log2_weight_denom,
                             h->chroma_weight[0][refn0][0], h->chroma_weight[1][refn1][0],
                             h->chroma_offset[0][refn0][0] + h->chroma_offset[1][refn1][0]);
-            chroma_weight_avg(dest_cr, tmp_cr, s->uvlinesize, h->chroma_log2_weight_denom,
+            chroma_weight_avg(dest_cr, tmp_cr, h->mb_uvlinesize, h->chroma_log2_weight_denom,
                             h->chroma_weight[0][refn0][1], h->chroma_weight[1][refn1][1],
                             h->chroma_offset[0][refn0][1] + h->chroma_offset[1][refn1][1]);
         }
@@ -2732,12 +3017,12 @@ static inline void mc_part_weighted(H264Context *h, int n, int square, int chrom
                     dest_y, dest_cb, dest_cr, x_offset, y_offset,
                     qpix_put, chroma_put);
 
-        luma_weight_op(dest_y, s->linesize, h->luma_log2_weight_denom,
+        luma_weight_op(dest_y, h->mb_linesize, h->luma_log2_weight_denom,
                        h->luma_weight[list][refn], h->luma_offset[list][refn]);
         if(h->use_weight_chroma){
-            chroma_weight_op(dest_cb, s->uvlinesize, h->chroma_log2_weight_denom,
+            chroma_weight_op(dest_cb, h->mb_uvlinesize, h->chroma_log2_weight_denom,
                              h->chroma_weight[list][refn][0], h->chroma_offset[list][refn][0]);
-            chroma_weight_op(dest_cr, s->uvlinesize, h->chroma_log2_weight_denom,
+            chroma_weight_op(dest_cr, h->mb_uvlinesize, h->chroma_log2_weight_denom,
                              h->chroma_weight[list][refn][1], h->chroma_offset[list][refn][1]);
         }
     }
@@ -2761,6 +3046,22 @@ static inline void mc_part(H264Context *h, int n, int square, int chroma_height,
                     x_offset, y_offset, qpix_put, chroma_put, qpix_avg, chroma_avg, list0, list1);
 }
 
+static inline void prefetch_motion(H264Context *h, int list){
+    /* fetch pixels for estimated mv 4 macroblocks ahead
+     * optimized for 64byte cache lines */
+    MpegEncContext * const s = &h->s;
+    const int refn = h->ref_cache[list][scan8[0]];
+    if(refn >= 0){
+        const int mx= (h->mv_cache[list][scan8[0]][0]>>2) + 16*s->mb_x + 8;
+        const int my= (h->mv_cache[list][scan8[0]][1]>>2) + 16*s->mb_y;
+        uint8_t **src= h->ref_list[list][refn].data;
+        int off= mx + (my + (s->mb_x&3)*4)*h->mb_linesize + 64;
+        s->dsp.prefetch(src[0]+off, s->linesize, 4);
+        off= (mx>>1) + ((my>>1) + (s->mb_x&7))*s->uvlinesize + 64;
+        s->dsp.prefetch(src[1]+off, src[2]-src[1], 2);
+    }
+}
+
 static void hl_motion(H264Context *h, uint8_t *dest_y, uint8_t *dest_cb, uint8_t *dest_cr,
                       qpel_mc_func (*qpix_put)[16], h264_chroma_mc_func (*chroma_put),
                       qpel_mc_func (*qpix_avg)[16], h264_chroma_mc_func (*chroma_avg),
@@ -2771,6 +3072,8 @@ static void hl_motion(H264Context *h, uint8_t *dest_y, uint8_t *dest_cb, uint8_t
 
     assert(IS_INTER(mb_type));
 
+    prefetch_motion(h, 0);
+
     if(IS_16X16(mb_type)){
         mc_part(h, 0, 1, 8, 0, dest_y, dest_cb, dest_cr, 0, 0,
                 qpix_put[0], chroma_put[0], qpix_avg[0], chroma_avg[0],
@@ -2786,11 +3089,11 @@ static void hl_motion(H264Context *h, uint8_t *dest_y, uint8_t *dest_cb, uint8_t
                 &weight_op[1], &weight_avg[1],
                 IS_DIR(mb_type, 1, 0), IS_DIR(mb_type, 1, 1));
     }else if(IS_8X16(mb_type)){
-        mc_part(h, 0, 0, 8, 8*s->linesize, dest_y, dest_cb, dest_cr, 0, 0,
+        mc_part(h, 0, 0, 8, 8*h->mb_linesize, dest_y, dest_cb, dest_cr, 0, 0,
                 qpix_put[1], chroma_put[1], qpix_avg[1], chroma_avg[1],
                 &weight_op[2], &weight_avg[2],
                 IS_DIR(mb_type, 0, 0), IS_DIR(mb_type, 0, 1));
-        mc_part(h, 4, 0, 8, 8*s->linesize, dest_y, dest_cb, dest_cr, 4, 0,
+        mc_part(h, 4, 0, 8, 8*h->mb_linesize, dest_y, dest_cb, dest_cr, 4, 0,
                 qpix_put[1], chroma_put[1], qpix_avg[1], chroma_avg[1],
                 &weight_op[2], &weight_avg[2],
                 IS_DIR(mb_type, 1, 0), IS_DIR(mb_type, 1, 1));
@@ -2820,11 +3123,11 @@ static void hl_motion(H264Context *h, uint8_t *dest_y, uint8_t *dest_cb, uint8_t
                     &weight_op[4], &weight_avg[4],
                     IS_DIR(sub_mb_type, 0, 0), IS_DIR(sub_mb_type, 0, 1));
             }else if(IS_SUB_4X8(sub_mb_type)){
-                mc_part(h, n  , 0, 4, 4*s->linesize, dest_y, dest_cb, dest_cr, x_offset, y_offset,
+                mc_part(h, n  , 0, 4, 4*h->mb_linesize, dest_y, dest_cb, dest_cr, x_offset, y_offset,
                     qpix_put[2], chroma_put[2], qpix_avg[2], chroma_avg[2],
                     &weight_op[5], &weight_avg[5],
                     IS_DIR(sub_mb_type, 0, 0), IS_DIR(sub_mb_type, 0, 1));
-                mc_part(h, n+1, 0, 4, 4*s->linesize, dest_y, dest_cb, dest_cr, x_offset+2, y_offset,
+                mc_part(h, n+1, 0, 4, 4*h->mb_linesize, dest_y, dest_cb, dest_cr, x_offset+2, y_offset,
                     qpix_put[2], chroma_put[2], qpix_avg[2], chroma_avg[2],
                     &weight_op[5], &weight_avg[5],
                     IS_DIR(sub_mb_type, 0, 0), IS_DIR(sub_mb_type, 0, 1));
@@ -2842,6 +3145,8 @@ static void hl_motion(H264Context *h, uint8_t *dest_y, uint8_t *dest_cb, uint8_t
             }
         }
     }
+
+    prefetch_motion(h, 1);
 }
 
 static void decode_init_vlc(H264Context *h){
@@ -2953,6 +3258,7 @@ static void free_tables(H264Context *h){
 
 static void init_dequant8_coeff_table(H264Context *h){
     int i,q,x;
+    const int transpose = (h->s.dsp.h264_idct8_add != ff_h264_idct8_add_c); //FIXME ugly
     h->dequant8_coeff[0] = h->dequant8_buffer[0];
     h->dequant8_coeff[1] = h->dequant8_buffer[1];
 
@@ -2966,8 +3272,9 @@ static void init_dequant8_coeff_table(H264Context *h){
             int shift = div6[q];
             int idx = rem6[q];
             for(x=0; x<64; x++)
-                h->dequant8_coeff[i][q][x] = ((uint32_t)dequant8_coeff_init[idx][
-                    dequant8_coeff_init_scan[((x>>1)&12) | (x&3)] ] * h->pps.scaling_matrix8[i][x]) << shift;
+                h->dequant8_coeff[i][q][transpose ? (x>>3)|((x&7)<<3) : x] =
+                    ((uint32_t)dequant8_coeff_init[idx][ dequant8_coeff_init_scan[((x>>1)&12) | (x&3)] ] *
+                    h->pps.scaling_matrix8[i][x]) << shift;
         }
     }
 }
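/* Illustrative sketch (not part of the patch): the index expression
 * (x>>3) | ((x&7)<<3) used above swaps the row and column of a position in a
 * raster-ordered 8x8 block, i.e. it transposes the matrix for IDCT
 * implementations that expect transposed coefficients.  The same row/column
 * swap is applied to the 8x8 scan tables later in this patch. */
static inline int transpose8x8_index(int x)
{
    int row = x >> 3;      /* x / 8 */
    int col = x & 7;       /* x % 8 */
    return col * 8 + row;  /* element (row,col) moves to (col,row) */
}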
@@ -3026,7 +3333,7 @@ static int alloc_tables(H264Context *h){
     CHECKED_ALLOCZ(h->intra4x4_pred_mode, big_mb_num * 8  * sizeof(uint8_t))
 
     CHECKED_ALLOCZ(h->non_zero_count    , big_mb_num * 16 * sizeof(uint8_t))
-    CHECKED_ALLOCZ(h->slice_table_base  , big_mb_num * sizeof(uint8_t))
+    CHECKED_ALLOCZ(h->slice_table_base  , (big_mb_num+s->mb_stride) * sizeof(uint8_t))
     CHECKED_ALLOCZ(h->top_borders[0]    , s->mb_width * (16+8+8) * sizeof(uint8_t))
     CHECKED_ALLOCZ(h->top_borders[1]    , s->mb_width * (16+8+8) * sizeof(uint8_t))
     CHECKED_ALLOCZ(h->cbp_table, big_mb_num * sizeof(uint16_t))
@@ -3038,8 +3345,8 @@ static int alloc_tables(H264Context *h){
         CHECKED_ALLOCZ(h->direct_table, 32*big_mb_num * sizeof(uint8_t));
     }
 
-    memset(h->slice_table_base, -1, big_mb_num  * sizeof(uint8_t));
-    h->slice_table= h->slice_table_base + s->mb_stride + 1;
+    memset(h->slice_table_base, -1, (big_mb_num+s->mb_stride)  * sizeof(uint8_t));
+    h->slice_table= h->slice_table_base + s->mb_stride*2 + 1;
 
     CHECKED_ALLOCZ(h->mb2b_xy  , big_mb_num * sizeof(uint32_t));
     CHECKED_ALLOCZ(h->mb2b8_xy , big_mb_num * sizeof(uint32_t));
@@ -3136,7 +3443,11 @@ static int frame_start(H264Context *h){
     /* can't be in alloc_tables because linesize isn't known there.
      * FIXME: redo bipred weight to not require extra buffer? */
     if(!s->obmc_scratchpad)
-        s->obmc_scratchpad = av_malloc(16*s->linesize + 2*8*s->uvlinesize);
+        s->obmc_scratchpad = av_malloc(16*2*s->linesize + 8*2*s->uvlinesize);
+
+    /* some macroblocks will be accessed before they're available */
+    if(FRAME_MBAFF)
+        memset(h->slice_table, -1, (s->mb_height*s->mb_stride-1) * sizeof(uint8_t));
 
 //    s->decode= (s->flags&CODEC_FLAG_PSNR) || !s->encoding || s->current_picture.reference /*|| h->contains_intra*/ || 1;
     return 0;
@@ -3259,7 +3570,7 @@ static inline void xchg_pair_border(H264Context *h, uint8_t *src_y, uint8_t *src
     int temp8, i;
     uint64_t temp64;
     int deblock_left = (s->mb_x > 0);
-    int deblock_top  = (s->mb_y > 0);
+    int deblock_top  = (s->mb_y > 1);
 
     tprintf("xchg_pair_border: src_y:%p src_cb:%p src_cr:%p ls:%d uvls:%d\n", src_y, src_cb, src_cr, linesize, uvlinesize);
 
@@ -3284,6 +3595,10 @@ b= t;
         XCHG(*(uint64_t*)(h->top_borders[0][s->mb_x]+8), *(uint64_t*)(src_y +9), temp64, 1);
         XCHG(*(uint64_t*)(h->top_borders[1][s->mb_x]+0), *(uint64_t*)(src_y +1 +linesize), temp64, xchg);
         XCHG(*(uint64_t*)(h->top_borders[1][s->mb_x]+8), *(uint64_t*)(src_y +9 +linesize), temp64, 1);
+        if(s->mb_x+1 < s->mb_width){
+            XCHG(*(uint64_t*)(h->top_borders[0][s->mb_x+1]), *(uint64_t*)(src_y +17), temp64, 1);
+            XCHG(*(uint64_t*)(h->top_borders[1][s->mb_x+1]), *(uint64_t*)(src_y +17 +linesize), temp64, 1);
+        }
     }
 
     if(!(s->flags&CODEC_FLAG_GRAY)){
@@ -3324,18 +3639,36 @@ static void hl_decode_mb(H264Context *h){
     dest_cb = s->current_picture.data[1] + (mb_y * 8 * s->uvlinesize) + mb_x * 8;
     dest_cr = s->current_picture.data[2] + (mb_y * 8 * s->uvlinesize) + mb_x * 8;
 
-    if (h->mb_field_decoding_flag) {
-        linesize = s->linesize * 2;
-        uvlinesize = s->uvlinesize * 2;
+    if (MB_FIELD) {
+        linesize   = h->mb_linesize   = s->linesize * 2;
+        uvlinesize = h->mb_uvlinesize = s->uvlinesize * 2;
         block_offset = &h->block_offset[24];
         if(mb_y&1){ //FIXME move out of this func?
             dest_y -= s->linesize*15;
             dest_cb-= s->uvlinesize*7;
             dest_cr-= s->uvlinesize*7;
         }
+        if(FRAME_MBAFF) {
+            int list;
+            for(list=0; list<2; list++){
+                if(!USES_LIST(mb_type, list))
+                    continue;
+                if(IS_16X16(mb_type)){
+                    int8_t *ref = &h->ref_cache[list][scan8[0]];
+                    fill_rectangle(ref, 4, 4, 8, 16+*ref^(s->mb_y&1), 1);
+                }else{
+                    for(i=0; i<16; i+=4){
+                        //FIXME can refs be smaller than 8x8 when !direct_8x8_inference ?
+                        int ref = h->ref_cache[list][scan8[i]];
+                        if(ref >= 0)
+                            fill_rectangle(&h->ref_cache[list][scan8[i]], 2, 2, 8, 16+ref^(s->mb_y&1), 1);
+                    }
+                }
+            }
+        }
     } else {
-        linesize = s->linesize;
-        uvlinesize = s->uvlinesize;
+        linesize   = h->mb_linesize   = s->linesize;
+        uvlinesize = h->mb_uvlinesize = s->uvlinesize;
 //        dct_offset = s->linesize * 16;
     }
 
@@ -3350,6 +3683,15 @@ static void hl_decode_mb(H264Context *h){
         idct_add = s->dsp.h264_idct_add;
     }
 
+    if(FRAME_MBAFF && h->deblocking_filter && IS_INTRA(mb_type)
+       && (!bottom || !IS_INTRA(s->current_picture.mb_type[mb_xy-s->mb_stride]))){
+        int mbt_y = mb_y&~1;
+        uint8_t *top_y  = s->current_picture.data[0] + (mbt_y * 16* s->linesize  ) + mb_x * 16;
+        uint8_t *top_cb = s->current_picture.data[1] + (mbt_y * 8 * s->uvlinesize) + mb_x * 8;
+        uint8_t *top_cr = s->current_picture.data[2] + (mbt_y * 8 * s->uvlinesize) + mb_x * 8;
+        xchg_pair_border(h, top_y, top_cb, top_cr, s->linesize, s->uvlinesize, 1);
+    }
+
     if (IS_INTRA_PCM(mb_type)) {
         unsigned int x, y;
 
@@ -3378,14 +3720,8 @@ static void hl_decode_mb(H264Context *h){
         }
     } else {
         if(IS_INTRA(mb_type)){
-            if(h->deblocking_filter) {
-                if (h->mb_aff_frame) {
-                    if (!bottom)
-                        xchg_pair_border(h, dest_y, dest_cb, dest_cr, s->linesize, s->uvlinesize, 1);
-                } else {
-                    xchg_mb_border(h, dest_y, dest_cb, dest_cr, linesize, uvlinesize, 1);
-                }
-            }
+            if(h->deblocking_filter && !FRAME_MBAFF)
+                xchg_mb_border(h, dest_y, dest_cb, dest_cr, linesize, uvlinesize, 1);
 
             if(!(s->flags&CODEC_FLAG_GRAY)){
                 h->pred8x8[ h->chroma_pred_mode ](dest_cb, uvlinesize);
@@ -3447,24 +3783,12 @@ static void hl_decode_mb(H264Context *h){
                 }else
                     svq3_luma_dc_dequant_idct_c(h->mb, s->qscale);
             }
-            if(h->deblocking_filter) {
-                if (h->mb_aff_frame) {
-                    if (bottom) {
-                        uint8_t *pair_dest_y  = s->current_picture.data[0] + ((mb_y-1) * 16* s->linesize  ) + mb_x * 16;
-                        uint8_t *pair_dest_cb = s->current_picture.data[1] + ((mb_y-1) * 8 * s->uvlinesize) + mb_x * 8;
-                        uint8_t *pair_dest_cr = s->current_picture.data[2] + ((mb_y-1) * 8 * s->uvlinesize) + mb_x * 8;
-                        s->mb_y--;
-                        xchg_pair_border(h, pair_dest_y, pair_dest_cb, pair_dest_cr, s->linesize, s->uvlinesize, 0);
-                        s->mb_y++;
-                    }
-                } else {
-                    xchg_mb_border(h, dest_y, dest_cb, dest_cr, linesize, uvlinesize, 0);
-                }
-            }
+            if(h->deblocking_filter && !FRAME_MBAFF)
+                xchg_mb_border(h, dest_y, dest_cb, dest_cr, linesize, uvlinesize, 0);
         }else if(s->codec_id == CODEC_ID_H264){
             hl_motion(h, dest_y, dest_cb, dest_cr,
-                      s->dsp.put_h264_qpel_pixels_tab, s->dsp.put_h264_chroma_pixels_tab,
-                      s->dsp.avg_h264_qpel_pixels_tab, s->dsp.avg_h264_chroma_pixels_tab,
+                      s->me.qpel_put, s->dsp.put_h264_chroma_pixels_tab,
+                      s->me.qpel_avg, s->dsp.avg_h264_chroma_pixels_tab,
                       s->dsp.weight_h264_pixels_tab, s->dsp.biweight_h264_pixels_tab);
         }
 
@@ -3528,41 +3852,41 @@ static void hl_decode_mb(H264Context *h){
         }
     }
     if(h->deblocking_filter) {
-        if (h->mb_aff_frame) {
+        if (FRAME_MBAFF) {
+            //FIXME try deblocking one mb at a time?
+            // the reduction in load/storing mvs and such might outweigh the extra backup/xchg_border
             const int mb_y = s->mb_y - 1;
             uint8_t  *pair_dest_y, *pair_dest_cb, *pair_dest_cr;
             const int mb_xy= mb_x + mb_y*s->mb_stride;
             const int mb_type_top   = s->current_picture.mb_type[mb_xy];
             const int mb_type_bottom= s->current_picture.mb_type[mb_xy+s->mb_stride];
-            uint8_t tmp = s->current_picture.data[1][384];
             if (!bottom) return;
             pair_dest_y  = s->current_picture.data[0] + (mb_y * 16* s->linesize  ) + mb_x * 16;
             pair_dest_cb = s->current_picture.data[1] + (mb_y * 8 * s->uvlinesize) + mb_x * 8;
             pair_dest_cr = s->current_picture.data[2] + (mb_y * 8 * s->uvlinesize) + mb_x * 8;
 
+            if(IS_INTRA(mb_type_top | mb_type_bottom))
+                xchg_pair_border(h, pair_dest_y, pair_dest_cb, pair_dest_cr, s->linesize, s->uvlinesize, 0);
+
             backup_pair_border(h, pair_dest_y, pair_dest_cb, pair_dest_cr, s->linesize, s->uvlinesize);
-            // TODO deblock a pair
+            // deblock a pair
             // top
             s->mb_y--;
             tprintf("call mbaff filter_mb mb_x:%d mb_y:%d pair_dest_y = %p, dest_y = %p\n", mb_x, mb_y, pair_dest_y, dest_y);
             fill_caches(h, mb_type_top, 1); //FIXME don't fill stuff which isn't used by filter_mb
+            h->chroma_qp = get_chroma_qp(h->pps.chroma_qp_index_offset, s->current_picture.qscale_table[mb_xy]);
             filter_mb(h, mb_x, mb_y, pair_dest_y, pair_dest_cb, pair_dest_cr, linesize, uvlinesize);
-            if (tmp != s->current_picture.data[1][384]) {
-                tprintf("modified pixel 8,1 (1)\n");
-            }
             // bottom
             s->mb_y++;
             tprintf("call mbaff filter_mb\n");
             fill_caches(h, mb_type_bottom, 1); //FIXME don't fill stuff which isn't used by filter_mb
+            h->chroma_qp = get_chroma_qp(h->pps.chroma_qp_index_offset, s->current_picture.qscale_table[mb_xy+s->mb_stride]);
             filter_mb(h, mb_x, mb_y+1, dest_y, dest_cb, dest_cr, linesize, uvlinesize);
-            if (tmp != s->current_picture.data[1][384]) {
-                tprintf("modified pixel 8,1 (2)\n");
-            }
         } else {
             tprintf("call filter_mb\n");
             backup_mb_border(h, dest_y, dest_cb, dest_cr, linesize, uvlinesize);
             fill_caches(h, mb_type, 1); //FIXME don't fill stuff which isn't used by filter_mb
-            filter_mb(h, mb_x, mb_y, dest_y, dest_cb, dest_cr, linesize, uvlinesize);
+            filter_mb_fast(h, mb_x, mb_y, dest_y, dest_cb, dest_cr, linesize, uvlinesize);
         }
     }
 }
@@ -3781,6 +4105,35 @@ static int decode_ref_pic_list_reordering(H264Context *h){
     return 0;
 }
 
+static void fill_mbaff_ref_list(H264Context *h){
+    int list, i, j;
+    for(list=0; list<2; list++){
+        for(i=0; i<h->ref_count[list]; i++){
+            Picture *frame = &h->ref_list[list][i];
+            Picture *field = &h->ref_list[list][16+2*i];
+            field[0] = *frame;
+            for(j=0; j<3; j++)
+                field[0].linesize[j] <<= 1;
+            field[1] = field[0];
+            for(j=0; j<3; j++)
+                field[1].data[j] += frame->linesize[j];
+
+            h->luma_weight[list][16+2*i] = h->luma_weight[list][16+2*i+1] = h->luma_weight[list][i];
+            h->luma_offset[list][16+2*i] = h->luma_offset[list][16+2*i+1] = h->luma_offset[list][i];
+            for(j=0; j<2; j++){
+                h->chroma_weight[list][16+2*i][j] = h->chroma_weight[list][16+2*i+1][j] = h->chroma_weight[list][i][j];
+                h->chroma_offset[list][16+2*i][j] = h->chroma_offset[list][16+2*i+1][j] = h->chroma_offset[list][i][j];
+            }
+        }
+    }
+    for(j=0; j<h->ref_count[1]; j++){
+        for(i=0; i<h->ref_count[0]; i++)
+            h->implicit_weight[j][16+2*i] = h->implicit_weight[j][16+2*i+1] = h->implicit_weight[j][i];
+        memcpy(h->implicit_weight[16+2*j],   h->implicit_weight[j], sizeof(*h->implicit_weight));
+        memcpy(h->implicit_weight[16+2*j+1], h->implicit_weight[j], sizeof(*h->implicit_weight));
+    }
+}
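/* Illustrative sketch (not part of the patch): fill_mbaff_ref_list views each
 * frame reference as two field references by doubling the stride and starting
 * the bottom field one line later; this is also why ref_count is doubled while
 * an MBAFF field macroblock is being decoded.  The struct below is an
 * assumption made for this example only. */
#include <stdint.h>
struct field_view { uint8_t *data; int linesize; };

static void split_frame_into_fields(uint8_t *frame_data, int frame_linesize,
                                    struct field_view *top, struct field_view *bottom)
{
    top->data        = frame_data;                  /* even picture lines */
    top->linesize    = frame_linesize * 2;
    bottom->data     = frame_data + frame_linesize; /* odd picture lines */
    bottom->linesize = frame_linesize * 2;
}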
+
 static int pred_weight_table(H264Context *h){
     MpegEncContext * const s = &h->s;
     int list, i;
@@ -3850,7 +4203,6 @@ static void implicit_weight_table(H264Context *h){
     h->luma_log2_weight_denom= 5;
     h->chroma_log2_weight_denom= 5;
 
-    /* FIXME: MBAFF */
     for(ref0=0; ref0 < h->ref_count[0]; ref0++){
         int poc0 = h->ref_list[0][ref0].poc;
         for(ref1=0; ref1 < h->ref_count[1]; ref1++){
@@ -3909,8 +4261,13 @@ static void idr(H264Context *h){
 static void flush_dpb(AVCodecContext *avctx){
     H264Context *h= avctx->priv_data;
     int i;
-    for(i=0; i<16; i++)
+    for(i=0; i<16; i++) {
+        if(h->delayed_pic[i])
+            h->delayed_pic[i]->reference= 0;
         h->delayed_pic[i]= NULL;
+    }
+    if(h->delayed_output_pic)
+        h->delayed_output_pic->reference= 0;
     h->delayed_output_pic= NULL;
     idr(h);
     if(h->s.current_picture_ptr)
@@ -4285,8 +4642,8 @@ static int decode_slice_header(H264Context *h){
     s->mb_width= h->sps.mb_width;
     s->mb_height= h->sps.mb_height * (2 - h->sps.frame_mbs_only_flag);
 
-    h->b_stride=  s->mb_width*4 + 1;
-    h->b8_stride= s->mb_width*2 + 1;
+    h->b_stride=  s->mb_width*4;
+    h->b8_stride= s->mb_width*2;
 
     s->width = 16*s->mb_width - 2*(h->sps.crop_left + h->sps.crop_right );
     if(h->sps.frame_mbs_only_flag)
@@ -4312,14 +4669,39 @@ static int decode_slice_header(H264Context *h){
 #define T(x) (x>>2) | ((x<<2) & 0xF)
                 h->zigzag_scan[i] = T(zigzag_scan[i]);
                 h-> field_scan[i] = T( field_scan[i]);
+#undef T
+            }
+        }
+        if(s->dsp.h264_idct8_add == ff_h264_idct8_add_c){
+            memcpy(h->zigzag_scan8x8,       zigzag_scan8x8,       64*sizeof(uint8_t));
+            memcpy(h->zigzag_scan8x8_cavlc, zigzag_scan8x8_cavlc, 64*sizeof(uint8_t));
+            memcpy(h->field_scan8x8,        field_scan8x8,        64*sizeof(uint8_t));
+            memcpy(h->field_scan8x8_cavlc,  field_scan8x8_cavlc,  64*sizeof(uint8_t));
+        }else{
+            int i;
+            for(i=0; i<64; i++){
+#define T(x) (x>>3) | ((x&7)<<3)
+                h->zigzag_scan8x8[i]       = T(zigzag_scan8x8[i]);
+                h->zigzag_scan8x8_cavlc[i] = T(zigzag_scan8x8_cavlc[i]);
+                h->field_scan8x8[i]        = T(field_scan8x8[i]);
+                h->field_scan8x8_cavlc[i]  = T(field_scan8x8_cavlc[i]);
+#undef T
             }
         }
         if(h->sps.transform_bypass){ //FIXME same ugly
-            h->zigzag_scan_q0 = zigzag_scan;
-            h->field_scan_q0 = field_scan;
+            h->zigzag_scan_q0          = zigzag_scan;
+            h->zigzag_scan8x8_q0       = zigzag_scan8x8;
+            h->zigzag_scan8x8_cavlc_q0 = zigzag_scan8x8_cavlc;
+            h->field_scan_q0           = field_scan;
+            h->field_scan8x8_q0        = field_scan8x8;
+            h->field_scan8x8_cavlc_q0  = field_scan8x8_cavlc;
         }else{
-            h->zigzag_scan_q0 = h->zigzag_scan;
-            h->field_scan_q0 = h->field_scan;
+            h->zigzag_scan_q0          = h->zigzag_scan;
+            h->zigzag_scan8x8_q0       = h->zigzag_scan8x8;
+            h->zigzag_scan8x8_cavlc_q0 = h->zigzag_scan8x8_cavlc;
+            h->field_scan_q0           = h->field_scan;
+            h->field_scan8x8_q0        = h->field_scan8x8;
+            h->field_scan8x8_cavlc_q0  = h->field_scan8x8_cavlc;
         }
 
         alloc_tables(h);
@@ -4347,21 +4729,22 @@ static int decode_slice_header(H264Context *h){
     s->current_picture_ptr->frame_num= //FIXME frame_num cleanup
     h->frame_num= get_bits(&s->gb, h->sps.log2_max_frame_num);
 
+    h->mb_mbaff = 0;
     h->mb_aff_frame = 0;
     if(h->sps.frame_mbs_only_flag){
         s->picture_structure= PICT_FRAME;
     }else{
         if(get_bits1(&s->gb)) { //field_pic_flag
             s->picture_structure= PICT_TOP_FIELD + get_bits1(&s->gb); //bottom_field_flag
+            av_log(h->s.avctx, AV_LOG_ERROR, "PAFF interlacing is not implemented\n");
         } else {
             s->picture_structure= PICT_FRAME;
-            first_mb_in_slice <<= h->sps.mb_aff;
             h->mb_aff_frame = h->sps.mb_aff;
         }
     }
 
     s->resync_mb_x = s->mb_x = first_mb_in_slice % s->mb_width;
-    s->resync_mb_y = s->mb_y = first_mb_in_slice / s->mb_width;
+    s->resync_mb_y = s->mb_y = (first_mb_in_slice / s->mb_width) << h->mb_aff_frame;
     if(s->mb_y >= s->mb_height){
         return -1;
     }
@@ -4406,6 +4789,8 @@ static int decode_slice_header(H264Context *h){
     if(h->slice_type == P_TYPE || h->slice_type == SP_TYPE || h->slice_type == B_TYPE){
         if(h->slice_type == B_TYPE){
             h->direct_spatial_mv_pred= get_bits1(&s->gb);
+            if(h->sps.mb_aff && h->direct_spatial_mv_pred)
+                av_log(h->s.avctx, AV_LOG_ERROR, "MBAFF + spatial direct mode is not implemented\n");
         }
         num_ref_idx_active_override_flag= get_bits1(&s->gb);
 
@@ -4439,6 +4824,9 @@ static int decode_slice_header(H264Context *h){
     if(s->current_picture.reference)
         decode_ref_pic_marking(h);
 
+    if(FRAME_MBAFF)
+        fill_mbaff_ref_list(h);
+
     if( h->slice_type != I_TYPE && h->slice_type != SI_TYPE && h->pps.cabac )
         h->cabac_init_idc = get_ue_golomb(&s->gb);
 
@@ -4483,6 +4871,9 @@ static int decode_slice_header(H264Context *h){
 
     h->slice_num++;
 
+    h->emu_edge_width= (s->flags&CODEC_FLAG_EMU_EDGE) ? 0 : 16;
+    h->emu_edge_height= FRAME_MBAFF ? 0 : h->emu_edge_width;
+
     if(s->avctx->debug&FF_DEBUG_PICT_INFO){
         av_log(h->s.avctx, AV_LOG_DEBUG, "slice:%d %s mb:%d %c pps:%d frame:%d poc:%d/%d ref:%d/%d qp:%d loop:%d:%d:%d weight:%d%s\n",
                h->slice_num,
@@ -4499,6 +4890,14 @@ static int decode_slice_header(H264Context *h){
                );
     }
 
+    if((s->avctx->flags2 & CODEC_FLAG2_FAST) && !s->current_picture.reference){
+        s->me.qpel_put= s->dsp.put_2tap_qpel_pixels_tab;
+        s->me.qpel_avg= s->dsp.avg_2tap_qpel_pixels_tab;
+    }else{
+        s->me.qpel_put= s->dsp.put_h264_qpel_pixels_tab;
+        s->me.qpel_avg= s->dsp.avg_h264_qpel_pixels_tab;
+    }
+
     return 0;
 }
 
@@ -4684,6 +5083,17 @@ static int decode_residual(H264Context *h, GetBitContext *gb, DCTELEM *block, in
     return 0;
 }
 
+static void predict_field_decoding_flag(H264Context *h){
+    MpegEncContext * const s = &h->s;
+    const int mb_xy= s->mb_x + s->mb_y*s->mb_stride;
+    int mb_type = (h->slice_table[mb_xy-1] == h->slice_num)
+                ? s->current_picture.mb_type[mb_xy-1]
+                : (h->slice_table[mb_xy-s->mb_stride] == h->slice_num)
+                ? s->current_picture.mb_type[mb_xy-s->mb_stride]
+                : 0;
+    h->mb_mbaff = h->mb_field_decoding_flag = IS_INTERLACED(mb_type) ? 1 : 0;
+}
+
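/* Illustrative sketch (not part of the patch): the chained ?: in
 * predict_field_decoding_flag prefers the left neighbour's mb_type, falls back
 * to the top neighbour, and uses 0 (progressive) when neither belongs to the
 * current slice.  The flat arrays below stand in for the decoder's tables and
 * are assumptions made for this example only. */
#include <stdint.h>
static int neighbour_mb_type(const uint8_t *slice_table, const uint32_t *mb_type_table,
                             int slice_num, int mb_xy, int mb_stride)
{
    if (slice_table[mb_xy - 1] == slice_num)
        return mb_type_table[mb_xy - 1];          /* left neighbour first */
    if (slice_table[mb_xy - mb_stride] == slice_num)
        return mb_type_table[mb_xy - mb_stride];  /* then top neighbour */
    return 0;                                     /* default: not interlaced */
}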
 /**
  * decodes a P_SKIP or B_SKIP macroblock
  */
@@ -4695,10 +5105,7 @@ static void decode_mb_skip(H264Context *h){
     memset(h->non_zero_count[mb_xy], 0, 16);
     memset(h->non_zero_count_cache + 8, 0, 8*5); //FIXME ugly, remove pfui
 
-    if(h->mb_aff_frame && s->mb_skip_run==0 && (s->mb_y&1)==0){
-        h->mb_field_decoding_flag= get_bits1(&s->gb);
-    }
-    if(h->mb_field_decoding_flag)
+    if(MB_FIELD)
         mb_type|= MB_TYPE_INTERLACED;
 
     if( h->slice_type == B_TYPE )
@@ -4708,10 +5115,7 @@ static void decode_mb_skip(H264Context *h){
 
         fill_caches(h, mb_type, 0); //FIXME check what is needed and what not ...
         pred_direct_motion(h, &mb_type);
-        if(h->pps.cabac){
-            fill_rectangle(h->mvd_cache[0][scan8[0]], 4, 4, 8, 0, 4);
-            fill_rectangle(h->mvd_cache[1][scan8[0]], 4, 4, 8, 0, 4);
-        }
+        mb_type|= MB_TYPE_SKIP;
     }
     else
     {
@@ -4722,12 +5126,10 @@ static void decode_mb_skip(H264Context *h){
         pred_pskip_motion(h, &mx, &my);
         fill_rectangle(&h->ref_cache[0][scan8[0]], 4, 4, 8, 0, 1);
         fill_rectangle(  h->mv_cache[0][scan8[0]], 4, 4, 8, pack16to32(mx,my), 4);
-        if(h->pps.cabac)
-            fill_rectangle(h->mvd_cache[0][scan8[0]], 4, 4, 8, 0, 4);
     }
 
     write_back_motion(h, mb_type);
-    s->current_picture.mb_type[mb_xy]= mb_type|MB_TYPE_SKIP;
+    s->current_picture.mb_type[mb_xy]= mb_type;
     s->current_picture.qscale_table[mb_xy]= s->qscale;
     h->slice_table[ mb_xy ]= h->slice_num;
     h->prev_mb_skipped= 1;
@@ -4753,13 +5155,19 @@ static int decode_mb_cavlc(H264Context *h){
             s->mb_skip_run= get_ue_golomb(&s->gb);
 
         if (s->mb_skip_run--) {
+            if(FRAME_MBAFF && (s->mb_y&1) == 0){
+                if(s->mb_skip_run==0)
+                    h->mb_mbaff = h->mb_field_decoding_flag = get_bits1(&s->gb);
+                else
+                    predict_field_decoding_flag(h);
+            }
             decode_mb_skip(h);
             return 0;
         }
     }
-    if(h->mb_aff_frame){
-        if ( ((s->mb_y&1) == 0) || h->prev_mb_skipped)
-            h->mb_field_decoding_flag = get_bits1(&s->gb);
+    if(FRAME_MBAFF){
+        if( (s->mb_y&1) == 0 )
+            h->mb_mbaff = h->mb_field_decoding_flag = get_bits1(&s->gb);
     }else
         h->mb_field_decoding_flag= (s->picture_structure!=PICT_FRAME);
 
@@ -4795,7 +5203,7 @@ decode_intra_mb:
         mb_type= i_mb_type_info[mb_type].type;
     }
 
-    if(h->mb_field_decoding_flag)
+    if(MB_FIELD)
         mb_type |= MB_TYPE_INTERLACED;
 
     h->slice_table[ mb_xy ]= h->slice_num;
@@ -4839,6 +5247,11 @@ decode_intra_mb:
         return 0;
     }
 
+    if(MB_MBAFF){
+        h->ref_count[0] <<= 1;
+        h->ref_count[1] <<= 1;
+    }
+
     fill_caches(h, mb_type, 0);
 
     //mb_pred
@@ -4854,18 +5267,11 @@ decode_intra_mb:
 
 //                fill_intra4x4_pred_table(h);
                 for(i=0; i<16; i+=di){
-                    const int mode_coded= !get_bits1(&s->gb);
-                    const int predicted_mode=  pred_intra_mode(h, i);
-                    int mode;
+                    int mode= pred_intra_mode(h, i);
 
-                    if(mode_coded){
+                    if(!get_bits1(&s->gb)){
                         const int rem_mode= get_bits(&s->gb, 3);
-                        if(rem_mode<predicted_mode)
-                            mode= rem_mode;
-                        else
-                            mode= rem_mode + 1;
-                    }else{
-                        mode= predicted_mode;
+                        mode = rem_mode + (rem_mode >= mode);
                     }
 
                     if(di==4)
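/* Illustrative sketch (not part of the patch): the new branchless form above
 * is equivalent to the removed if/else.  rem_intra4x4_pred_mode never encodes
 * the predicted mode itself, so coded values at or above the prediction are
 * shifted up by one. */
static inline int remap_intra_pred_mode(int rem_mode, int predicted_mode)
{
    return rem_mode + (rem_mode >= predicted_mode);
    /* same as: rem_mode < predicted_mode ? rem_mode : rem_mode + 1 */
}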
@@ -4923,9 +5329,6 @@ decode_intra_mb:
         for(list=0; list<2; list++){
             int ref_count= IS_REF0(mb_type) ? 1 : h->ref_count[list];
             if(ref_count == 0) continue;
-            if (h->mb_aff_frame && h->mb_field_decoding_flag) {
-                ref_count <<= 1;
-            }
             for(i=0; i<4; i++){
                 if(IS_DIRECT(h->sub_mb_type[i])) continue;
                 if(IS_DIR(h->sub_mb_type[i], 0, list)){
@@ -5085,6 +5488,7 @@ decode_intra_mb:
         else
             cbp= golomb_to_inter_cbp[cbp];
     }
+    h->cbp = cbp;
 
     if(dct8x8_allowed && (cbp&15) && !IS_INTRA(mb_type)){
         if(get_bits1(&s->gb))
@@ -5096,14 +5500,16 @@ decode_intra_mb:
         int i8x8, i4x4, chroma_idx;
         int chroma_qp, dquant;
         GetBitContext *gb= IS_INTRA(mb_type) ? h->intra_gb_ptr : h->inter_gb_ptr;
-        const uint8_t *scan, *dc_scan;
+        const uint8_t *scan, *scan8x8, *dc_scan;
 
 //        fill_non_zero_count_cache(h);
 
         if(IS_INTERLACED(mb_type)){
+            scan8x8= s->qscale ? h->field_scan8x8_cavlc : h->field_scan8x8_cavlc_q0;
             scan= s->qscale ? h->field_scan : h->field_scan_q0;
             dc_scan= luma_dc_field_scan;
         }else{
+            scan8x8= s->qscale ? h->zigzag_scan8x8_cavlc : h->zigzag_scan8x8_cavlc_q0;
             scan= s->qscale ? h->zigzag_scan : h->zigzag_scan_q0;
             dc_scan= luma_dc_zigzag_scan;
         }
@@ -5148,7 +5554,7 @@ decode_intra_mb:
                         DCTELEM *buf = &h->mb[64*i8x8];
                         uint8_t *nnz;
                         for(i4x4=0; i4x4<4; i4x4++){
-                            if( decode_residual(h, gb, buf, i4x4+4*i8x8, zigzag_scan8x8_cavlc+16*i4x4,
+                            if( decode_residual(h, gb, buf, i4x4+4*i8x8, scan8x8+16*i4x4,
                                                 h->dequant8_coeff[IS_INTRA( mb_type ) ? 0:1][s->qscale], 16) <0 )
                                 return -1;
                         }
@@ -5200,6 +5606,11 @@ decode_intra_mb:
     s->current_picture.qscale_table[mb_xy]= s->qscale;
     write_back_non_zero_count(h);
 
+    if(MB_MBAFF){
+        h->ref_count[0] >>= 1;
+        h->ref_count[1] >>= 1;
+    }
+
     return 0;
 }
 
@@ -5247,19 +5658,11 @@ static int decode_cabac_intra_mb_type(H264Context *h, int ctx_base, int intra_sl
         return 25;  /* PCM */
 
     mb_type = 1; /* I16x16 */
-    if( get_cabac( &h->cabac, &state[1] ) )
-        mb_type += 12;  /* cbp_luma != 0 */
-
-    if( get_cabac( &h->cabac, &state[2] ) ) {
-        if( get_cabac( &h->cabac, &state[2+intra_slice] ) )
-            mb_type += 4 * 2;   /* cbp_chroma == 2 */
-        else
-            mb_type += 4 * 1;   /* cbp_chroma == 1 */
-    }
-    if( get_cabac( &h->cabac, &state[3+intra_slice] ) )
-        mb_type += 2;
-    if( get_cabac( &h->cabac, &state[3+2*intra_slice] ) )
-        mb_type += 1;
+    mb_type += 12 * get_cabac( &h->cabac, &state[1] ); /* cbp_luma != 0 */
+    if( get_cabac( &h->cabac, &state[2] ) ) /* cbp_chroma */
+        mb_type += 4 + 4 * get_cabac( &h->cabac, &state[2+intra_slice] );
+    mb_type += 2 * get_cabac( &h->cabac, &state[3+intra_slice] );
+    mb_type += 1 * get_cabac( &h->cabac, &state[3+2*intra_slice] );
     return mb_type;
 }
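/* Illustrative sketch (not part of the patch): the rewritten decoder still
 * composes the Intra_16x16 mb_type index the same way (cf. the I-macroblock
 * type table in the H.264 spec); it just folds each CABAC bin directly into
 * the sum instead of branching on it. */
static inline int i16x16_mb_type(int pred_mode, int cbp_chroma, int cbp_luma_nonzero)
{
    /* pred_mode in 0..3, cbp_chroma in 0..2, cbp_luma_nonzero in 0..1 */
    return 1 + pred_mode + 4 * cbp_chroma + 12 * cbp_luma_nonzero;
}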
 
@@ -5272,15 +5675,11 @@ static int decode_cabac_mb_type( H264Context *h ) {
         if( get_cabac( &h->cabac, &h->cabac_state[14] ) == 0 ) {
             /* P-type */
             if( get_cabac( &h->cabac, &h->cabac_state[15] ) == 0 ) {
-                if( get_cabac( &h->cabac, &h->cabac_state[16] ) == 0 )
-                    return 0; /* P_L0_D16x16; */
-                else
-                    return 3; /* P_8x8; */
+                /* P_L0_D16x16, P_8x8 */
+                return 3 * get_cabac( &h->cabac, &h->cabac_state[16] );
             } else {
-                if( get_cabac( &h->cabac, &h->cabac_state[17] ) == 0 )
-                    return 2; /* P_L0_D8x16; */
-                else
-                    return 1; /* P_L0_D16x8; */
+                /* P_L0_D8x16, P_L0_D16x8 */
+                return 2 - get_cabac( &h->cabac, &h->cabac_state[17] );
             }
         } else {
             return decode_cabac_intra_mb_type(h, 17, 0) + 5;
@@ -5291,11 +5690,9 @@ static int decode_cabac_mb_type( H264Context *h ) {
         int ctx = 0;
         int bits;
 
-        if( h->slice_table[mba_xy] == h->slice_num && !IS_SKIP( s->current_picture.mb_type[mba_xy] )
-                      && !IS_DIRECT( s->current_picture.mb_type[mba_xy] ) )
+        if( h->slice_table[mba_xy] == h->slice_num && !IS_DIRECT( s->current_picture.mb_type[mba_xy] ) )
             ctx++;
-        if( h->slice_table[mbb_xy] == h->slice_num && !IS_SKIP( s->current_picture.mb_type[mbb_xy] )
-                      && !IS_DIRECT( s->current_picture.mb_type[mbb_xy] ) )
+        if( h->slice_table[mbb_xy] == h->slice_num && !IS_DIRECT( s->current_picture.mb_type[mbb_xy] ) )
             ctx++;
 
         if( !get_cabac( &h->cabac, &h->cabac_state[27+ctx] ) )
@@ -5326,22 +5723,40 @@ static int decode_cabac_mb_type( H264Context *h ) {
     }
 }
 
-static int decode_cabac_mb_skip( H264Context *h) {
+static int decode_cabac_mb_skip( H264Context *h, int mb_x, int mb_y ) {
     MpegEncContext * const s = &h->s;
-    const int mb_xy = s->mb_x + s->mb_y*s->mb_stride;
-    const int mba_xy = mb_xy - 1;
-    const int mbb_xy = mb_xy - s->mb_stride;
+    int mba_xy, mbb_xy;
     int ctx = 0;
 
+    if(FRAME_MBAFF){ //FIXME merge with the stuff in fill_caches?
+        int mb_xy = mb_x + (mb_y&~1)*s->mb_stride;
+        mba_xy = mb_xy - 1;
+        if( (mb_y&1)
+            && h->slice_table[mba_xy] == h->slice_num
+            && MB_FIELD == !!IS_INTERLACED( s->current_picture.mb_type[mba_xy] ) )
+            mba_xy += s->mb_stride;
+        if( MB_FIELD ){
+            mbb_xy = mb_xy - s->mb_stride;
+            if( !(mb_y&1)
+                && h->slice_table[mbb_xy] == h->slice_num
+                && IS_INTERLACED( s->current_picture.mb_type[mbb_xy] ) )
+                mbb_xy -= s->mb_stride;
+        }else
+            mbb_xy = mb_x + (mb_y-1)*s->mb_stride;
+    }else{
+        int mb_xy = mb_x + mb_y*s->mb_stride;
+        mba_xy = mb_xy - 1;
+        mbb_xy = mb_xy - s->mb_stride;
+    }
+
     if( h->slice_table[mba_xy] == h->slice_num && !IS_SKIP( s->current_picture.mb_type[mba_xy] ))
         ctx++;
     if( h->slice_table[mbb_xy] == h->slice_num && !IS_SKIP( s->current_picture.mb_type[mbb_xy] ))
         ctx++;
 
-    if( h->slice_type == P_TYPE || h->slice_type == SP_TYPE)
-        return get_cabac( &h->cabac, &h->cabac_state[11+ctx] );
-    else /* B-frame */
-        return get_cabac( &h->cabac, &h->cabac_state[24+ctx] );
+    if( h->slice_type == B_TYPE )
+        ctx += 13;
+    return get_cabac( &h->cabac, &h->cabac_state[11+ctx] );
 }
 
 static int decode_cabac_mb_intra4x4_pred_mode( H264Context *h, int pred_mode ) {
@@ -5398,14 +5813,17 @@ static const uint8_t block_idx_xy[4][4] = {
 };
 
 static int decode_cabac_mb_cbp_luma( H264Context *h) {
-    MpegEncContext * const s = &h->s;
-
     int cbp = 0;
+    int cbp_b = -1;
     int i8x8;
 
+    if( h->slice_table[h->top_mb_xy] == h->slice_num ) {
+        cbp_b = h->top_cbp;
+        tprintf("cbp_b = top_cbp = %x\n", cbp_b);
+    }
+
     for( i8x8 = 0; i8x8 < 4; i8x8++ ) {
         int cbp_a = -1;
-        int cbp_b = -1;
         int x, y;
         int ctx = 0;
 
@@ -5414,17 +5832,13 @@ static int decode_cabac_mb_cbp_luma( H264Context *h) {
 
         if( x > 0 )
             cbp_a = cbp;
-        else if( s->mb_x > 0 && (h->slice_table[h->left_mb_xy[0]] == h->slice_num)) {
+        else if( h->slice_table[h->left_mb_xy[0]] == h->slice_num ) {
             cbp_a = h->left_cbp;
             tprintf("cbp_a = left_cbp = %x\n", cbp_a);
         }
 
         if( y > 0 )
             cbp_b = cbp;
-        else if( s->mb_y > 0 && (h->slice_table[h->top_mb_xy] == h->slice_num)) {
-            cbp_b = h->top_cbp;
-            tprintf("cbp_b = top_cbp = %x\n", cbp_b);
-        }
 
         /* No need to test for skip as we put 0 for skip block */
         /* No need to test for IPCM as we put 1 for IPCM block */
@@ -5475,7 +5889,7 @@ static int decode_cabac_mb_dqp( H264Context *h) {
     else
         mbn_xy = s->mb_width - 1 + (s->mb_y-1)*s->mb_stride;
 
-    if( h->last_qscale_diff != 0 && ( IS_INTRA16x16(s->current_picture.mb_type[mbn_xy] ) || (h->cbp_table[mbn_xy]&0x3f) ) )
+    if( h->last_qscale_diff != 0 )
         ctx++;
 
     while( get_cabac( &h->cabac, &h->cabac_state[60 + ctx] ) ) {
@@ -5620,16 +6034,26 @@ static int inline get_cabac_cbf_ctx( H264Context *h, int cat, int idx ) {
 
 static int decode_cabac_residual( H264Context *h, DCTELEM *block, int cat, int n, const uint8_t *scantable, const uint32_t *qmul, int max_coeff) {
     const int mb_xy  = h->s.mb_x + h->s.mb_y*h->s.mb_stride;
-    static const int significant_coeff_flag_field_offset[2] = { 105, 277 };
-    static const int last_significant_coeff_flag_field_offset[2] = { 166, 338 };
-    static const int significant_coeff_flag_offset[6] = { 0, 15, 29, 44, 47, 297 };
-    static const int last_significant_coeff_flag_offset[6] = { 0, 15, 29, 44, 47, 251 };
-    static const int coeff_abs_level_m1_offset[6] = { 227+0, 227+10, 227+20, 227+30, 227+39, 426 };
-    static const int significant_coeff_flag_offset_8x8[63] = {
-        0, 1, 2, 3, 4, 5, 5, 4, 4, 3, 3, 4, 4, 4, 5, 5,
+    static const int significant_coeff_flag_offset[2][6] = {
+      { 105+0, 105+15, 105+29, 105+44, 105+47, 402 },
+      { 277+0, 277+15, 277+29, 277+44, 277+47, 436 }
+    };
+    static const int last_coeff_flag_offset[2][6] = {
+      { 166+0, 166+15, 166+29, 166+44, 166+47, 417 },
+      { 338+0, 338+15, 338+29, 338+44, 338+47, 451 }
+    };
+    static const int coeff_abs_level_m1_offset[6] = {
+        227+0, 227+10, 227+20, 227+30, 227+39, 426
+    };
+    static const int significant_coeff_flag_offset_8x8[2][63] = {
+      { 0, 1, 2, 3, 4, 5, 5, 4, 4, 3, 3, 4, 4, 4, 5, 5,
         4, 4, 4, 4, 3, 3, 6, 7, 7, 7, 8, 9,10, 9, 8, 7,
         7, 6,11,12,13,11, 6, 7, 8, 9,14,10, 9, 8, 6,11,
-       12,13,11, 6, 9,14,10, 9,11,12,13,11,14,10,12
+       12,13,11, 6, 9,14,10, 9,11,12,13,11,14,10,12 },
+      { 0, 1, 1, 2, 2, 3, 3, 4, 5, 6, 7, 7, 7, 8, 4, 5,
+        6, 9,10,10, 8,11,12,11, 9, 9,10,10, 8,11,12,11,
+        9, 9,10,10, 8,11,12,11, 9, 9,10,10, 8,13,13, 9,
+        9,10,10, 8,13,13, 9, 9,10,10,14,14,14,14,14 }
     };
     static const int last_coeff_flag_offset_8x8[63] = {
         0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
@@ -5671,11 +6095,9 @@ static int decode_cabac_residual( H264Context *h, DCTELEM *block, int cat, int n
     }
 
     significant_coeff_ctx_base = h->cabac_state
-        + significant_coeff_flag_offset[cat]
-        + significant_coeff_flag_field_offset[h->mb_field_decoding_flag];
+        + significant_coeff_flag_offset[MB_FIELD][cat];
     last_coeff_ctx_base = h->cabac_state
-        + last_significant_coeff_flag_offset[cat]
-        + last_significant_coeff_flag_field_offset[h->mb_field_decoding_flag];
+        + last_coeff_flag_offset[MB_FIELD][cat];
     abs_level_m1_ctx_base = h->cabac_state
         + coeff_abs_level_m1_offset[cat];
 
@@ -5692,8 +6114,8 @@ static int decode_cabac_residual( H264Context *h, DCTELEM *block, int cat, int n
                 } \
             } \
         }
-        DECODE_SIGNIFICANCE( 63, significant_coeff_flag_offset_8x8[last],
-                                 last_coeff_flag_offset_8x8[last] );
+        const int *sig_off = significant_coeff_flag_offset_8x8[MB_FIELD];
+        DECODE_SIGNIFICANCE( 63, sig_off[last], last_coeff_flag_offset_8x8[last] );
     } else {
         DECODE_SIGNIFICANCE( max_coeff - 1, last, last );
     }
@@ -5769,12 +6191,12 @@ static void inline compute_mb_neighbors(H264Context *h)
     const int mb_xy  = s->mb_x + s->mb_y*s->mb_stride;
     h->top_mb_xy     = mb_xy - s->mb_stride;
     h->left_mb_xy[0] = mb_xy - 1;
-    if(h->mb_aff_frame){
+    if(FRAME_MBAFF){
         const int pair_xy          = s->mb_x     + (s->mb_y & ~1)*s->mb_stride;
         const int top_pair_xy      = pair_xy     - s->mb_stride;
         const int top_mb_frame_flag      = !IS_INTERLACED(s->current_picture.mb_type[top_pair_xy]);
         const int left_mb_frame_flag = !IS_INTERLACED(s->current_picture.mb_type[pair_xy-1]);
-        const int curr_mb_frame_flag = !h->mb_field_decoding_flag;
+        const int curr_mb_frame_flag = !MB_FIELD;
         const int bottom = (s->mb_y & 1);
         if (bottom
                 ? !curr_mb_frame_flag // bottom macroblock
@@ -5803,8 +6225,25 @@ static int decode_mb_cabac(H264Context *h) {
 
     tprintf("pic:%d mb:%d/%d\n", h->frame_num, s->mb_x, s->mb_y);
     if( h->slice_type != I_TYPE && h->slice_type != SI_TYPE ) {
+        int skip;
+        /* a skipped mb needs the aff flag from the following mb */
+        if( FRAME_MBAFF && s->mb_x==0 && (s->mb_y&1)==0 )
+            predict_field_decoding_flag(h);
+        if( FRAME_MBAFF && (s->mb_y&1)==1 && h->prev_mb_skipped )
+            skip = h->next_mb_skipped;
+        else
+            skip = decode_cabac_mb_skip( h, s->mb_x, s->mb_y );
         /* read skip flags */
-        if( decode_cabac_mb_skip( h ) ) {
+        if( skip ) {
+            if( FRAME_MBAFF && (s->mb_y&1)==0 ){
+                s->current_picture.mb_type[mb_xy] = MB_TYPE_SKIP;
+                h->next_mb_skipped = decode_cabac_mb_skip( h, s->mb_x, s->mb_y+1 );
+                if(h->next_mb_skipped)
+                    predict_field_decoding_flag(h);
+                else
+                    h->mb_mbaff = h->mb_field_decoding_flag = decode_cabac_field_decoding_flag(h);
+            }
+
             decode_mb_skip(h);
 
             h->cbp_table[mb_xy] = 0;
@@ -5815,8 +6254,9 @@ static int decode_mb_cabac(H264Context *h) {
 
         }
     }
-    if(h->mb_aff_frame){
-        if ( ((s->mb_y&1) == 0) || h->prev_mb_skipped)
+    if(FRAME_MBAFF){
+        if( (s->mb_y&1) == 0 )
+            h->mb_mbaff =
             h->mb_field_decoding_flag = decode_cabac_field_decoding_flag(h);
     }else
         h->mb_field_decoding_flag= (s->picture_structure!=PICT_FRAME);
@@ -5853,7 +6293,7 @@ decode_intra_mb:
         h->intra16x16_pred_mode= i_mb_type_info[mb_type].pred_mode;
         mb_type= i_mb_type_info[mb_type].type;
     }
-    if(h->mb_field_decoding_flag)
+    if(MB_FIELD)
         mb_type |= MB_TYPE_INTERLACED;
 
     h->slice_table[ mb_xy ]= h->slice_num;
@@ -5905,6 +6345,11 @@ decode_intra_mb:
         return 0;
     }
 
+    if(MB_MBAFF){
+        h->ref_count[0] <<= 1;
+        h->ref_count[1] <<= 1;
+    }
+
     fill_caches(h, mb_type, 0);
 
     if( IS_INTRA( mb_type ) ) {
@@ -5945,8 +6390,8 @@ decode_intra_mb:
                 sub_partition_count[i]= b_sub_mb_type_info[ h->sub_mb_type[i] ].partition_count;
                 h->sub_mb_type[i]=      b_sub_mb_type_info[ h->sub_mb_type[i] ].type;
             }
-            if(   IS_DIRECT(h->sub_mb_type[0]) || IS_DIRECT(h->sub_mb_type[1])
-               || IS_DIRECT(h->sub_mb_type[2]) || IS_DIRECT(h->sub_mb_type[3])) {
+            if( IS_DIRECT(h->sub_mb_type[0] | h->sub_mb_type[1] |
+                          h->sub_mb_type[2] | h->sub_mb_type[3]) ) {
                 pred_direct_motion(h, &mb_type);
                 if( h->ref_count[0] > 1 || h->ref_count[1] > 1 ) {
                     for( i = 0; i < 4; i++ )
@@ -6146,7 +6591,7 @@ decode_intra_mb:
         cbp |= decode_cabac_mb_cbp_chroma( h ) << 4;
     }
 
-    h->cbp_table[mb_xy] = cbp;
+    h->cbp_table[mb_xy] = h->cbp = cbp;
 
     if( dct8x8_allowed && (cbp&15) && !IS_INTRA( mb_type ) ) {
         if( decode_cabac_mb_transform_size( h ) )
@@ -6155,13 +6600,15 @@ decode_intra_mb:
     s->current_picture.mb_type[mb_xy]= mb_type;
 
     if( cbp || IS_INTRA16x16( mb_type ) ) {
-        const uint8_t *scan, *dc_scan;
+        const uint8_t *scan, *scan8x8, *dc_scan;
         int dqp;
 
         if(IS_INTERLACED(mb_type)){
+            scan8x8= s->qscale ? h->field_scan8x8 : h->field_scan8x8_q0;
             scan= s->qscale ? h->field_scan : h->field_scan_q0;
             dc_scan= luma_dc_field_scan;
         }else{
+            scan8x8= s->qscale ? h->zigzag_scan8x8 : h->zigzag_scan8x8_q0;
             scan= s->qscale ? h->zigzag_scan : h->zigzag_scan_q0;
             dc_scan= luma_dc_zigzag_scan;
         }
@@ -6198,7 +6645,7 @@ decode_intra_mb:
                 if( cbp & (1<<i8x8) ) {
                     if( IS_8x8DCT(mb_type) ) {
                         if( decode_cabac_residual(h, h->mb + 64*i8x8, 5, 4*i8x8,
-                                zigzag_scan8x8, h->dequant8_coeff[IS_INTRA( mb_type ) ? 0:1][s->qscale], 64) < 0 )
+                            scan8x8, h->dequant8_coeff[IS_INTRA( mb_type ) ? 0:1][s->qscale], 64) < 0 )
                             return -1;
                     } else
                     for( i4x4 = 0; i4x4 < 4; i4x4++ ) {
@@ -6243,16 +6690,22 @@ decode_intra_mb:
         fill_rectangle(&nnz[scan8[0]], 4, 4, 8, 0, 1);
         nnz[ scan8[16]+0 ] = nnz[ scan8[16]+1 ] =nnz[ scan8[16]+8 ] =nnz[ scan8[16]+9 ] =
         nnz[ scan8[20]+0 ] = nnz[ scan8[20]+1 ] =nnz[ scan8[20]+8 ] =nnz[ scan8[20]+9 ] = 0;
+        h->last_qscale_diff = 0;
     }
 
     s->current_picture.qscale_table[mb_xy]= s->qscale;
     write_back_non_zero_count(h);
 
+    if(MB_MBAFF){
+        h->ref_count[0] >>= 1;
+        h->ref_count[1] >>= 1;
+    }
+
     return 0;
 }
 
 
-static void filter_mb_edgev( H264Context *h, uint8_t *pix, int stride, int bS[4], int qp ) {
+static void filter_mb_edgev( H264Context *h, uint8_t *pix, int stride, int16_t bS[4], int qp ) {
     int i, d;
     const int index_a = clip( qp + h->slice_alpha_c0_offset, 0, 51 );
     const int alpha = alpha_table[index_a];
@@ -6313,7 +6766,7 @@ static void filter_mb_edgev( H264Context *h, uint8_t *pix, int stride, int bS[4]
             }
     }
 }
-static void filter_mb_edgecv( H264Context *h, uint8_t *pix, int stride, int bS[4], int qp ) {
+static void filter_mb_edgecv( H264Context *h, uint8_t *pix, int stride, int16_t bS[4], int qp ) {
     int i;
     const int index_a = clip( qp + h->slice_alpha_c0_offset, 0, 51 );
     const int alpha = alpha_table[index_a];
@@ -6329,7 +6782,7 @@ static void filter_mb_edgecv( H264Context *h, uint8_t *pix, int stride, int bS[4
     }
 }
 
-static void filter_mb_mbaff_edgev( H264Context *h, uint8_t *pix, int stride, int bS[8], int qp[2] ) {
+static void filter_mb_mbaff_edgev( H264Context *h, uint8_t *pix, int stride, int16_t bS[8], int qp[2] ) {
     int i;
     for( i = 0; i < 16; i++, pix += stride) {
         int index_a;
@@ -6338,7 +6791,7 @@ static void filter_mb_mbaff_edgev( H264Context *h, uint8_t *pix, int stride, int
 
         int qp_index;
         int bS_index = (i >> 1);
-        if (h->mb_field_decoding_flag) {
+        if (!MB_FIELD) {
             bS_index &= ~1;
             bS_index |= (i & 1);
         }
@@ -6347,15 +6800,13 @@ static void filter_mb_mbaff_edgev( H264Context *h, uint8_t *pix, int stride, int
             continue;
         }
 
-        qp_index = h->mb_field_decoding_flag ? (i & 1) : (i >> 3);
+        qp_index = MB_FIELD ? (i >> 3) : (i & 1);
         index_a = clip( qp[qp_index] + h->slice_alpha_c0_offset, 0, 51 );
         alpha = alpha_table[index_a];
         beta  = beta_table[clip( qp[qp_index] + h->slice_beta_offset, 0, 51 )];
 
-
         if( bS[bS_index] < 4 ) {
             const int tc0 = tc0_table[index_a][bS[bS_index] - 1];
-            /* 4px edge length */
             const int p0 = pix[-1];
             const int p1 = pix[-2];
             const int p2 = pix[-3];
@@ -6384,7 +6835,6 @@ static void filter_mb_mbaff_edgev( H264Context *h, uint8_t *pix, int stride, int
                 tprintf("filter_mb_mbaff_edgev i:%d, qp:%d, indexA:%d, alpha:%d, beta:%d, tc:%d\n# bS:%d -> [%02x, %02x, %02x, %02x, %02x, %02x] =>[%02x, %02x, %02x, %02x]\n", i, qp[qp_index], index_a, alpha, beta, tc, bS[bS_index], pix[-3], p1, p0, q0, q1, pix[2], p1, pix[-1], pix[0], q1);
             }
         }else{
-            /* 4px edge length */
             const int p0 = pix[-1];
             const int p1 = pix[-2];
             const int p2 = pix[-3];
@@ -6430,7 +6880,7 @@ static void filter_mb_mbaff_edgev( H264Context *h, uint8_t *pix, int stride, int
         }
     }
 }
-static void filter_mb_mbaff_edgecv( H264Context *h, uint8_t *pix, int stride, int bS[4], int qp[2] ) {
+static void filter_mb_mbaff_edgecv( H264Context *h, uint8_t *pix, int stride, int16_t bS[8], int qp[2] ) {
     int i;
     for( i = 0; i < 8; i++, pix += stride) {
         int index_a;
@@ -6444,13 +6894,13 @@ static void filter_mb_mbaff_edgecv( H264Context *h, uint8_t *pix, int stride, in
             continue;
         }
 
-        qp_index = h->mb_field_decoding_flag ? (i & 1) : (i >> 3);
+        qp_index = MB_FIELD ? (i >> 2) : (i & 1);
         index_a = clip( qp[qp_index] + h->slice_alpha_c0_offset, 0, 51 );
         alpha = alpha_table[index_a];
         beta  = beta_table[clip( qp[qp_index] + h->slice_beta_offset, 0, 51 )];
+
         if( bS[bS_index] < 4 ) {
             const int tc = tc0_table[index_a][bS[bS_index] - 1] + 1;
-            /* 2px edge length (because we use same bS than the one for luma) */
             const int p0 = pix[-1];
             const int p1 = pix[-2];
             const int q0 = pix[0];
@@ -6483,7 +6933,7 @@ static void filter_mb_mbaff_edgecv( H264Context *h, uint8_t *pix, int stride, in
     }
 }
 
-static void filter_mb_edgeh( H264Context *h, uint8_t *pix, int stride, int bS[4], int qp ) {
+static void filter_mb_edgeh( H264Context *h, uint8_t *pix, int stride, int16_t bS[4], int qp ) {
     int i, d;
     const int index_a = clip( qp + h->slice_alpha_c0_offset, 0, 51 );
     const int alpha = alpha_table[index_a];
@@ -6543,7 +6993,7 @@ static void filter_mb_edgeh( H264Context *h, uint8_t *pix, int stride, int bS[4]
     }
 }
 
-static void filter_mb_edgech( H264Context *h, uint8_t *pix, int stride, int bS[4], int qp ) {
+static void filter_mb_edgech( H264Context *h, uint8_t *pix, int stride, int16_t bS[4], int qp ) {
     int i;
     const int index_a = clip( qp + h->slice_alpha_c0_offset, 0, 51 );
     const int alpha = alpha_table[index_a];
@@ -6559,19 +7009,130 @@ static void filter_mb_edgech( H264Context *h, uint8_t *pix, int stride, int bS[4
     }
 }
 
+static void filter_mb_fast( H264Context *h, int mb_x, int mb_y, uint8_t *img_y, uint8_t *img_cb, uint8_t *img_cr, unsigned int linesize, unsigned int uvlinesize) {
+    MpegEncContext * const s = &h->s;
+    int mb_xy, mb_type;
+    int qp, qp0, qp1, qpc, qpc0, qpc1, qp_thresh;
+
+    if(mb_x==0 || mb_y==0 || !s->dsp.h264_loop_filter_strength) {
+        filter_mb(h, mb_x, mb_y, img_y, img_cb, img_cr, linesize, uvlinesize);
+        return;
+    }
+    assert(!FRAME_MBAFF);
+
+    mb_xy = mb_x + mb_y*s->mb_stride;
+    mb_type = s->current_picture.mb_type[mb_xy];
+    qp = s->current_picture.qscale_table[mb_xy];
+    qp0 = s->current_picture.qscale_table[mb_xy-1];
+    qp1 = s->current_picture.qscale_table[h->top_mb_xy];
+    qpc = get_chroma_qp( h->pps.chroma_qp_index_offset, qp );
+    qpc0 = get_chroma_qp( h->pps.chroma_qp_index_offset, qp0 );
+    qpc1 = get_chroma_qp( h->pps.chroma_qp_index_offset, qp1 );
+    qp0 = (qp + qp0 + 1) >> 1;
+    qp1 = (qp + qp1 + 1) >> 1;
+    qpc0 = (qpc + qpc0 + 1) >> 1;
+    qpc1 = (qpc + qpc1 + 1) >> 1;
+    qp_thresh = 15 - h->slice_alpha_c0_offset;
+    if(qp <= qp_thresh && qp0 <= qp_thresh && qp1 <= qp_thresh &&
+       qpc <= qp_thresh && qpc0 <= qp_thresh && qpc1 <= qp_thresh)
+        return;
+
+    if( IS_INTRA(mb_type) ) {
+        int16_t bS4[4] = {4,4,4,4};
+        int16_t bS3[4] = {3,3,3,3};
+        if( IS_8x8DCT(mb_type) ) {
+            filter_mb_edgev( h, &img_y[4*0], linesize, bS4, qp0 );
+            filter_mb_edgev( h, &img_y[4*2], linesize, bS3, qp );
+            filter_mb_edgeh( h, &img_y[4*0*linesize], linesize, bS4, qp1 );
+            filter_mb_edgeh( h, &img_y[4*2*linesize], linesize, bS3, qp );
+        } else {
+            filter_mb_edgev( h, &img_y[4*0], linesize, bS4, qp0 );
+            filter_mb_edgev( h, &img_y[4*1], linesize, bS3, qp );
+            filter_mb_edgev( h, &img_y[4*2], linesize, bS3, qp );
+            filter_mb_edgev( h, &img_y[4*3], linesize, bS3, qp );
+            filter_mb_edgeh( h, &img_y[4*0*linesize], linesize, bS4, qp1 );
+            filter_mb_edgeh( h, &img_y[4*1*linesize], linesize, bS3, qp );
+            filter_mb_edgeh( h, &img_y[4*2*linesize], linesize, bS3, qp );
+            filter_mb_edgeh( h, &img_y[4*3*linesize], linesize, bS3, qp );
+        }
+        filter_mb_edgecv( h, &img_cb[2*0], uvlinesize, bS4, qpc0 );
+        filter_mb_edgecv( h, &img_cb[2*2], uvlinesize, bS3, qpc );
+        filter_mb_edgecv( h, &img_cr[2*0], uvlinesize, bS4, qpc0 );
+        filter_mb_edgecv( h, &img_cr[2*2], uvlinesize, bS3, qpc );
+        filter_mb_edgech( h, &img_cb[2*0*uvlinesize], uvlinesize, bS4, qpc1 );
+        filter_mb_edgech( h, &img_cb[2*2*uvlinesize], uvlinesize, bS3, qpc );
+        filter_mb_edgech( h, &img_cr[2*0*uvlinesize], uvlinesize, bS4, qpc1 );
+        filter_mb_edgech( h, &img_cr[2*2*uvlinesize], uvlinesize, bS3, qpc );
+        return;
+    } else {
+        DECLARE_ALIGNED_8(int16_t, bS[2][4][4]);
+        uint64_t (*bSv)[4] = (uint64_t(*)[4])bS;
+        int edges;
+        if( IS_8x8DCT(mb_type) && (h->cbp&7) == 7 ) {
+            edges = 4;
+            bSv[0][0] = bSv[0][2] = bSv[1][0] = bSv[1][2] = 0x0002000200020002ULL;
+        } else {
+            int mask_edge1 = (mb_type & (MB_TYPE_16x16 | MB_TYPE_8x16)) ? 3 :
+                             (mb_type & MB_TYPE_16x8) ? 1 : 0;
+            int mask_edge0 = (mb_type & (MB_TYPE_16x16 | MB_TYPE_8x16))
+                             && (s->current_picture.mb_type[mb_xy-1] & (MB_TYPE_16x16 | MB_TYPE_8x16))
+                             ? 3 : 0;
+            int step = IS_8x8DCT(mb_type) ? 2 : 1;
+            edges = (mb_type & MB_TYPE_16x16) && !(h->cbp & 15) ? 1 : 4;
+            s->dsp.h264_loop_filter_strength( bS, h->non_zero_count_cache, h->ref_cache, h->mv_cache,
+                                              (h->slice_type == B_TYPE), edges, step, mask_edge0, mask_edge1 );
+        }
+        if( IS_INTRA(s->current_picture.mb_type[mb_xy-1]) )
+            bSv[0][0] = 0x0004000400040004ULL;
+        if( IS_INTRA(s->current_picture.mb_type[h->top_mb_xy]) )
+            bSv[1][0] = 0x0004000400040004ULL;
+
+#define FILTER(hv,dir,edge)\
+        if(bSv[dir][edge]) {\
+            filter_mb_edge##hv( h, &img_y[4*edge*(dir?linesize:1)], linesize, bS[dir][edge], edge ? qp : qp##dir );\
+            if(!(edge&1)) {\
+                filter_mb_edgec##hv( h, &img_cb[2*edge*(dir?uvlinesize:1)], uvlinesize, bS[dir][edge], edge ? qpc : qpc##dir );\
+                filter_mb_edgec##hv( h, &img_cr[2*edge*(dir?uvlinesize:1)], uvlinesize, bS[dir][edge], edge ? qpc : qpc##dir );\
+            }\
+        }
+        if( edges == 1 ) {
+            FILTER(v,0,0);
+            FILTER(h,1,0);
+        } else if( IS_8x8DCT(mb_type) ) {
+            FILTER(v,0,0);
+            FILTER(v,0,2);
+            FILTER(h,1,0);
+            FILTER(h,1,2);
+        } else {
+            FILTER(v,0,0);
+            FILTER(v,0,1);
+            FILTER(v,0,2);
+            FILTER(v,0,3);
+            FILTER(h,1,0);
+            FILTER(h,1,1);
+            FILTER(h,1,2);
+            FILTER(h,1,3);
+        }
+#undef FILTER
+    }
+}
+
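/* Illustrative sketch (not part of the patch): why the qp_thresh early exit in
 * filter_mb_fast is safe.  alpha_table[] is zero for indices 0..15, and every
 * deblocking decision starts with |p0-q0| < alpha, so once
 * qp + slice_alpha_c0_offset <= 15 no sample can be modified and the whole
 * macroblock can be skipped.  The helper name is an assumption for this
 * example only. */
static inline int deblock_is_noop(int qp, int slice_alpha_c0_offset)
{
    int index_a = qp + slice_alpha_c0_offset; /* clipped to [0,51] in the real code */
    return index_a <= 15;                     /* alpha_table[index_a] == 0 => filter does nothing */
}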
 static void filter_mb( H264Context *h, int mb_x, int mb_y, uint8_t *img_y, uint8_t *img_cb, uint8_t *img_cr, unsigned int linesize, unsigned int uvlinesize) {
     MpegEncContext * const s = &h->s;
     const int mb_xy= mb_x + mb_y*s->mb_stride;
+    const int mb_type = s->current_picture.mb_type[mb_xy];
+    const int mvy_limit = IS_INTERLACED(mb_type) ? 2 : 4;
     int first_vertical_edge_done = 0;
     int dir;
     /* FIXME: A given frame may occupy more than one position in
      * the reference list. So ref2frm should be populated with
      * frame numbers, not indices. */
-    static const int ref2frm[18] = {-1,-1,0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15};
+    static const int ref2frm[34] = {-1,-1,0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,
+                                    16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31};
 
     //for sufficiently low qp, filtering wouldn't do anything
     //this is a conservative estimate: could also check beta_offset and more accurate chroma_qp
-    if(!h->mb_aff_frame){
+    if(!FRAME_MBAFF){
         int qp_thresh = 15 - h->slice_alpha_c0_offset - FFMAX(0, h->pps.chroma_qp_index_offset);
         int qp = s->current_picture.qscale_table[mb_xy];
         if(qp <= qp_thresh
@@ -6581,73 +7142,64 @@ static void filter_mb( H264Context *h, int mb_x, int mb_y, uint8_t *img_y, uint8
         }
     }
 
-    if (h->mb_aff_frame
+    if (FRAME_MBAFF
             // left mb is in picture
             && h->slice_table[mb_xy-1] != 255
             // and current and left pair do not have the same interlaced type
-            && (IS_INTERLACED(s->current_picture.mb_type[mb_xy]) != IS_INTERLACED(s->current_picture.mb_type[mb_xy-1]))
+            && (IS_INTERLACED(mb_type) != IS_INTERLACED(s->current_picture.mb_type[mb_xy-1]))
             // and left mb is in the same slice if deblocking_filter == 2
             && (h->deblocking_filter!=2 || h->slice_table[mb_xy-1] == h->slice_table[mb_xy])) {
         /* First vertical edge is different in MBAFF frames
          * There are 8 different bS to compute and 2 different Qp
          */
-        int bS[8];
+        const int pair_xy = mb_x + (mb_y&~1)*s->mb_stride;
+        const int left_mb_xy[2] = { pair_xy-1, pair_xy-1+s->mb_stride };
+        int16_t bS[8];
         int qp[2];
         int chroma_qp[2];
-
+        int mb_qp, mbn0_qp, mbn1_qp;
         int i;
         first_vertical_edge_done = 1;
-        for( i = 0; i < 8; i++ ) {
-            int y = i>>1;
-            int b_idx= 8 + 4 + 8*y;
-            int bn_idx= b_idx - 1;
-
-            int mbn_xy = h->mb_field_decoding_flag ? h->left_mb_xy[i>>2] : h->left_mb_xy[i&1];
-
-            if( IS_INTRA( s->current_picture.mb_type[mb_xy] ) ||
-                IS_INTRA( s->current_picture.mb_type[mbn_xy] ) ) {
-                bS[i] = 4;
-            } else if( h->non_zero_count_cache[b_idx] != 0 ||
-                /* FIXME: with 8x8dct + cavlc, should check cbp instead of nnz */
-                h->non_zero_count_cache[bn_idx] != 0 ) {
-                bS[i] = 2;
-            } else {
-                int l;
-                bS[i] = 0;
-                for( l = 0; l < 1 + (h->slice_type == B_TYPE); l++ ) {
-                    if( ref2frm[h->ref_cache[l][b_idx]+2] != ref2frm[h->ref_cache[l][bn_idx]+2] ||
-                        ABS( h->mv_cache[l][b_idx][0] - h->mv_cache[l][bn_idx][0] ) >= 4 ||
-                        ABS( h->mv_cache[l][b_idx][1] - h->mv_cache[l][bn_idx][1] ) >= 4 ) {
-                        bS[i] = 1;
-                        break;
-                    }
-                }
+
+        if( IS_INTRA(mb_type) )
+            bS[0] = bS[1] = bS[2] = bS[3] = bS[4] = bS[5] = bS[6] = bS[7] = 4;
+        else {
+            for( i = 0; i < 8; i++ ) {
+                int mbn_xy = MB_FIELD ? left_mb_xy[i>>2] : left_mb_xy[i&1];
+
+                if( IS_INTRA( s->current_picture.mb_type[mbn_xy] ) )
+                    bS[i] = 4;
+                else if( h->non_zero_count_cache[12+8*(i>>1)] != 0 ||
+                         /* FIXME: with 8x8dct + cavlc, should check cbp instead of nnz */
+                         h->non_zero_count[mbn_xy][MB_FIELD ? i&3 : (i>>2)+(mb_y&1)*2] )
+                    bS[i] = 2;
+                else
+                    bS[i] = 1;
             }
         }
-        if(bS[0]+bS[1]+bS[2]+bS[3] != 0) {
-            // Do not use s->qscale as luma quantizer because it has not the same
-            // value in IPCM macroblocks.
-            qp[0] = ( s->current_picture.qscale_table[mb_xy] + s->current_picture.qscale_table[h->left_mb_xy[0]] + 1 ) >> 1;
-            chroma_qp[0] = ( get_chroma_qp( h->pps.chroma_qp_index_offset, s->current_picture.qscale_table[mb_xy] ) +
-                             get_chroma_qp( h->pps.chroma_qp_index_offset, s->current_picture.qscale_table[h->left_mb_xy[0]] ) + 1 ) >> 1;
-            qp[1] = ( s->current_picture.qscale_table[mb_xy] + s->current_picture.qscale_table[h->left_mb_xy[1]] + 1 ) >> 1;
-            chroma_qp[1] = ( get_chroma_qp( h->pps.chroma_qp_index_offset, s->current_picture.qscale_table[mb_xy] ) +
-                             get_chroma_qp( h->pps.chroma_qp_index_offset, s->current_picture.qscale_table[h->left_mb_xy[1]] ) + 1 ) >> 1;
 
-            /* Filter edge */
-            tprintf("filter mb:%d/%d MBAFF, QPy:%d/%d, QPc:%d/%d ls:%d uvls:%d", mb_x, mb_y, qp[0], qp[1], chroma_qp[0], chroma_qp[1], linesize, uvlinesize);
-            { int i; for (i = 0; i < 8; i++) tprintf(" bS[%d]:%d", i, bS[i]); tprintf("\n"); }
-            filter_mb_mbaff_edgev ( h, &img_y [0], linesize,   bS, qp );
-            filter_mb_mbaff_edgecv( h, &img_cb[0], uvlinesize, bS, chroma_qp );
-            filter_mb_mbaff_edgecv( h, &img_cr[0], uvlinesize, bS, chroma_qp );
-        }
+        mb_qp = s->current_picture.qscale_table[mb_xy];
+        mbn0_qp = s->current_picture.qscale_table[left_mb_xy[0]];
+        mbn1_qp = s->current_picture.qscale_table[left_mb_xy[1]];
+        qp[0] = ( mb_qp + mbn0_qp + 1 ) >> 1;
+        chroma_qp[0] = ( get_chroma_qp( h->pps.chroma_qp_index_offset, mb_qp ) +
+                         get_chroma_qp( h->pps.chroma_qp_index_offset, mbn0_qp ) + 1 ) >> 1;
+        qp[1] = ( mb_qp + mbn1_qp + 1 ) >> 1;
+        chroma_qp[1] = ( get_chroma_qp( h->pps.chroma_qp_index_offset, mb_qp ) +
+                         get_chroma_qp( h->pps.chroma_qp_index_offset, mbn1_qp ) + 1 ) >> 1;
+
+        /* Filter edge */
+        tprintf("filter mb:%d/%d MBAFF, QPy:%d/%d, QPc:%d/%d ls:%d uvls:%d", mb_x, mb_y, qp[0], qp[1], chroma_qp[0], chroma_qp[1], linesize, uvlinesize);
+        { int i; for (i = 0; i < 8; i++) tprintf(" bS[%d]:%d", i, bS[i]); tprintf("\n"); }
+        filter_mb_mbaff_edgev ( h, &img_y [0], linesize,   bS, qp );
+        filter_mb_mbaff_edgecv( h, &img_cb[0], uvlinesize, bS, chroma_qp );
+        filter_mb_mbaff_edgecv( h, &img_cr[0], uvlinesize, bS, chroma_qp );
     }
     /* dir : 0 -> vertical edge, 1 -> horizontal edge */
     for( dir = 0; dir < 2; dir++ )
     {
         int edge;
         const int mbm_xy = dir == 0 ? mb_xy -1 : h->top_mb_xy;
-        const int mb_type = s->current_picture.mb_type[mb_xy];
         const int mbm_type = s->current_picture.mb_type[mbm_xy];
         int start = h->slice_table[mbm_xy] == 255 ? 1 : 0;
 
@@ -6667,78 +7219,68 @@ static void filter_mb( H264Context *h, int mb_x, int mb_y, uint8_t *img_y, uint8
         if (h->deblocking_filter==2 && h->slice_table[mbm_xy] != h->slice_table[mb_xy])
             start = 1;
 
-        /* Calculate bS */
-        for( edge = start; edge < edges; edge++ ) {
-            /* mbn_xy: neighbor macroblock */
-            const int mbn_xy = edge > 0 ? mb_xy : mbm_xy;
-            const int mbn_type = s->current_picture.mb_type[mbn_xy];
-            int bS[4];
-            int qp;
-
-            if( (edge&1) && IS_8x8DCT(mb_type) )
-                continue;
-
-            if (h->mb_aff_frame && (dir == 1) && (edge == 0) && ((mb_y & 1) == 0)
-                && !IS_INTERLACED(mb_type)
-                && IS_INTERLACED(mbn_type)
-                ) {
-                // This is a special case in the norm where the filtering must
-                // be done twice (one each of the field) even if we are in a
-                // frame macroblock.
-                //
-                unsigned int tmp_linesize   = 2 *   linesize;
-                unsigned int tmp_uvlinesize = 2 * uvlinesize;
-                int mbn_xy = mb_xy - 2 * s->mb_stride;
-                int qp, chroma_qp;
-
-                // first filtering
+        if (FRAME_MBAFF && (dir == 1) && ((mb_y&1) == 0) && start == 0
+            && !IS_INTERLACED(mb_type)
+            && IS_INTERLACED(mbm_type)
+            ) {
+            // This is a special case in the norm where the filtering must
+            // be done twice (once for each field) even if we are in a
+            // frame macroblock.
+            //
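+            // nnz_idx maps the four 4x4 blocks along this edge to their
+            // entries in the neighbouring MB's non_zero_count[] array
+            // (ordering assumed from the cache filling code).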
+            static const int nnz_idx[4] = {4,5,6,3};
+            unsigned int tmp_linesize   = 2 *   linesize;
+            unsigned int tmp_uvlinesize = 2 * uvlinesize;
+            int mbn_xy = mb_xy - 2 * s->mb_stride;
+            int qp, chroma_qp;
+            int i, j;
+            int16_t bS[4];
+
+            for(j=0; j<2; j++, mbn_xy += s->mb_stride){
                 if( IS_INTRA(mb_type) ||
                     IS_INTRA(s->current_picture.mb_type[mbn_xy]) ) {
                     bS[0] = bS[1] = bS[2] = bS[3] = 3;
                 } else {
-                    // TODO
-                    av_log(h->s.avctx, AV_LOG_ERROR, "both non intra (TODO)\n");
+                    const uint8_t *mbn_nnz = h->non_zero_count[mbn_xy];
+                    for( i = 0; i < 4; i++ ) {
+                        if( h->non_zero_count_cache[scan8[0]+i] != 0 ||
+                            mbn_nnz[nnz_idx[i]] != 0 )
+                            bS[i] = 2;
+                        else
+                            bS[i] = 1;
+                    }
                 }
-                /* Filter edge */
                 // Do not use s->qscale as luma quantizer because it has not the same
                 // value in IPCM macroblocks.
                 qp = ( s->current_picture.qscale_table[mb_xy] + s->current_picture.qscale_table[mbn_xy] + 1 ) >> 1;
                 tprintf("filter mb:%d/%d dir:%d edge:%d, QPy:%d ls:%d uvls:%d", mb_x, mb_y, dir, edge, qp, tmp_linesize, tmp_uvlinesize);
                 { int i; for (i = 0; i < 4; i++) tprintf(" bS[%d]:%d", i, bS[i]); tprintf("\n"); }
-                filter_mb_edgeh( h, &img_y[0], tmp_linesize, bS, qp );
+                filter_mb_edgeh( h, &img_y[j*linesize], tmp_linesize, bS, qp );
                 chroma_qp = ( h->chroma_qp +
                               get_chroma_qp( h->pps.chroma_qp_index_offset, s->current_picture.qscale_table[mbn_xy] ) + 1 ) >> 1;
-                filter_mb_edgech( h, &img_cb[0], tmp_uvlinesize, bS, chroma_qp );
-                filter_mb_edgech( h, &img_cr[0], tmp_uvlinesize, bS, chroma_qp );
+                filter_mb_edgech( h, &img_cb[j*uvlinesize], tmp_uvlinesize, bS, chroma_qp );
+                filter_mb_edgech( h, &img_cr[j*uvlinesize], tmp_uvlinesize, bS, chroma_qp );
+            }
 
-                // second filtering
-                mbn_xy += s->mb_stride;
-                if( IS_INTRA(mb_type) ||
-                    IS_INTRA(mbn_type) ) {
-                    bS[0] = bS[1] = bS[2] = bS[3] = 3;
-                } else {
-                    // TODO
-                    av_log(h->s.avctx, AV_LOG_ERROR, "both non intra (TODO)\n");
-                }
-                /* Filter edge */
-                // Do not use s->qscale as luma quantizer because it has not the same
-                // value in IPCM macroblocks.
-                qp = ( s->current_picture.qscale_table[mb_xy] + s->current_picture.qscale_table[mbn_xy] + 1 ) >> 1;
-                tprintf("filter mb:%d/%d dir:%d edge:%d, QPy:%d ls:%d uvls:%d", mb_x, mb_y, dir, edge, qp, tmp_linesize, tmp_uvlinesize);
-                { int i; for (i = 0; i < 4; i++) tprintf(" bS[%d]:%d", i, bS[i]); tprintf("\n"); }
-                filter_mb_edgeh( h, &img_y[linesize], tmp_linesize, bS, qp );
-                chroma_qp = ( h->chroma_qp +
-                              get_chroma_qp( h->pps.chroma_qp_index_offset, s->current_picture.qscale_table[mbn_xy] ) + 1 ) >> 1;
-                filter_mb_edgech( h, &img_cb[uvlinesize], tmp_uvlinesize, bS, chroma_qp );
-                filter_mb_edgech( h, &img_cr[uvlinesize], tmp_uvlinesize, bS, chroma_qp );
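+            // edge 0 has already been filtered above (as two field edges),
+            // so the regular per-edge loop below starts at edge 1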
+            start = 1;
+        }
+
+        /* Calculate bS */
+        for( edge = start; edge < edges; edge++ ) {
+            /* mbn_xy: neighbor macroblock */
+            const int mbn_xy = edge > 0 ? mb_xy : mbm_xy;
+            const int mbn_type = s->current_picture.mb_type[mbn_xy];
+            int16_t bS[4];
+            int qp;
+
+            if( (edge&1) && IS_8x8DCT(mb_type) )
                 continue;
-            }
+
             if( IS_INTRA(mb_type) ||
                 IS_INTRA(mbn_type) ) {
                 int value;
                 if (edge == 0) {
                     if (   (!IS_INTERLACED(mb_type) && !IS_INTERLACED(mbm_type))
-                        || ((h->mb_aff_frame || (s->picture_structure != PICT_FRAME)) && (dir == 0))
+                        || ((FRAME_MBAFF || (s->picture_structure != PICT_FRAME)) && (dir == 0))
                     ) {
                         value = 4;
                     } else {
@@ -6756,6 +7298,10 @@ static void filter_mb( H264Context *h, int mb_x, int mb_y, uint8_t *img_y, uint8
                     bS[0] = bS[1] = bS[2] = bS[3] = 0;
                     mv_done = 1;
                 }
+                else if( FRAME_MBAFF && IS_INTERLACED(mb_type ^ mbn_type)) {
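+                    /* mixed frame/field edge: the motion vectors of a frame
+                     * MB and a field MB are not directly comparable, so the
+                     * MV check is skipped and bS defaults to 1 */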
+                    bS[0] = bS[1] = bS[2] = bS[3] = 1;
+                    mv_done = 1;
+                }
                 else if( mask_par0 && (edge || (mbn_type & (MB_TYPE_16x16 | (MB_TYPE_8x16 >> dir)))) ) {
                     int b_idx= 8 + 4 + edge * (dir ? 8:1);
                     int bn_idx= b_idx - (dir ? 8:1);
@@ -6763,7 +7309,7 @@ static void filter_mb( H264Context *h, int mb_x, int mb_y, uint8_t *img_y, uint8
                     for( l = 0; !v && l < 1 + (h->slice_type == B_TYPE); l++ ) {
                         v |= ref2frm[h->ref_cache[l][b_idx]+2] != ref2frm[h->ref_cache[l][bn_idx]+2] ||
                              ABS( h->mv_cache[l][b_idx][0] - h->mv_cache[l][bn_idx][0] ) >= 4 ||
-                             ABS( h->mv_cache[l][b_idx][1] - h->mv_cache[l][bn_idx][1] ) >= 4;
+                             ABS( h->mv_cache[l][b_idx][1] - h->mv_cache[l][bn_idx][1] ) >= mvy_limit;
                     }
                     bS[0] = bS[1] = bS[2] = bS[3] = v;
                     mv_done = 1;
@@ -6787,7 +7333,7 @@ static void filter_mb( H264Context *h, int mb_x, int mb_y, uint8_t *img_y, uint8
                         for( l = 0; l < 1 + (h->slice_type == B_TYPE); l++ ) {
                             if( ref2frm[h->ref_cache[l][b_idx]+2] != ref2frm[h->ref_cache[l][bn_idx]+2] ||
                                 ABS( h->mv_cache[l][b_idx][0] - h->mv_cache[l][bn_idx][0] ) >= 4 ||
-                                ABS( h->mv_cache[l][b_idx][1] - h->mv_cache[l][bn_idx][1] ) >= 4 ) {
+                                ABS( h->mv_cache[l][b_idx][1] - h->mv_cache[l][bn_idx][1] ) >= mvy_limit ) {
                                 bS[i] = 1;
                                 break;
                             }
@@ -6864,8 +7410,7 @@ static int decode_slice(H264Context *h){
 
             if(ret>=0) hl_decode_mb(h);
 
-            /* XXX: useless as decode_mb_cabac it doesn't support that ... */
-            if( ret >= 0 && h->mb_aff_frame ) { //FIXME optimal? or let mb_decode decode 16x32 ?
+            if( ret >= 0 && FRAME_MBAFF ) { //FIXME optimal? or let mb_decode decode 16x32 ?
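+                /* MBs in an MBAFF frame are coded in top/bottom pairs:
+                 * decode the second MB of the pair before moving on */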
                 s->mb_y++;
 
                 if(ret>=0) ret = decode_mb_cabac(h);
@@ -6875,8 +7420,8 @@ static int decode_slice(H264Context *h){
             }
             eos = get_cabac_terminate( &h->cabac );
 
-            if( ret < 0 || h->cabac.bytestream > h->cabac.bytestream_end + 1) {
-                av_log(h->s.avctx, AV_LOG_ERROR, "error while decoding MB %d %d\n", s->mb_x, s->mb_y);
+            if( ret < 0 || h->cabac.bytestream > h->cabac.bytestream_end + 2) {
+                av_log(h->s.avctx, AV_LOG_ERROR, "error while decoding MB %d %d, bytestream (%d)\n", s->mb_x, s->mb_y, h->cabac.bytestream_end - h->cabac.bytestream);
                 ff_er_add_slice(s, s->resync_mb_x, s->resync_mb_y, s->mb_x, s->mb_y, (AC_ERROR|DC_ERROR|MV_ERROR)&part_mask);
                 return -1;
             }
@@ -6885,7 +7430,7 @@ static int decode_slice(H264Context *h){
                 s->mb_x = 0;
                 ff_draw_horiz_band(s, 16*s->mb_y, 16);
                 ++s->mb_y;
-                if(h->mb_aff_frame) {
+                if(FRAME_MBAFF) {
                     ++s->mb_y;
                 }
             }
@@ -6903,7 +7448,7 @@ static int decode_slice(H264Context *h){
 
             if(ret>=0) hl_decode_mb(h);
 
-            if(ret>=0 && h->mb_aff_frame){ //FIXME optimal? or let mb_decode decode 16x32 ?
+            if(ret>=0 && FRAME_MBAFF){ //FIXME optimal? or let mb_decode decode 16x32 ?
                 s->mb_y++;
                 ret = decode_mb_cavlc(h);
 
@@ -6922,7 +7467,7 @@ static int decode_slice(H264Context *h){
                 s->mb_x=0;
                 ff_draw_horiz_band(s, 16*s->mb_y, 16);
                 ++s->mb_y;
-                if(h->mb_aff_frame) {
+                if(FRAME_MBAFF) {
                     ++s->mb_y;
                 }
                 if(s->mb_y >= s->mb_height){
@@ -6963,7 +7508,7 @@ static int decode_slice(H264Context *h){
             hl_decode_mb(h);
 
             if(ret<0){
-                fprintf(stderr, "error while decoding MB %d %d\n", s->mb_x, s->mb_y);
+                av_log(s->avctx, AV_LOG_ERROR, "error while decoding MB %d %d\n", s->mb_x, s->mb_y);
                 ff_er_add_slice(s, s->resync_mb_x, s->resync_mb_y, s->mb_x, s->mb_y, (AC_ERROR|DC_ERROR|MV_ERROR)&part_mask);
 
                 return -1;
@@ -7047,7 +7592,7 @@ static int decode_sei(H264Context *h){
 
         switch(type){
         case 5:
-            if(decode_unregistered_user_data(h, size) < 0);
+            if(decode_unregistered_user_data(h, size) < 0)
                 return -1;
             break;
         default:
@@ -7090,7 +7635,7 @@ static inline int decode_vui_parameters(H264Context *h, SPS *sps){
         if( aspect_ratio_idc == EXTENDED_SAR ) {
             sps->sar.num= get_bits(&s->gb, 16);
             sps->sar.den= get_bits(&s->gb, 16);
-        }else if(aspect_ratio_idc < 16){
+        }else if(aspect_ratio_idc < 14){
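+            // the pixel_aspect[] table is assumed to cover only idc 0..13,
+            // hence the tighter bound above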
             sps->sar=  pixel_aspect[aspect_ratio_idc];
         }else{
             av_log(h->s.avctx, AV_LOG_ERROR, "illegal aspect ratio\n");
@@ -7266,6 +7811,13 @@ static inline int decode_seq_parameter_set(H264Context *h){
 
     sps->direct_8x8_inference_flag= get_bits1(&s->gb);
 
+#ifndef ALLOW_INTERLACE
+    if(sps->mb_aff)
+        av_log(h->s.avctx, AV_LOG_ERROR, "MBAFF support not included; enable it compilation time\n");
+#endif
+    if(!sps->direct_8x8_inference_flag && sps->mb_aff)
+        av_log(h->s.avctx, AV_LOG_ERROR, "MBAFF + !direct_8x8_inference is not implemented\n");
+
     sps->crop= get_bits1(&s->gb);
     if(sps->crop){
         sps->crop_left  = get_ue_golomb(&s->gb);
@@ -7364,6 +7916,8 @@ static inline int decode_picture_parameter_set(H264Context *h, int bit_length){
     pps->constrained_intra_pred= get_bits1(&s->gb);
     pps->redundant_pic_cnt_present = get_bits1(&s->gb);
 
+    pps->transform_8x8_mode= 0;
+    h->dequant_coeff_pps= -1; //contents of sps/pps can change even if id doesn't, so reinit
     memset(pps->scaling_matrix4, 16, 6*16*sizeof(uint8_t));
     memset(pps->scaling_matrix8, 16, 2*64*sizeof(uint8_t));
 
@@ -7436,6 +7990,7 @@ static int find_frame_end(H264Context *h, const uint8_t *buf, int buf_size){
     return END_NOT_FOUND;
 }
 
+#ifdef CONFIG_H264_PARSER
 static int h264_parse(AVCodecParserContext *s,
                       AVCodecContext *avctx,
                       uint8_t **poutbuf, int *poutbuf_size,
@@ -7481,7 +8036,7 @@ static int h264_split(AVCodecContext *avctx,
     }
     return 0;
 }
-
+#endif /* CONFIG_H264_PARSER */
 
 static int decode_nal_units(H264Context *h, uint8_t *buf, int buf_size){
     MpegEncContext * const s = &h->s;
@@ -7530,7 +8085,8 @@ static int decode_nal_units(H264Context *h, uint8_t *buf, int buf_size){
       }
 
         ptr= decode_nal(h, buf + buf_index, &dst_length, &consumed, h->is_avc ? nalsize : buf_size - buf_index);
-        if(ptr[dst_length - 1] == 0) dst_length--;
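+        /* trim all trailing zero bytes, not just one, so that
+         * decode_rbsp_trailing() looks at the last non-zero byte */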
+        while(ptr[dst_length - 1] == 0 && dst_length > 1)
+            dst_length--;
         bit_length= 8*dst_length - decode_rbsp_trailing(ptr + dst_length - 1);
 
         if(s->avctx->debug&FF_DEBUG_STARTCODE){
@@ -7559,6 +8115,7 @@ static int decode_nal_units(H264Context *h, uint8_t *buf, int buf_size){
                 av_log(h->s.avctx, AV_LOG_ERROR, "decode_slice_header error\n");
                 break;
             }
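+            /* mark IDR pictures as key frames as soon as the slice header is parsed */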
+            s->current_picture_ptr->key_frame= (h->nal_unit_type == NAL_IDR_SLICE);
             if(h->redundant_pic_count==0 && s->hurry_up < 5
                && (avctx->skip_frame < AVDISCARD_NONREF || h->nal_ref_idc)
                && (avctx->skip_frame < AVDISCARD_BIDIR  || h->slice_type!=B_TYPE)
@@ -7628,7 +8185,6 @@ static int decode_nal_units(H264Context *h, uint8_t *buf, int buf_size){
 
     s->current_picture_ptr->qscale_type= FF_QSCALE_TYPE_H264;
     s->current_picture_ptr->pict_type= s->pict_type;
-    s->current_picture_ptr->key_frame= s->pict_type == I_TYPE && h->nal_unit_type == NAL_IDR_SLICE;
 
     h->prev_frame_num_offset= h->frame_num_offset;
     h->prev_frame_num= h->frame_num;
@@ -7754,12 +8310,7 @@ static int decode_frame(AVCodecContext *avctx,
         /* Sort B-frames into display order */
         Picture *cur = s->current_picture_ptr;
         Picture *prev = h->delayed_output_pic;
-        int out_idx = 0;
-        int pics = 0;
-        int out_of_order;
-        int cross_idr = 0;
-        int dropped_frame = 0;
-        int i;
+        int i, pics, cross_idr, out_of_order, out_idx;
 
         if(h->sps.bitstream_restriction_flag
            && s->avctx->has_b_frames < h->sps.num_reorder_frames){
@@ -7767,16 +8318,19 @@ static int decode_frame(AVCodecContext *avctx,
             s->low_delay = 0;
         }
 
+        pics = 0;
         while(h->delayed_pic[pics]) pics++;
         h->delayed_pic[pics++] = cur;
         if(cur->reference == 0)
             cur->reference = 1;
 
+        cross_idr = 0;
         for(i=0; h->delayed_pic[i]; i++)
             if(h->delayed_pic[i]->key_frame || h->delayed_pic[i]->poc==0)
                 cross_idr = 1;
 
         out = h->delayed_pic[0];
+        out_idx = 0;
         for(i=1; h->delayed_pic[i] && !h->delayed_pic[i]->key_frame; i++)
             if(h->delayed_pic[i]->poc < out->poc){
                 out = h->delayed_pic[i];
@@ -7801,12 +8355,11 @@ static int decode_frame(AVCodecContext *avctx,
             out = prev;
 
         if(out_of_order || pics > s->avctx->has_b_frames){
-            dropped_frame = (out != h->delayed_pic[out_idx]);
             for(i=out_idx; h->delayed_pic[i]; i++)
                 h->delayed_pic[i] = h->delayed_pic[i+1];
         }
 
-        if(prev == out && !dropped_frame)
+        if(prev == out)
             *data_size = 0;
         else
             *data_size = sizeof(AVFrame);
@@ -8054,6 +8607,7 @@ AVCodec h264_decoder = {
     .flush= flush_dpb,
 };
 
+#ifdef CONFIG_H264_PARSER
 AVCodecParser h264_parser = {
     { CODEC_ID_H264 },
     sizeof(H264Context),
@@ -8062,5 +8616,6 @@ AVCodecParser h264_parser = {
     ff_parse_close,
     h264_split,
 };
+#endif
 
 #include "svq3.c"