optimize IDCT of rows with mostly zero coefficients
index 5c6d5f6f87d2ea3ea11a1ff1c3319818265d419a..5649484a9fecdb64e9c9ea70f94ac28d8ff0647d 100644 (file)
@@ -3,18 +3,20 @@
  * Copyright (c) 2006 Konstantin Shishkov
  * Partly based on vc9.c (c) 2005 Anonymous, Alex Beregszaszi, Michael Niedermayer
  *
- * This library is free software; you can redistribute it and/or
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
  * modify it under the terms of the GNU Lesser General Public
  * License as published by the Free Software Foundation; either
- * version 2 of the License, or (at your option) any later version.
+ * version 2.1 of the License, or (at your option) any later version.
  *
- * This library is distributed in the hope that it will be useful,
+ * FFmpeg is distributed in the hope that it will be useful,
  * but WITHOUT ANY WARRANTY; without even the implied warranty of
  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
  * Lesser General Public License for more details.
  *
  * You should have received a copy of the GNU Lesser General Public
- * License along with this library; if not, write to the Free Software
+ * License along with FFmpeg; if not, write to the Free Software
  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
  *
  */
@@ -332,6 +334,7 @@ typedef struct VC1Context{
     int dmb_is_raw;               ///< direct mb plane is raw
     int skip_is_raw;              ///< skip mb plane is not coded
     uint8_t luty[256], lutuv[256]; // lookup tables used for intensity compensation
+    int use_ic;                   ///< use intensity compensation in B-frames
     int rnd;                      ///< rounding control
 
     /** Frame decoding info for S/M profiles only */
@@ -787,6 +790,10 @@ static void vc1_mc_1mv(VC1Context *v, int dir)
     }
     uvmx = (mx + ((mx & 3) == 3)) >> 1;
     uvmy = (my + ((my & 3) == 3)) >> 1;
+    if(v->fastuvmc) {
+        uvmx = uvmx + ((uvmx<0)?(uvmx&1):-(uvmx&1));
+        uvmy = uvmy + ((uvmy<0)?(uvmy&1):-(uvmy&1));
+    }
     if(!dir) {
         srcY = s->last_picture.data[0];
         srcU = s->last_picture.data[1];
@@ -875,11 +882,6 @@ static void vc1_mc_1mv(VC1Context *v, int dir)
         srcY += s->mspel * (1 + s->linesize);
     }
 
-    if(v->fastuvmc) {
-        uvmx = uvmx + ((uvmx<0)?(uvmx&1):-(uvmx&1));
-        uvmy = uvmy + ((uvmy<0)?(uvmy&1):-(uvmy&1));
-    }
-
     if(s->mspel) {
         dxy = ((my & 3) << 2) | (mx & 3);
         dsp->put_vc1_mspel_pixels_tab[dxy](s->dest[0]    , srcY    , s->linesize, v->rnd);
@@ -1049,6 +1051,10 @@ static void vc1_mc_4mv_chroma(VC1Context *v)
     s->current_picture.motion_val[1][s->block_index[0]][1] = ty;
     uvmx = (tx + ((tx&3) == 3)) >> 1;
     uvmy = (ty + ((ty&3) == 3)) >> 1;
+    if(v->fastuvmc) {
+        uvmx = uvmx + ((uvmx<0)?(uvmx&1):-(uvmx&1));
+        uvmy = uvmy + ((uvmy<0)?(uvmy&1):-(uvmy&1));
+    }
 
     uvsrc_x = s->mb_x * 8 + (uvmx >> 2);
     uvsrc_y = s->mb_y * 8 + (uvmy >> 2);
@@ -1099,11 +1105,6 @@ static void vc1_mc_4mv_chroma(VC1Context *v)
         }
     }
 
-    if(v->fastuvmc) {
-        uvmx = uvmx + ((uvmx<0)?(uvmx&1):-(uvmx&1));
-        uvmy = uvmy + ((uvmy<0)?(uvmy&1):-(uvmy&1));
-    }
-
     /* Chroma MC always uses qpel bilinear */
     uvdxy = ((uvmy & 3) << 2) | (uvmx & 3);
     uvmx = (uvmx&3)<<1;
@@ -1267,9 +1268,23 @@ static int decode_sequence_header_adv(VC1Context *v, GetBitContext *gb)
     v->s.avctx->coded_height = (get_bits(gb, 12) + 1) << 1;
     v->broadcast = get_bits1(gb);
     v->interlace = get_bits1(gb);
+    if(v->interlace){
+        av_log(v->s.avctx, AV_LOG_ERROR, "Interlaced mode not supported (yet)\n");
+        return -1;
+    }
     v->tfcntrflag = get_bits1(gb);
     v->finterpflag = get_bits1(gb);
     get_bits1(gb); // reserved
+
+    av_log(v->s.avctx, AV_LOG_DEBUG,
+               "Advanced Profile level %i:\nfrmrtq_postproc=%i, bitrtq_postproc=%i\n"
+               "LoopFilter=%i, ChromaFormat=%i, Pulldown=%i, Interlace: %i\n"
+               "TFCTRflag=%i, FINTERPflag=%i\n",
+               v->level, v->frmrtq_postproc, v->bitrtq_postproc,
+               v->s.loop_filter, v->chromaformat, v->broadcast, v->interlace,
+               v->tfcntrflag, v->finterpflag
+               );
+
     v->psf = get_bits1(gb);
     if(v->psf) { //PsF, 6.1.13
         av_log(v->s.avctx, AV_LOG_ERROR, "Progressive Segmented Frame mode: not supported (yet)\n");
@@ -1278,15 +1293,17 @@ static int decode_sequence_header_adv(VC1Context *v, GetBitContext *gb)
     if(get_bits1(gb)) { //Display Info - decoding is not affected by it
         int w, h, ar = 0;
         av_log(v->s.avctx, AV_LOG_INFO, "Display extended info:\n");
-        w = get_bits(gb, 14);
-        h = get_bits(gb, 14);
+        w = get_bits(gb, 14) + 1;
+        h = get_bits(gb, 14) + 1;
         av_log(v->s.avctx, AV_LOG_INFO, "Display dimensions: %ix%i\n", w, h);
-        //TODO: store aspect ratio in AVCodecContext
         if(get_bits1(gb))
             ar = get_bits(gb, 4);
-        if(ar == 15) {
+        if(ar && ar < 14){
+            v->s.avctx->sample_aspect_ratio = vc1_pixel_aspect[ar];
+        }else if(ar == 15){
             w = get_bits(gb, 8);
             h = get_bits(gb, 8);
+            v->s.avctx->sample_aspect_ratio = (AVRational){w, h};
         }
 
         if(get_bits1(gb)){ //framerate stuff
@@ -1322,13 +1339,13 @@ static int decode_sequence_header_adv(VC1Context *v, GetBitContext *gb)
 static int decode_entry_point(AVCodecContext *avctx, GetBitContext *gb)
 {
     VC1Context *v = avctx->priv_data;
-    int i;
+    int i, blink, refdist;
 
     av_log(avctx, AV_LOG_DEBUG, "Entry point: %08X\n", show_bits_long(gb, 32));
-    get_bits1(gb); // broken link
-    get_bits1(gb); // closed entry
+    blink = get_bits1(gb); // broken link
+    avctx->max_b_frames = 1 - get_bits1(gb); // 'closed entry' also signals possible B-frames
     v->panscanflag = get_bits1(gb);
-    get_bits1(gb); // refdist flag
+    refdist = get_bits1(gb); // refdist flag
     v->s.loop_filter = get_bits1(gb);
     v->fastuvmc = get_bits1(gb);
     v->extended_mv = get_bits1(gb);
@@ -1358,6 +1375,13 @@ static int decode_entry_point(AVCodecContext *avctx, GetBitContext *gb)
         skip_bits(gb, 3); // UV range, ignored for now
     }
 
+    av_log(avctx, AV_LOG_DEBUG, "Entry point info:\n"
+        "BrokenLink=%i, ClosedEntry=%i, PanscanFlag=%i\n"
+        "RefDist=%i, Postproc=%i, FastUVMC=%i, ExtMV=%i\n"
+        "DQuant=%i, VSTransform=%i, Overlap=%i, Qmode=%i\n",
+        blink, 1 - avctx->max_b_frames, v->panscanflag, refdist, v->s.loop_filter,
+        v->fastuvmc, v->extended_mv, v->dquant, v->vstransform, v->overlap, v->quantizer_mode);
+
     return 0;
 }
 
@@ -1427,6 +1451,8 @@ static int vc1_parse_frame_header(VC1Context *v, GetBitContext* gb)
 //av_log(v->s.avctx, AV_LOG_INFO, "%c Frame: QP=[%i]%i (+%i/2) %i\n",
 //        (v->s.pict_type == P_TYPE) ? 'P' : ((v->s.pict_type == I_TYPE) ? 'I' : 'B'), pqindex, v->pq, v->halfpq, v->rangeredfrm);
 
+    if(v->s.pict_type == I_TYPE || v->s.pict_type == P_TYPE) v->use_ic = 0;
+
     switch(v->s.pict_type) {
     case P_TYPE:
         if (v->pq < 5) v->tt_index = 0;
@@ -1441,6 +1467,7 @@ static int vc1_parse_frame_header(VC1Context *v, GetBitContext* gb)
             v->mv_mode2 = mv_pmode_table2[lowquant][get_prefix(gb, 1, 3)];
             v->lumscale = get_bits(gb, 6);
             v->lumshift = get_bits(gb, 6);
+            v->use_ic = 1;
             /* fill lookup tables for intensity compensation */
             if(!v->lumscale) {
                 scale = -64;
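
use_ic is set here because the forward reference gets remapped through the luty/lutuv intensity-compensation tables, and B-frames that reuse that reference must redo the same remapping. A hedged sketch of how such a LUT is applied to reference samples (the table construction itself continues in the lines elided below):

    #include <stdint.h>

    /* Apply an intensity-compensation lookup table to a block of reference
     * samples in place; luty[] maps each 8-bit value to its compensated value. */
    static void apply_ic_lut(uint8_t *dst, int stride, int w, int h,
                             const uint8_t *luty)
    {
        int i, j;
        for (j = 0; j < h; j++) {
            for (i = 0; i < w; i++)
                dst[i] = luty[dst[i]];
            dst += stride;
        }
    }
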
@@ -1585,15 +1612,13 @@ static int vc1_parse_frame_header_adv(VC1Context *v, GetBitContext* gb)
         break;
     case 1:
         v->s.pict_type = B_TYPE;
-        return -1;
-//      break;
+        break;
     case 2:
         v->s.pict_type = I_TYPE;
         break;
     case 3:
         v->s.pict_type = BI_TYPE;
-        return -1;
-//      break;
+        break;
     case 4:
         v->s.pict_type = P_TYPE; // skipped pic
         v->p_frame_skipped = 1;
@@ -1616,6 +1641,13 @@ static int vc1_parse_frame_header_adv(VC1Context *v, GetBitContext* gb)
     if(v->interlace)
         v->uvsamp = get_bits1(gb);
     if(v->finterpflag) v->interpfrm = get_bits(gb, 1);
+    if(v->s.pict_type == B_TYPE) {
+        v->bfraction = get_vlc2(gb, vc1_bfraction_vlc.table, VC1_BFRACTION_VLC_BITS, 1);
+        v->bfraction = vc1_bfraction_lut[v->bfraction];
+        if(v->bfraction == 0) {
+            v->s.pict_type = BI_TYPE; /* XXX: should not happen here */
+        }
+    }
     pqindex = get_bits(gb, 5);
     v->pqindex = pqindex;
     if (v->quantizer_mode == QUANT_FRAME_IMPLICIT)
@@ -1636,6 +1668,7 @@ static int vc1_parse_frame_header_adv(VC1Context *v, GetBitContext* gb)
 
     switch(v->s.pict_type) {
     case I_TYPE:
+    case BI_TYPE:
         status = bitplane_decoding(v->acpred_plane, &v->acpred_is_raw, v);
         if (status < 0) return -1;
         av_log(v->s.avctx, AV_LOG_DEBUG, "ACPRED plane encoding: "
@@ -1741,6 +1774,56 @@ static int vc1_parse_frame_header_adv(VC1Context *v, GetBitContext* gb)
             v->ttfrm = TT_8X8;
         }
         break;
+    case B_TYPE:
+        if(v->postprocflag)
+            v->postproc = get_bits1(gb);
+        if (v->extended_mv) v->mvrange = get_prefix(gb, 0, 3);
+        else v->mvrange = 0;
+        v->k_x = v->mvrange + 9 + (v->mvrange >> 1); //k_x can be 9 10 12 13
+        v->k_y = v->mvrange + 8; //k_y can be 8 9 10 11
+        v->range_x = 1 << (v->k_x - 1);
+        v->range_y = 1 << (v->k_y - 1);
+
+        if (v->pq < 5) v->tt_index = 0;
+        else if(v->pq < 13) v->tt_index = 1;
+        else v->tt_index = 2;
+
+        lowquant = (v->pq > 12) ? 0 : 1;
+        v->mv_mode = get_bits1(gb) ? MV_PMODE_1MV : MV_PMODE_1MV_HPEL_BILIN;
+        v->s.quarter_sample = (v->mv_mode == MV_PMODE_1MV);
+        v->s.mspel = v->s.quarter_sample;
+
+        status = bitplane_decoding(v->direct_mb_plane, &v->dmb_is_raw, v);
+        if (status < 0) return -1;
+        av_log(v->s.avctx, AV_LOG_DEBUG, "MB Direct Type plane encoding: "
+               "Imode: %i, Invert: %i\n", status>>1, status&1);
+        status = bitplane_decoding(v->s.mbskip_table, &v->skip_is_raw, v);
+        if (status < 0) return -1;
+        av_log(v->s.avctx, AV_LOG_DEBUG, "MB Skip plane encoding: "
+               "Imode: %i, Invert: %i\n", status>>1, status&1);
+
+        v->s.mv_table_index = get_bits(gb, 2);
+        v->cbpcy_vlc = &vc1_cbpcy_p_vlc[get_bits(gb, 2)];
+
+        if (v->dquant)
+        {
+            av_log(v->s.avctx, AV_LOG_DEBUG, "VOP DQuant info\n");
+            vop_dquant_decoding(v);
+        }
+
+        v->ttfrm = 0;
+        if (v->vstransform)
+        {
+            v->ttmbf = get_bits(gb, 1);
+            if (v->ttmbf)
+            {
+                v->ttfrm = ttfrm_to_tt[get_bits(gb, 2)];
+            }
+        } else {
+            v->ttmbf = 1;
+            v->ttfrm = TT_8X8;
+        }
+        break;
     }
 
     /* AC Syntax */
@@ -1751,11 +1834,16 @@ static int vc1_parse_frame_header_adv(VC1Context *v, GetBitContext* gb)
     }
     /* DC Syntax */
     v->s.dc_table_index = get_bits(gb, 1);
-    if (v->s.pict_type == I_TYPE && v->dquant) {
+    if ((v->s.pict_type == I_TYPE || v->s.pict_type == BI_TYPE) && v->dquant) {
         av_log(v->s.avctx, AV_LOG_DEBUG, "VOP DQuant info\n");
         vop_dquant_decoding(v);
     }
 
+    v->bi_type = 0;
+    if(v->s.pict_type == BI_TYPE) {
+        v->s.pict_type = B_TYPE;
+        v->bi_type = 1;
+    }
     return 0;
 }
 
@@ -1939,9 +2027,9 @@ static inline void vc1_pred_mv(MpegEncContext *s, int n, int dmv_x, int dmv_y, i
     /* Calculate hybrid prediction as specified in 8.3.5.3.5 */
     if((!s->first_slice_line || (n==2 || n==3)) && (s->mb_x || (n==1 || n==3))) {
         if(is_intra[xy - wrap])
-            sum = ABS(px) + ABS(py);
+            sum = FFABS(px) + FFABS(py);
         else
-            sum = ABS(px - A[0]) + ABS(py - A[1]);
+            sum = FFABS(px - A[0]) + FFABS(py - A[1]);
         if(sum > 32) {
             if(get_bits1(&s->gb)) {
                 px = A[0];
@@ -1952,9 +2040,9 @@ static inline void vc1_pred_mv(MpegEncContext *s, int n, int dmv_x, int dmv_y, i
             }
         } else {
             if(is_intra[xy - 1])
-                sum = ABS(px) + ABS(py);
+                sum = FFABS(px) + FFABS(py);
             else
-                sum = ABS(px - C[0]) + ABS(py - C[1]);
+                sum = FFABS(px - C[0]) + FFABS(py - C[1]);
             if(sum > 32) {
                 if(get_bits1(&s->gb)) {
                     px = A[0];
@@ -1994,6 +2082,10 @@ static void vc1_interp_mc(VC1Context *v)
     my = s->mv[1][0][1];
     uvmx = (mx + ((mx & 3) == 3)) >> 1;
     uvmy = (my + ((my & 3) == 3)) >> 1;
+    if(v->fastuvmc) {
+        uvmx = uvmx + ((uvmx<0)?-(uvmx&1):(uvmx&1));
+        uvmy = uvmy + ((uvmy<0)?-(uvmy&1):(uvmy&1));
+    }
     srcY = s->next_picture.data[0];
     srcU = s->next_picture.data[1];
     srcV = s->next_picture.data[2];
@@ -2023,7 +2115,8 @@ static void vc1_interp_mc(VC1Context *v)
        || (unsigned)src_y > s->v_edge_pos - (my&3) - 16){
         uint8_t *uvbuf= s->edge_emu_buffer + 19 * s->linesize;
 
-        ff_emulated_edge_mc(s->edge_emu_buffer, srcY, s->linesize, 17, 17,
+        srcY -= s->mspel * (1 + s->linesize);
+        ff_emulated_edge_mc(s->edge_emu_buffer, srcY, s->linesize, 17+s->mspel*2, 17+s->mspel*2,
                             src_x - s->mspel, src_y - s->mspel, s->h_edge_pos, s->v_edge_pos);
         srcY = s->edge_emu_buffer;
         ff_emulated_edge_mc(uvbuf     , srcU, s->uvlinesize, 8+1, 8+1,
@@ -2038,8 +2131,8 @@ static void vc1_interp_mc(VC1Context *v)
             uint8_t *src, *src2;
 
             src = srcY;
-            for(j = 0; j < 17; j++) {
-                for(i = 0; i < 17; i++) src[i] = ((src[i] - 128) >> 1) + 128;
+            for(j = 0; j < 17 + s->mspel*2; j++) {
+                for(i = 0; i < 17 + s->mspel*2; i++) src[i] = ((src[i] - 128) >> 1) + 128;
                 src += s->linesize;
             }
             src = srcU; src2 = srcV;
@@ -2052,11 +2145,7 @@ static void vc1_interp_mc(VC1Context *v)
                 src2 += s->uvlinesize;
             }
         }
-    }
-
-    if(v->fastuvmc) {
-        uvmx = uvmx + ((uvmx<0)?(uvmx&1):-(uvmx&1));
-        uvmy = uvmy + ((uvmy<0)?(uvmy&1):-(uvmy&1));
+        srcY += s->mspel * (1 + s->linesize);
     }
 
     mx >>= 1;
@@ -2068,11 +2157,13 @@ static void vc1_interp_mc(VC1Context *v)
     if(s->flags & CODEC_FLAG_GRAY) return;
     /* Chroma MC always uses qpel bilinear */
     uvdxy = ((uvmy & 3) << 2) | (uvmx & 3);
-    dsp->avg_qpel_pixels_tab[1][uvdxy](s->dest[1], srcU, s->uvlinesize);
-    dsp->avg_qpel_pixels_tab[1][uvdxy](s->dest[2], srcV, s->uvlinesize);
+    uvmx = (uvmx&3)<<1;
+    uvmy = (uvmy&3)<<1;
+    dsp->avg_h264_chroma_pixels_tab[0](s->dest[1], srcU, s->uvlinesize, 8, uvmx, uvmy);
+    dsp->avg_h264_chroma_pixels_tab[0](s->dest[2], srcV, s->uvlinesize, 8, uvmx, uvmy);
 }
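
The avg_h264_chroma_pixels_tab call introduced above assumes that function performs plain 2-D bilinear interpolation on an 8x8 block with 1/8-pel weights, which is why the quarter-pel chroma fraction (0..3) is doubled first. A hedged reference version of that kernel, independent of dsputil:

    #include <stdint.h>

    /* Bilinear chroma interpolation with 1/8-pel weights, averaged into dst
     * (the "avg" variant used here for the second, backward prediction). */
    static void avg_chroma_bilin_8x8(uint8_t *dst, const uint8_t *src,
                                     int stride, int x, int y)
    {
        const int a = (8 - x) * (8 - y), b = x * (8 - y);
        const int c = (8 - x) * y,       d = x * y;
        int i, j;

        for (j = 0; j < 8; j++) {
            for (i = 0; i < 8; i++) {
                int p = (a * src[i]          + b * src[i + 1] +
                         c * src[i + stride] + d * src[i + stride + 1] + 32) >> 6;
                dst[i] = (dst[i] + p + 1) >> 1;   /* round-to-nearest average */
            }
            dst += stride;
            src += stride;
        }
    }
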
 
-static always_inline int scale_mv(int value, int bfrac, int inv, int qs)
+static av_always_inline int scale_mv(int value, int bfrac, int inv, int qs)
 {
     int n = bfrac;
 
@@ -2095,18 +2186,26 @@ static always_inline int scale_mv(int value, int bfrac, int inv, int qs)
  */
 static inline void vc1_b_mc(VC1Context *v, int dmv_x[2], int dmv_y[2], int direct, int mode)
 {
+    if(v->use_ic) {
+        v->mv_mode2 = v->mv_mode;
+        v->mv_mode = MV_PMODE_INTENSITY_COMP;
+    }
     if(direct) {
         vc1_mc_1mv(v, 0);
         vc1_interp_mc(v);
+        if(v->use_ic) v->mv_mode = v->mv_mode2;
         return;
     }
     if(mode == BMV_TYPE_INTERPOLATED) {
         vc1_mc_1mv(v, 0);
         vc1_interp_mc(v);
+        if(v->use_ic) v->mv_mode = v->mv_mode2;
         return;
     }
 
-    vc1_mc_1mv(v, (mode == BMV_TYPE_FORWARD));
+    if(v->use_ic && (mode == BMV_TYPE_BACKWARD)) v->mv_mode = v->mv_mode2;
+    vc1_mc_1mv(v, (mode == BMV_TYPE_BACKWARD));
+    if(v->use_ic) v->mv_mode = v->mv_mode2;
 }
 
 static inline void vc1_pred_b_mv(VC1Context *v, int dmv_x[2], int dmv_y[2], int direct, int mvtype)
@@ -2137,10 +2236,10 @@ static inline void vc1_pred_b_mv(VC1Context *v, int dmv_x[2], int dmv_y[2], int
         s->current_picture.motion_val[1][xy][1] = 0;
         return;
     }
-    s->mv[0][0][0] = scale_mv(s->next_picture.motion_val[1][xy][0], v->bfraction, 1, s->quarter_sample);
-    s->mv[0][0][1] = scale_mv(s->next_picture.motion_val[1][xy][1], v->bfraction, 1, s->quarter_sample);
-    s->mv[1][0][0] = scale_mv(s->next_picture.motion_val[1][xy][0], v->bfraction, 0, s->quarter_sample);
-    s->mv[1][0][1] = scale_mv(s->next_picture.motion_val[1][xy][1], v->bfraction, 0, s->quarter_sample);
+    s->mv[0][0][0] = scale_mv(s->next_picture.motion_val[1][xy][0], v->bfraction, 0, s->quarter_sample);
+    s->mv[0][0][1] = scale_mv(s->next_picture.motion_val[1][xy][1], v->bfraction, 0, s->quarter_sample);
+    s->mv[1][0][0] = scale_mv(s->next_picture.motion_val[1][xy][0], v->bfraction, 1, s->quarter_sample);
+    s->mv[1][0][1] = scale_mv(s->next_picture.motion_val[1][xy][1], v->bfraction, 1, s->quarter_sample);
     if(direct) {
         s->current_picture.motion_val[0][xy][0] = s->mv[0][0][0];
         s->current_picture.motion_val[0][xy][1] = s->mv[0][0][1];
@@ -2149,7 +2248,7 @@ static inline void vc1_pred_b_mv(VC1Context *v, int dmv_x[2], int dmv_y[2], int
         return;
     }
 
-    if((mvtype == BMV_TYPE_BACKWARD) || (mvtype == BMV_TYPE_INTERPOLATED)) {
+    if((mvtype == BMV_TYPE_FORWARD) || (mvtype == BMV_TYPE_INTERPOLATED)) {
         C = s->current_picture.motion_val[0][xy - 2];
         A = s->current_picture.motion_val[0][xy - wrap*2];
         off = (s->mb_x == (s->mb_width - 1)) ? -2 : 2;
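
The swapped inv flags above fix which reference each direct-mode vector points at: the forward vector takes the bfraction-scaled part of the co-located MV and the backward vector takes the remainder with opposite sign. A hedged sketch (assuming bfraction carries an implicit denominator of 256, as the surrounding code suggests; scale_mv()'s exact rounding differs slightly for half-pel MVs):

    /* Hedged sketch of direct-mode scaling of one co-located MV component. */
    static int demo_scale_mv(int colocated, int bfraction, int backward)
    {
        int n = backward ? bfraction - 256 : bfraction;  /* backward part points the other way */
        return (colocated * n + 128) >> 8;               /* fixed-point multiply by n/256 */
    }
    /* e.g. bfraction = 128 (B frame halfway between its references) and a
     * co-located component of +8 give a forward part of +4 and a backward
     * part of -4. */
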
@@ -2195,9 +2294,9 @@ static inline void vc1_pred_b_mv(VC1Context *v, int dmv_x[2], int dmv_y[2], int
         /* Calculate hybrid prediction as specified in 8.3.5.3.5 */
         if(0 && !s->first_slice_line && s->mb_x) {
             if(is_intra[xy - wrap])
-                sum = ABS(px) + ABS(py);
+                sum = FFABS(px) + FFABS(py);
             else
-                sum = ABS(px - A[0]) + ABS(py - A[1]);
+                sum = FFABS(px - A[0]) + FFABS(py - A[1]);
             if(sum > 32) {
                 if(get_bits1(&s->gb)) {
                     px = A[0];
@@ -2208,9 +2307,9 @@ static inline void vc1_pred_b_mv(VC1Context *v, int dmv_x[2], int dmv_y[2], int
                 }
             } else {
                 if(is_intra[xy - 2])
-                    sum = ABS(px) + ABS(py);
+                    sum = FFABS(px) + FFABS(py);
                 else
-                    sum = ABS(px - C[0]) + ABS(py - C[1]);
+                    sum = FFABS(px - C[0]) + FFABS(py - C[1]);
                 if(sum > 32) {
                     if(get_bits1(&s->gb)) {
                         px = A[0];
@@ -2226,7 +2325,7 @@ static inline void vc1_pred_b_mv(VC1Context *v, int dmv_x[2], int dmv_y[2], int
         s->mv[0][0][0] = ((px + dmv_x[0] + r_x) & ((r_x << 1) - 1)) - r_x;
         s->mv[0][0][1] = ((py + dmv_y[0] + r_y) & ((r_y << 1) - 1)) - r_y;
     }
-    if((mvtype == BMV_TYPE_FORWARD) || (mvtype == BMV_TYPE_INTERPOLATED)) {
+    if((mvtype == BMV_TYPE_BACKWARD) || (mvtype == BMV_TYPE_INTERPOLATED)) {
         C = s->current_picture.motion_val[1][xy - 2];
         A = s->current_picture.motion_val[1][xy - wrap*2];
         off = (s->mb_x == (s->mb_width - 1)) ? -2 : 2;
@@ -2272,9 +2371,9 @@ static inline void vc1_pred_b_mv(VC1Context *v, int dmv_x[2], int dmv_y[2], int
         /* Calculate hybrid prediction as specified in 8.3.5.3.5 */
         if(0 && !s->first_slice_line && s->mb_x) {
             if(is_intra[xy - wrap])
-                sum = ABS(px) + ABS(py);
+                sum = FFABS(px) + FFABS(py);
             else
-                sum = ABS(px - A[0]) + ABS(py - A[1]);
+                sum = FFABS(px - A[0]) + FFABS(py - A[1]);
             if(sum > 32) {
                 if(get_bits1(&s->gb)) {
                     px = A[0];
@@ -2285,9 +2384,9 @@ static inline void vc1_pred_b_mv(VC1Context *v, int dmv_x[2], int dmv_y[2], int
                 }
             } else {
                 if(is_intra[xy - 2])
-                    sum = ABS(px) + ABS(py);
+                    sum = FFABS(px) + FFABS(py);
                 else
-                    sum = ABS(px - C[0]) + ABS(py - C[1]);
+                    sum = FFABS(px - C[0]) + FFABS(py - C[1]);
                 if(sum > 32) {
                     if(get_bits1(&s->gb)) {
                         px = A[0];
@@ -2820,8 +2919,8 @@ static int vc1_decode_i_block_adv(VC1Context *v, DCTELEM block[64], int n, int c
         if(use_pred) {
             /* scale predictors if needed*/
             if(q2 && q1!=q2) {
-                q1 = q1 * 2 - 1;
-                q2 = q2 * 2 - 1;
+                q1 = q1 * 2 + ((q1 == v->pq) ? v->halfpq : 0) - 1;
+                q2 = q2 * 2 + ((q2 == v->pq) ? v->halfpq : 0) - 1;
 
                 if(dc_pred_dir) { //left
                     for(k = 1; k < 8; k++)
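
The halfpq terms above refine the fixed-point rescaling of AC predictors taken from a neighbour coded at a different quantizer. A hedged sketch of the arithmetic (assuming vc1_dqscale[i] holds approximately (1 << 18) / (i + 1), which the +0x20000 bias and >> 18 imply):

    /* Hedged sketch: rescale an AC predictor x coded at quantizer q1 to the
     * current quantizer q2, i.e. approximately round(x * q2 / q1). */
    static int rescale_ac_pred(int x, int q1, int q2)
    {
        int dqscale_q1 = (1 << 18) / q1;   /* stand-in for vc1_dqscale[q1 - 1] */
        return (x * q2 * dqscale_q1 + 0x20000) >> 18;
    }
    /* With the change above, each quantizer first becomes 2*q - 1, or 2*q when
     * q equals the frame quantizer and the half-step flag halfpq is set, so
     * half-QP steps rescale correctly as well. */
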
@@ -2863,8 +2962,8 @@ static int vc1_decode_i_block_adv(VC1Context *v, DCTELEM block[64], int n, int c
             if(use_pred) {
                 memcpy(ac_val2, ac_val, 8 * 2);
                 if(q2 && q1!=q2) {
-                    q1 = q1 * 2 - 1;
-                    q2 = q2 * 2 - 1;
+                    q1 = q1 * 2 + ((q1 == v->pq) ? v->halfpq : 0) - 1;
+                    q2 = q2 * 2 + ((q2 == v->pq) ? v->halfpq : 0) - 1;
                     for(k = 1; k < 8; k++)
                         ac_val2[k] = (ac_val2[k] * q2 * vc1_dqscale[q1 - 1] + 0x20000) >> 18;
                 }
@@ -2873,8 +2972,8 @@ static int vc1_decode_i_block_adv(VC1Context *v, DCTELEM block[64], int n, int c
             if(use_pred) {
                 memcpy(ac_val2 + 8, ac_val + 8, 8 * 2);
                 if(q2 && q1!=q2) {
-                    q1 = q1 * 2 - 1;
-                    q2 = q2 * 2 - 1;
+                    q1 = q1 * 2 + ((q1 == v->pq) ? v->halfpq : 0) - 1;
+                    q2 = q2 * 2 + ((q2 == v->pq) ? v->halfpq : 0) - 1;
                     for(k = 1; k < 8; k++)
                         ac_val2[k + 8] = (ac_val2[k + 8] * q2 * vc1_dqscale[q1 - 1] + 0x20000) >> 18;
                 }
@@ -2996,8 +3095,8 @@ static int vc1_decode_intra_block(VC1Context *v, DCTELEM block[64], int n, int c
         ac_val -= 16 * s->block_wrap[n];
 
     q1 = s->current_picture.qscale_table[mb_pos];
-    if(dc_pred_dir && c_avail) q2 = s->current_picture.qscale_table[mb_pos - 1];
-    if(!dc_pred_dir && a_avail) q2 = s->current_picture.qscale_table[mb_pos - s->mb_stride];
+    if(dc_pred_dir && c_avail && mb_pos) q2 = s->current_picture.qscale_table[mb_pos - 1];
+    if(!dc_pred_dir && a_avail && mb_pos >= s->mb_stride) q2 = s->current_picture.qscale_table[mb_pos - s->mb_stride];
     if(n && n<4) q2 = q1;
 
     if(coded) {
@@ -3019,8 +3118,8 @@ static int vc1_decode_intra_block(VC1Context *v, DCTELEM block[64], int n, int c
         if(use_pred) {
             /* scale predictors if needed*/
             if(q2 && q1!=q2) {
-                q1 = q1 * 2 - 1;
-                q2 = q2 * 2 - 1;
+                q1 = q1 * 2 + ((q1 == v->pq) ? v->halfpq : 0) - 1;
+                q2 = q2 * 2 + ((q2 == v->pq) ? v->halfpq : 0) - 1;
 
                 if(dc_pred_dir) { //left
                     for(k = 1; k < 8; k++)
@@ -3062,8 +3161,8 @@ static int vc1_decode_intra_block(VC1Context *v, DCTELEM block[64], int n, int c
             if(use_pred) {
                 memcpy(ac_val2, ac_val, 8 * 2);
                 if(q2 && q1!=q2) {
-                    q1 = q1 * 2 - 1;
-                    q2 = q2 * 2 - 1;
+                    q1 = q1 * 2 + ((q1 == v->pq) ? v->halfpq : 0) - 1;
+                    q2 = q2 * 2 + ((q2 == v->pq) ? v->halfpq : 0) - 1;
                     for(k = 1; k < 8; k++)
                         ac_val2[k] = (ac_val2[k] * q2 * vc1_dqscale[q1 - 1] + 0x20000) >> 18;
                 }
@@ -3072,8 +3171,8 @@ static int vc1_decode_intra_block(VC1Context *v, DCTELEM block[64], int n, int c
             if(use_pred) {
                 memcpy(ac_val2 + 8, ac_val + 8, 8 * 2);
                 if(q2 && q1!=q2) {
-                    q1 = q1 * 2 - 1;
-                    q2 = q2 * 2 - 1;
+                    q1 = q1 * 2 + ((q1 == v->pq) ? v->halfpq : 0) - 1;
+                    q2 = q2 * 2 + ((q2 == v->pq) ? v->halfpq : 0) - 1;
                     for(k = 1; k < 8; k++)
                         ac_val2[k + 8] = (ac_val2[k + 8] * q2 * vc1_dqscale[q1 - 1] + 0x20000) >> 18;
                 }
@@ -3317,10 +3416,10 @@ static int vc1_decode_p_mb(VC1Context *v)
                     for(j = 0; j < 64; j++) s->block[i][j] += 128;
                     s->dsp.put_pixels_clamped(s->block[i], s->dest[dst_idx] + off, s->linesize >> ((i & 4) >> 2));
                     if(v->pq >= 9 && v->overlap) {
-                        if(v->a_avail)
-                            s->dsp.vc1_v_overlap(s->dest[dst_idx] + off, s->linesize >> ((i & 4) >> 2), (i<4) ? ((i&1)>>1) : (s->mb_y&1));
                         if(v->c_avail)
-                            s->dsp.vc1_h_overlap(s->dest[dst_idx] + off, s->linesize >> ((i & 4) >> 2), (i<4) ? (i&1) : (s->mb_x&1));
+                            s->dsp.vc1_h_overlap(s->dest[dst_idx] + off, s->linesize >> ((i & 4) >> 2));
+                        if(v->a_avail)
+                            s->dsp.vc1_v_overlap(s->dest[dst_idx] + off, s->linesize >> ((i & 4) >> 2));
                     }
                 } else if(val) {
                     vc1_decode_p_block(v, s->block[i], i, mquant, ttmb, first_block);
@@ -3420,10 +3519,10 @@ static int vc1_decode_p_mb(VC1Context *v)
                     for(j = 0; j < 64; j++) s->block[i][j] += 128;
                     s->dsp.put_pixels_clamped(s->block[i], s->dest[dst_idx] + off, (i&4)?s->uvlinesize:s->linesize);
                     if(v->pq >= 9 && v->overlap) {
-                        if(v->a_avail)
-                            s->dsp.vc1_v_overlap(s->dest[dst_idx] + off, s->linesize >> ((i & 4) >> 2), (i<4) ? ((i&1)>>1) : (s->mb_y&1));
                         if(v->c_avail)
-                            s->dsp.vc1_h_overlap(s->dest[dst_idx] + off, s->linesize >> ((i & 4) >> 2), (i<4) ? (i&1) : (s->mb_x&1));
+                            s->dsp.vc1_h_overlap(s->dest[dst_idx] + off, s->linesize >> ((i & 4) >> 2));
+                        if(v->a_avail)
+                            s->dsp.vc1_v_overlap(s->dest[dst_idx] + off, s->linesize >> ((i & 4) >> 2));
                     }
                 } else if(is_coded[i]) {
                     status = vc1_decode_p_block(v, s->block[i], i, mquant, ttmb, first_block);
@@ -3518,7 +3617,7 @@ static void vc1_decode_b_mb(VC1Context *v)
                 break;
             case 2:
                 bmvtype = BMV_TYPE_INTERPOLATED;
-                dmv_x[1] = dmv_y[1] = 0;
+                dmv_x[0] = dmv_y[0] = 0;
             }
         }
     }
@@ -3557,7 +3656,7 @@ static void vc1_decode_b_mb(VC1Context *v)
             vc1_pred_b_mv(v, dmv_x, dmv_y, direct, bmvtype);
         } else {
             if(bmvtype == BMV_TYPE_INTERPOLATED) {
-                GET_MVDATA(dmv_x[1], dmv_y[1]);
+                GET_MVDATA(dmv_x[0], dmv_y[0]);
                 if(!mb_has_coeffs) {
                     /* interpolated skipped block */
                     vc1_pred_b_mv(v, dmv_x, dmv_y, direct, bmvtype);
@@ -3662,6 +3761,8 @@ static void vc1_decode_i_blocks(VC1Context *v)
             mb_pos = s->mb_x + s->mb_y * s->mb_width;
             s->current_picture.mb_type[mb_pos] = MB_TYPE_INTRA;
             s->current_picture.qscale_table[mb_pos] = v->pq;
+            s->current_picture.motion_val[1][s->block_index[0]][0] = 0;
+            s->current_picture.motion_val[1][s->block_index[0]][1] = 0;
 
             // do actual MB decoding and displaying
             cbp = get_vlc2(&v->s.gb, ff_msmp4_mb_i_vlc.table, MB_INTRA_VLC_BITS, 2);
@@ -3687,26 +3788,26 @@ static void vc1_decode_i_blocks(VC1Context *v)
 
             vc1_put_block(v, s->block);
             if(v->pq >= 9 && v->overlap) {
-                if(!s->first_slice_line) {
-                    s->dsp.vc1_v_overlap(s->dest[0], s->linesize, 0);
-                    s->dsp.vc1_v_overlap(s->dest[0] + 8, s->linesize, 0);
+                if(s->mb_x) {
+                    s->dsp.vc1_h_overlap(s->dest[0], s->linesize);
+                    s->dsp.vc1_h_overlap(s->dest[0] + 8 * s->linesize, s->linesize);
                     if(!(s->flags & CODEC_FLAG_GRAY)) {
-                        s->dsp.vc1_v_overlap(s->dest[1], s->uvlinesize, s->mb_y&1);
-                        s->dsp.vc1_v_overlap(s->dest[2], s->uvlinesize, s->mb_y&1);
+                        s->dsp.vc1_h_overlap(s->dest[1], s->uvlinesize);
+                        s->dsp.vc1_h_overlap(s->dest[2], s->uvlinesize);
                     }
                 }
-                s->dsp.vc1_v_overlap(s->dest[0] + 8 * s->linesize, s->linesize, 1);
-                s->dsp.vc1_v_overlap(s->dest[0] + 8 * s->linesize + 8, s->linesize, 1);
-                if(s->mb_x) {
-                    s->dsp.vc1_h_overlap(s->dest[0], s->linesize, 0);
-                    s->dsp.vc1_h_overlap(s->dest[0] + 8 * s->linesize, s->linesize, 0);
+                s->dsp.vc1_h_overlap(s->dest[0] + 8, s->linesize);
+                s->dsp.vc1_h_overlap(s->dest[0] + 8 * s->linesize + 8, s->linesize);
+                if(!s->first_slice_line) {
+                    s->dsp.vc1_v_overlap(s->dest[0], s->linesize);
+                    s->dsp.vc1_v_overlap(s->dest[0] + 8, s->linesize);
                     if(!(s->flags & CODEC_FLAG_GRAY)) {
-                        s->dsp.vc1_h_overlap(s->dest[1], s->uvlinesize, s->mb_x&1);
-                        s->dsp.vc1_h_overlap(s->dest[2], s->uvlinesize, s->mb_x&1);
+                        s->dsp.vc1_v_overlap(s->dest[1], s->uvlinesize);
+                        s->dsp.vc1_v_overlap(s->dest[2], s->uvlinesize);
                     }
                 }
-                s->dsp.vc1_h_overlap(s->dest[0] + 8, s->linesize, 1);
-                s->dsp.vc1_h_overlap(s->dest[0] + 8 * s->linesize + 8, s->linesize, 1);
+                s->dsp.vc1_v_overlap(s->dest[0] + 8 * s->linesize, s->linesize);
+                s->dsp.vc1_v_overlap(s->dest[0] + 8 * s->linesize + 8, s->linesize);
             }
 
             if(get_bits_count(&s->gb) > v->bits) {
@@ -3758,10 +3859,6 @@ static void vc1_decode_i_blocks_adv(VC1Context *v)
         break;
     }
 
-    /* Set DC scale - y and c use the same */
-    s->y_dc_scale = s->y_dc_scale_table[v->pq];
-    s->c_dc_scale = s->c_dc_scale_table[v->pq];
-
     //do frame decode
     s->mb_x = s->mb_y = 0;
     s->mb_intra = 1;
@@ -3774,6 +3871,8 @@ static void vc1_decode_i_blocks_adv(VC1Context *v)
             s->dsp.clear_blocks(s->block[0]);
             mb_pos = s->mb_x + s->mb_y * s->mb_stride;
             s->current_picture.mb_type[mb_pos] = MB_TYPE_INTRA;
+            s->current_picture.motion_val[1][s->block_index[0]][0] = 0;
+            s->current_picture.motion_val[1][s->block_index[0]][1] = 0;
 
             // do actual MB decoding and displaying
             cbp = get_vlc2(&v->s.gb, ff_msmp4_mb_i_vlc.table, MB_INTRA_VLC_BITS, 2);
@@ -3793,6 +3892,9 @@ static void vc1_decode_i_blocks_adv(VC1Context *v)
             GET_MQUANT();
 
             s->current_picture.qscale_table[mb_pos] = mquant;
+            /* Set DC scale - y and c use the same */
+            s->y_dc_scale = s->y_dc_scale_table[mquant];
+            s->c_dc_scale = s->c_dc_scale_table[mquant];
 
             for(k = 0; k < 6; k++) {
                 val = ((cbp >> (5 - k)) & 1);
@@ -3815,26 +3917,26 @@ static void vc1_decode_i_blocks_adv(VC1Context *v)
 
             vc1_put_block(v, s->block);
             if(overlap) {
-                if(!s->first_slice_line) {
-                    s->dsp.vc1_v_overlap(s->dest[0], s->linesize, 0);
-                    s->dsp.vc1_v_overlap(s->dest[0] + 8, s->linesize, 0);
+                if(s->mb_x) {
+                    s->dsp.vc1_h_overlap(s->dest[0], s->linesize);
+                    s->dsp.vc1_h_overlap(s->dest[0] + 8 * s->linesize, s->linesize);
                     if(!(s->flags & CODEC_FLAG_GRAY)) {
-                        s->dsp.vc1_v_overlap(s->dest[1], s->uvlinesize, s->mb_y&1);
-                        s->dsp.vc1_v_overlap(s->dest[2], s->uvlinesize, s->mb_y&1);
+                        s->dsp.vc1_h_overlap(s->dest[1], s->uvlinesize);
+                        s->dsp.vc1_h_overlap(s->dest[2], s->uvlinesize);
                     }
                 }
-                s->dsp.vc1_v_overlap(s->dest[0] + 8 * s->linesize, s->linesize, 1);
-                s->dsp.vc1_v_overlap(s->dest[0] + 8 * s->linesize + 8, s->linesize, 1);
-                if(s->mb_x) {
-                    s->dsp.vc1_h_overlap(s->dest[0], s->linesize, 0);
-                    s->dsp.vc1_h_overlap(s->dest[0] + 8 * s->linesize, s->linesize, 0);
+                s->dsp.vc1_h_overlap(s->dest[0] + 8, s->linesize);
+                s->dsp.vc1_h_overlap(s->dest[0] + 8 * s->linesize + 8, s->linesize);
+                if(!s->first_slice_line) {
+                    s->dsp.vc1_v_overlap(s->dest[0], s->linesize);
+                    s->dsp.vc1_v_overlap(s->dest[0] + 8, s->linesize);
                     if(!(s->flags & CODEC_FLAG_GRAY)) {
-                        s->dsp.vc1_h_overlap(s->dest[1], s->uvlinesize, s->mb_x&1);
-                        s->dsp.vc1_h_overlap(s->dest[2], s->uvlinesize, s->mb_x&1);
+                        s->dsp.vc1_v_overlap(s->dest[1], s->uvlinesize);
+                        s->dsp.vc1_v_overlap(s->dest[2], s->uvlinesize);
                     }
                 }
-                s->dsp.vc1_h_overlap(s->dest[0] + 8, s->linesize, 1);
-                s->dsp.vc1_h_overlap(s->dest[0] + 8 * s->linesize + 8, s->linesize, 1);
+                s->dsp.vc1_v_overlap(s->dest[0] + 8 * s->linesize, s->linesize);
+                s->dsp.vc1_v_overlap(s->dest[0] + 8 * s->linesize + 8, s->linesize);
             }
 
             if(get_bits_count(&s->gb) > v->bits) {
@@ -3981,9 +4083,12 @@ static void vc1_decode_blocks(VC1Context *v)
             vc1_decode_p_blocks(v);
         break;
     case B_TYPE:
-        if(v->bi_type)
-            vc1_decode_i_blocks(v);
-        else
+        if(v->bi_type){
+            if(v->profile == PROFILE_ADVANCED)
+                vc1_decode_i_blocks_adv(v);
+            else
+                vc1_decode_i_blocks(v);
+        }else
             vc1_decode_b_blocks(v);
         break;
     }
@@ -4049,7 +4154,7 @@ static int vc1_decode_init(AVCodecContext *avctx)
         }
         while(edata_size > 8) {
             // test if we've found header
-            if(BE_32(edata) == 0x0000010F) {
+            if(AV_RB32(edata) == 0x0000010F) {
                 edata += 4;
                 edata_size -= 4;
                 break;
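
AV_RB32() reads four bytes as one big-endian 32-bit word, so the comparison above is simply a byte-wise match against the VC-1 sequence-header start code 00 00 01 0F (and 00 00 01 0E for the entry point below). A minimal equivalent, for reference:

    #include <stdint.h>

    /* Big-endian 32-bit read, equivalent to FFmpeg's AV_RB32(). */
    static uint32_t rb32(const uint8_t *p)
    {
        return ((uint32_t)p[0] << 24) | (p[1] << 16) | (p[2] << 8) | p[3];
    }
    /* rb32(edata) == 0x0000010F  <=>  edata[0..3] == 00 00 01 0F */
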
@@ -4065,7 +4170,7 @@ static int vc1_decode_init(AVCodecContext *avctx)
 
         while(edata_size > 8) {
             // test if we've found entry point
-            if(BE_32(edata) == 0x0000010E) {
+            if(AV_RB32(edata) == 0x0000010E) {
                 edata += 4;
                 edata_size -= 4;
                 break;
@@ -4080,6 +4185,7 @@ static int vc1_decode_init(AVCodecContext *avctx)
           return -1;
     }
     avctx->has_b_frames= !!(avctx->max_b_frames);
+    s->low_delay = !avctx->has_b_frames;
 
     s->mb_width = (avctx->coded_width+15)>>4;
     s->mb_height = (avctx->coded_height+15)>>4;
@@ -4140,8 +4246,6 @@ static int vc1_decode_frame(AVCodecContext *avctx,
         s->current_picture_ptr= &s->picture[i];
     }
 
-    avctx->has_b_frames= !s->low_delay;
-
     //for advanced profile we need to unescape buffer
     if (avctx->codec_id == CODEC_ID_VC1) {
         int i, buf_size2;
@@ -4160,18 +4264,18 @@ static int vc1_decode_frame(AVCodecContext *avctx,
     // do parse frame header
     if(v->profile < PROFILE_ADVANCED) {
         if(vc1_parse_frame_header(v, &s->gb) == -1) {
-            if(buf2)av_free(buf2);
+            av_free(buf2);
             return -1;
         }
     } else {
         if(vc1_parse_frame_header_adv(v, &s->gb) == -1) {
-            if(buf2)av_free(buf2);
+            av_free(buf2);
             return -1;
         }
     }
 
     if(s->pict_type != I_TYPE && !v->res_rtm_flag){
-        if(buf2)av_free(buf2);
+        av_free(buf2);
         return -1;
     }
 
@@ -4181,7 +4285,7 @@ static int vc1_decode_frame(AVCodecContext *avctx,
 
     /* skip B-frames if we don't have reference frames */
     if(s->last_picture_ptr==NULL && (s->pict_type==B_TYPE || s->dropable)){
-        if(buf2)av_free(buf2);
+        av_free(buf2);
         return -1;//buf_size;
     }
     /* skip b frames if we are in a hurry */
@@ -4189,12 +4293,12 @@ static int vc1_decode_frame(AVCodecContext *avctx,
     if(   (avctx->skip_frame >= AVDISCARD_NONREF && s->pict_type==B_TYPE)
        || (avctx->skip_frame >= AVDISCARD_NONKEY && s->pict_type!=I_TYPE)
        ||  avctx->skip_frame >= AVDISCARD_ALL) {
-        if(buf2)av_free(buf2);
+        av_free(buf2);
         return buf_size;
     }
     /* skip everything if we are in a hurry>=5 */
     if(avctx->hurry_up>=5) {
-        if(buf2)av_free(buf2);
+        av_free(buf2);
         return -1;//buf_size;
     }
 
@@ -4206,7 +4310,7 @@ static int vc1_decode_frame(AVCodecContext *avctx,
     }
 
     if(MPV_frame_start(s, avctx) < 0) {
-        if(buf2)av_free(buf2);
+        av_free(buf2);
         return -1;
     }
 
@@ -4238,7 +4342,7 @@ assert(s->current_picture.pict_type == s->pict_type);
     /* we subtract 1 because it is added in utils.c    */
     avctx->frame_number = s->picture_number - 1;
 
-    if(buf2)av_free(buf2);
+    av_free(buf2);
     return buf_size;
 }