diff --git a/libavcodec/vp3.c b/libavcodec/vp3.c
index 44f0986baa3276cc787a791b2f4cb5ac8d17bb89..f46ac32a9aa092cafc0dbe767b0cb074885e59b2 100644
--- a/libavcodec/vp3.c
+++ b/libavcodec/vp3.c
@@ -1,25 +1,25 @@
 /*
  * Copyright (C) 2003-2004 the ffmpeg project
  *
- * This file is part of FFmpeg.
+ * This file is part of Libav.
  *
- * FFmpeg is free software; you can redistribute it and/or
+ * Libav is free software; you can redistribute it and/or
  * modify it under the terms of the GNU Lesser General Public
  * License as published by the Free Software Foundation; either
  * version 2.1 of the License, or (at your option) any later version.
  *
- * FFmpeg is distributed in the hope that it will be useful,
+ * Libav is distributed in the hope that it will be useful,
  * but WITHOUT ANY WARRANTY; without even the implied warranty of
  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
  * Lesser General Public License for more details.
  *
  * You should have received a copy of the GNU Lesser General Public
- * License along with FFmpeg; if not, write to the Free Software
+ * License along with Libav; if not, write to the Free Software
  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
  */
 
 /**
- * @file libavcodec/vp3.c
+ * @file
  * On2 VP3 Video Decoder
  *
  * VP3 Video Decoder by Mike Melanson (mike at multimedia.cx)
 #include <stdlib.h>
 #include <string.h>
 
+#include "libavutil/imgutils.h"
 #include "avcodec.h"
+#include "internal.h"
 #include "dsputil.h"
 #include "get_bits.h"
-
+#include "videodsp.h"
 #include "vp3data.h"
+#include "vp3dsp.h"
 #include "xiph.h"
+#include "thread.h"
 
 #define FRAGMENT_PIXELS 8
 
-static av_cold int vp3_decode_end(AVCodecContext *avctx);
-
 //FIXME split things out into their own arrays
 typedef struct Vp3Fragment {
     int16_t dc;
     uint8_t coding_method;
-    int8_t motion_x;
-    int8_t motion_y;
     uint8_t qpi;
 } Vp3Fragment;
 
@@ -130,13 +130,18 @@ typedef struct Vp3DecodeContext {
     int theora, theora_tables;
     int version;
     int width, height;
+    int chroma_x_shift, chroma_y_shift;
     AVFrame golden_frame;
     AVFrame last_frame;
     AVFrame current_frame;
     int keyframe;
     DSPContext dsp;
+    VideoDSPContext vdsp;
+    VP3DSPContext vp3dsp;
+    DECLARE_ALIGNED(16, int16_t, block)[64];
     int flipped_image;
     int last_slice_end;
+    int skip_loop_filter;
 
     int qps[3];
     int nqps;
@@ -158,13 +163,15 @@ typedef struct Vp3DecodeContext {
     int macroblock_height;
 
     int fragment_count;
-    int fragment_width;
-    int fragment_height;
+    int fragment_width[2];
+    int fragment_height[2];
 
     Vp3Fragment *all_fragments;
     int fragment_start[3];
     int data_offset[3];
 
+    int8_t (*motion_val[2])[2];
+
     ScanTable scantable;
 
     /* tables */
@@ -221,7 +228,7 @@ typedef struct Vp3DecodeContext {
 
     /* these arrays need to be on 16-byte boundaries since SSE2 operations
      * index into them */
-    DECLARE_ALIGNED(16, int16_t, qmat)[3][2][3][64];     //<qmat[qpi][is_inter][plane]
+    DECLARE_ALIGNED(16, int16_t, qmat)[3][2][3][64];     ///< qmat[qpi][is_inter][plane]
 
     /* This table contains superblock_count * 16 entries. Each set of 16
      * numbers corresponds to the fragment indexes 0..15 of the superblock.
@@ -233,15 +240,14 @@ typedef struct Vp3DecodeContext {
      * is coded. */
     unsigned char *macroblock_coding;
 
-    uint8_t edge_emu_buffer[9*2048]; //FIXME dynamic alloc
-    int8_t qscale_table[2048]; //FIXME dynamic alloc (width+15)/16
+    uint8_t *edge_emu_buffer;
 
     /* Huffman decode */
     int hti;
     unsigned int hbits;
     int entries;
     int huff_code_size;
-    uint16_t huffman_table[80][32][2];
+    uint32_t huffman_table[80][32][2];
 
     uint8_t filter_limit_values[64];
     DECLARE_ALIGNED(8, int, bounding_values_array)[256+2];
@@ -251,133 +257,95 @@ typedef struct Vp3DecodeContext {
  * VP3 specific functions
  ************************************************************************/
 
-/*
- * This function sets up all of the various blocks mappings:
- * superblocks <-> fragments, macroblocks <-> fragments,
- * superblocks <-> macroblocks
- *
- * Returns 0 is successful; returns 1 if *anything* went wrong.
- */
-static int init_block_mapping(Vp3DecodeContext *s)
+static void vp3_decode_flush(AVCodecContext *avctx)
 {
-    int i, j;
-    signed int hilbert_walk_mb[4];
-
-    int current_fragment = 0;
-    int current_width = 0;
-    int current_height = 0;
-    int right_edge = 0;
-    int bottom_edge = 0;
-    int superblock_row_inc = 0;
-    int mapping_index = 0;
-
-    static const signed char travel_width[16] = {
-         1,  1,  0, -1,
-         0,  0,  1,  0,
-         1,  0,  1,  0,
-         0, -1,  0,  1
-    };
-
-    static const signed char travel_height[16] = {
-         0,  0,  1,  0,
-         1,  1,  0, -1,
-         0,  1,  0, -1,
-        -1,  0, -1,  0
-    };
+    Vp3DecodeContext *s = avctx->priv_data;
 
-    hilbert_walk_mb[0] = 1;
-    hilbert_walk_mb[1] = s->macroblock_width;
-    hilbert_walk_mb[2] = 1;
-    hilbert_walk_mb[3] = -s->macroblock_width;
-
-    /* iterate through each superblock (all planes) and map the fragments */
-    for (i = 0; i < s->superblock_count; i++) {
-        /* time to re-assign the limits? */
-        if (i == 0) {
-
-            /* start of Y superblocks */
-            right_edge = s->fragment_width;
-            bottom_edge = s->fragment_height;
-            current_width = -1;
-            current_height = 0;
-            superblock_row_inc = 3 * s->fragment_width -
-                (s->y_superblock_width * 4 - s->fragment_width);
-
-            /* the first operation for this variable is to advance by 1 */
-            current_fragment = -1;
-
-        } else if (i == s->u_superblock_start) {
-
-            /* start of U superblocks */
-            right_edge = s->fragment_width / 2;
-            bottom_edge = s->fragment_height / 2;
-            current_width = -1;
-            current_height = 0;
-            superblock_row_inc = 3 * (s->fragment_width / 2) -
-                (s->c_superblock_width * 4 - s->fragment_width / 2);
-
-            /* the first operation for this variable is to advance by 1 */
-            current_fragment = s->fragment_start[1] - 1;
-
-        } else if (i == s->v_superblock_start) {
-
-            /* start of V superblocks */
-            right_edge = s->fragment_width / 2;
-            bottom_edge = s->fragment_height / 2;
-            current_width = -1;
-            current_height = 0;
-            superblock_row_inc = 3 * (s->fragment_width / 2) -
-                (s->c_superblock_width * 4 - s->fragment_width / 2);
-
-            /* the first operation for this variable is to advance by 1 */
-            current_fragment = s->fragment_start[2] - 1;
+    if (s->golden_frame.data[0]) {
+        if (s->golden_frame.data[0] == s->last_frame.data[0])
+            memset(&s->last_frame, 0, sizeof(AVFrame));
+        if (s->current_frame.data[0] == s->golden_frame.data[0])
+            memset(&s->current_frame, 0, sizeof(AVFrame));
+        ff_thread_release_buffer(avctx, &s->golden_frame);
+    }
+    if (s->last_frame.data[0]) {
+        if (s->current_frame.data[0] == s->last_frame.data[0])
+            memset(&s->current_frame, 0, sizeof(AVFrame));
+        ff_thread_release_buffer(avctx, &s->last_frame);
+    }
+    if (s->current_frame.data[0])
+        ff_thread_release_buffer(avctx, &s->current_frame);
+}
 
-        }
+static av_cold int vp3_decode_end(AVCodecContext *avctx)
+{
+    Vp3DecodeContext *s = avctx->priv_data;
+    int i;
 
-        if (current_width >= right_edge - 1) {
-            /* reset width and move to next superblock row */
-            current_width = -1;
-            current_height += 4;
+    av_freep(&s->superblock_coding);
+    av_freep(&s->all_fragments);
+    av_freep(&s->coded_fragment_list[0]);
+    av_freep(&s->dct_tokens_base);
+    av_freep(&s->superblock_fragments);
+    av_freep(&s->macroblock_coding);
+    av_freep(&s->motion_val[0]);
+    av_freep(&s->motion_val[1]);
+    av_freep(&s->edge_emu_buffer);
+
+    if (avctx->internal->is_copy)
+        return 0;
 
-            /* fragment is now at the start of a new superblock row */
-            current_fragment += superblock_row_inc;
-        }
+    for (i = 0; i < 16; i++) {
+        ff_free_vlc(&s->dc_vlc[i]);
+        ff_free_vlc(&s->ac_vlc_1[i]);
+        ff_free_vlc(&s->ac_vlc_2[i]);
+        ff_free_vlc(&s->ac_vlc_3[i]);
+        ff_free_vlc(&s->ac_vlc_4[i]);
+    }
 
-        /* iterate through all 16 fragments in a superblock */
-        for (j = 0; j < 16; j++) {
-            current_fragment += travel_width[j] + right_edge * travel_height[j];
-            current_width += travel_width[j];
-            current_height += travel_height[j];
-
-            /* check if the fragment is in bounds */
-            if ((current_width < right_edge) &&
-                (current_height < bottom_edge)) {
-                s->superblock_fragments[mapping_index] = current_fragment;
-            } else {
-                s->superblock_fragments[mapping_index] = -1;
-            }
+    ff_free_vlc(&s->superblock_run_length_vlc);
+    ff_free_vlc(&s->fragment_run_length_vlc);
+    ff_free_vlc(&s->mode_code_vlc);
+    ff_free_vlc(&s->motion_vector_vlc);
 
-            mapping_index++;
-        }
-    }
+    /* release all frames */
+    vp3_decode_flush(avctx);
 
-    return 0;  /* successful path out */
+    return 0;
 }
 
 /*
- * This function wipes out all of the fragment data.
+ * This function sets up all of the various block mappings:
+ * superblocks <-> fragments, macroblocks <-> fragments,
+ * superblocks <-> macroblocks
+ *
+ * @return 0 if successful; returns 1 if *anything* went wrong.
  */
-static void init_frame(Vp3DecodeContext *s, GetBitContext *gb)
+static int init_block_mapping(Vp3DecodeContext *s)
 {
-    int i;
+    int sb_x, sb_y, plane;
+    int x, y, i, j = 0;
 
-    /* zero out all of the fragment information */
-    for (i = 0; i < s->fragment_count; i++) {
-        s->all_fragments[i].motion_x = 127;
-        s->all_fragments[i].motion_y = 127;
-        s->all_fragments[i].dc = 0;
-        s->all_fragments[i].qpi = 0;
+    for (plane = 0; plane < 3; plane++) {
+        int sb_width    = plane ? s->c_superblock_width  : s->y_superblock_width;
+        int sb_height   = plane ? s->c_superblock_height : s->y_superblock_height;
+        int frag_width  = s->fragment_width[!!plane];
+        int frag_height = s->fragment_height[!!plane];
+
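+        /* walk the 4x4 fragment grid of each superblock in Hilbert order;
+         * positions that fall outside the plane are mapped to -1 so later
+         * passes can skip them */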
+        for (sb_y = 0; sb_y < sb_height; sb_y++)
+            for (sb_x = 0; sb_x < sb_width; sb_x++)
+                for (i = 0; i < 16; i++) {
+                    x = 4*sb_x + hilbert_offset[i][0];
+                    y = 4*sb_y + hilbert_offset[i][1];
+
+                    if (x < frag_width && y < frag_height)
+                        s->superblock_fragments[j++] = s->fragment_start[plane] + y*frag_width + x;
+                    else
+                        s->superblock_fragments[j++] = -1;
+                }
     }
+
+    return 0;  /* successful path out */
 }
 
 /*
@@ -416,8 +384,6 @@ static void init_dequantizer(Vp3DecodeContext *s, int qpi)
             s->qmat[qpi][inter][plane][0] = s->qmat[0][inter][plane][0];
         }
     }
-
-    memset(s->qscale_table, (FFMAX(s->qmat[0][0][0][1], s->qmat[0][0][1][1])+8)/16, 512); //FIXME finetune
 }
 
 /*
@@ -434,6 +400,7 @@ static void init_loop_filter(Vp3DecodeContext *s)
     int value;
 
     filter_limit = s->filter_limit_values[s->qps[0]];
+    assert(filter_limit < 128);
 
     /* set up the bounding values */
     memset(s->bounding_values_array, 0, 256 * sizeof(int));
@@ -472,8 +439,15 @@ static int unpack_superblocks(Vp3DecodeContext *s, GetBitContext *gb)
     } else {
 
         /* unpack the list of partially-coded superblocks */
-        bit = get_bits1(gb);
-        while (current_superblock < s->superblock_count) {
+        bit = get_bits1(gb) ^ 1;
+        current_run = 0;
+
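+        /* "bit" is seeded with the inverse of the flag just read so that
+         * the "bit ^= 1" at the top of the loop recovers the value actually
+         * read from the bitstream on the first pass */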
+        while (current_superblock < s->superblock_count && get_bits_left(gb) > 0) {
+            if (s->theora && current_run == MAXIMUM_LONG_BIT_RUN)
+                bit = get_bits1(gb);
+            else
+                bit ^= 1;
+
                 current_run = get_vlc2(gb,
                     s->superblock_run_length_vlc.table, 6, 2) + 1;
                 if (current_run == 34)
@@ -489,11 +463,6 @@ static int unpack_superblocks(Vp3DecodeContext *s, GetBitContext *gb)
             current_superblock += current_run;
             if (bit)
                 num_partial_superblocks += current_run;
-
-            if (s->theora && current_run == MAXIMUM_LONG_BIT_RUN)
-                bit = get_bits1(gb);
-            else
-                bit ^= 1;
         }
 
         /* unpack the list of fully coded superblocks if any of the blocks were
@@ -502,8 +471,17 @@ static int unpack_superblocks(Vp3DecodeContext *s, GetBitContext *gb)
             int superblocks_decoded = 0;
 
             current_superblock = 0;
-            bit = get_bits1(gb);
-            while (superblocks_decoded < s->superblock_count - num_partial_superblocks) {
+            bit = get_bits1(gb) ^ 1;
+            current_run = 0;
+
+            while (superblocks_decoded < s->superblock_count - num_partial_superblocks
+                   && get_bits_left(gb) > 0) {
+
+                if (s->theora && current_run == MAXIMUM_LONG_BIT_RUN)
+                    bit = get_bits1(gb);
+                else
+                    bit ^= 1;
+
                         current_run = get_vlc2(gb,
                             s->superblock_run_length_vlc.table, 6, 2) + 1;
                         if (current_run == 34)
@@ -522,11 +500,6 @@ static int unpack_superblocks(Vp3DecodeContext *s, GetBitContext *gb)
                 }
                 }
                 superblocks_decoded += current_run;
-
-                if (s->theora && current_run == MAXIMUM_LONG_BIT_RUN)
-                    bit = get_bits1(gb);
-                else
-                    bit ^= 1;
             }
         }
 
@@ -552,18 +525,13 @@ static int unpack_superblocks(Vp3DecodeContext *s, GetBitContext *gb)
         int sb_end = sb_start + (plane ? s->c_superblock_count : s->y_superblock_count);
         int num_coded_frags = 0;
 
-    for (i = sb_start; i < sb_end; i++) {
+    for (i = sb_start; i < sb_end && get_bits_left(gb) > 0; i++) {
 
         /* iterate through all 16 fragments in a superblock */
         for (j = 0; j < 16; j++) {
 
             /* if the fragment is in bounds, check its coding status */
             current_fragment = s->superblock_fragments[i * 16 + j];
-            if (current_fragment >= s->fragment_count) {
-                av_log(s->avctx, AV_LOG_ERROR, "  vp3:unpack_superblocks(): bad fragment number (%d >= %d)\n",
-                    current_fragment, s->fragment_count);
-                return 1;
-            }
             if (current_fragment != -1) {
                 int coded = s->superblock_coding[i];
 
@@ -616,6 +584,7 @@ static int unpack_modes(Vp3DecodeContext *s, GetBitContext *gb)
     int coding_mode;
     int custom_mode_alphabet[CODING_MODE_COUNT];
     const int *alphabet;
+    Vp3Fragment *frag;
 
     if (s->keyframe) {
         for (i = 0; i < s->fragment_count; i++)
@@ -640,6 +609,8 @@ static int unpack_modes(Vp3DecodeContext *s, GetBitContext *gb)
          * coded fragments */
         for (sb_y = 0; sb_y < s->y_superblock_height; sb_y++) {
             for (sb_x = 0; sb_x < s->y_superblock_width; sb_x++) {
+                if (get_bits_left(gb) <= 0)
+                    return -1;
 
             for (j = 0; j < 4; j++) {
                 int mb_x = 2*sb_x +   (j>>1);
@@ -654,7 +625,7 @@ static int unpack_modes(Vp3DecodeContext *s, GetBitContext *gb)
                 /* coding modes are only stored if the macroblock has at least one
                  * luma block coded, otherwise it must be INTER_NO_MV */
                 for (k = 0; k < 4; k++) {
-                    current_fragment = BLOCK_Y*s->fragment_width + BLOCK_X;
+                    current_fragment = BLOCK_Y*s->fragment_width[0] + BLOCK_X;
                     if (s->all_fragments[current_fragment].coding_method != MODE_COPY)
                         break;
                 }
@@ -672,20 +643,31 @@ static int unpack_modes(Vp3DecodeContext *s, GetBitContext *gb)
 
                 s->macroblock_coding[current_macroblock] = coding_mode;
                 for (k = 0; k < 4; k++) {
-                    current_fragment =
-                        BLOCK_Y*s->fragment_width + BLOCK_X;
-                    if (s->all_fragments[current_fragment].coding_method !=
-                        MODE_COPY)
-                        s->all_fragments[current_fragment].coding_method =
-                            coding_mode;
+                    frag = s->all_fragments + BLOCK_Y*s->fragment_width[0] + BLOCK_X;
+                    if (frag->coding_method != MODE_COPY)
+                        frag->coding_method = coding_mode;
                 }
-                for (k = 0; k < 2; k++) {
-                    current_fragment = s->fragment_start[k+1] +
-                        mb_y*(s->fragment_width>>1) + mb_x;
-                    if (s->all_fragments[current_fragment].coding_method !=
-                        MODE_COPY)
-                        s->all_fragments[current_fragment].coding_method =
-                            coding_mode;
+
+#define SET_CHROMA_MODES \
+    if (frag[s->fragment_start[1]].coding_method != MODE_COPY) \
+        frag[s->fragment_start[1]].coding_method = coding_mode;\
+    if (frag[s->fragment_start[2]].coding_method != MODE_COPY) \
+        frag[s->fragment_start[2]].coding_method = coding_mode;
+
+                if (s->chroma_y_shift) {
+                    frag = s->all_fragments + mb_y*s->fragment_width[1] + mb_x;
+                    SET_CHROMA_MODES
+                } else if (s->chroma_x_shift) {
+                    frag = s->all_fragments + 2*mb_y*s->fragment_width[1] + mb_x;
+                    for (k = 0; k < 2; k++) {
+                        SET_CHROMA_MODES
+                        frag += s->fragment_width[1];
+                    }
+                } else {
+                    for (k = 0; k < 4; k++) {
+                        frag = s->all_fragments + BLOCK_Y*s->fragment_width[1] + BLOCK_X;
+                        SET_CHROMA_MODES
+                    }
                 }
             }
             }
@@ -703,21 +685,19 @@ static int unpack_vectors(Vp3DecodeContext *s, GetBitContext *gb)
 {
     int j, k, sb_x, sb_y;
     int coding_mode;
-    int motion_x[6];
-    int motion_y[6];
+    int motion_x[4];
+    int motion_y[4];
     int last_motion_x = 0;
     int last_motion_y = 0;
     int prior_last_motion_x = 0;
     int prior_last_motion_y = 0;
     int current_macroblock;
     int current_fragment;
+    int frag;
 
     if (s->keyframe)
         return 0;
 
-    memset(motion_x, 0, 6 * sizeof(int));
-    memset(motion_y, 0, 6 * sizeof(int));
-
     /* coding mode 0 is the VLC scheme; 1 is the fixed code scheme */
     coding_mode = get_bits1(gb);
 
@@ -725,6 +705,8 @@ static int unpack_vectors(Vp3DecodeContext *s, GetBitContext *gb)
      * coded fragments */
     for (sb_y = 0; sb_y < s->y_superblock_height; sb_y++) {
         for (sb_x = 0; sb_x < s->y_superblock_width; sb_x++) {
+            if (get_bits_left(gb) <= 0)
+                return -1;
 
         for (j = 0; j < 4; j++) {
             int mb_x = 2*sb_x +   (j>>1);
@@ -765,9 +747,8 @@ static int unpack_vectors(Vp3DecodeContext *s, GetBitContext *gb)
 
                 /* fetch 4 vectors from the bitstream, one for each
                  * Y fragment, then average for the C fragment vectors */
-                motion_x[4] = motion_y[4] = 0;
                 for (k = 0; k < 4; k++) {
-                    current_fragment = BLOCK_Y*s->fragment_width + BLOCK_X;
+                    current_fragment = BLOCK_Y*s->fragment_width[0] + BLOCK_X;
                     if (s->all_fragments[current_fragment].coding_method != MODE_COPY) {
                         if (coding_mode == 0) {
                             motion_x[k] = motion_vector_table[get_vlc2(gb, s->motion_vector_vlc.table, 6, 2)];
@@ -782,14 +763,7 @@ static int unpack_vectors(Vp3DecodeContext *s, GetBitContext *gb)
                         motion_x[k] = 0;
                         motion_y[k] = 0;
                     }
-                    motion_x[4] += motion_x[k];
-                    motion_y[4] += motion_y[k];
                 }
-
-                motion_x[5]=
-                motion_x[4]= RSHIFT(motion_x[4], 2);
-                motion_y[5]=
-                motion_y[4]= RSHIFT(motion_y[4], 2);
                 break;
 
             case MODE_INTER_LAST_MV:
@@ -826,24 +800,55 @@ static int unpack_vectors(Vp3DecodeContext *s, GetBitContext *gb)
             /* assign the motion vectors to the correct fragments */
             for (k = 0; k < 4; k++) {
                 current_fragment =
-                    BLOCK_Y*s->fragment_width + BLOCK_X;
+                    BLOCK_Y*s->fragment_width[0] + BLOCK_X;
                 if (s->macroblock_coding[current_macroblock] == MODE_INTER_FOURMV) {
-                    s->all_fragments[current_fragment].motion_x = motion_x[k];
-                    s->all_fragments[current_fragment].motion_y = motion_y[k];
+                    s->motion_val[0][current_fragment][0] = motion_x[k];
+                    s->motion_val[0][current_fragment][1] = motion_y[k];
                 } else {
-                    s->all_fragments[current_fragment].motion_x = motion_x[0];
-                    s->all_fragments[current_fragment].motion_y = motion_y[0];
+                    s->motion_val[0][current_fragment][0] = motion_x[0];
+                    s->motion_val[0][current_fragment][1] = motion_y[0];
                 }
             }
-            for (k = 0; k < 2; k++) {
-                current_fragment = s->fragment_start[k+1] +
-                    mb_y*(s->fragment_width>>1) + mb_x;
+
+            if (s->chroma_y_shift) {
+                if (s->macroblock_coding[current_macroblock] == MODE_INTER_FOURMV) {
+                    motion_x[0] = RSHIFT(motion_x[0] + motion_x[1] + motion_x[2] + motion_x[3], 2);
+                    motion_y[0] = RSHIFT(motion_y[0] + motion_y[1] + motion_y[2] + motion_y[3], 2);
+                }
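+                /* halve the luma-resolution vector for 4:2:0 chroma, keeping
+                 * the half-pel bit set if it was set before halving */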
+                motion_x[0] = (motion_x[0]>>1) | (motion_x[0]&1);
+                motion_y[0] = (motion_y[0]>>1) | (motion_y[0]&1);
+                frag = mb_y*s->fragment_width[1] + mb_x;
+                s->motion_val[1][frag][0] = motion_x[0];
+                s->motion_val[1][frag][1] = motion_y[0];
+            } else if (s->chroma_x_shift) {
                 if (s->macroblock_coding[current_macroblock] == MODE_INTER_FOURMV) {
-                    s->all_fragments[current_fragment].motion_x = motion_x[k+4];
-                    s->all_fragments[current_fragment].motion_y = motion_y[k+4];
+                    motion_x[0] = RSHIFT(motion_x[0] + motion_x[1], 1);
+                    motion_y[0] = RSHIFT(motion_y[0] + motion_y[1], 1);
+                    motion_x[1] = RSHIFT(motion_x[2] + motion_x[3], 1);
+                    motion_y[1] = RSHIFT(motion_y[2] + motion_y[3], 1);
                 } else {
-                    s->all_fragments[current_fragment].motion_x = motion_x[0];
-                    s->all_fragments[current_fragment].motion_y = motion_y[0];
+                    motion_x[1] = motion_x[0];
+                    motion_y[1] = motion_y[0];
+                }
+                motion_x[0] = (motion_x[0]>>1) | (motion_x[0]&1);
+                motion_x[1] = (motion_x[1]>>1) | (motion_x[1]&1);
+
+                frag = 2*mb_y*s->fragment_width[1] + mb_x;
+                for (k = 0; k < 2; k++) {
+                    s->motion_val[1][frag][0] = motion_x[k];
+                    s->motion_val[1][frag][1] = motion_y[k];
+                    frag += s->fragment_width[1];
+                }
+            } else {
+                for (k = 0; k < 4; k++) {
+                    frag = BLOCK_Y*s->fragment_width[1] + BLOCK_X;
+                    if (s->macroblock_coding[current_macroblock] == MODE_INTER_FOURMV) {
+                        s->motion_val[1][frag][0] = motion_x[k];
+                        s->motion_val[1][frag][1] = motion_y[k];
+                    } else {
+                        s->motion_val[1][frag][0] = motion_x[0];
+                        s->motion_val[1][frag][1] = motion_y[0];
+                    }
                 }
             }
         }
@@ -861,9 +866,15 @@ static int unpack_block_qpis(Vp3DecodeContext *s, GetBitContext *gb)
     for (qpi = 0; qpi < s->nqps-1 && num_blocks > 0; qpi++) {
         i = blocks_decoded = num_blocks_at_qpi = 0;
 
-        bit = get_bits1(gb);
+        bit = get_bits1(gb) ^ 1;
+        run_length = 0;
 
         do {
+            if (run_length == MAXIMUM_LONG_BIT_RUN)
+                bit = get_bits1(gb);
+            else
+                bit ^= 1;
+
             run_length = get_vlc2(gb, s->superblock_run_length_vlc.table, 6, 2) + 1;
             if (run_length == 34)
                 run_length += get_bits(gb, 12);
@@ -881,12 +892,7 @@ static int unpack_block_qpis(Vp3DecodeContext *s, GetBitContext *gb)
                     j++;
                 }
             }
-
-            if (run_length == MAXIMUM_LONG_BIT_RUN)
-                bit = get_bits1(gb);
-            else
-                bit ^= 1;
-        } while (blocks_decoded < num_blocks);
+        } while (blocks_decoded < num_blocks && get_bits_left(gb) > 0);
 
         num_blocks -= num_blocks_at_qpi;
     }
@@ -914,7 +920,7 @@ static int unpack_vlcs(Vp3DecodeContext *s, GetBitContext *gb,
     int i, j = 0;
     int token;
     int zero_run = 0;
-    DCTELEM coeff = 0;
+    int16_t coeff = 0;
     int bits_to_get;
     int blocks_ended;
     int coeff_i = 0;
@@ -941,11 +947,11 @@ static int unpack_vlcs(Vp3DecodeContext *s, GetBitContext *gb,
     if (blocks_ended)
         dct_tokens[j++] = blocks_ended << 2;
 
-    while (coeff_i < num_coeffs) {
+    while (coeff_i < num_coeffs && get_bits_left(gb) > 0) {
             /* decode a VLC into a token */
-            token = get_vlc2(gb, vlc_table, 5, 3);
+            token = get_vlc2(gb, vlc_table, 11, 3);
             /* use the token to get a zero run, a coefficient, and an eob run */
-            if (token <= 6) {
+            if ((unsigned) token <= 6U) {
                 eob_run = eob_run_base[token];
                 if (eob_run_get_bits[token])
                     eob_run += get_bits(gb, eob_run_get_bits[token]);
@@ -963,7 +969,7 @@ static int unpack_vlcs(Vp3DecodeContext *s, GetBitContext *gb,
                     coeff_i        += eob_run;
                     eob_run = 0;
                 }
-            } else {
+            } else if (token >= 0) {
                 bits_to_get = coeff_get_bits[token];
                 if (bits_to_get)
                     bits_to_get = get_bits(gb, bits_to_get);
@@ -987,7 +993,7 @@ static int unpack_vlcs(Vp3DecodeContext *s, GetBitContext *gb,
                 }
 
                 if (coeff_index + zero_run > 64) {
-                    av_log(s->avctx, AV_LOG_ERROR, "Invalid zero run of %d with"
+                    av_log(s->avctx, AV_LOG_DEBUG, "Invalid zero run of %d with"
                            " %d coeffs left\n", zero_run, 64-coeff_index);
                     zero_run = 64 - coeff_index;
                 }
@@ -997,6 +1003,10 @@ static int unpack_vlcs(Vp3DecodeContext *s, GetBitContext *gb,
                 for (i = coeff_index+1; i <= coeff_index+zero_run; i++)
                     s->num_coded_frags[plane][i]--;
                 coeff_i++;
+            } else {
+                av_log(s->avctx, AV_LOG_ERROR,
+                       "Invalid token %d\n", token);
+                return -1;
             }
     }
 
@@ -1046,23 +1056,29 @@ static int unpack_dct_coeffs(Vp3DecodeContext *s, GetBitContext *gb)
     /* unpack the Y plane DC coefficients */
     residual_eob_run = unpack_vlcs(s, gb, &s->dc_vlc[dc_y_table], 0,
         0, residual_eob_run);
+    if (residual_eob_run < 0)
+        return residual_eob_run;
 
     /* reverse prediction of the Y-plane DC coefficients */
-    reverse_dc_prediction(s, 0, s->fragment_width, s->fragment_height);
+    reverse_dc_prediction(s, 0, s->fragment_width[0], s->fragment_height[0]);
 
     /* unpack the C plane DC coefficients */
     residual_eob_run = unpack_vlcs(s, gb, &s->dc_vlc[dc_c_table], 0,
         1, residual_eob_run);
+    if (residual_eob_run < 0)
+        return residual_eob_run;
     residual_eob_run = unpack_vlcs(s, gb, &s->dc_vlc[dc_c_table], 0,
         2, residual_eob_run);
+    if (residual_eob_run < 0)
+        return residual_eob_run;
 
     /* reverse prediction of the C-plane DC coefficients */
     if (!(s->avctx->flags & CODEC_FLAG_GRAY))
     {
         reverse_dc_prediction(s, s->fragment_start[1],
-            s->fragment_width / 2, s->fragment_height / 2);
+            s->fragment_width[1], s->fragment_height[1]);
         reverse_dc_prediction(s, s->fragment_start[2],
-            s->fragment_width / 2, s->fragment_height / 2);
+            s->fragment_width[1], s->fragment_height[1]);
     }
 
     /* fetch the AC table indexes */
@@ -1091,11 +1107,17 @@ static int unpack_dct_coeffs(Vp3DecodeContext *s, GetBitContext *gb)
     for (i = 1; i <= 63; i++) {
             residual_eob_run = unpack_vlcs(s, gb, y_tables[i], i,
                 0, residual_eob_run);
+            if (residual_eob_run < 0)
+                return residual_eob_run;
 
             residual_eob_run = unpack_vlcs(s, gb, c_tables[i], i,
                 1, residual_eob_run);
+            if (residual_eob_run < 0)
+                return residual_eob_run;
             residual_eob_run = unpack_vlcs(s, gb, c_tables[i], i,
                 2, residual_eob_run);
+            if (residual_eob_run < 0)
+                return residual_eob_run;
     }
 
     return 0;
@@ -1265,8 +1287,8 @@ static void apply_loop_filter(Vp3DecodeContext *s, int plane, int ystart, int ye
     int x, y;
     int *bounding_values= s->bounding_values_array+127;
 
-    int width           = s->fragment_width  >> !!plane;
-    int height          = s->fragment_height >> !!plane;
+    int width           = s->fragment_width[!!plane];
+    int height          = s->fragment_height[!!plane];
     int fragment        = s->fragment_start        [plane] + ystart * width;
     int stride          = s->current_frame.linesize[plane];
     uint8_t *plane_data = s->current_frame.data    [plane];
@@ -1284,14 +1306,14 @@ static void apply_loop_filter(Vp3DecodeContext *s, int plane, int ystart, int ye
             {
                 /* do not perform left edge filter for left columns frags */
                 if (x > 0) {
-                    s->dsp.vp3_h_loop_filter(
+                    s->vp3dsp.h_loop_filter(
                         plane_data + 8*x,
                         stride, bounding_values);
                 }
 
                 /* do not perform top edge filter for top row fragments */
                 if (y > 0) {
-                    s->dsp.vp3_v_loop_filter(
+                    s->vp3dsp.v_loop_filter(
                         plane_data + 8*x,
                         stride, bounding_values);
                 }
@@ -1301,7 +1323,7 @@ static void apply_loop_filter(Vp3DecodeContext *s, int plane, int ystart, int ye
                  * in this frame (it will be filtered in next iteration) */
                 if ((x < width - 1) &&
                     (s->all_fragments[fragment + 1].coding_method == MODE_COPY)) {
-                    s->dsp.vp3_h_loop_filter(
+                    s->vp3dsp.h_loop_filter(
                         plane_data + 8*x + 8,
                         stride, bounding_values);
                 }
@@ -1311,7 +1333,7 @@ static void apply_loop_filter(Vp3DecodeContext *s, int plane, int ystart, int ye
                  * in this frame (it will be filtered in the next row) */
                 if ((y < height - 1) &&
                     (s->all_fragments[fragment + width].coding_method == MODE_COPY)) {
-                    s->dsp.vp3_v_loop_filter(
+                    s->vp3dsp.v_loop_filter(
                         plane_data + 8*x + 8*stride,
                         stride, bounding_values);
                 }
@@ -1324,11 +1346,11 @@ static void apply_loop_filter(Vp3DecodeContext *s, int plane, int ystart, int ye
 }
 
 /**
- * Pulls DCT tokens from the 64 levels to decode and dequant the coefficients
+ * Pull DCT tokens from the 64 levels to decode and dequant the coefficients
  * for the next block in coding order
  */
 static inline int vp3_dequant(Vp3DecodeContext *s, Vp3Fragment *frag,
-                              int plane, int inter, DCTELEM block[64])
+                              int plane, int inter, int16_t block[64])
 {
     int16_t *dequantizer = s->qmat[frag->qpi][inter][plane];
     uint8_t *perm = s->scantable.permutated;
@@ -1346,6 +1368,10 @@ static inline int vp3_dequant(Vp3DecodeContext *s, Vp3Fragment *frag,
         case 1: // zero run
             s->dct_tokens[plane][i]++;
             i += (token >> 2) & 0x7f;
+            if (i > 63) {
+                av_log(s->avctx, AV_LOG_ERROR, "Coefficient index overflow\n");
+                return i;
+            }
             block[perm[i]] = (token >> 9) * dequantizer[perm[i]];
             i++;
             break;
@@ -1353,11 +1379,12 @@ static inline int vp3_dequant(Vp3DecodeContext *s, Vp3Fragment *frag,
             block[perm[i]] = (token >> 2) * dequantizer[perm[i]];
             s->dct_tokens[plane][i++]++;
             break;
-        default:
-            av_log(s->avctx, AV_LOG_ERROR, "internal: invalid token type\n");
+        default: // shouldn't happen
             return i;
         }
     } while (i < 64);
+    // return value is expected to be a valid level
+    i--;
 end:
     // the actual DC+prediction is in the fragment structure
     block[0] = frag->dc * s->qmat[0][inter][plane][0];
@@ -1369,30 +1396,60 @@ end:
  */
 static void vp3_draw_horiz_band(Vp3DecodeContext *s, int y)
 {
-    int h, cy;
-    int offset[4];
+    int h, cy, i;
+    int offset[AV_NUM_DATA_POINTERS];
+
+    if (HAVE_THREADS && s->avctx->active_thread_type&FF_THREAD_FRAME) {
+        int y_flipped = s->flipped_image ? s->avctx->height-y : y;
+
+        // At the end of the frame, report INT_MAX instead of the height of the frame.
+        // This makes the other threads' ff_thread_await_progress() calls cheaper, because
+        // they don't have to clip their values.
+        ff_thread_report_progress(&s->current_frame, y_flipped==s->avctx->height ? INT_MAX : y_flipped-1, 0);
+    }
 
     if(s->avctx->draw_horiz_band==NULL)
         return;
 
     h= y - s->last_slice_end;
+    s->last_slice_end= y;
     y -= h;
 
     if (!s->flipped_image) {
-        if (y == 0)
-            h -= s->height - s->avctx->height;  // account for non-mod16
-        y = s->height - y - h;
+        y = s->avctx->height - y - h;
     }
 
-    cy = y >> 1;
+    cy = y >> s->chroma_y_shift;
     offset[0] = s->current_frame.linesize[0]*y;
     offset[1] = s->current_frame.linesize[1]*cy;
     offset[2] = s->current_frame.linesize[2]*cy;
-    offset[3] = 0;
+    for (i = 3; i < AV_NUM_DATA_POINTERS; i++)
+        offset[i] = 0;
 
     emms_c();
     s->avctx->draw_horiz_band(s->avctx, &s->current_frame, offset, y, 3, h);
-    s->last_slice_end= y + h;
+}
+
+/**
+ * Wait for the reference frame of the current fragment.
+ * The progress value is in luma pixel rows.
+ */
+static void await_reference_row(Vp3DecodeContext *s, Vp3Fragment *fragment, int motion_y, int y)
+{
+    AVFrame *ref_frame;
+    int ref_row;
+    int border = motion_y&1;
+
+    if (fragment->coding_method == MODE_USING_GOLDEN ||
+        fragment->coding_method == MODE_GOLDEN_MV)
+        ref_frame = &s->golden_frame;
+    else
+        ref_frame = &s->last_frame;
+
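+    /* wait until the thread decoding the reference frame has produced the
+     * lowest row this 8-row block can read: the full-pel vertical offset
+     * plus 8 rows, plus one extra row for half-pel interpolation */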
+    ref_row = y + (motion_y>>1);
+    ref_row = FFMAX(FFABS(ref_row), ref_row + 8 + border);
+
+    ff_thread_await_progress(ref_frame, ref_row, 0);
 }
 
 /*
@@ -1401,8 +1458,8 @@ static void vp3_draw_horiz_band(Vp3DecodeContext *s, int y)
  */
 static void render_slice(Vp3DecodeContext *s, int slice)
 {
-    int x, y, i, j;
-    LOCAL_ALIGNED_16(DCTELEM, block, [64]);
+    int x, y, i, j, fragment;
+    int16_t *block = s->block;
     int motion_x = 0xdeadbeef, motion_y = 0xdeadbeef;
     int motion_halfpel_index;
     uint8_t *motion_source;
@@ -1416,25 +1473,23 @@ static void render_slice(Vp3DecodeContext *s, int slice)
         uint8_t *  last_plane = s->   last_frame.data    [plane] + s->data_offset[plane];
         uint8_t *golden_plane = s-> golden_frame.data    [plane] + s->data_offset[plane];
         int stride            = s->current_frame.linesize[plane];
-        int plane_width       = s->width  >> !!plane;
-        int plane_height      = s->height >> !!plane;
+        int plane_width       = s->width  >> (plane && s->chroma_x_shift);
+        int plane_height      = s->height >> (plane && s->chroma_y_shift);
+        int8_t (*motion_val)[2] = s->motion_val[!!plane];
 
-        int sb_x, sb_y        = slice << !plane;
-        int slice_height      = sb_y + (plane ? 1 : 2);
+        int sb_x, sb_y        = slice << (!plane && s->chroma_y_shift);
+        int slice_height      = sb_y + 1 + (!plane && s->chroma_y_shift);
         int slice_width       = plane ? s->c_superblock_width : s->y_superblock_width;
 
-        int fragment_width    = s->fragment_width  >> !!plane;
-        int fragment_height   = s->fragment_height >> !!plane;
+        int fragment_width    = s->fragment_width[!!plane];
+        int fragment_height   = s->fragment_height[!!plane];
         int fragment_start    = s->fragment_start[plane];
+        int do_await          = !plane && HAVE_THREADS && (s->avctx->active_thread_type&FF_THREAD_FRAME);
 
         if (!s->flipped_image) stride = -stride;
         if (CONFIG_GRAY && plane && (s->avctx->flags & CODEC_FLAG_GRAY))
             continue;
 
-
-        if(FFABS(stride) > 2048)
-            return; //various tables are fixed size
-
         /* for each superblock row in the slice (both of them)... */
         for (; sb_y < slice_height; sb_y++) {
 
@@ -1445,8 +1500,9 @@ static void render_slice(Vp3DecodeContext *s, int slice)
                 for (j = 0; j < 16; j++) {
                     x = 4*sb_x + hilbert_offset[j][0];
                     y = 4*sb_y + hilbert_offset[j][1];
+                    fragment = y*fragment_width + x;
 
-                    i = fragment_start + y*fragment_width + x;
+                    i = fragment_start + fragment;
 
                     // bounds check
                     if (x >= fragment_width || y >= fragment_height)
@@ -1454,10 +1510,11 @@ static void render_slice(Vp3DecodeContext *s, int slice)
 
                 first_pixel = 8*y*stride + 8*x;
 
+                if (do_await && s->all_fragments[i].coding_method != MODE_INTRA)
+                    await_reference_row(s, &s->all_fragments[i], motion_val[fragment][1], (16*y) >> s->chroma_y_shift);
+
                 /* transform if this block was coded */
                 if (s->all_fragments[i].coding_method != MODE_COPY) {
-                    int intra = s->all_fragments[i].coding_method == MODE_INTRA;
-
                     if ((s->all_fragments[i].coding_method == MODE_USING_GOLDEN) ||
                         (s->all_fragments[i].coding_method == MODE_GOLDEN_MV))
                         motion_source= golden_plane;
@@ -1472,17 +1529,11 @@ static void render_slice(Vp3DecodeContext *s, int slice)
                     if ((s->all_fragments[i].coding_method > MODE_INTRA) &&
                         (s->all_fragments[i].coding_method != MODE_USING_GOLDEN)) {
                         int src_x, src_y;
-                        motion_x = s->all_fragments[i].motion_x;
-                        motion_y = s->all_fragments[i].motion_y;
-                        if(plane){
-                            motion_x= (motion_x>>1) | (motion_x&1);
-                            motion_y= (motion_y>>1) | (motion_y&1);
-                        }
+                        motion_x = motion_val[fragment][0];
+                        motion_y = motion_val[fragment][1];
 
                         src_x= (motion_x>>1) + 8*x;
                         src_y= (motion_y>>1) + 8*y;
-                        if ((motion_x == 127) || (motion_y == 127))
-                            av_log(s->avctx, AV_LOG_ERROR, " help! got invalid motion vector! (%X, %X)\n", motion_x, motion_y);
 
                         motion_halfpel_index = motion_x & 0x01;
                         motion_source += (motion_x >> 1);
@@ -1492,10 +1543,9 @@ static void render_slice(Vp3DecodeContext *s, int slice)
 
                         if(src_x<0 || src_y<0 || src_x + 9 >= plane_width || src_y + 9 >= plane_height){
                             uint8_t *temp= s->edge_emu_buffer;
-                            if(stride<0) temp -= 9*stride;
-                            else temp += 9*stride;
+                            if(stride<0) temp -= 8*stride;
 
-                            ff_emulated_edge_mc(temp, motion_source, stride, 9, 9, src_x, src_y, plane_width, plane_height);
+                            s->vdsp.emulated_edge_mc(temp, motion_source, stride, 9, 9, src_x, src_y, plane_width, plane_height);
                             motion_source= temp;
                         }
                     }
@@ -1514,7 +1564,7 @@ static void render_slice(Vp3DecodeContext *s, int slice)
                                 motion_source, stride, 8);
                         }else{
                             int d= (motion_x ^ motion_y)>>31; // d is 0 if motion_x and _y have the same sign, else -1
-                            s->dsp.put_no_rnd_pixels_l2[1](
+                            s->vp3dsp.put_no_rnd_pixels_l2(
                                 output_plane + first_pixel,
                                 motion_source - d,
                                 motion_source + stride + 1 + d,
@@ -1522,23 +1572,29 @@ static void render_slice(Vp3DecodeContext *s, int slice)
                         }
                     }
 
-                        s->dsp.clear_block(block);
-                        vp3_dequant(s, s->all_fragments + i, plane, !intra, block);
-
                     /* invert DCT and place (or add) in final output */
 
                     if (s->all_fragments[i].coding_method == MODE_INTRA) {
-                        if(s->avctx->idct_algo!=FF_IDCT_VP3)
-                            block[0] += 128<<3;
-                        s->dsp.idct_put(
+                        int index;
+                        index = vp3_dequant(s, s->all_fragments + i, plane, 0, block);
+                        if (index > 63)
+                            continue;
+                        s->vp3dsp.idct_put(
                             output_plane + first_pixel,
                             stride,
                             block);
                     } else {
-                        s->dsp.idct_add(
+                        int index = vp3_dequant(s, s->all_fragments + i, plane, 1, block);
+                        if (index > 63)
+                            continue;
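+                        /* an index of 0 means only the DC coefficient was
+                         * decoded, so the cheaper DC-only add is enough */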
+                        if (index > 0) {
+                        s->vp3dsp.idct_add(
                             output_plane + first_pixel,
                             stride,
                             block);
+                        } else {
+                            s->vp3dsp.idct_dc_add(output_plane + first_pixel, stride, block);
+                        }
                     }
                 } else {
 
@@ -1553,7 +1609,8 @@ static void render_slice(Vp3DecodeContext *s, int slice)
             }
 
             // Filter up to the last row in the superblock row
-            apply_loop_filter(s, plane, 4*sb_y - !!sb_y, FFMIN(4*sb_y+3, fragment_height-1));
+            if (!s->skip_loop_filter)
+                apply_loop_filter(s, plane, 4*sb_y - !!sb_y, FFMIN(4*sb_y+3, fragment_height-1));
         }
     }
 
@@ -1565,18 +1622,48 @@ static void render_slice(Vp3DecodeContext *s, int slice)
       *     dispatch (slice - 1);
       */
 
-    vp3_draw_horiz_band(s, 64*slice + 64-16);
+    vp3_draw_horiz_band(s, FFMIN((32 << s->chroma_y_shift) * (slice + 1) -16, s->height-16));
+}
+
+/// Allocate tables for per-frame data in Vp3DecodeContext
+static av_cold int allocate_tables(AVCodecContext *avctx)
+{
+    Vp3DecodeContext *s = avctx->priv_data;
+    int y_fragment_count, c_fragment_count;
+
+    y_fragment_count = s->fragment_width[0] * s->fragment_height[0];
+    c_fragment_count = s->fragment_width[1] * s->fragment_height[1];
+
+    s->superblock_coding = av_malloc(s->superblock_count);
+    s->all_fragments = av_malloc(s->fragment_count * sizeof(Vp3Fragment));
+    s->coded_fragment_list[0] = av_malloc(s->fragment_count * sizeof(int));
+    s->dct_tokens_base = av_malloc(64*s->fragment_count * sizeof(*s->dct_tokens_base));
+    s->motion_val[0] = av_malloc(y_fragment_count * sizeof(*s->motion_val[0]));
+    s->motion_val[1] = av_malloc(c_fragment_count * sizeof(*s->motion_val[1]));
+
+    /* work out the block mapping tables */
+    s->superblock_fragments = av_malloc(s->superblock_count * 16 * sizeof(int));
+    s->macroblock_coding = av_malloc(s->macroblock_count + 1);
+
+    if (!s->superblock_coding || !s->all_fragments || !s->dct_tokens_base ||
+        !s->coded_fragment_list[0] || !s->superblock_fragments || !s->macroblock_coding ||
+        !s->motion_val[0] || !s->motion_val[1]) {
+        vp3_decode_end(avctx);
+        return -1;
+    }
+
+    init_block_mapping(s);
+
+    return 0;
 }
 
-/*
- * This is the ffmpeg/libavcodec API init function.
- */
 static av_cold int vp3_decode_init(AVCodecContext *avctx)
 {
     Vp3DecodeContext *s = avctx->priv_data;
     int i, inter, plane;
     int c_width;
     int c_height;
+    int y_fragment_count, c_fragment_count;
 
     if (avctx->codec_tag == MKTAG('V','P','3','0'))
         s->version = 0;
@@ -1586,12 +1673,14 @@ static av_cold int vp3_decode_init(AVCodecContext *avctx)
     s->avctx = avctx;
     s->width = FFALIGN(avctx->width, 16);
     s->height = FFALIGN(avctx->height, 16);
-    avctx->pix_fmt = PIX_FMT_YUV420P;
+    if (avctx->pix_fmt == AV_PIX_FMT_NONE)
+        avctx->pix_fmt = AV_PIX_FMT_YUV420P;
     avctx->chroma_sample_location = AVCHROMA_LOC_CENTER;
-    if(avctx->idct_algo==FF_IDCT_AUTO)
-        avctx->idct_algo=FF_IDCT_VP3;
-    dsputil_init(&s->dsp, avctx);
+    ff_dsputil_init(&s->dsp, avctx);
+    ff_videodsp_init(&s->vdsp, 8);
+    ff_vp3dsp_init(&s->vp3dsp, avctx->flags);
 
+    ff_init_scantable_permutation(s->dsp.idct_permutation, s->vp3dsp.idct_perm);
     ff_init_scantable(s->dsp.idct_permutation, &s->scantable, ff_zigzag_direct);
 
     /* initialize to an impossible value which will force a recalculation
@@ -1599,13 +1688,16 @@ static av_cold int vp3_decode_init(AVCodecContext *avctx)
     for (i = 0; i < 3; i++)
         s->qps[i] = -1;
 
+    av_pix_fmt_get_chroma_sub_sample(avctx->pix_fmt, &s->chroma_x_shift,
+                                     &s->chroma_y_shift);
+
     s->y_superblock_width = (s->width + 31) / 32;
     s->y_superblock_height = (s->height + 31) / 32;
     s->y_superblock_count = s->y_superblock_width * s->y_superblock_height;
 
     /* work out the dimensions for the C planes */
-    c_width = s->width / 2;
-    c_height = s->height / 2;
+    c_width = s->width >> s->chroma_x_shift;
+    c_height = s->height >> s->chroma_y_shift;
     s->c_superblock_width = (c_width + 31) / 32;
     s->c_superblock_height = (c_height + 31) / 32;
     s->c_superblock_count = s->c_superblock_width * s->c_superblock_height;
@@ -1613,28 +1705,22 @@ static av_cold int vp3_decode_init(AVCodecContext *avctx)
     s->superblock_count = s->y_superblock_count + (s->c_superblock_count * 2);
     s->u_superblock_start = s->y_superblock_count;
     s->v_superblock_start = s->u_superblock_start + s->c_superblock_count;
-    s->superblock_coding = av_malloc(s->superblock_count);
 
     s->macroblock_width = (s->width + 15) / 16;
     s->macroblock_height = (s->height + 15) / 16;
     s->macroblock_count = s->macroblock_width * s->macroblock_height;
 
-    s->fragment_width = s->width / FRAGMENT_PIXELS;
-    s->fragment_height = s->height / FRAGMENT_PIXELS;
+    s->fragment_width[0] = s->width / FRAGMENT_PIXELS;
+    s->fragment_height[0] = s->height / FRAGMENT_PIXELS;
+    s->fragment_width[1]  = s->fragment_width[0]  >> s->chroma_x_shift;
+    s->fragment_height[1] = s->fragment_height[0] >> s->chroma_y_shift;
 
     /* fragment count covers all 8x8 blocks for all 3 planes */
-    s->fragment_count = s->fragment_width * s->fragment_height * 3 / 2;
-    s->fragment_start[1] = s->fragment_width * s->fragment_height;
-    s->fragment_start[2] = s->fragment_width * s->fragment_height * 5 / 4;
-
-    s->all_fragments = av_malloc(s->fragment_count * sizeof(Vp3Fragment));
-    s->coded_fragment_list[0] = av_malloc(s->fragment_count * sizeof(int));
-    s->dct_tokens_base = av_malloc(64*s->fragment_count * sizeof(*s->dct_tokens_base));
-    if (!s->superblock_coding || !s->all_fragments || !s->dct_tokens_base ||
-        !s->coded_fragment_list[0]) {
-        vp3_decode_end(avctx);
-        return -1;
-    }
+    y_fragment_count     = s->fragment_width[0] * s->fragment_height[0];
+    c_fragment_count     = s->fragment_width[1] * s->fragment_height[1];
+    s->fragment_count    = y_fragment_count + 2*c_fragment_count;
+    s->fragment_start[1] = y_fragment_count;
+    s->fragment_start[2] = y_fragment_count + c_fragment_count;
 
     if (!s->theora_tables)
     {
@@ -1660,61 +1746,61 @@ static av_cold int vp3_decode_init(AVCodecContext *avctx)
         for (i = 0; i < 16; i++) {
 
             /* DC histograms */
-            init_vlc(&s->dc_vlc[i], 5, 32,
+            init_vlc(&s->dc_vlc[i], 11, 32,
                 &dc_bias[i][0][1], 4, 2,
                 &dc_bias[i][0][0], 4, 2, 0);
 
             /* group 1 AC histograms */
-            init_vlc(&s->ac_vlc_1[i], 5, 32,
+            init_vlc(&s->ac_vlc_1[i], 11, 32,
                 &ac_bias_0[i][0][1], 4, 2,
                 &ac_bias_0[i][0][0], 4, 2, 0);
 
             /* group 2 AC histograms */
-            init_vlc(&s->ac_vlc_2[i], 5, 32,
+            init_vlc(&s->ac_vlc_2[i], 11, 32,
                 &ac_bias_1[i][0][1], 4, 2,
                 &ac_bias_1[i][0][0], 4, 2, 0);
 
             /* group 3 AC histograms */
-            init_vlc(&s->ac_vlc_3[i], 5, 32,
+            init_vlc(&s->ac_vlc_3[i], 11, 32,
                 &ac_bias_2[i][0][1], 4, 2,
                 &ac_bias_2[i][0][0], 4, 2, 0);
 
             /* group 4 AC histograms */
-            init_vlc(&s->ac_vlc_4[i], 5, 32,
+            init_vlc(&s->ac_vlc_4[i], 11, 32,
                 &ac_bias_3[i][0][1], 4, 2,
                 &ac_bias_3[i][0][0], 4, 2, 0);
         }
     } else {
-        for (i = 0; i < 16; i++) {
 
+        for (i = 0; i < 16; i++) {
             /* DC histograms */
-            if (init_vlc(&s->dc_vlc[i], 5, 32,
-                &s->huffman_table[i][0][1], 4, 2,
-                &s->huffman_table[i][0][0], 4, 2, 0) < 0)
+            if (init_vlc(&s->dc_vlc[i], 11, 32,
+                &s->huffman_table[i][0][1], 8, 4,
+                &s->huffman_table[i][0][0], 8, 4, 0) < 0)
                 goto vlc_fail;
 
             /* group 1 AC histograms */
-            if (init_vlc(&s->ac_vlc_1[i], 5, 32,
-                &s->huffman_table[i+16][0][1], 4, 2,
-                &s->huffman_table[i+16][0][0], 4, 2, 0) < 0)
+            if (init_vlc(&s->ac_vlc_1[i], 11, 32,
+                &s->huffman_table[i+16][0][1], 8, 4,
+                &s->huffman_table[i+16][0][0], 8, 4, 0) < 0)
                 goto vlc_fail;
 
             /* group 2 AC histograms */
-            if (init_vlc(&s->ac_vlc_2[i], 5, 32,
-                &s->huffman_table[i+16*2][0][1], 4, 2,
-                &s->huffman_table[i+16*2][0][0], 4, 2, 0) < 0)
+            if (init_vlc(&s->ac_vlc_2[i], 11, 32,
+                &s->huffman_table[i+16*2][0][1], 8, 4,
+                &s->huffman_table[i+16*2][0][0], 8, 4, 0) < 0)
                 goto vlc_fail;
 
             /* group 3 AC histograms */
-            if (init_vlc(&s->ac_vlc_3[i], 5, 32,
-                &s->huffman_table[i+16*3][0][1], 4, 2,
-                &s->huffman_table[i+16*3][0][0], 4, 2, 0) < 0)
+            if (init_vlc(&s->ac_vlc_3[i], 11, 32,
+                &s->huffman_table[i+16*3][0][1], 8, 4,
+                &s->huffman_table[i+16*3][0][0], 8, 4, 0) < 0)
                 goto vlc_fail;
 
             /* group 4 AC histograms */
-            if (init_vlc(&s->ac_vlc_4[i], 5, 32,
-                &s->huffman_table[i+16*4][0][1], 4, 2,
-                &s->huffman_table[i+16*4][0][0], 4, 2, 0) < 0)
+            if (init_vlc(&s->ac_vlc_4[i], 11, 32,
+                &s->huffman_table[i+16*4][0][1], 8, 4,
+                &s->huffman_table[i+16*4][0][0], 8, 4, 0) < 0)
                 goto vlc_fail;
         }
     }
@@ -1735,40 +1821,103 @@ static av_cold int vp3_decode_init(AVCodecContext *avctx)
         &motion_vector_vlc_table[0][1], 2, 1,
         &motion_vector_vlc_table[0][0], 2, 1, 0);
 
-    /* work out the block mapping tables */
-    s->superblock_fragments = av_malloc(s->superblock_count * 16 * sizeof(int));
-    s->macroblock_coding = av_malloc(s->macroblock_count + 1);
-    if (!s->superblock_fragments || !s->macroblock_coding) {
-        vp3_decode_end(avctx);
-        return -1;
-    }
-    init_block_mapping(s);
-
     for (i = 0; i < 3; i++) {
         s->current_frame.data[i] = NULL;
         s->last_frame.data[i] = NULL;
         s->golden_frame.data[i] = NULL;
     }
 
-    return 0;
+    return allocate_tables(avctx);
 
 vlc_fail:
     av_log(avctx, AV_LOG_FATAL, "Invalid huffman table\n");
     return -1;
 }
 
-/*
- * This is the ffmpeg/libavcodec API frame decode function.
- */
+/// Release and shuffle frames after decode finishes
+static void update_frames(AVCodecContext *avctx)
+{
+    Vp3DecodeContext *s = avctx->priv_data;
+
+    /* release the last frame, if it is allocated and if it is not the
+     * golden frame */
+    if (s->last_frame.data[0] && s->last_frame.type != FF_BUFFER_TYPE_COPY)
+        ff_thread_release_buffer(avctx, &s->last_frame);
+
+    /* shuffle frames (last = current) */
+    s->last_frame= s->current_frame;
+
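+    /* a keyframe also becomes the new golden frame; last_frame is marked as
+     * a copy so that the alias is not released a second time later on */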
+    if (s->keyframe) {
+        if (s->golden_frame.data[0])
+            ff_thread_release_buffer(avctx, &s->golden_frame);
+        s->golden_frame = s->current_frame;
+        s->last_frame.type = FF_BUFFER_TYPE_COPY;
+    }
+
+    s->current_frame.data[0]= NULL; /* ensure that we catch any access to this released frame */
+}
+
+static int vp3_update_thread_context(AVCodecContext *dst, const AVCodecContext *src)
+{
+    Vp3DecodeContext *s = dst->priv_data, *s1 = src->priv_data;
+    int qps_changed = 0, i, err;
+
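+/* copy the members from start_field up to (but not including) end_field in
+ * a single memcpy; this relies on the field order inside Vp3DecodeContext */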
+#define copy_fields(to, from, start_field, end_field) memcpy(&to->start_field, &from->start_field, (char*)&to->end_field - (char*)&to->start_field)
+
+    if (!s1->current_frame.data[0]
+        ||s->width != s1->width
+        ||s->height!= s1->height) {
+        if (s != s1)
+            copy_fields(s, s1, golden_frame, current_frame);
+        return -1;
+    }
+
+    if (s != s1) {
+        // init tables if the first frame hasn't been decoded
+        if (!s->current_frame.data[0]) {
+            int y_fragment_count, c_fragment_count;
+            s->avctx = dst;
+            err = allocate_tables(dst);
+            if (err)
+                return err;
+            y_fragment_count = s->fragment_width[0] * s->fragment_height[0];
+            c_fragment_count = s->fragment_width[1] * s->fragment_height[1];
+            memcpy(s->motion_val[0], s1->motion_val[0], y_fragment_count * sizeof(*s->motion_val[0]));
+            memcpy(s->motion_val[1], s1->motion_val[1], c_fragment_count * sizeof(*s->motion_val[1]));
+        }
+
+        // copy previous frame data
+        copy_fields(s, s1, golden_frame, dsp);
+
+        // copy qscale data if necessary
+        for (i = 0; i < 3; i++) {
+            if (s->qps[i] != s1->qps[i]) {
+                qps_changed = 1;
+                memcpy(&s->qmat[i], &s1->qmat[i], sizeof(s->qmat[i]));
+            }
+        }
+
+        if (s->qps[0] != s1->qps[0])
+            memcpy(&s->bounding_values_array, &s1->bounding_values_array, sizeof(s->bounding_values_array));
+
+        if (qps_changed)
+            copy_fields(s, s1, qps, superblock_count);
+#undef copy_fields
+    }
+
+    update_frames(dst);
+
+    return 0;
+}
+
 static int vp3_decode_frame(AVCodecContext *avctx,
-                            void *data, int *data_size,
+                            void *data, int *got_frame,
                             AVPacket *avpkt)
 {
     const uint8_t *buf = avpkt->data;
     int buf_size = avpkt->size;
     Vp3DecodeContext *s = avctx->priv_data;
     GetBitContext gb;
-    static int counter = 0;
     int i;
 
     init_get_bits(&gb, buf, buf_size * 8);
@@ -1794,8 +1943,10 @@ static int vp3_decode_frame(AVCodecContext *avctx,
 
     if (s->avctx->debug & FF_DEBUG_PICT_INFO)
         av_log(s->avctx, AV_LOG_INFO, " VP3 %sframe #%d: Q index = %d\n",
-            s->keyframe?"key":"", counter, s->qps[0]);
-    counter++;
+            s->keyframe?"key":"", avctx->frame_number+1, s->qps[0]);
+
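+    /* skip the loop filter when its limit value for this qp is zero or when
+     * the caller asked to discard it for this frame type */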
+    s->skip_loop_filter = !s->filter_limit_values[s->qps[0]] ||
+        avctx->skip_loop_filter >= (s->keyframe ? AVDISCARD_ALL : AVDISCARD_NONKEY);
 
     if (s->qps[0] != s->last_qps[0])
         init_loop_filter(s);
@@ -1809,6 +1960,16 @@ static int vp3_decode_frame(AVCodecContext *avctx,
     if (avctx->skip_frame >= AVDISCARD_NONKEY && !s->keyframe)
         return buf_size;
 
+    s->current_frame.reference = 3;
+    s->current_frame.pict_type = s->keyframe ? AV_PICTURE_TYPE_I : AV_PICTURE_TYPE_P;
+    if (ff_thread_get_buffer(avctx, &s->current_frame) < 0) {
+        av_log(s->avctx, AV_LOG_ERROR, "get_buffer() failed\n");
+        goto error;
+    }
+
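+    /* the edge emulation buffer is allocated lazily, once the line size of
+     * the current frame is known */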
+    if (!s->edge_emu_buffer)
+        s->edge_emu_buffer = av_malloc(9*FFABS(s->current_frame.linesize[0]));
+
     if (s->keyframe) {
         if (!s->theora)
         {
@@ -1817,7 +1978,7 @@ static int vp3_decode_frame(AVCodecContext *avctx,
             if (s->version)
             {
                 s->version = get_bits(&gb, 5);
-                if (counter == 1)
+                if (avctx->frame_number == 0)
                     av_log(s->avctx, AV_LOG_DEBUG, "VP version: %d\n", s->version);
             }
         }
@@ -1827,70 +1988,52 @@ static int vp3_decode_frame(AVCodecContext *avctx,
                     av_log(s->avctx, AV_LOG_ERROR, "Warning, unsupported keyframe coding type?!\n");
             skip_bits(&gb, 2); /* reserved? */
         }
-
-        if (s->last_frame.data[0] == s->golden_frame.data[0]) {
-            if (s->golden_frame.data[0])
-                avctx->release_buffer(avctx, &s->golden_frame);
-            s->last_frame= s->golden_frame; /* ensure that we catch any access to this released frame */
-        } else {
-            if (s->golden_frame.data[0])
-                avctx->release_buffer(avctx, &s->golden_frame);
-            if (s->last_frame.data[0])
-                avctx->release_buffer(avctx, &s->last_frame);
-        }
-
-        s->golden_frame.reference = 3;
-        if(avctx->get_buffer(avctx, &s->golden_frame) < 0) {
-            av_log(s->avctx, AV_LOG_ERROR, "vp3: get_buffer() failed\n");
-            return -1;
-        }
-
-        /* golden frame is also the current frame */
-        s->current_frame= s->golden_frame;
     } else {
-        /* allocate a new current frame */
-        s->current_frame.reference = 3;
         if (!s->golden_frame.data[0]) {
-            av_log(s->avctx, AV_LOG_ERROR, "vp3: first frame not a keyframe\n");
-            return -1;
-        }
-        if(avctx->get_buffer(avctx, &s->current_frame) < 0) {
-            av_log(s->avctx, AV_LOG_ERROR, "vp3: get_buffer() failed\n");
-            return -1;
+            av_log(s->avctx, AV_LOG_WARNING, "vp3: first frame not a keyframe\n");
+
+            s->golden_frame.reference = 3;
+            s->golden_frame.pict_type = AV_PICTURE_TYPE_I;
+            if (ff_thread_get_buffer(avctx, &s->golden_frame) < 0) {
+                av_log(s->avctx, AV_LOG_ERROR, "get_buffer() failed\n");
+                goto error;
+            }
+            s->last_frame = s->golden_frame;
+            s->last_frame.type = FF_BUFFER_TYPE_COPY;
+            ff_thread_report_progress(&s->last_frame, INT_MAX, 0);
         }
     }
 
-    s->current_frame.qscale_table= s->qscale_table; //FIXME allocate individual tables per AVFrame
-    s->current_frame.qstride= 0;
-
-    init_frame(s, &gb);
+    memset(s->all_fragments, 0, s->fragment_count * sizeof(Vp3Fragment));
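+    /* per-frame setup is done; a subsequent frame thread may now copy this
+     * context and start decoding */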
+    ff_thread_finish_setup(avctx);
 
     if (unpack_superblocks(s, &gb)){
         av_log(s->avctx, AV_LOG_ERROR, "error in unpack_superblocks\n");
-        return -1;
+        goto error;
     }
     if (unpack_modes(s, &gb)){
         av_log(s->avctx, AV_LOG_ERROR, "error in unpack_modes\n");
-        return -1;
+        goto error;
     }
     if (unpack_vectors(s, &gb)){
         av_log(s->avctx, AV_LOG_ERROR, "error in unpack_vectors\n");
-        return -1;
+        goto error;
     }
     if (unpack_block_qpis(s, &gb)){
         av_log(s->avctx, AV_LOG_ERROR, "error in unpack_block_qpis\n");
-        return -1;
+        goto error;
     }
     if (unpack_dct_coeffs(s, &gb)){
         av_log(s->avctx, AV_LOG_ERROR, "error in unpack_dct_coeffs\n");
-        return -1;
+        goto error;
     }
 
     for (i = 0; i < 3; i++) {
+        int height = s->height >> (i && s->chroma_y_shift);
         if (s->flipped_image)
             s->data_offset[i] = 0;
         else
-            s->data_offset[i] = ((s->height>>!!i)-1) * s->current_frame.linesize[i];
+            s->data_offset[i] = (height-1) * s->current_frame.linesize[i];
     }
 
     s->last_slice_end = 0;
@@ -1899,64 +2042,26 @@ static int vp3_decode_frame(AVCodecContext *avctx,
 
     // filter the last row
     for (i = 0; i < 3; i++) {
-        int row = (s->height >> (3+!!i)) - 1;
+        int row = (s->height >> (3+(i && s->chroma_y_shift))) - 1;
         apply_loop_filter(s, i, row, row+1);
     }
-    vp3_draw_horiz_band(s, s->height);
+    vp3_draw_horiz_band(s, s->avctx->height);
 
-    *data_size=sizeof(AVFrame);
+    *got_frame = 1;
     *(AVFrame*)data= s->current_frame;
 
-    /* release the last frame, if it is allocated and if it is not the
-     * golden frame */
-    if ((s->last_frame.data[0]) &&
-        (s->last_frame.data[0] != s->golden_frame.data[0]))
-        avctx->release_buffer(avctx, &s->last_frame);
-
-    /* shuffle frames (last = current) */
-    s->last_frame= s->current_frame;
-    s->current_frame.data[0]= NULL; /* ensure that we catch any access to this released frame */
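+    /* with frame threading, update_frames() is called instead from
+     * vp3_update_thread_context() on the next decoding thread */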
+    if (!HAVE_THREADS || !(s->avctx->active_thread_type&FF_THREAD_FRAME))
+        update_frames(avctx);
 
     return buf_size;
-}
 
-/*
- * This is the ffmpeg/libavcodec API module cleanup function.
- */
-static av_cold int vp3_decode_end(AVCodecContext *avctx)
-{
-    Vp3DecodeContext *s = avctx->priv_data;
-    int i;
-
-    av_free(s->superblock_coding);
-    av_free(s->all_fragments);
-    av_free(s->coded_fragment_list[0]);
-    av_free(s->dct_tokens_base);
-    av_free(s->superblock_fragments);
-    av_free(s->macroblock_coding);
-
-    for (i = 0; i < 16; i++) {
-        free_vlc(&s->dc_vlc[i]);
-        free_vlc(&s->ac_vlc_1[i]);
-        free_vlc(&s->ac_vlc_2[i]);
-        free_vlc(&s->ac_vlc_3[i]);
-        free_vlc(&s->ac_vlc_4[i]);
-    }
-
-    free_vlc(&s->superblock_run_length_vlc);
-    free_vlc(&s->fragment_run_length_vlc);
-    free_vlc(&s->mode_code_vlc);
-    free_vlc(&s->motion_vector_vlc);
+error:
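+    /* report full progress so that frame threads waiting on this frame do
+     * not stall after an error */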
+    ff_thread_report_progress(&s->current_frame, INT_MAX, 0);
 
-    /* release all frames */
-    if (s->golden_frame.data[0] && s->golden_frame.data[0] != s->last_frame.data[0])
-        avctx->release_buffer(avctx, &s->golden_frame);
-    if (s->last_frame.data[0])
-        avctx->release_buffer(avctx, &s->last_frame);
-    /* no need to release the current_frame since it will always be pointing
-     * to the same frame as either the golden or last frame */
+    if (!HAVE_THREADS || !(s->avctx->active_thread_type&FF_THREAD_FRAME))
+        avctx->release_buffer(avctx, &s->current_frame);
 
-    return 0;
+    return -1;
 }
 
 static int read_huffman_tree(AVCodecContext *avctx, GetBitContext *gb)
@@ -1970,7 +2075,8 @@ static int read_huffman_tree(AVCodecContext *avctx, GetBitContext *gb)
             return -1;
         }
         token = get_bits(gb, 5);
-        //av_log(avctx, AV_LOG_DEBUG, "hti %d hbits %x token %d entry : %d size %d\n", s->hti, s->hbits, token, s->entries, s->huff_code_size);
+        av_dlog(avctx, "hti %d hbits %x token %d entry : %d size %d\n",
+                s->hti, s->hbits, token, s->entries, s->huff_code_size);
         s->huffman_table[s->hti][token][0] = s->hbits;
         s->huffman_table[s->hti][token][1] = s->huff_code_size;
         s->entries++;
@@ -1993,11 +2099,34 @@ static int read_huffman_tree(AVCodecContext *avctx, GetBitContext *gb)
     return 0;
 }
 
+static int vp3_init_thread_copy(AVCodecContext *avctx)
+{
+    Vp3DecodeContext *s = avctx->priv_data;
+
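+    /* table pointers inherited from the source context must not be shared;
+     * clear them so each frame thread allocates its own copies */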
+    s->superblock_coding      = NULL;
+    s->all_fragments          = NULL;
+    s->coded_fragment_list[0] = NULL;
+    s->dct_tokens_base        = NULL;
+    s->superblock_fragments   = NULL;
+    s->macroblock_coding      = NULL;
+    s->motion_val[0]          = NULL;
+    s->motion_val[1]          = NULL;
+    s->edge_emu_buffer        = NULL;
+
+    return 0;
+}
+
 #if CONFIG_THEORA_DECODER
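+/* pixel formats for the 2-bit format field of the Theora header:
+ * 0 = 4:2:0, 1 = reserved, 2 = 4:2:2, 3 = 4:4:4 */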
+static const enum AVPixelFormat theora_pix_fmts[4] = {
+    AV_PIX_FMT_YUV420P, AV_PIX_FMT_NONE, AV_PIX_FMT_YUV422P, AV_PIX_FMT_YUV444P
+};
+
 static int theora_decode_header(AVCodecContext *avctx, GetBitContext *gb)
 {
     Vp3DecodeContext *s = avctx->priv_data;
     int visible_width, visible_height, colorspace;
+    int offset_x = 0, offset_y = 0;
+    AVRational fps, aspect;
 
     s->theora = get_bits_long(gb, 24);
     av_log(avctx, AV_LOG_DEBUG, "Theora bitstream version %X\n", s->theora);
@@ -2013,7 +2142,7 @@ static int theora_decode_header(AVCodecContext *avctx, GetBitContext *gb)
     visible_width  = s->width  = get_bits(gb, 16) << 4;
     visible_height = s->height = get_bits(gb, 16) << 4;
 
-    if(avcodec_check_dimensions(avctx, s->width, s->height)){
+    if(av_image_check_size(s->width, s->height, 0, avctx)){
         av_log(avctx, AV_LOG_ERROR, "Invalid dimensions (%dx%d)\n", s->width, s->height);
         s->width= s->height= 0;
         return -1;
@@ -2023,14 +2152,24 @@ static int theora_decode_header(AVCodecContext *avctx, GetBitContext *gb)
         visible_width  = get_bits_long(gb, 24);
         visible_height = get_bits_long(gb, 24);
 
-        skip_bits(gb, 8); /* offset x */
-        skip_bits(gb, 8); /* offset y */
+        offset_x = get_bits(gb, 8); /* offset x */
+        offset_y = get_bits(gb, 8); /* offset y, from bottom */
+    }
+
+    fps.num = get_bits_long(gb, 32);
+    fps.den = get_bits_long(gb, 32);
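+    /* time_base is the inverse of the frame rate, so num and den are
+     * swapped in the reduction below */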
+    if (fps.num && fps.den) {
+        av_reduce(&avctx->time_base.num, &avctx->time_base.den,
+                  fps.den, fps.num, 1<<30);
     }
 
-    skip_bits(gb, 32); /* fps numerator */
-    skip_bits(gb, 32); /* fps denumerator */
-    skip_bits(gb, 24); /* aspect numerator */
-    skip_bits(gb, 24); /* aspect denumerator */
+    aspect.num = get_bits_long(gb, 24);
+    aspect.den = get_bits_long(gb, 24);
+    if (aspect.num && aspect.den) {
+        av_reduce(&avctx->sample_aspect_ratio.num,
+                  &avctx->sample_aspect_ratio.den,
+                  aspect.num, aspect.den, 1<<30);
+    }
 
     if (s->theora < 0x030200)
         skip_bits(gb, 5); /* keyframe frequency force */
@@ -2042,14 +2181,15 @@ static int theora_decode_header(AVCodecContext *avctx, GetBitContext *gb)
     if (s->theora >= 0x030200)
     {
         skip_bits(gb, 5); /* keyframe frequency force */
-        skip_bits(gb, 2); /* pixel format: 420,res,422,444 */
+        avctx->pix_fmt = theora_pix_fmts[get_bits(gb, 2)];
         skip_bits(gb, 3); /* reserved */
     }
 
 //    align_get_bits(gb);
 
     if (   visible_width  <= s->width  && visible_width  > s->width-16
-        && visible_height <= s->height && visible_height > s->height-16)
+        && visible_height <= s->height && visible_height > s->height-16
+        && !offset_x && (offset_y == s->height - visible_height))
         avcodec_set_dimensions(avctx, visible_width, visible_height);
     else
         avcodec_set_dimensions(avctx, s->width, s->height);
@@ -2075,13 +2215,9 @@ static int theora_decode_tables(AVCodecContext *avctx, GetBitContext *gb)
     if (s->theora >= 0x030200) {
         n = get_bits(gb, 3);
         /* loop filter limit values table */
-        for (i = 0; i < 64; i++) {
-            s->filter_limit_values[i] = get_bits(gb, n);
-            if (s->filter_limit_values[i] > 127) {
-                av_log(avctx, AV_LOG_ERROR, "filter limit value too large (%i > 127), clamping\n", s->filter_limit_values[i]);
-                s->filter_limit_values[i] = 127;
-            }
-        }
+        if (n)
+            for (i = 0; i < 64; i++)
+                s->filter_limit_values[i] = get_bits(gb, n);
     }
 
     if (s->theora >= 0x030200)
@@ -2195,13 +2331,15 @@ static av_cold int theora_decode_init(AVCodecContext *avctx)
         return -1;
     }
 
-    if (ff_split_xiph_headers(avctx->extradata, avctx->extradata_size,
+    if (avpriv_split_xiph_headers(avctx->extradata, avctx->extradata_size,
                               42, header_start, header_len) < 0) {
         av_log(avctx, AV_LOG_ERROR, "Corrupt extradata\n");
         return -1;
     }
 
   for(i=0;i<3;i++) {
+    if (header_len[i] <= 0)
+        continue;
     init_get_bits(&gb, header_start[i], header_len[i] * 8);
 
     ptype = get_bits(&gb, 8);
@@ -2241,31 +2379,35 @@ static av_cold int theora_decode_init(AVCodecContext *avctx)
     return vp3_decode_init(avctx);
 }
 
-AVCodec theora_decoder = {
-    "theora",
-    CODEC_TYPE_VIDEO,
-    CODEC_ID_THEORA,
-    sizeof(Vp3DecodeContext),
-    theora_decode_init,
-    NULL,
-    vp3_decode_end,
-    vp3_decode_frame,
-    CODEC_CAP_DR1 | CODEC_CAP_DRAW_HORIZ_BAND,
-    NULL,
-    .long_name = NULL_IF_CONFIG_SMALL("Theora"),
+AVCodec ff_theora_decoder = {
+    .name                  = "theora",
+    .type                  = AVMEDIA_TYPE_VIDEO,
+    .id                    = AV_CODEC_ID_THEORA,
+    .priv_data_size        = sizeof(Vp3DecodeContext),
+    .init                  = theora_decode_init,
+    .close                 = vp3_decode_end,
+    .decode                = vp3_decode_frame,
+    .capabilities          = CODEC_CAP_DR1 | CODEC_CAP_DRAW_HORIZ_BAND |
+                             CODEC_CAP_FRAME_THREADS,
+    .flush                 = vp3_decode_flush,
+    .long_name             = NULL_IF_CONFIG_SMALL("Theora"),
+    .init_thread_copy      = ONLY_IF_THREADS_ENABLED(vp3_init_thread_copy),
+    .update_thread_context = ONLY_IF_THREADS_ENABLED(vp3_update_thread_context)
 };
 #endif
 
-AVCodec vp3_decoder = {
-    "vp3",
-    CODEC_TYPE_VIDEO,
-    CODEC_ID_VP3,
-    sizeof(Vp3DecodeContext),
-    vp3_decode_init,
-    NULL,
-    vp3_decode_end,
-    vp3_decode_frame,
-    CODEC_CAP_DR1 | CODEC_CAP_DRAW_HORIZ_BAND,
-    NULL,
-    .long_name = NULL_IF_CONFIG_SMALL("On2 VP3"),
+AVCodec ff_vp3_decoder = {
+    .name                  = "vp3",
+    .type                  = AVMEDIA_TYPE_VIDEO,
+    .id                    = AV_CODEC_ID_VP3,
+    .priv_data_size        = sizeof(Vp3DecodeContext),
+    .init                  = vp3_decode_init,
+    .close                 = vp3_decode_end,
+    .decode                = vp3_decode_frame,
+    .capabilities          = CODEC_CAP_DR1 | CODEC_CAP_DRAW_HORIZ_BAND |
+                             CODEC_CAP_FRAME_THREADS,
+    .flush                 = vp3_decode_flush,
+    .long_name             = NULL_IF_CONFIG_SMALL("On2 VP3"),
+    .init_thread_copy      = ONLY_IF_THREADS_ENABLED(vp3_init_thread_copy),
+    .update_thread_context = ONLY_IF_THREADS_ENABLED(vp3_update_thread_context),
 };