diff --git a/libavcodec/utvideodec.c b/libavcodec/utvideodec.c
index 41d534481569924e4c305f95486221486caa7c50..26b991ca7b56d626be2908da19d48adb0264d142 100644
--- a/libavcodec/utvideodec.c
+++ b/libavcodec/utvideodec.c
 #include <stdlib.h>
 
 #include "libavutil/intreadwrite.h"
+
 #include "avcodec.h"
+#include "bitstream.h"
+#include "bswapdsp.h"
 #include "bytestream.h"
-#include "get_bits.h"
-#include "dsputil.h"
+#include "internal.h"
 #include "thread.h"
 #include "utvideo.h"
+#include "vlc.h"
+
+static int build_huff10(const uint8_t *src, VLC *vlc, int *fsym)
+{
+    int i;
+    HuffEntry he[1024];
+    int last;
+    uint32_t codes[1024];
+    uint8_t bits[1024];
+    uint16_t syms[1024];
+    uint32_t code;
+
+    *fsym = -1;
+    for (i = 0; i < 1024; i++) {
+        he[i].sym = i;
+        he[i].len = *src++;
+    }
+    qsort(he, 1024, sizeof(*he), ff_ut10_huff_cmp_len);
+
+    if (!he[0].len) {
+        *fsym = he[0].sym;
+        return 0;
+    }
+
+    last = 1023;
+    while (he[last].len == 255 && last)
+        last--;
+
+    if (he[last].len > 32) {
+        return -1;
+    }
+
+    code = 1;
+    for (i = last; i >= 0; i--) {
+        codes[i] = code >> (32 - he[i].len);
+        bits[i]  = he[i].len;
+        syms[i]  = he[i].sym;
+        code += 0x80000000u >> (he[i].len - 1);
+    }
+
+    return ff_init_vlc_sparse(vlc, FFMIN(he[last].len, 11), last + 1,
+                              bits,  sizeof(*bits),  sizeof(*bits),
+                              codes, sizeof(*codes), sizeof(*codes),
+                              syms,  sizeof(*syms),  sizeof(*syms), 0);
+}
 
 static int build_huff(const uint8_t *src, VLC *vlc, int *fsym)
 {
@@ -77,17 +124,132 @@ static int build_huff(const uint8_t *src, VLC *vlc, int *fsym)
                               syms,  sizeof(*syms),  sizeof(*syms), 0);
 }
 
+static int decode_plane10(UtvideoContext *c, int plane_no,
+                          uint16_t *dst, int step, int stride,
+                          int width, int height,
+                          const uint8_t *src, const uint8_t *huff,
+                          int use_pred)
+{
+    BitstreamContext bc;
+    int i, j, slice, pix, ret;
+    int sstart, send;
+    VLC vlc;
+    int prev, fsym;
+
+    if ((ret = build_huff10(huff, &vlc, &fsym)) < 0) {
+        av_log(c->avctx, AV_LOG_ERROR, "Cannot build Huffman codes\n");
+        return ret;
+    }
+    if (fsym >= 0) { // build_huff10 reported a symbol to fill slices with
+        send = 0;
+        for (slice = 0; slice < c->slices; slice++) {
+            uint16_t *dest;
+
+            sstart = send;
+            send   = (height * (slice + 1) / c->slices);
+            dest   = dst + sstart * stride;
+
+            prev = 0x200;
+            for (j = sstart; j < send; j++) {
+                for (i = 0; i < width * step; i += step) {
+                    pix = fsym;
+                    if (use_pred) {
+                        prev += pix;
+                        prev &= 0x3FF;
+                        pix   = prev;
+                    }
+                    dest[i] = pix;
+                }
+                dest += stride;
+            }
+        }
+        return 0;
+    }
+
+    send = 0;
+    for (slice = 0; slice < c->slices; slice++) {
+        uint16_t *dest;
+        int slice_data_start, slice_data_end, slice_size;
+
+        sstart = send;
+        send   = (height * (slice + 1) / c->slices);
+        dest   = dst + sstart * stride;
+
+        // slice offset and size validation was done earlier
+        slice_data_start = slice ? AV_RL32(src + slice * 4 - 4) : 0;
+        slice_data_end   = AV_RL32(src + slice * 4);
+        slice_size       = slice_data_end - slice_data_start;
+
+        if (!slice_size) {
+            av_log(c->avctx, AV_LOG_ERROR, "Plane has more than one symbol "
+                   "yet a slice has a length of zero.\n");
+            goto fail;
+        }
+
+        memcpy(c->slice_bits, src + slice_data_start + c->slices * 4,
+               slice_size);
+        memset(c->slice_bits + slice_size, 0, AV_INPUT_BUFFER_PADDING_SIZE);
+        c->bdsp.bswap_buf((uint32_t *) c->slice_bits,
+                          (uint32_t *) c->slice_bits,
+                          (slice_data_end - slice_data_start + 3) >> 2);
+        bitstream_init8(&bc, c->slice_bits, slice_size);
+
+        prev = 0x200;
+        for (j = sstart; j < send; j++) {
+            for (i = 0; i < width * step; i += step) {
+                if (bitstream_bits_left(&bc) <= 0) {
+                    av_log(c->avctx, AV_LOG_ERROR,
+                           "Slice decoding ran out of bits\n");
+                    goto fail;
+                }
+                pix = bitstream_read_vlc(&bc, vlc.table, vlc.bits, 3);
+                if (pix < 0) {
+                    av_log(c->avctx, AV_LOG_ERROR, "Decoding error\n");
+                    goto fail;
+                }
+                if (use_pred) {
+                    prev += pix;
+                    prev &= 0x3FF;
+                    pix   = prev;
+                }
+                dest[i] = pix;
+            }
+            dest += stride;
+        }
+        if (bitstream_bits_left(&bc) > 32)
+            av_log(c->avctx, AV_LOG_WARNING,
+                   "%d bits left after decoding slice\n", bitstream_bits_left(&bc));
+    }
+
+    ff_free_vlc(&vlc);
+
+    return 0;
+fail:
+    ff_free_vlc(&vlc);
+    return AVERROR_INVALIDDATA;
+}
+
+static int compute_cmask(int plane_no, int interlaced, int pix_fmt)
+{
+    const int is_luma = (pix_fmt == AV_PIX_FMT_YUV420P) && !plane_no;
+
+    if (interlaced)
+        return ~(1 + 2 * is_luma);
+
+    return ~is_luma;
+}
+
 static int decode_plane(UtvideoContext *c, int plane_no,
-                        uint8_t *dst, int step, int stride,
+                        uint8_t *dst, int step, ptrdiff_t stride,
                         int width, int height,
                         const uint8_t *src, int use_pred)
 {
     int i, j, slice, pix;
     int sstart, send;
     VLC vlc;
-    GetBitContext gb;
+    BitstreamContext bc;
     int prev, fsym;
-    const int cmask = ~(!plane_no && c->avctx->pix_fmt == AV_PIX_FMT_YUV420P);
+    const int cmask = compute_cmask(plane_no, c->interlaced, c->avctx->pix_fmt);
 
     if (build_huff(src, &vlc, &fsym)) {
         av_log(c->avctx, AV_LOG_ERROR, "Cannot build Huffman codes\n");
@@ -142,20 +304,21 @@ static int decode_plane(UtvideoContext *c, int plane_no,
 
         memcpy(c->slice_bits, src + slice_data_start + c->slices * 4,
                slice_size);
-        memset(c->slice_bits + slice_size, 0, FF_INPUT_BUFFER_PADDING_SIZE);
-        c->dsp.bswap_buf((uint32_t *) c->slice_bits, (uint32_t *) c->slice_bits,
-                         (slice_data_end - slice_data_start + 3) >> 2);
-        init_get_bits(&gb, c->slice_bits, slice_size * 8);
+        memset(c->slice_bits + slice_size, 0, AV_INPUT_BUFFER_PADDING_SIZE);
+        c->bdsp.bswap_buf((uint32_t *) c->slice_bits,
+                          (uint32_t *) c->slice_bits,
+                          (slice_data_end - slice_data_start + 3) >> 2);
+        bitstream_init8(&bc, c->slice_bits, slice_size);
 
         prev = 0x80;
         for (j = sstart; j < send; j++) {
             for (i = 0; i < width * step; i += step) {
-                if (get_bits_left(&gb) <= 0) {
+                if (bitstream_bits_left(&bc) <= 0) {
                     av_log(c->avctx, AV_LOG_ERROR,
                            "Slice decoding ran out of bits\n");
                     goto fail;
                 }
-                pix = get_vlc2(&gb, vlc.table, vlc.bits, 4);
+                pix = bitstream_read_vlc(&bc, vlc.table, vlc.bits, 4);
                 if (pix < 0) {
                     av_log(c->avctx, AV_LOG_ERROR, "Decoding error\n");
                     goto fail;
@@ -168,9 +331,9 @@ static int decode_plane(UtvideoContext *c, int plane_no,
             }
             dest += stride;
         }
-        if (get_bits_left(&gb) > 32)
+        if (bitstream_bits_left(&bc) > 32)
             av_log(c->avctx, AV_LOG_WARNING,
-                   "%d bits left after decoding slice\n", get_bits_left(&gb));
+                   "%d bits left after decoding slice\n", bitstream_bits_left(&bc));
     }
 
     ff_free_vlc(&vlc);
@@ -181,8 +344,8 @@ fail:
     return AVERROR_INVALIDDATA;
 }
 
-static void restore_rgb_planes(uint8_t *src, int step, int stride, int width,
-                               int height)
+static void restore_rgb_planes(uint8_t *src, int step, ptrdiff_t stride,
+                               int width, int height)
 {
     int i, j;
     uint8_t r, g, b;
@@ -199,8 +362,132 @@ static void restore_rgb_planes(uint8_t *src, int step, int stride, int width,
     }
 }
 
-static void restore_median(uint8_t *src, int step, int stride,
-                           int width, int height, int slices, int rmode)
+static void restore_rgb_planes10(AVFrame *frame, int width, int height)
+{
+    uint16_t *src_r = (uint16_t *)frame->data[2];
+    uint16_t *src_g = (uint16_t *)frame->data[0];
+    uint16_t *src_b = (uint16_t *)frame->data[1];
+    int r, g, b;
+    int i, j;
+
+    for (j = 0; j < height; j++) {
+        for (i = 0; i < width; i++) {
+            r = src_r[i];
+            g = src_g[i];
+            b = src_b[i];
+            src_r[i] = (r + g - 0x200) & 0x3FF;
+            src_b[i] = (b + g - 0x200) & 0x3FF;
+        }
+        src_r += frame->linesize[2] / 2;
+        src_g += frame->linesize[0] / 2;
+        src_b += frame->linesize[1] / 2;
+    }
+}
+
+static void restore_median_planar(UtvideoContext *c, uint8_t *src,
+                                  ptrdiff_t stride, int width, int height,
+                                  int slices, int rmode)
+{
+    int i, j, slice;
+    int A, B, C;
+    uint8_t *bsrc;
+    int slice_start, slice_height;
+    const int cmask = ~rmode;
+
+    for (slice = 0; slice < slices; slice++) {
+        slice_start  = ((slice * height) / slices) & cmask;
+        slice_height = ((((slice + 1) * height) / slices) & cmask) -
+                       slice_start;
+
+        if (!slice_height)
+            continue;
+        bsrc = src + slice_start * stride;
+
+        // first line - left neighbour prediction
+        bsrc[0] += 0x80;
+        c->hdspdec.add_hfyu_left_pred(bsrc, bsrc, width, 0);
+        bsrc += stride;
+        if (slice_height <= 1)
+            continue;
+        // second line - first element has top prediction, the rest uses median
+        C        = bsrc[-stride];
+        bsrc[0] += C;
+        A        = bsrc[0];
+        for (i = 1; i < width; i++) {
+            B        = bsrc[i - stride];
+            bsrc[i] += mid_pred(A, B, (uint8_t)(A + B - C));
+            C        = B;
+            A        = bsrc[i];
+        }
+        bsrc += stride;
+        // the rest of lines use continuous median prediction
+        for (j = 2; j < slice_height; j++) {
+            c->hdspdec.add_hfyu_median_pred(bsrc, bsrc - stride,
+                                            bsrc, width, &A, &B);
+            bsrc += stride;
+        }
+    }
+}
+
+/* UtVideo interlaced mode treats every two lines as a single one,
+ * so restoring function should take care of possible padding between
+ * two parts of the same "line".
+ */
+static void restore_median_planar_il(UtvideoContext *c, uint8_t *src,
+                                     ptrdiff_t stride, int width, int height,
+                                     int slices, int rmode)
+{
+    int i, j, slice;
+    int A, B, C;
+    uint8_t *bsrc;
+    int slice_start, slice_height;
+    const int cmask   = ~(rmode ? 3 : 1);
+    const int stride2 = stride << 1;
+
+    for (slice = 0; slice < slices; slice++) {
+        slice_start    = ((slice * height) / slices) & cmask;
+        slice_height   = ((((slice + 1) * height) / slices) & cmask) -
+                         slice_start;
+        slice_height >>= 1;
+        if (!slice_height)
+            continue;
+
+        bsrc = src + slice_start * stride;
+
+        // first line - left neighbour prediction
+        bsrc[0] += 0x80;
+        A = c->hdspdec.add_hfyu_left_pred(bsrc, bsrc, width, 0);
+        c->hdspdec.add_hfyu_left_pred(bsrc + stride, bsrc + stride, width, A);
+        bsrc += stride2;
+        if (slice_height <= 1)
+            continue;
+        // second line - first element has top prediction, the rest uses median
+        C        = bsrc[-stride2];
+        bsrc[0] += C;
+        A        = bsrc[0];
+        for (i = 1; i < width; i++) {
+            B        = bsrc[i - stride2];
+            bsrc[i] += mid_pred(A, B, (uint8_t)(A + B - C));
+            C        = B;
+            A        = bsrc[i];
+        }
+        c->hdspdec.add_hfyu_median_pred(bsrc + stride, bsrc - stride,
+                                        bsrc + stride, width, &A, &B);
+        bsrc += stride2;
+        // the rest of lines use continuous median prediction
+        for (j = 2; j < slice_height; j++) {
+            c->hdspdec.add_hfyu_median_pred(bsrc, bsrc - stride2,
+                                            bsrc, width, &A, &B);
+            c->hdspdec.add_hfyu_median_pred(bsrc + stride, bsrc - stride,
+                                            bsrc + stride, width, &A, &B);
+            bsrc += stride2;
+        }
+    }
+}
+
+static void restore_median_packed(uint8_t *src, int step, ptrdiff_t stride,
+                                  int width, int height,
+                                  int slices, int rmode)
 {
     int i, j, slice;
     int A, B, C;
@@ -212,6 +499,8 @@ static void restore_median(uint8_t *src, int step, int stride,
         slice_start  = ((slice * height) / slices) & cmask;
         slice_height = ((((slice + 1) * height) / slices) & cmask) -
                        slice_start;
+        if (!slice_height)
+            continue;
 
         bsrc = src + slice_start * stride;
 
@@ -253,21 +542,24 @@ static void restore_median(uint8_t *src, int step, int stride,
  * so restoring function should take care of possible padding between
  * two parts of the same "line".
  */
-static void restore_median_il(uint8_t *src, int step, int stride,
-                              int width, int height, int slices, int rmode)
+static void restore_median_packed_il(uint8_t *src, int step, ptrdiff_t stride,
+                                     int width, int height,
+                                     int slices, int rmode)
 {
     int i, j, slice;
     int A, B, C;
     uint8_t *bsrc;
     int slice_start, slice_height;
     const int cmask   = ~(rmode ? 3 : 1);
-    const int stride2 = stride << 1;
+    const ptrdiff_t stride2 = stride << 1;
 
     for (slice = 0; slice < slices; slice++) {
         slice_start    = ((slice * height) / slices) & cmask;
         slice_height   = ((((slice + 1) * height) / slices) & cmask) -
                          slice_start;
         slice_height >>= 1;
+        if (!slice_height)
+            continue;
 
         bsrc = src + slice_start * stride;
 
@@ -321,6 +613,197 @@ static void restore_median_il(uint8_t *src, int step, int stride,
     }
 }
 
+static void restore_gradient_planar(UtvideoContext *c, uint8_t *src, ptrdiff_t stride,
+                                    int width, int height, int slices, int rmode)
+{
+    int i, j, slice;
+    int A, B, C;
+    uint8_t *bsrc;
+    int slice_start, slice_height;
+    const int cmask = ~rmode;
+
+    for (slice = 0; slice < slices; slice++) {
+        slice_start  = ((slice * height) / slices) & cmask;
+        slice_height = ((((slice + 1) * height) / slices) & cmask) -
+                       slice_start;
+
+        if (!slice_height)
+            continue;
+        bsrc = src + slice_start * stride;
+
+        // first line - left neighbour prediction
+        bsrc[0] += 0x80;
+        c->hdspdec.add_hfyu_left_pred(bsrc, bsrc, width, 0);
+        bsrc += stride;
+        if (slice_height <= 1)
+            continue;
+        for (j = 1; j < slice_height; j++) {
+            // second line - first element has top prediction, the rest uses gradient
+            bsrc[0] = (bsrc[0] + bsrc[-stride]) & 0xFF;
+            for (i = 1; i < width; i++) {
+                A = bsrc[i - stride];
+                B = bsrc[i - (stride + 1)];
+                C = bsrc[i - 1];
+                bsrc[i] = (A - B + C + bsrc[i]) & 0xFF;
+            }
+            bsrc += stride;
+        }
+    }
+}
+
+static void restore_gradient_planar_il(UtvideoContext *c, uint8_t *src, ptrdiff_t stride,
+                                      int width, int height, int slices, int rmode)
+{
+    int i, j, slice;
+    int A, B, C;
+    uint8_t *bsrc;
+    int slice_start, slice_height;
+    const int cmask   = ~(rmode ? 3 : 1);
+    const ptrdiff_t stride2 = stride << 1;
+
+    for (slice = 0; slice < slices; slice++) {
+        slice_start    = ((slice * height) / slices) & cmask;
+        slice_height   = ((((slice + 1) * height) / slices) & cmask) -
+                         slice_start;
+        slice_height >>= 1;
+        if (!slice_height)
+            continue;
+
+        bsrc = src + slice_start * stride;
+
+        // first line - left neighbour prediction
+        bsrc[0] += 0x80;
+        A = c->hdspdec.add_hfyu_left_pred(bsrc, bsrc, width, 0);
+        c->hdspdec.add_hfyu_left_pred(bsrc + stride, bsrc + stride, width, A);
+        bsrc += stride2;
+        if (slice_height <= 1)
+            continue;
+        for (j = 1; j < slice_height; j++) {
+            // second line - first element has top prediction, the rest uses gradient
+            bsrc[0] = (bsrc[0] + bsrc[-stride2]) & 0xFF;
+            for (i = 1; i < width; i++) {
+                A = bsrc[i - stride2];
+                B = bsrc[i - (stride2 + 1)];
+                C = bsrc[i - 1];
+                bsrc[i] = (A - B + C + bsrc[i]) & 0xFF;
+            }
+            A = bsrc[-stride];
+            B = bsrc[-(1 + stride + stride - width)];
+            C = bsrc[width - 1];
+            bsrc[stride] = (A - B + C + bsrc[stride]) & 0xFF;
+            for (i = 1; i < width; i++) {
+                A = bsrc[i - stride];
+                B = bsrc[i - (1 + stride)];
+                C = bsrc[i - 1 + stride];
+                bsrc[i + stride] = (A - B + C + bsrc[i + stride]) & 0xFF;
+            }
+            bsrc += stride2;
+        }
+    }
+}
+
+static void restore_gradient_packed(uint8_t *src, int step, ptrdiff_t stride,
+                                    int width, int height, int slices, int rmode)
+{
+    int i, j, slice;
+    int A, B, C;
+    uint8_t *bsrc;
+    int slice_start, slice_height;
+    const int cmask = ~rmode;
+
+    for (slice = 0; slice < slices; slice++) {
+        slice_start  = ((slice * height) / slices) & cmask;
+        slice_height = ((((slice + 1) * height) / slices) & cmask) -
+                       slice_start;
+
+        if (!slice_height)
+            continue;
+        bsrc = src + slice_start * stride;
+
+        // first line - left neighbour prediction
+        bsrc[0] += 0x80;
+        A = bsrc[0];
+        for (i = step; i < width * step; i += step) {
+            bsrc[i] += A;
+            A        = bsrc[i];
+        }
+        bsrc += stride;
+        if (slice_height <= 1)
+            continue;
+        for (j = 1; j < slice_height; j++) {
+            // second line - first element has top prediction, the rest uses gradient
+            C        = bsrc[-stride];
+            bsrc[0] += C;
+            for (i = step; i < width * step; i += step) {
+                A = bsrc[i - stride];
+                B = bsrc[i - (stride + step)];
+                C = bsrc[i - step];
+                bsrc[i] = (A - B + C + bsrc[i]) & 0xFF;
+            }
+            bsrc += stride;
+        }
+    }
+}
+
+static void restore_gradient_packed_il(uint8_t *src, int step, ptrdiff_t stride,
+                                       int width, int height, int slices, int rmode)
+{
+    int i, j, slice;
+    int A, B, C;
+    uint8_t *bsrc;
+    int slice_start, slice_height;
+    const int cmask   = ~(rmode ? 3 : 1);
+    const ptrdiff_t stride2 = stride << 1;
+
+    for (slice = 0; slice < slices; slice++) {
+        slice_start    = ((slice * height) / slices) & cmask;
+        slice_height   = ((((slice + 1) * height) / slices) & cmask) -
+                         slice_start;
+        slice_height >>= 1;
+        if (!slice_height)
+            continue;
+
+        bsrc = src + slice_start * stride;
+
+        // first line - left neighbour prediction
+        bsrc[0] += 0x80;
+        A        = bsrc[0];
+        for (i = step; i < width * step; i += step) {
+            bsrc[i] += A;
+            A        = bsrc[i];
+        }
+        for (i = 0; i < width * step; i += step) {
+            bsrc[stride + i] += A;
+            A                 = bsrc[stride + i];
+        }
+        bsrc += stride2;
+        if (slice_height <= 1)
+            continue;
+        for (j = 1; j < slice_height; j++) {
+            // second line - first element has top prediction, the rest uses gradient
+            C        = bsrc[-stride2];
+            bsrc[0] += C;
+            for (i = step; i < width * step; i += step) {
+                A = bsrc[i - stride2];
+                B = bsrc[i - (stride2 + step)];
+                C = bsrc[i - step];
+                bsrc[i] = (A - B + C + bsrc[i]) & 0xFF;
+            }
+            A = bsrc[-stride];
+            B = bsrc[-(step + stride + stride - width * step)];
+            C = bsrc[width * step - step];
+            bsrc[stride] = (A - B + C + bsrc[stride]) & 0xFF;
+            for (i = step; i < width * step; i += step) {
+                A = bsrc[i - stride];
+                B = bsrc[i - (step + stride)];
+                C = bsrc[i - step + stride];
+                bsrc[i + stride] = (A - B + C + bsrc[i + stride]) & 0xFF;
+            }
+            bsrc += stride2;
+        }
+    }
+}
+
 static int decode_frame(AVCodecContext *avctx, void *data, int *got_frame,
                         AVPacket *avpkt)
 {
@@ -343,47 +826,75 @@ static int decode_frame(AVCodecContext *avctx, void *data, int *got_frame,
 
     /* parse plane structure to get frame flags and validate slice offsets */
     bytestream2_init(&gb, buf, buf_size);
-    for (i = 0; i < c->planes; i++) {
-        plane_start[i] = gb.buffer;
-        if (bytestream2_get_bytes_left(&gb) < 256 + 4 * c->slices) {
-            av_log(avctx, AV_LOG_ERROR, "Insufficient data for a plane\n");
+    if (c->pro) {
+        if (bytestream2_get_bytes_left(&gb) < c->frame_info_size) {
+            av_log(avctx, AV_LOG_ERROR, "Not enough data for frame information\n");
             return AVERROR_INVALIDDATA;
         }
-        bytestream2_skipu(&gb, 256);
-        slice_start = 0;
-        slice_end   = 0;
-        for (j = 0; j < c->slices; j++) {
-            slice_end   = bytestream2_get_le32u(&gb);
-            slice_size  = slice_end - slice_start;
-            if (slice_end < 0 || slice_size < 0 ||
-                bytestream2_get_bytes_left(&gb) < slice_end) {
-                av_log(avctx, AV_LOG_ERROR, "Incorrect slice size\n");
+        c->frame_info = bytestream2_get_le32u(&gb);
+        c->slices = ((c->frame_info >> 16) & 0xff) + 1;
+        for (i = 0; i < c->planes; i++) {
+            plane_start[i] = gb.buffer;
+            if (bytestream2_get_bytes_left(&gb) < 1024 + 4 * c->slices) {
+                av_log(avctx, AV_LOG_ERROR, "Insufficient data for a plane\n");
+                return AVERROR_INVALIDDATA;
+            }
+            slice_start = 0;
+            slice_end   = 0;
+            for (j = 0; j < c->slices; j++) {
+                slice_end   = bytestream2_get_le32u(&gb);
+                if (slice_end < 0 || slice_end < slice_start ||
+                    bytestream2_get_bytes_left(&gb) < slice_end) {
+                    av_log(avctx, AV_LOG_ERROR, "Incorrect slice size\n");
+                    return AVERROR_INVALIDDATA;
+                }
+                slice_size  = slice_end - slice_start;
+                slice_start = slice_end;
+                max_slice_size = FFMAX(max_slice_size, slice_size);
+            }
+            plane_size = slice_end;
+            bytestream2_skipu(&gb, plane_size);
+            bytestream2_skipu(&gb, 1024);
+        }
+        plane_start[c->planes] = gb.buffer;
+    } else {
+        for (i = 0; i < c->planes; i++) {
+            plane_start[i] = gb.buffer;
+            if (bytestream2_get_bytes_left(&gb) < 256 + 4 * c->slices) {
+                av_log(avctx, AV_LOG_ERROR, "Insufficient data for a plane\n");
                 return AVERROR_INVALIDDATA;
             }
-            slice_start = slice_end;
-            max_slice_size = FFMAX(max_slice_size, slice_size);
+            bytestream2_skipu(&gb, 256);
+            slice_start = 0;
+            slice_end   = 0;
+            for (j = 0; j < c->slices; j++) {
+                slice_end   = bytestream2_get_le32u(&gb);
+                if (slice_end < 0 || slice_end < slice_start ||
+                    bytestream2_get_bytes_left(&gb) < slice_end) {
+                    av_log(avctx, AV_LOG_ERROR, "Incorrect slice size\n");
+                    return AVERROR_INVALIDDATA;
+                }
+                slice_size  = slice_end - slice_start;
+                slice_start = slice_end;
+                max_slice_size = FFMAX(max_slice_size, slice_size);
+            }
+            plane_size = slice_end;
+            bytestream2_skipu(&gb, plane_size);
         }
-        plane_size = slice_end;
-        bytestream2_skipu(&gb, plane_size);
-    }
-    plane_start[c->planes] = gb.buffer;
-    if (bytestream2_get_bytes_left(&gb) < c->frame_info_size) {
-        av_log(avctx, AV_LOG_ERROR, "Not enough data for frame information\n");
-        return AVERROR_INVALIDDATA;
+        plane_start[c->planes] = gb.buffer;
+        if (bytestream2_get_bytes_left(&gb) < c->frame_info_size) {
+            av_log(avctx, AV_LOG_ERROR, "Not enough data for frame information\n");
+            return AVERROR_INVALIDDATA;
+        }
+        c->frame_info = bytestream2_get_le32u(&gb);
     }
-    c->frame_info = bytestream2_get_le32u(&gb);
     av_log(avctx, AV_LOG_DEBUG, "frame information flags %"PRIX32"\n",
            c->frame_info);
 
     c->frame_pred = (c->frame_info >> 8) & 3;
 
-    if (c->frame_pred == PRED_GRADIENT) {
-        avpriv_request_sample(avctx, "Frame with gradient prediction");
-        return AVERROR_PATCHWELCOME;
-    }
-
     av_fast_malloc(&c->slice_bits, &c->slice_bits_size,
-                   max_slice_size + FF_INPUT_BUFFER_PADDING_SIZE);
+                   max_slice_size + AV_INPUT_BUFFER_PADDING_SIZE);
 
     if (!c->slice_bits) {
         av_log(avctx, AV_LOG_ERROR, "Cannot allocate temporary buffer\n");
@@ -402,20 +913,45 @@ static int decode_frame(AVCodecContext *avctx, void *data, int *got_frame,
                 return ret;
             if (c->frame_pred == PRED_MEDIAN) {
                 if (!c->interlaced) {
-                    restore_median(frame.f->data[0] + ff_ut_rgb_order[i],
-                                   c->planes, frame.f->linesize[0], avctx->width,
-                                   avctx->height, c->slices, 0);
+                    restore_median_packed(frame.f->data[0] + ff_ut_rgb_order[i],
+                                          c->planes, frame.f->linesize[0], avctx->width,
+                                          avctx->height, c->slices, 0);
                 } else {
-                    restore_median_il(frame.f->data[0] + ff_ut_rgb_order[i],
-                                      c->planes, frame.f->linesize[0],
-                                      avctx->width, avctx->height, c->slices,
-                                      0);
+                    restore_median_packed_il(frame.f->data[0] + ff_ut_rgb_order[i],
+                                             c->planes, frame.f->linesize[0],
+                                             avctx->width, avctx->height, c->slices,
+                                             0);
+                }
+            } else if (c->frame_pred == PRED_GRADIENT) {
+                if (!c->interlaced) {
+                    restore_gradient_packed(frame.f->data[0] + ff_ut_rgb_order[i],
+                                            c->planes, frame.f->linesize[0],
+                                            avctx->width, avctx->height,
+                                            c->slices, 0);
+                } else {
+                    restore_gradient_packed_il(frame.f->data[0] + ff_ut_rgb_order[i],
+                                               c->planes, frame.f->linesize[0],
+                                               avctx->width, avctx->height,
+                                               c->slices, 0);
                 }
             }
         }
         restore_rgb_planes(frame.f->data[0], c->planes, frame.f->linesize[0],
                            avctx->width, avctx->height);
         break;
+    case AV_PIX_FMT_GBRAP10:
+    case AV_PIX_FMT_GBRP10:
+        for (i = 0; i < c->planes; i++) {
+            ret = decode_plane10(c, i, (uint16_t *)frame.f->data[i], 1,
+                                 frame.f->linesize[i] / 2, avctx->width,
+                                 avctx->height, plane_start[i],
+                                 plane_start[i + 1] - 1024,
+                                 c->frame_pred == PRED_LEFT);
+            if (ret)
+                return ret;
+        }
+        restore_rgb_planes10(frame.f, avctx->width, avctx->height);
+        break;
     case AV_PIX_FMT_YUV420P:
         for (i = 0; i < 3; i++) {
             ret = decode_plane(c, i, frame.f->data[i], 1, frame.f->linesize[i],
@@ -425,14 +961,26 @@ static int decode_frame(AVCodecContext *avctx, void *data, int *got_frame,
                 return ret;
             if (c->frame_pred == PRED_MEDIAN) {
                 if (!c->interlaced) {
-                    restore_median(frame.f->data[i], 1, frame.f->linesize[i],
-                                   avctx->width >> !!i, avctx->height >> !!i,
-                                   c->slices, !i);
+                    restore_median_planar(c, frame.f->data[i], frame.f->linesize[i],
+                                          avctx->width >> !!i, avctx->height >> !!i,
+                                          c->slices, !i);
+                } else {
+                    restore_median_planar_il(c, frame.f->data[i], frame.f->linesize[i],
+                                             avctx->width  >> !!i,
+                                             avctx->height >> !!i,
+                                             c->slices, !i);
+                }
+            } else if (c->frame_pred == PRED_GRADIENT) {
+                if (!c->interlaced) {
+                    restore_gradient_planar(c, frame.f->data[i], frame.f->linesize[i],
+                                            avctx->width >> !!i,
+                                            avctx->height >> !!i,
+                                            c->slices, !i);
                 } else {
-                    restore_median_il(frame.f->data[i], 1, frame.f->linesize[i],
-                                      avctx->width  >> !!i,
-                                      avctx->height >> !!i,
-                                      c->slices, !i);
+                    restore_gradient_planar_il(c, frame.f->data[i], frame.f->linesize[i],
+                                               avctx->width  >> !!i,
+                                               avctx->height >> !!i,
+                                               c->slices, !i);
                 }
             }
         }
@@ -446,17 +994,66 @@ static int decode_frame(AVCodecContext *avctx, void *data, int *got_frame,
                 return ret;
             if (c->frame_pred == PRED_MEDIAN) {
                 if (!c->interlaced) {
-                    restore_median(frame.f->data[i], 1, frame.f->linesize[i],
-                                   avctx->width >> !!i, avctx->height,
-                                   c->slices, 0);
+                    restore_median_planar(c, frame.f->data[i], frame.f->linesize[i],
+                                          avctx->width >> !!i, avctx->height,
+                                          c->slices, 0);
+                } else {
+                    restore_median_planar_il(c, frame.f->data[i], frame.f->linesize[i],
+                                             avctx->width >> !!i, avctx->height,
+                                             c->slices, 0);
+                }
+            } else if (c->frame_pred == PRED_GRADIENT) {
+                if (!c->interlaced) {
+                    restore_gradient_planar(c, frame.f->data[i], frame.f->linesize[i],
+                                            avctx->width >> !!i, avctx->height,
+                                            c->slices, 0);
+                } else {
+                    restore_gradient_planar_il(c, frame.f->data[i], frame.f->linesize[i],
+                                               avctx->width  >> !!i, avctx->height,
+                                               c->slices, 0);
+                }
+            }
+        }
+        break;
+    case AV_PIX_FMT_YUV444P:
+        for (i = 0; i < 3; i++) {
+            ret = decode_plane(c, i, frame.f->data[i], 1, frame.f->linesize[i],
+                               avctx->width, avctx->height,
+                               plane_start[i], c->frame_pred == PRED_LEFT);
+            if (ret)
+                return ret;
+            if (c->frame_pred == PRED_MEDIAN) {
+                if (!c->interlaced) {
+                    restore_median_planar(c, frame.f->data[i], frame.f->linesize[i],
+                                          avctx->width, avctx->height,
+                                          c->slices, 0);
+                } else {
+                    restore_median_planar_il(c, frame.f->data[i], frame.f->linesize[i],
+                                             avctx->width, avctx->height,
+                                             c->slices, 0);
+                }
+            } else if (c->frame_pred == PRED_GRADIENT) {
+                if (!c->interlaced) {
+                    restore_gradient_planar(c, frame.f->data[i], frame.f->linesize[i],
+                                            avctx->width, avctx->height,
+                                            c->slices, 0);
                 } else {
-                    restore_median_il(frame.f->data[i], 1, frame.f->linesize[i],
-                                      avctx->width >> !!i, avctx->height,
-                                      c->slices, 0);
+                    restore_gradient_planar_il(c, frame.f->data[i], frame.f->linesize[i],
+                                               avctx->width, avctx->height,
+                                               c->slices, 0);
                 }
             }
         }
         break;
+    case AV_PIX_FMT_YUV422P10:
+        for (i = 0; i < 3; i++) {
+            ret = decode_plane10(c, i, (uint16_t *)frame.f->data[i], 1, frame.f->linesize[i] / 2,
+                                 avctx->width >> !!i, avctx->height,
+                                 plane_start[i], plane_start[i + 1] - 1024, c->frame_pred == PRED_LEFT);
+            if (ret)
+                return ret;
+        }
+        break;
     }
 
     frame.f->key_frame = 1;
@@ -475,30 +1072,40 @@ static av_cold int decode_init(AVCodecContext *avctx)
 
     c->avctx = avctx;
 
-    ff_dsputil_init(&c->dsp, avctx);
+    ff_bswapdsp_init(&c->bdsp);
+    ff_huffyuvdsp_init(&c->hdspdec);
+
+    if (avctx->extradata_size >= 16) {
+        av_log(avctx, AV_LOG_DEBUG, "Encoder version %d.%d.%d.%d\n",
+               avctx->extradata[3], avctx->extradata[2],
+               avctx->extradata[1], avctx->extradata[0]);
+        av_log(avctx, AV_LOG_DEBUG, "Original format %"PRIX32"\n",
+               AV_RB32(avctx->extradata + 4));
+        c->frame_info_size = AV_RL32(avctx->extradata + 8);
+        c->flags           = AV_RL32(avctx->extradata + 12);
 
-    if (avctx->extradata_size < 16) {
+        if (c->frame_info_size != 4)
+            avpriv_request_sample(avctx, "Frame info not 4 bytes");
+        av_log(avctx, AV_LOG_DEBUG, "Encoding parameters %08"PRIX32"\n", c->flags);
+        c->slices      = (c->flags >> 24) + 1;
+        c->compression = c->flags & 1;
+        c->interlaced  = c->flags & 0x800;
+    } else if (avctx->extradata_size == 8) {
+        av_log(avctx, AV_LOG_DEBUG, "Encoder version %d.%d.%d.%d\n",
+               avctx->extradata[3], avctx->extradata[2],
+               avctx->extradata[1], avctx->extradata[0]);
+        av_log(avctx, AV_LOG_DEBUG, "Original format %"PRIX32"\n",
+               AV_RB32(avctx->extradata + 4));
+        c->interlaced  = 0;
+        c->pro         = 1;
+        c->frame_info_size = 4;
+    } else {
         av_log(avctx, AV_LOG_ERROR,
                "Insufficient extradata size %d, should be at least 16\n",
                avctx->extradata_size);
         return AVERROR_INVALIDDATA;
     }
 
-    av_log(avctx, AV_LOG_DEBUG, "Encoder version %d.%d.%d.%d\n",
-           avctx->extradata[3], avctx->extradata[2],
-           avctx->extradata[1], avctx->extradata[0]);
-    av_log(avctx, AV_LOG_DEBUG, "Original format %X\n",
-           AV_RB32(avctx->extradata + 4));
-    c->frame_info_size = AV_RL32(avctx->extradata + 8);
-    c->flags           = AV_RL32(avctx->extradata + 12);
-
-    if (c->frame_info_size != 4)
-        avpriv_request_sample(avctx, "Frame info not 4 bytes");
-    av_log(avctx, AV_LOG_DEBUG, "Encoding parameters %08"PRIX32"\n", c->flags);
-    c->slices      = (c->flags >> 24) + 1;
-    c->compression = c->flags & 1;
-    c->interlaced  = c->flags & 0x800;
-
     c->slice_bits_size = 0;
 
     switch (avctx->codec_tag) {
@@ -520,6 +1127,23 @@ static av_cold int decode_init(AVCodecContext *avctx)
         avctx->pix_fmt = AV_PIX_FMT_YUV422P;
         avctx->colorspace = AVCOL_SPC_BT470BG;
         break;
+    case MKTAG('U', 'L', 'Y', '4'):
+        c->planes      = 3;
+        avctx->pix_fmt = AV_PIX_FMT_YUV444P;
+        avctx->colorspace = AVCOL_SPC_BT470BG;
+        break;
+    case MKTAG('U', 'Q', 'Y', '2'):
+        c->planes      = 3;
+        avctx->pix_fmt = AV_PIX_FMT_YUV422P10;
+        break;
+    case MKTAG('U', 'Q', 'R', 'G'):
+        c->planes      = 3;
+        avctx->pix_fmt = AV_PIX_FMT_GBRP10;
+        break;
+    case MKTAG('U', 'Q', 'R', 'A'):
+        c->planes      = 4;
+        avctx->pix_fmt = AV_PIX_FMT_GBRAP10;
+        break;
     case MKTAG('U', 'L', 'H', '0'):
         c->planes      = 3;
         avctx->pix_fmt = AV_PIX_FMT_YUV420P;
@@ -530,6 +1154,11 @@ static av_cold int decode_init(AVCodecContext *avctx)
         avctx->pix_fmt = AV_PIX_FMT_YUV422P;
         avctx->colorspace = AVCOL_SPC_BT709;
         break;
+    case MKTAG('U', 'L', 'H', '4'):
+        c->planes      = 3;
+        avctx->pix_fmt = AV_PIX_FMT_YUV444P;
+        avctx->colorspace = AVCOL_SPC_BT709;
+        break;
     default:
         av_log(avctx, AV_LOG_ERROR, "Unknown Ut Video FOURCC provided (%08X)\n",
                avctx->codec_tag);
@@ -557,5 +1186,6 @@ AVCodec ff_utvideo_decoder = {
     .init           = decode_init,
     .close          = decode_end,
     .decode         = decode_frame,
-    .capabilities   = CODEC_CAP_DR1 | CODEC_CAP_FRAME_THREADS,
+    .capabilities   = AV_CODEC_CAP_DR1 | AV_CODEC_CAP_FRAME_THREADS,
+    .caps_internal  = FF_CODEC_CAP_INIT_THREADSAFE,
 };
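
The build_huff10() added above turns the 1024 per-symbol code lengths stored in each plane header into a canonical Huffman table: the symbols are sorted by ascending length, unused symbols (length 255) are dropped, and codes are assigned from the longest symbol upwards before being handed to ff_init_vlc_sparse(). The standalone sketch below reproduces only that code-assignment step on a made-up 8-symbol table; DemoHuffEntry, cmp_len() and the sample lengths are illustrative, not part of the patch.

/* Canonical code assignment as done by build_huff10(), reduced to a demo. */
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define SYMS 8

typedef struct DemoHuffEntry {
    uint16_t sym;
    uint8_t  len;
} DemoHuffEntry;

/* sort by code length, then by symbol value (same ordering the decoder uses) */
static int cmp_len(const void *a, const void *b)
{
    const DemoHuffEntry *ea = a, *eb = b;
    return ea->len != eb->len ? ea->len - eb->len : ea->sym - eb->sym;
}

int main(void)
{
    /* code lengths as they would arrive in the per-plane length table;
     * 255 marks a symbol that does not occur */
    DemoHuffEntry he[SYMS] = {
        { 0, 2 }, { 1, 2 }, { 2, 3 }, { 3, 3 },
        { 4, 3 }, { 5, 3 }, { 6, 255 }, { 7, 255 },
    };
    uint32_t code = 1;
    int last = SYMS - 1;

    qsort(he, SYMS, sizeof(*he), cmp_len);

    while (last && he[last].len == 255)     /* drop unused symbols */
        last--;

    /* assign codes from the longest symbol upwards, exactly like the loop
     * over he[last..0] in build_huff10() */
    for (int i = last; i >= 0; i--) {
        printf("sym %d: len %d code 0x%X\n",
               he[i].sym, he[i].len, (unsigned)(code >> (32 - he[i].len)));
        code += 0x80000000u >> (he[i].len - 1);
    }
    return 0;
}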
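
The restore_median_planar()/restore_median_packed() pair above undoes Ut Video's median predictor: the first line of a slice is left-predicted (seeded with 0x80), and every following pixel was coded against mid_pred(left, top, left + top - topleft). The sketch below applies the same recurrence to one whole 8-bit plane, leaving out the per-slice splitting and the add_hfyu_left_pred()/add_hfyu_median_pred() DSP calls the decoder delegates to; median3() and restore_median_demo() are names made up for this illustration.

/* Simplified whole-plane median-prediction restore (illustrative only). */
#include <stddef.h>
#include <stdint.h>

/* median of three values, equivalent to mid_pred() */
int median3(int a, int b, int c)
{
    if (a > b) { int t = a; a = b; b = t; }     /* now a <= b */
    return c < a ? a : (c > b ? b : c);         /* clamp c into [a, b] */
}

void restore_median_demo(uint8_t *plane, ptrdiff_t stride,
                         int width, int height)
{
    /* first line: 0x80 seed, then plain left prediction */
    plane[0] += 0x80;
    for (int x = 1; x < width; x++)
        plane[x] += plane[x - 1];

    for (int y = 1; y < height; y++) {
        uint8_t *cur       = plane + y * stride;
        const uint8_t *top = cur - stride;

        cur[0] += top[0];                       /* first column: top prediction */
        for (int x = 1; x < width; x++) {
            int A = cur[x - 1];                 /* left     */
            int B = top[x];                     /* top      */
            int C = top[x - 1];                 /* top-left */
            cur[x] += median3(A, B, (uint8_t)(A + B - C));
        }
    }
}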
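
The new restore_gradient_* functions implement the gradient prediction mode that was previously rejected with "Frame with gradient prediction": each pixel is predicted as top - topleft + left and the coded residual is added modulo 256, while the first line again uses left prediction with the 0x80 seed. Below is a minimal whole-plane version of the same recurrence without the slice and interlacing handling; restore_gradient_demo() is an illustrative name.

/* Simplified whole-plane gradient-prediction restore (illustrative only). */
#include <stddef.h>
#include <stdint.h>

void restore_gradient_demo(uint8_t *plane, ptrdiff_t stride,
                           int width, int height)
{
    /* first line: 0x80 seed, then plain left prediction */
    plane[0] += 0x80;
    for (int x = 1; x < width; x++)
        plane[x] += plane[x - 1];

    for (int y = 1; y < height; y++) {
        uint8_t *cur       = plane + y * stride;
        const uint8_t *top = cur - stride;

        cur[0] = (cur[0] + top[0]) & 0xFF;      /* first column: top prediction */
        for (int x = 1; x < width; x++) {
            int A = top[x];                     /* top      */
            int B = top[x - 1];                 /* top-left */
            int C = cur[x - 1];                 /* left     */
            cur[x] = (A - B + C + cur[x]) & 0xFF;
        }
    }
}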
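
On the configuration side, decode_init() above now accepts two extradata layouts: the classic 16-byte block (four version bytes, the original FOURCC, a little-endian frame-info size, and a flags word whose top byte is the slice count minus one, bit 0 the compression flag and bit 0x800 the interlacing flag), and an 8-byte block that selects the new Ut Video Pro path via c->pro. The snippet below decodes a hand-made 16-byte block the same way; the sample bytes and helper names are invented for the demonstration.

/* Decoding the classic 16-byte Ut Video extradata layout (illustrative). */
#include <stdint.h>
#include <stdio.h>

/* little-endian 32-bit read, standing in for AV_RL32() */
static uint32_t rl32(const uint8_t *p)
{
    return (uint32_t)p[0]         | ((uint32_t)p[1] << 8) |
           ((uint32_t)p[2] << 16) | ((uint32_t)p[3] << 24);
}

int main(void)
{
    /* made-up sample: version 13.0.1.0, original format "YV12",
     * frame_info_size 4, flags = 0x07000001 (8 slices, compressed,
     * progressive) */
    const uint8_t extradata[16] = {
        0x00, 0x01, 0x00, 0x0D,
        'Y',  'V',  '1',  '2',
        0x04, 0x00, 0x00, 0x00,
        0x01, 0x00, 0x00, 0x07,
    };
    uint32_t flags = rl32(extradata + 12);

    printf("encoder version %d.%d.%d.%d\n", extradata[3], extradata[2],
           extradata[1], extradata[0]);
    printf("frame info size %u\n", (unsigned)rl32(extradata + 8));
    printf("slices          %u\n", (unsigned)((flags >> 24) + 1));
    printf("compression     %u\n", (unsigned)(flags & 1));
    printf("interlaced      %u\n", (flags & 0x800) ? 1u : 0u);
    return 0;
}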