#include "bytestream.h"
#include "get_bits.h"
#include "dsputil.h"
+#include "thread.h"
enum {
PRED_NONE = 0,
return (aa->len - bb->len)*256 + aa->sym - bb->sym;
}
-static int build_huff(const uint8_t *src, VLC *vlc)
+static int build_huff(const uint8_t *src, VLC *vlc, int *fsym)
{
int i;
HuffEntry he[256];
uint8_t syms[256];
uint32_t code;
+ *fsym = -1;
for (i = 0; i < 256; i++) {
he[i].sym = i;
he[i].len = *src++;
}
qsort(he, 256, sizeof(*he), huff_cmp);
- if (!he[0].len || he[0].len > 32)
+ if (!he[0].len) {
+ *fsym = he[0].sym;
+ return 0;
+ }
+ if (he[0].len > 32)
return -1;
last = 255;
int sstart, send;
VLC vlc;
GetBitContext gb;
- int prev;
+ int prev, fsym;
const int cmask = ~(!plane_no && c->avctx->pix_fmt == PIX_FMT_YUV420P);
- if (build_huff(src, &vlc)) {
+ if (build_huff(src, &vlc, &fsym)) {
av_log(c->avctx, AV_LOG_ERROR, "Cannot build Huffman codes\n");
return AVERROR_INVALIDDATA;
}
+ if (fsym >= 0) { // build_huff reported a symbol to fill slices with
+ send = 0;
+ for (slice = 0; slice < c->slices; slice++) {
+ uint8_t *dest;
+
+ sstart = send;
+ send = (height * (slice + 1) / c->slices) & cmask;
+ dest = dst + sstart * stride;
+
+ prev = 0x80;
+ for (j = sstart; j < send; j++) {
+ for (i = 0; i < width * step; i += step) {
+ pix = fsym;
+ if (use_pred) {
+ prev += pix;
+ pix = prev;
+ }
+ dest[i] = pix;
+ }
+ dest += stride;
+ }
+ }
+ return 0;
+ }
src += 256;
src_size -= 256;
}
}
+/* UtVideo interlaced mode treats every two lines as a single one,
+ * so the restoring function should take care of possible padding between
+ * the two parts of the same "line".
+ */
+/* Undo median prediction in place for one plane of an interlaced frame.
+ * src    - plane base pointer (predicted residuals in, pixels out)
+ * step   - byte distance between horizontally adjacent samples
+ * stride - byte distance between consecutive rows
+ * slices - number of independently predicted horizontal slices
+ * rmode  - nonzero when slice boundaries must align to 4-row groups
+ *          (presumably the chroma-subsampled planes — TODO confirm against caller)
+ */
+static void restore_median_il(uint8_t *src, int step, int stride,
+                              int width, int height, int slices, int rmode)
+{
+    int i, j, slice;
+    int A, B, C;                 /* left, top and top-left neighbours */
+    uint8_t *bsrc;
+    int slice_start, slice_height;
+    const int cmask = ~(rmode ? 3 : 1);  /* keep slice bounds on row-pair (or 4-row) boundaries */
+    const int stride2 = stride << 1;     /* one logical "line" = two physical rows */
+
+    for (slice = 0; slice < slices; slice++) {
+        slice_start = ((slice * height) / slices) & cmask;
+        slice_height = ((((slice + 1) * height) / slices) & cmask) - slice_start;
+        slice_height >>= 1;              /* count in row PAIRS from here on */
+
+        bsrc = src + slice_start * stride;
+
+        // first line - left neighbour prediction
+        bsrc[0] += 0x80;                 /* leftmost sample is biased by 0x80, not predicted */
+        A = bsrc[0];
+        for (i = step; i < width * step; i += step) {
+            bsrc[i] += A;
+            A = bsrc[i];
+        }
+        /* left prediction continues across the padding into the second
+         * physical row of the same logical line */
+        for (i = 0; i < width * step; i += step) {
+            bsrc[stride + i] += A;
+            A = bsrc[stride + i];
+        }
+        bsrc += stride2;
+        if (slice_height == 1)
+            continue;
+        // second line - first element has top prediction, the rest uses median
+        C = bsrc[-stride2];
+        bsrc[0] += C;
+        A = bsrc[0];
+        for (i = step; i < width * step; i += step) {
+            B = bsrc[i - stride2];       /* top neighbour = same column, previous logical line */
+            bsrc[i] += mid_pred(A, B, (uint8_t)(A + B - C));
+            C = B;
+            A = bsrc[i];
+        }
+        for (i = 0; i < width * step; i += step) {
+            B = bsrc[i - stride];        /* = bsrc[stride + i - stride2]: top of second row */
+            bsrc[stride + i] += mid_pred(A, B, (uint8_t)(A + B - C));
+            C = B;
+            A = bsrc[stride + i];
+        }
+        bsrc += stride2;
+        // the rest of lines use continuous median prediction
+        for (j = 2; j < slice_height; j++) {
+            for (i = 0; i < width * step; i += step) {
+                B = bsrc[i - stride2];
+                bsrc[i] += mid_pred(A, B, (uint8_t)(A + B - C));
+                C = B;
+                A = bsrc[i];
+            }
+            for (i = 0; i < width * step; i += step) {
+                B = bsrc[i - stride];
+                bsrc[i + stride] += mid_pred(A, B, (uint8_t)(A + B - C));
+                C = B;
+                A = bsrc[i + stride];
+            }
+            bsrc += stride2;
+        }
+    }
+}
+
static int decode_frame(AVCodecContext *avctx, void *data, int *data_size, AVPacket *avpkt)
{
const uint8_t *buf = avpkt->data;
int ret;
if (c->pic.data[0])
- avctx->release_buffer(avctx, &c->pic);
+ ff_thread_release_buffer(avctx, &c->pic);
c->pic.reference = 1;
c->pic.buffer_hints = FF_BUFFER_HINTS_VALID;
- if ((ret = avctx->get_buffer(avctx, &c->pic)) < 0) {
+ if ((ret = ff_thread_get_buffer(avctx, &c->pic)) < 0) {
av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
return ret;
}
+ ff_thread_finish_setup(avctx);
+
/* parse plane structure to retrieve frame flags and validate slice offsets */
ptr = buf;
for (i = 0; i < c->planes; i++) {
c->frame_pred == PRED_LEFT);
if (ret)
return ret;
- if (c->frame_pred == PRED_MEDIAN)
- restore_median(c->pic.data[i], 1, c->pic.linesize[i],
- avctx->width >> !!i, avctx->height >> !!i,
- c->slices, !i);
+ if (c->frame_pred == PRED_MEDIAN) {
+ if (!c->interlaced) {
+ restore_median(c->pic.data[i], 1, c->pic.linesize[i],
+ avctx->width >> !!i, avctx->height >> !!i,
+ c->slices, !i);
+ } else {
+ restore_median_il(c->pic.data[i], 1, c->pic.linesize[i],
+ avctx->width >> !!i,
+ avctx->height >> !!i,
+ c->slices, !i);
+ }
+ }
}
break;
case PIX_FMT_YUV422P:
c->frame_pred == PRED_LEFT);
if (ret)
return ret;
- if (c->frame_pred == PRED_MEDIAN)
- restore_median(c->pic.data[i], 1, c->pic.linesize[i],
- avctx->width >> !!i, avctx->height, c->slices, 0);
+ if (c->frame_pred == PRED_MEDIAN) {
+ if (!c->interlaced) {
+ restore_median(c->pic.data[i], 1, c->pic.linesize[i],
+ avctx->width >> !!i, avctx->height,
+ c->slices, 0);
+ } else {
+ restore_median_il(c->pic.data[i], 1, c->pic.linesize[i],
+ avctx->width >> !!i, avctx->height,
+ c->slices, 0);
+ }
+ }
}
break;
}
UtvideoContext * const c = avctx->priv_data;
if (c->pic.data[0])
- avctx->release_buffer(avctx, &c->pic);
+ ff_thread_release_buffer(avctx, &c->pic);
av_freep(&c->slice_bits);
.init = decode_init,
.close = decode_end,
.decode = decode_frame,
- .capabilities = CODEC_CAP_DR1,
+ .capabilities = CODEC_CAP_DR1 | CODEC_CAP_FRAME_THREADS,
.long_name = NULL_IF_CONFIG_SMALL("Ut Video"),
};