3 * Copyright (c) 2011 Konstantin Shishkov
5 * This file is part of FFmpeg.
7 * FFmpeg is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU Lesser General Public
9 * License as published by the Free Software Foundation; either
10 * version 2.1 of the License, or (at your option) any later version.
12 * FFmpeg is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * Lesser General Public License for more details.
17 * You should have received a copy of the GNU Lesser General Public
18 * License along with FFmpeg; if not, write to the Free Software
19 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
30 #define UNCHECKED_BITSTREAM_READER 1
32 #include "libavutil/intreadwrite.h"
35 #include "bytestream.h"
41 static int build_huff10(const uint8_t *src, VLC *vlc, int *fsym)
/* Build the VLC used to decode a 10-bit plane from the 1024 per-symbol
 * code lengths stored in src.  Callers treat *fsym >= 0 as "fill the
 * whole plane with this single symbol" (see decode_plane10).  Returns
 * the ff_init_vlc_sparse() result: 0 on success, negative on failure.
 * NOTE(review): several interior lines are elided in this excerpt;
 * comments cover only the code shown. */
52     for (i = 0; i < 1024; i++) {
/* gather (symbol, length) entries for all 1024 possible 10-bit symbols */
56     qsort(he, 1024, sizeof(*he), ff_ut10_huff_cmp_len);
/* sort by code length so canonical Huffman codes can be assigned below */
64     while (he[last].len == 255 && last)
/* drop trailing unused symbols — length 255 marks "symbol not present" */
67     if (he[last].len > 32) {
/* a code longer than 32 bits cannot fit in the 32-bit code word — reject */
72     for (i = last; i >= 0; i--) {
/* assign canonical codes, walking from the longest code to the shortest;
 * each code is the top len bits of the running 32-bit accumulator */
73         codes[i] = code >> (32 - he[i].len);
76         code += 0x80000000u >> (he[i].len - 1);
79     return ff_init_vlc_sparse(vlc, VLC_BITS, last + 1,
80                               bits, sizeof(*bits), sizeof(*bits),
81                               codes, sizeof(*codes), sizeof(*codes),
82                               syms, sizeof(*syms), sizeof(*syms), 0);
85 static int build_huff(const uint8_t *src, VLC *vlc, int *fsym)
/* 8-bit counterpart of build_huff10(): build the VLC from 256 per-symbol
 * code lengths in src.  *fsym >= 0 reports a single fill symbol (see
 * decode_plane).  Returns the ff_init_vlc_sparse() result.
 * NOTE(review): interior lines are elided in this excerpt. */
96     for (i = 0; i < 256; i++) {
/* gather (symbol, length) entries for all 256 byte symbols */
100     qsort(he, 256, sizeof(*he), ff_ut_huff_cmp_len);
/* sort by code length for canonical code assignment */
108     while (he[last].len == 255 && last)
/* skip unused symbols (length 255 = not present) */
111     if (he[last].len > 32)
/* codes wider than the 32-bit accumulator are invalid */
115     for (i = last; i >= 0; i--) {
/* canonical code assignment, longest code first */
116         codes[i] = code >> (32 - he[i].len);
119         code += 0x80000000u >> (he[i].len - 1);
122     return ff_init_vlc_sparse(vlc, VLC_BITS, last + 1,
123                               bits, sizeof(*bits), sizeof(*bits),
124                               codes, sizeof(*codes), sizeof(*codes),
125                               syms, sizeof(*syms), sizeof(*syms), 0);
128 static int decode_plane10(UtvideoContext *c, int plane_no,
129                           uint16_t *dst, int step, ptrdiff_t stride,
130                           int width, int height,
131                           const uint8_t *src, const uint8_t *huff,
/* Decode one 10-bit plane.  src points at the per-plane slice offset
 * table followed by the slice payloads; huff points at the 1024-byte
 * code-length table.  stride is in uint16_t units (callers pass
 * linesize/2).  Returns 0 on success, negative AVERROR otherwise.
 * NOTE(review): interior lines are elided in this excerpt; comments
 * cover only the code shown. */
134     int i, j, slice, pix, ret;
140     if ((ret = build_huff10(huff, &vlc, &fsym)) < 0) {
141         av_log(c->avctx, AV_LOG_ERROR, "Cannot build Huffman codes\n");
144     if (fsym >= 0) { // build_huff reported a symbol to fill slices with
/* degenerate plane: a single symbol — just fill every slice with it
 * (possibly with left prediction; elided lines handle use_pred) */
146         for (slice = 0; slice < c->slices; slice++) {
/* slice rows are [sstart, send): proportional split of the plane height */
150             send = (height * (slice + 1) / c->slices);
151             dest = dst + sstart * stride;
154             for (j = sstart; j < send; j++) {
155                 for (i = 0; i < width * step; i += step) {
/* normal path: VLC-decode each slice independently */
171     for (slice = 0; slice < c->slices; slice++) {
173         int slice_data_start, slice_data_end, slice_size;
176         send = (height * (slice + 1) / c->slices);
177         dest = dst + sstart * stride;
179         // slice offset and size validation was done earlier
180         slice_data_start = slice ? AV_RL32(src + slice * 4 - 4) : 0;
181         slice_data_end   = AV_RL32(src + slice * 4);
182         slice_size       = slice_data_end - slice_data_start;
/* a multi-symbol plane must carry bits for every slice */
185             av_log(c->avctx, AV_LOG_ERROR, "Plane has more than one symbol "
186                    "yet a slice has a length of zero.\n");
/* byteswap the slice payload into c->slice_bits so the bit reader can
 * consume it; zero the padding so over-reads stay deterministic */
190         memset(c->slice_bits + slice_size, 0, AV_INPUT_BUFFER_PADDING_SIZE);
191         c->bdsp.bswap_buf((uint32_t *) c->slice_bits,
192                           (uint32_t *)(src + slice_data_start + c->slices * 4),
193                           (slice_data_end - slice_data_start + 3) >> 2);
194         init_get_bits(&gb, c->slice_bits, slice_size * 8);
197         for (j = sstart; j < send; j++) {
198             int ws = width * step;
199             for (i = 0; i < ws; i += step) {
200                 pix = get_vlc2(&gb, vlc.table, VLC_BITS, 3);
/* get_vlc2() reports an invalid code with a negative value (error path
 * partially elided here) */
202                     av_log(c->avctx, AV_LOG_ERROR, "Decoding error\n");
/* over-read of the slice bitstream is a hard error */
213             if (get_bits_left(&gb) < 0) {
214                 av_log(c->avctx, AV_LOG_ERROR,
215                        "Slice decoding ran out of bits\n");
/* more than one spare 32-bit word left is suspicious but tolerated */
219         if (get_bits_left(&gb) > 32)
220             av_log(c->avctx, AV_LOG_WARNING,
221                    "%d bits left after decoding slice\n", get_bits_left(&gb));
/* shared error exit (label elided): free the VLC and report bad data */
229     return AVERROR_INVALIDDATA;
232 static int decode_plane(UtvideoContext *c, int plane_no,
233                         uint8_t *dst, int step, ptrdiff_t stride,
234                         int width, int height,
235                         const uint8_t *src, int use_pred)
/* Decode one 8-bit plane.  src starts with the 256-byte Huffman length
 * table (consumed by build_huff), then the slice offset table and
 * payloads.  use_pred enables left prediction during decoding.
 * Returns 0 on success, negative AVERROR otherwise.
 * NOTE(review): interior lines are elided in this excerpt. */
237     int i, j, slice, pix;
/* cmask aligns slice row boundaries: interlaced content needs 2-row
 * (4-row for 4:2:0 luma) alignment so field pairs stay within a slice */
242     const int cmask = c->interlaced ? ~(1 + 2 * (!plane_no && c->avctx->pix_fmt == AV_PIX_FMT_YUV420P)) : ~(!plane_no && c->avctx->pix_fmt == AV_PIX_FMT_YUV420P);
244     if (build_huff(src, &vlc, &fsym)) {
245         av_log(c->avctx, AV_LOG_ERROR, "Cannot build Huffman codes\n");
246         return AVERROR_INVALIDDATA;
248     if (fsym >= 0) { // build_huff reported a symbol to fill slices with
/* single-symbol plane: fill each slice directly, no bitstream needed */
250         for (slice = 0; slice < c->slices; slice++) {
254             send = (height * (slice + 1) / c->slices) & cmask;
255             dest = dst + sstart * stride;
258             for (j = sstart; j < send; j++) {
259                 for (i = 0; i < width * step; i += step) {
/* normal path: VLC-decode each slice independently */
276     for (slice = 0; slice < c->slices; slice++) {
278         int slice_data_start, slice_data_end, slice_size;
281         send = (height * (slice + 1) / c->slices) & cmask;
282         dest = dst + sstart * stride;
284         // slice offset and size validation was done earlier
285         slice_data_start = slice ? AV_RL32(src + slice * 4 - 4) : 0;
286         slice_data_end   = AV_RL32(src + slice * 4);
287         slice_size       = slice_data_end - slice_data_start;
/* multi-symbol plane with an empty slice is malformed */
290             av_log(c->avctx, AV_LOG_ERROR, "Plane has more than one symbol "
291                    "yet a slice has a length of zero.\n");
/* byteswap payload into c->slice_bits; zero padding for safe over-reads */
295         memset(c->slice_bits + slice_size, 0, AV_INPUT_BUFFER_PADDING_SIZE);
296         c->bdsp.bswap_buf((uint32_t *) c->slice_bits,
297                           (uint32_t *)(src + slice_data_start + c->slices * 4),
298                           (slice_data_end - slice_data_start + 3) >> 2);
299         init_get_bits(&gb, c->slice_bits, slice_size * 8);
302         for (j = sstart; j < send; j++) {
303             int ws = width * step;
304             for (i = 0; i < ws; i += step) {
305                 pix = get_vlc2(&gb, vlc.table, VLC_BITS, 3);
/* negative pix = invalid code (error handling partially elided) */
307                     av_log(c->avctx, AV_LOG_ERROR, "Decoding error\n");
/* over-reading the slice bitstream is a hard error */
316             if (get_bits_left(&gb) < 0) {
317                 av_log(c->avctx, AV_LOG_ERROR,
318                        "Slice decoding ran out of bits\n");
/* leftover bits beyond one 32-bit word: warn but keep the slice */
323         if (get_bits_left(&gb) > 32)
324             av_log(c->avctx, AV_LOG_WARNING,
325                    "%d bits left after decoding slice\n", get_bits_left(&gb));
/* shared error exit (label elided): free the VLC and report bad data */
333     return AVERROR_INVALIDDATA;
340 static void restore_median_planar(UtvideoContext *c, uint8_t *src, ptrdiff_t stride,
341                                   int width, int height, int slices, int rmode)
/* Undo median prediction in place for a progressive 8-bit plane, one
 * slice at a time.  rmode feeds the row-alignment mask (cmask) used to
 * round slice boundaries.  Per slice: row 0 is left-predicted, row 1
 * starts from the top neighbour then switches to median, remaining rows
 * use add_median_pred().
 * NOTE(review): interior lines are elided in this excerpt. */
346     int slice_start, slice_height;
347     const int cmask = ~rmode;
349     for (slice = 0; slice < slices; slice++) {
/* proportional, cmask-aligned split of the plane into slice row ranges */
350         slice_start  = ((slice * height) / slices) & cmask;
351         slice_height = ((((slice + 1) * height) / slices) & cmask) -
356         bsrc = src + slice_start * stride;
358         // first line - left neighbour prediction
360         c->llviddsp.add_left_pred(bsrc, bsrc, width, 0);
/* a one-row slice has nothing further to restore */
362         if (slice_height <= 1)
364         // second line - first element has top prediction, the rest uses median
368         for (i = 1; i < width; i++) {
369             B        = bsrc[i - stride];
/* classic median predictor: med(A=left, B=top, A+B-C gradient) */
370             bsrc[i] += mid_pred(A, B, (uint8_t)(A + B - C));
375         // the rest of lines use continuous median prediction
376         for (j = 2; j < slice_height; j++) {
377             c->llviddsp.add_median_pred(bsrc, bsrc - stride,
378                                         bsrc, width, &A, &B);
384 /* UtVideo interlaced mode treats every two lines as a single one,
385 * so restoring function should take care of possible padding between
386 * two parts of the same "line".
388 static void restore_median_planar_il(UtvideoContext *c, uint8_t *src, ptrdiff_t stride,
389                                      int width, int height, int slices, int rmode)
/* Interlaced variant of restore_median_planar(): two consecutive field
 * rows are treated as one logical line, so prediction steps by stride2
 * (two rows) and each step restores a pair of rows.
 * NOTE(review): interior lines are elided in this excerpt. */
394     int slice_start, slice_height;
395     const int cmask   = ~(rmode ? 3 : 1);
396     const ptrdiff_t stride2 = stride << 1;
398     for (slice = 0; slice < slices; slice++) {
399         slice_start    = ((slice * height) / slices) & cmask;
400         slice_height   = ((((slice + 1) * height) / slices) & cmask) -
406         bsrc = src + slice_start * stride;
408         // first line - left neighbour prediction
/* restore both rows of the first logical line with left prediction;
 * the second row continues from the first row's last value A */
410         A = c->llviddsp.add_left_pred(bsrc, bsrc, width, 0);
411         c->llviddsp.add_left_pred(bsrc + stride, bsrc + stride, width, A);
413         if (slice_height <= 1)
415         // second line - first element has top prediction, the rest uses median
419         for (i = 1; i < width; i++) {
420             B        = bsrc[i - stride2];
421             bsrc[i] += mid_pred(A, B, (uint8_t)(A + B - C));
/* second row of the pair: median against the row directly above */
425         c->llviddsp.add_median_pred(bsrc + stride, bsrc - stride,
426                                     bsrc + stride, width, &A, &B);
428         // the rest of lines use continuous median prediction
429         for (j = 2; j < slice_height; j++) {
/* each iteration restores one logical line = two physical rows */
430             c->llviddsp.add_median_pred(bsrc, bsrc - stride2,
431                                         bsrc, width, &A, &B);
432             c->llviddsp.add_median_pred(bsrc + stride, bsrc - stride,
433                                         bsrc + stride, width, &A, &B);
439 static void restore_gradient_planar(UtvideoContext *c, uint8_t *src, ptrdiff_t stride,
440                                     int width, int height, int slices, int rmode)
/* Undo gradient (A + C - B) prediction in place for a progressive 8-bit
 * plane, slice by slice.  Row 0 is left-predicted; every later row uses
 * top (A), top-left (B) and left (C, elided line) neighbours.
 * NOTE(review): interior lines are elided in this excerpt. */
445     int slice_start, slice_height;
446     const int cmask = ~rmode;
448     for (slice = 0; slice < slices; slice++) {
449         slice_start  = ((slice * height) / slices) & cmask;
450         slice_height = ((((slice + 1) * height) / slices) & cmask) -
455         bsrc = src + slice_start * stride;
457         // first line - left neighbour prediction
459         c->llviddsp.add_left_pred(bsrc, bsrc, width, 0);
461         if (slice_height <= 1)
463         for (j = 1; j < slice_height; j++) {
464             // second line - first element has top prediction, the rest uses gradient
/* first pixel of the row: pure top prediction, result kept in 8 bits */
465             bsrc[0] = (bsrc[0] + bsrc[-stride]) & 0xFF;
466             for (i = 1; i < width; i++) {
467                 A = bsrc[i - stride];
468                 B = bsrc[i - (stride + 1)];
/* gradient reconstruction: residual + (A - B + C), wrapped to a byte */
470                 bsrc[i] = (A - B + C + bsrc[i]) & 0xFF;
477 static void restore_gradient_planar_il(UtvideoContext *c, uint8_t *src, ptrdiff_t stride,
478                                        int width, int height, int slices, int rmode)
/* Interlaced variant of restore_gradient_planar(): a logical line is a
 * pair of field rows, so the "top" neighbour of the first row of a pair
 * is stride2 (two rows) above, while the second row of the pair predicts
 * from the row immediately above it.
 * NOTE(review): interior lines are elided in this excerpt. */
483     int slice_start, slice_height;
484     const int cmask   = ~(rmode ? 3 : 1);
485     const ptrdiff_t stride2 = stride << 1;
487     for (slice = 0; slice < slices; slice++) {
488         slice_start    = ((slice * height) / slices) & cmask;
489         slice_height   = ((((slice + 1) * height) / slices) & cmask) -
495         bsrc = src + slice_start * stride;
497         // first line - left neighbour prediction
499         A = c->llviddsp.add_left_pred(bsrc, bsrc, width, 0);
500         c->llviddsp.add_left_pred(bsrc + stride, bsrc + stride, width, A);
502         if (slice_height <= 1)
504         for (j = 1; j < slice_height; j++) {
505             // second line - first element has top prediction, the rest uses gradient
506             bsrc[0] = (bsrc[0] + bsrc[-stride2]) & 0xFF;
507             for (i = 1; i < width; i++) {
508                 A = bsrc[i - stride2];
509                 B = bsrc[i - (stride2 + 1)];
511                 bsrc[i] = (A - B + C + bsrc[i]) & 0xFF;
/* second row of the pair: B is the last pixel of the previous logical
 * line's second row (offset folds -1 - stride2 + width back one row) */
514             B = bsrc[-(1 + stride + stride - width)];
516             bsrc[stride] = (A - B + C + bsrc[stride]) & 0xFF;
517             for (i = 1; i < width; i++) {
518                 A = bsrc[i - stride];
519                 B = bsrc[i - (1 + stride)];
520                 C = bsrc[i - 1 + stride];
521                 bsrc[i + stride] = (A - B + C + bsrc[i + stride]) & 0xFF;
528 static int decode_frame(AVCodecContext *avctx, void *data, int *got_frame,
/* Decode one Ut Video frame: validate the per-plane slice offset tables,
 * size the shared slice buffer, decode every plane for the negotiated
 * pixel format, undo median/gradient prediction, and (for RGB variants)
 * restore the plane order.  Returns buf_size on success (elided),
 * negative AVERROR on failure.
 * NOTE(review): many interior lines are elided in this excerpt; comments
 * cover only the code shown. */
531     const uint8_t *buf = avpkt->data;
532     int buf_size = avpkt->size;
533     UtvideoContext *c = avctx->priv_data;
535     const uint8_t *plane_start[5];
536     int plane_size, max_slice_size = 0, slice_start, slice_end, slice_size;
539     ThreadFrame frame = { .f = data };
541     if ((ret = ff_thread_get_buffer(avctx, &frame, 0)) < 0)
544     /* parse plane structure to get frame flags and validate slice offsets */
545     bytestream2_init(&gb, buf, buf_size);
547     if (bytestream2_get_bytes_left(&gb) < c->frame_info_size) {
548         av_log(avctx, AV_LOG_ERROR, "Not enough data for frame information\n");
549         return AVERROR_INVALIDDATA;
/* frame_info: slice count lives in bits 16-23 (stored minus one) */
551     c->frame_info = bytestream2_get_le32u(&gb);
552     c->slices = ((c->frame_info >> 16) & 0xff) + 1;
/* first layout branch (condition elided — presumably the 10-bit/QY path,
 * since each plane carries a 1024-byte length table; confirm upstream):
 * record each plane's start and validate its slice offset table */
553     for (i = 0; i < c->planes; i++) {
554         plane_start[i] = gb.buffer;
555         if (bytestream2_get_bytes_left(&gb) < 1024 + 4 * c->slices) {
556             av_log(avctx, AV_LOG_ERROR, "Insufficient data for a plane\n");
557             return AVERROR_INVALIDDATA;
/* slice offsets must be non-negative, monotonically increasing, and
 * within the remaining packet */
561         for (j = 0; j < c->slices; j++) {
562             slice_end = bytestream2_get_le32u(&gb);
563             if (slice_end < 0 || slice_end < slice_start ||
564                 bytestream2_get_bytes_left(&gb) < slice_end) {
565                 av_log(avctx, AV_LOG_ERROR, "Incorrect slice size\n");
566                 return AVERROR_INVALIDDATA;
568             slice_size  = slice_end - slice_start;
569             slice_start = slice_end;
570             max_slice_size = FFMAX(max_slice_size, slice_size);
572         plane_size = slice_end;
573         bytestream2_skipu(&gb, plane_size);
574         bytestream2_skipu(&gb, 1024);
576     plane_start[c->planes] = gb.buffer;
/* second layout branch (8-bit path: 256-byte Huffman length table per
 * plane) — same offset validation as above */
578     for (i = 0; i < c->planes; i++) {
579         plane_start[i] = gb.buffer;
580         if (bytestream2_get_bytes_left(&gb) < 256 + 4 * c->slices) {
581             av_log(avctx, AV_LOG_ERROR, "Insufficient data for a plane\n");
582             return AVERROR_INVALIDDATA;
584         bytestream2_skipu(&gb, 256);
587         for (j = 0; j < c->slices; j++) {
588             slice_end = bytestream2_get_le32u(&gb);
589             if (slice_end < 0 || slice_end < slice_start ||
590                 bytestream2_get_bytes_left(&gb) < slice_end) {
591                 av_log(avctx, AV_LOG_ERROR, "Incorrect slice size\n");
592                 return AVERROR_INVALIDDATA;
594             slice_size  = slice_end - slice_start;
595             slice_start = slice_end;
596             max_slice_size = FFMAX(max_slice_size, slice_size);
598         plane_size = slice_end;
599         bytestream2_skipu(&gb, plane_size);
601     plane_start[c->planes] = gb.buffer;
/* trailing frame-information word carries the prediction mode */
602     if (bytestream2_get_bytes_left(&gb) < c->frame_info_size) {
603         av_log(avctx, AV_LOG_ERROR, "Not enough data for frame information\n");
604         return AVERROR_INVALIDDATA;
606     c->frame_info = bytestream2_get_le32u(&gb);
608     av_log(avctx, AV_LOG_DEBUG, "frame information flags %"PRIX32"\n",
611     c->frame_pred = (c->frame_info >> 8) & 3;
/* headroom for the byteswap that rounds slice reads up to 4 bytes */
613     max_slice_size += 4*avctx->width;
615     av_fast_malloc(&c->slice_bits, &c->slice_bits_size,
616                    max_slice_size + AV_INPUT_BUFFER_PADDING_SIZE);
618     if (!c->slice_bits) {
619         av_log(avctx, AV_LOG_ERROR, "Cannot allocate temporary buffer\n");
620         return AVERROR(ENOMEM);
/* per-format decode: every plane is decoded, then the non-left
 * prediction modes are undone by the restore_* helpers */
623     switch (c->avctx->pix_fmt) {
624     case AV_PIX_FMT_GBRP:
625     case AV_PIX_FMT_GBRAP:
626         for (i = 0; i < c->planes; i++) {
627             ret = decode_plane(c, i, frame.f->data[i], 1,
628                                frame.f->linesize[i], avctx->width,
629                                avctx->height, plane_start[i],
630                                c->frame_pred == PRED_LEFT);
633             if (c->frame_pred == PRED_MEDIAN) {
634                 if (!c->interlaced) {
635                     restore_median_planar(c, frame.f->data[i],
636                                           frame.f->linesize[i], avctx->width,
637                                           avctx->height, c->slices, 0);
639                     restore_median_planar_il(c, frame.f->data[i],
640                                              frame.f->linesize[i],
641                                              avctx->width, avctx->height, c->slices,
644             } else if (c->frame_pred == PRED_GRADIENT) {
645                 if (!c->interlaced) {
646                     restore_gradient_planar(c, frame.f->data[i],
647                                             frame.f->linesize[i], avctx->width,
648                                             avctx->height, c->slices, 0);
650                     restore_gradient_planar_il(c, frame.f->data[i],
651                                                frame.f->linesize[i],
652                                                avctx->width, avctx->height, c->slices,
/* Ut Video stores RGB as residuals against G; rebuild true R/B planes */
657         c->utdsp.restore_rgb_planes(frame.f->data[2], frame.f->data[0], frame.f->data[1],
658                                     frame.f->linesize[2], frame.f->linesize[0], frame.f->linesize[1],
659                                     avctx->width, avctx->height);
661     case AV_PIX_FMT_GBRAP10:
662     case AV_PIX_FMT_GBRP10:
663         for (i = 0; i < c->planes; i++) {
/* 10-bit planes: stride in uint16_t units; the 1024-byte length table
 * sits immediately before the next plane's data */
664             ret = decode_plane10(c, i, (uint16_t *)frame.f->data[i], 1,
665                                  frame.f->linesize[i] / 2, avctx->width,
666                                  avctx->height, plane_start[i],
667                                  plane_start[i + 1] - 1024,
668                                  c->frame_pred == PRED_LEFT);
672         c->utdsp.restore_rgb_planes10((uint16_t *)frame.f->data[2], (uint16_t *)frame.f->data[0], (uint16_t *)frame.f->data[1],
673                                       frame.f->linesize[2] / 2, frame.f->linesize[0] / 2, frame.f->linesize[1] / 2,
674                                       avctx->width, avctx->height);
676     case AV_PIX_FMT_YUV420P:
/* chroma planes (i > 0) are half width and half height */
677         for (i = 0; i < 3; i++) {
678             ret = decode_plane(c, i, frame.f->data[i], 1, frame.f->linesize[i],
679                                avctx->width >> !!i, avctx->height >> !!i,
680                                plane_start[i], c->frame_pred == PRED_LEFT);
683             if (c->frame_pred == PRED_MEDIAN) {
684                 if (!c->interlaced) {
685                     restore_median_planar(c, frame.f->data[i], frame.f->linesize[i],
686                                           avctx->width >> !!i, avctx->height >> !!i,
689                     restore_median_planar_il(c, frame.f->data[i], frame.f->linesize[i],
691                                              avctx->height >> !!i,
694             } else if (c->frame_pred == PRED_GRADIENT) {
695                 if (!c->interlaced) {
696                     restore_gradient_planar(c, frame.f->data[i], frame.f->linesize[i],
697                                             avctx->width >> !!i, avctx->height >> !!i,
700                     restore_gradient_planar_il(c, frame.f->data[i], frame.f->linesize[i],
702                                                avctx->height >> !!i,
708     case AV_PIX_FMT_YUV422P:
/* chroma planes are half width, full height */
709         for (i = 0; i < 3; i++) {
710             ret = decode_plane(c, i, frame.f->data[i], 1, frame.f->linesize[i],
711                                avctx->width >> !!i, avctx->height,
712                                plane_start[i], c->frame_pred == PRED_LEFT);
715             if (c->frame_pred == PRED_MEDIAN) {
716                 if (!c->interlaced) {
717                     restore_median_planar(c, frame.f->data[i], frame.f->linesize[i],
718                                           avctx->width >> !!i, avctx->height,
721                     restore_median_planar_il(c, frame.f->data[i], frame.f->linesize[i],
722                                              avctx->width >> !!i, avctx->height,
725             } else if (c->frame_pred == PRED_GRADIENT) {
726                 if (!c->interlaced) {
727                     restore_gradient_planar(c, frame.f->data[i], frame.f->linesize[i],
728                                             avctx->width >> !!i, avctx->height,
731                     restore_gradient_planar_il(c, frame.f->data[i], frame.f->linesize[i],
732                                                avctx->width >> !!i, avctx->height,
738     case AV_PIX_FMT_YUV444P:
/* all three planes are full resolution */
739         for (i = 0; i < 3; i++) {
740             ret = decode_plane(c, i, frame.f->data[i], 1, frame.f->linesize[i],
741                                avctx->width, avctx->height,
742                                plane_start[i], c->frame_pred == PRED_LEFT);
745             if (c->frame_pred == PRED_MEDIAN) {
746                 if (!c->interlaced) {
747                     restore_median_planar(c, frame.f->data[i], frame.f->linesize[i],
748                                           avctx->width, avctx->height,
751                     restore_median_planar_il(c, frame.f->data[i], frame.f->linesize[i],
752                                              avctx->width, avctx->height,
755             } else if (c->frame_pred == PRED_GRADIENT) {
756                 if (!c->interlaced) {
757                     restore_gradient_planar(c, frame.f->data[i], frame.f->linesize[i],
758                                             avctx->width, avctx->height,
761                     restore_gradient_planar_il(c, frame.f->data[i], frame.f->linesize[i],
762                                                avctx->width, avctx->height,
768     case AV_PIX_FMT_YUV422P10:
769         for (i = 0; i < 3; i++) {
770             ret = decode_plane10(c, i, (uint16_t *)frame.f->data[i], 1, frame.f->linesize[i] / 2,
771                                  avctx->width >> !!i, avctx->height,
772                                  plane_start[i], plane_start[i + 1] - 1024, c->frame_pred == PRED_LEFT);
/* every Ut Video frame is an intra keyframe */
779     frame.f->key_frame = 1;
780     frame.f->pict_type = AV_PICTURE_TYPE_I;
781     frame.f->interlaced_frame = !!c->interlaced;
785     /* always report that the buffer was completely consumed */
789 static av_cold int decode_init(AVCodecContext *avctx)
/* One-time decoder setup: init DSP helpers, parse the 16-byte (or legacy
 * 8-byte) extradata for slice count / compression / interlacing flags,
 * and map the container FOURCC to a pixel format and color space.
 * Returns 0 on success (elided), negative AVERROR on bad extradata or an
 * unknown FOURCC.
 * NOTE(review): interior lines are elided in this excerpt. */
791     UtvideoContext * const c = avctx->priv_data;
795     ff_utvideodsp_init(&c->utdsp);
796     ff_bswapdsp_init(&c->bdsp);
797     ff_llviddsp_init(&c->llviddsp);
799     if (avctx->extradata_size >= 16) {
/* extradata bytes 0-3: encoder version (stored little-endian) */
800         av_log(avctx, AV_LOG_DEBUG, "Encoder version %d.%d.%d.%d\n",
801                avctx->extradata[3], avctx->extradata[2],
802                avctx->extradata[1], avctx->extradata[0]);
803         av_log(avctx, AV_LOG_DEBUG, "Original format %"PRIX32"\n",
804                AV_RB32(avctx->extradata + 4));
805         c->frame_info_size = AV_RL32(avctx->extradata + 8);
806         c->flags           = AV_RL32(avctx->extradata + 12);
/* only 4-byte frame info is implemented; anything else is unsupported */
808         if (c->frame_info_size != 4)
809             avpriv_request_sample(avctx, "Frame info not 4 bytes");
810         av_log(avctx, AV_LOG_DEBUG, "Encoding parameters %08"PRIX32"\n", c->flags);
/* flags layout: bits 24-31 = slice count - 1, bit 0 = compression,
 * bit 11 (0x800) = interlaced */
811         c->slices      = (c->flags >> 24) + 1;
812         c->compression = c->flags & 1;
813         c->interlaced  = c->flags & 0x800;
814     } else if (avctx->extradata_size == 8) {
/* legacy 8-byte extradata: version + original format only; defaults
 * for the remaining fields are set in elided lines */
815         av_log(avctx, AV_LOG_DEBUG, "Encoder version %d.%d.%d.%d\n",
816                avctx->extradata[3], avctx->extradata[2],
817                avctx->extradata[1], avctx->extradata[0]);
818         av_log(avctx, AV_LOG_DEBUG, "Original format %"PRIX32"\n",
819                AV_RB32(avctx->extradata + 4));
822         c->frame_info_size = 4;
824         av_log(avctx, AV_LOG_ERROR,
825                "Insufficient extradata size %d, should be at least 16\n",
826                avctx->extradata_size);
827         return AVERROR_INVALIDDATA;
/* force (re)allocation of the slice buffer on the first frame */
830     c->slice_bits_size = 0;
/* map FOURCC -> pixel format; UL?? tags are 8-bit, UQ?? tags 10-bit;
 * the trailing letter selects subsampling, 'H' variants use BT.709 */
832     switch (avctx->codec_tag) {
833     case MKTAG('U', 'L', 'R', 'G'):
835         avctx->pix_fmt = AV_PIX_FMT_GBRP;
837     case MKTAG('U', 'L', 'R', 'A'):
839         avctx->pix_fmt = AV_PIX_FMT_GBRAP;
841     case MKTAG('U', 'L', 'Y', '0'):
843         avctx->pix_fmt = AV_PIX_FMT_YUV420P;
844         avctx->colorspace = AVCOL_SPC_BT470BG;
846     case MKTAG('U', 'L', 'Y', '2'):
848         avctx->pix_fmt = AV_PIX_FMT_YUV422P;
849         avctx->colorspace = AVCOL_SPC_BT470BG;
851     case MKTAG('U', 'L', 'Y', '4'):
853         avctx->pix_fmt = AV_PIX_FMT_YUV444P;
854         avctx->colorspace = AVCOL_SPC_BT470BG;
856     case MKTAG('U', 'Q', 'Y', '2'):
858         avctx->pix_fmt = AV_PIX_FMT_YUV422P10;
860     case MKTAG('U', 'Q', 'R', 'G'):
862         avctx->pix_fmt = AV_PIX_FMT_GBRP10;
864     case MKTAG('U', 'Q', 'R', 'A'):
866         avctx->pix_fmt = AV_PIX_FMT_GBRAP10;
868     case MKTAG('U', 'L', 'H', '0'):
870         avctx->pix_fmt = AV_PIX_FMT_YUV420P;
871         avctx->colorspace = AVCOL_SPC_BT709;
873     case MKTAG('U', 'L', 'H', '2'):
875         avctx->pix_fmt = AV_PIX_FMT_YUV422P;
876         avctx->colorspace = AVCOL_SPC_BT709;
878     case MKTAG('U', 'L', 'H', '4'):
880         avctx->pix_fmt = AV_PIX_FMT_YUV444P;
881         avctx->colorspace = AVCOL_SPC_BT709;
884         av_log(avctx, AV_LOG_ERROR, "Unknown Ut Video FOURCC provided (%08X)\n",
886         return AVERROR_INVALIDDATA;
892 static av_cold int decode_end(AVCodecContext *avctx)
/* Decoder teardown: release the shared slice bitstream buffer allocated
 * lazily by decode_frame(). */
894     UtvideoContext * const c = avctx->priv_data;
896     av_freep(&c->slice_bits);
901 AVCodec ff_utvideo_decoder = {
903 .long_name = NULL_IF_CONFIG_SMALL("Ut Video"),
904 .type = AVMEDIA_TYPE_VIDEO,
905 .id = AV_CODEC_ID_UTVIDEO,
906 .priv_data_size = sizeof(UtvideoContext),
909 .decode = decode_frame,
910 .capabilities = AV_CODEC_CAP_DR1 | AV_CODEC_CAP_FRAME_THREADS,
911 .caps_internal = FF_CODEC_CAP_INIT_THREADSAFE,