3 * Copyright (c) 2011 Konstantin Shishkov
5 * This file is part of FFmpeg.
7 * FFmpeg is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU Lesser General Public
9 * License as published by the Free Software Foundation; either
10 * version 2.1 of the License, or (at your option) any later version.
12 * FFmpeg is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * Lesser General Public License for more details.
17 * You should have received a copy of the GNU Lesser General Public
18 * License along with FFmpeg; if not, write to the Free Software
19 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
30 #define UNCHECKED_BITSTREAM_READER 1
32 #include "libavutil/intreadwrite.h"
33 #include "libavutil/pixdesc.h"
36 #include "bytestream.h"
/*
 * Build a VLC decoding table from 1024 per-symbol Huffman code lengths
 * (10-bit plane variant).
 *
 * src:  1024 code lengths, one byte per symbol; length 255 marks an
 *       unused symbol.
 * vlc:  output table, filled via ff_init_vlc_sparse().
 * fsym: NOTE(review): excerpt is incomplete — presumably set to a single
 *       "fill symbol" when the plane uses only one code (callers check
 *       fsym >= 0); confirm against the full file.
 *
 * Returns 0 on success, a negative AVERROR code on failure.
 */
static int build_huff10(const uint8_t *src, VLC *vlc, int *fsym)
    /* read all 1024 symbol code lengths from the bitstream header */
    for (i = 0; i < 1024; i++) {
    /* order symbols by code length so canonical codes can be assigned */
    qsort(he, 1024, sizeof(*he), ff_ut10_huff_cmp_len);
    /* drop trailing unused symbols (len == 255) from the table */
    while (he[last].len == 255 && last)
    /* codes longer than 32 bits cannot fit the 32-bit code accumulator */
    if (he[last].len > 32) {
    /* assign canonical codes MSB-first, from longest code to shortest */
    for (i = last; i >= 0; i--) {
        codes[i] = code >> (32 - he[i].len);
        /* advance the running code by one step at this code length */
        code += 0x80000000u >> (he[i].len - 1);
    return ff_init_vlc_sparse(vlc, VLC_BITS, last + 1,
                              bits, sizeof(*bits), sizeof(*bits),
                              codes, sizeof(*codes), sizeof(*codes),
                              syms, sizeof(*syms), sizeof(*syms), 0);
/*
 * Build a VLC decoding table from 256 per-symbol Huffman code lengths
 * (8-bit plane variant; mirrors build_huff10 with a 256-entry alphabet).
 *
 * src:  256 code lengths, one byte per symbol; length 255 marks an
 *       unused symbol.
 * vlc:  output table, filled via ff_init_vlc_sparse().
 * fsym: NOTE(review): excerpt is incomplete — presumably set to a single
 *       "fill symbol" when the plane uses only one code; confirm.
 *
 * Returns 0 on success, a negative AVERROR code on failure.
 */
static int build_huff(const uint8_t *src, VLC *vlc, int *fsym)
    /* read all 256 symbol code lengths */
    for (i = 0; i < 256; i++) {
    /* order symbols by code length for canonical code assignment */
    qsort(he, 256, sizeof(*he), ff_ut_huff_cmp_len);
    /* drop trailing unused symbols (len == 255) */
    while (he[last].len == 255 && last)
    /* codes longer than 32 bits cannot be represented */
    if (he[last].len > 32)
    /* assign canonical codes MSB-first, longest code to shortest */
    for (i = last; i >= 0; i--) {
        codes[i] = code >> (32 - he[i].len);
        /* advance the running code by one step at this code length */
        code += 0x80000000u >> (he[i].len - 1);
    return ff_init_vlc_sparse(vlc, VLC_BITS, last + 1,
                              bits, sizeof(*bits), sizeof(*bits),
                              codes, sizeof(*codes), sizeof(*codes),
                              syms, sizeof(*syms), sizeof(*syms), 0);
/*
 * Decode one 10-bit plane: build the Huffman table from the plane header,
 * then either fill the plane with a single symbol (degenerate case) or
 * VLC-decode every slice.
 *
 * c:        decoder context (slice count, scratch buffer, DSP contexts).
 * plane_no: index of the plane being decoded.
 * dst:      destination plane, uint16_t samples; stride is in elements.
 * src:      slice offset table followed by slice data.
 * huff:     1024-byte Huffman length table for this plane.
 *
 * Returns 0 on success, negative AVERROR on failure.
 * NOTE(review): excerpt is incomplete — variable declarations for gb,
 * fsym, sstart, send, dest and several closing branches are not visible.
 */
static int decode_plane10(UtvideoContext *c, int plane_no,
                          uint16_t *dst, ptrdiff_t stride,
                          int width, int height,
                          const uint8_t *src, const uint8_t *huff,
    int i, j, slice, pix, ret;
    if ((ret = build_huff10(huff, &vlc, &fsym)) < 0) {
        av_log(c->avctx, AV_LOG_ERROR, "Cannot build Huffman codes\n");
    if (fsym >= 0) { // build_huff reported a symbol to fill slices with
        /* degenerate plane: every sample is the same symbol, no bitstream */
        for (slice = 0; slice < c->slices; slice++) {
            /* slice covers rows [sstart, send) of the plane */
            send = (height * (slice + 1) / c->slices);
            dest = dst + sstart * stride;
            for (j = sstart; j < send; j++) {
                for (i = 0; i < width; i++) {
    /* normal path: VLC-decode each slice independently */
    for (slice = 0; slice < c->slices; slice++) {
        int slice_data_start, slice_data_end, slice_size;
        send = (height * (slice + 1) / c->slices);
        dest = dst + sstart * stride;
        // slice offset and size validation was done earlier
        /* offsets are cumulative little-endian 32-bit values in src */
        slice_data_start = slice ? AV_RL32(src + slice * 4 - 4) : 0;
        slice_data_end = AV_RL32(src + slice * 4);
        slice_size = slice_data_end - slice_data_start;
            av_log(c->avctx, AV_LOG_ERROR, "Plane has more than one symbol "
                   "yet a slice has a length of zero.\n");
        /* zero the padding so the bitstream reader never sees stale bytes */
        memset(c->slice_bits + slice_size, 0, AV_INPUT_BUFFER_PADDING_SIZE);
        /* byte-swap slice data into the scratch buffer (stream is stored
         * in 32-bit big-endian words); +3 >> 2 rounds up to whole words */
        c->bdsp.bswap_buf((uint32_t *) c->slice_bits,
                          (uint32_t *)(src + slice_data_start + c->slices * 4),
                          (slice_data_end - slice_data_start + 3) >> 2);
        init_get_bits(&gb, c->slice_bits, slice_size * 8);
        for (j = sstart; j < send; j++) {
            for (i = 0; i < width; i++) {
                pix = get_vlc2(&gb, vlc.table, VLC_BITS, 3);
                    av_log(c->avctx, AV_LOG_ERROR, "Decoding error\n");
        /* overread means the slice was truncated or corrupt */
        if (get_bits_left(&gb) < 0) {
            av_log(c->avctx, AV_LOG_ERROR,
                   "Slice decoding ran out of bits\n");
        /* more than one spare word left is suspicious but not fatal */
        if (get_bits_left(&gb) > 32)
            av_log(c->avctx, AV_LOG_WARNING,
                   "%d bits left after decoding slice\n", get_bits_left(&gb));
    return AVERROR_INVALIDDATA;
/*
 * Compute the row mask used to align slice boundaries.
 *
 * For the luma plane of YUV420P the mask is ~3 (boundaries on multiples
 * of 4 rows, matching the 2x chroma subsampling); otherwise ~1 (even
 * rows). NOTE(review): the `interlaced` handling is not visible in this
 * excerpt — confirm its branch against the full file.
 */
static int compute_cmask(int plane_no, int interlaced, enum AVPixelFormat pix_fmt)
    const int is_luma = (pix_fmt == AV_PIX_FMT_YUV420P) && !plane_no;
    /* is_luma ? ~3 : ~1 */
    return ~(1 + 2 * is_luma);
/*
 * Decode one 8-bit plane. Two coding modes are visible in this excerpt:
 * the "packed" mode (per-slice control + packed bit streams, parsed with
 * little-endian bit reads) and the regular Huffman mode (same slice
 * layout as decode_plane10 but with a 256-symbol table).
 *
 * dst/stride are in bytes; use_pred selects left prediction during
 * decoding. Returns 0 on success, negative AVERROR on failure.
 * NOTE(review): excerpt is incomplete — declarations (gb, vlc, fsym,
 * sstart, send, dest, p, ret) and the branch selecting packed vs Huffman
 * mode are not visible; comments below are hedged accordingly.
 */
static int decode_plane(UtvideoContext *c, int plane_no,
                        uint8_t *dst, ptrdiff_t stride,
                        int width, int height,
                        const uint8_t *src, int use_pred)
    int i, j, slice, pix;
    /* row mask aligning slice boundaries to the chroma subsampling */
    const int cmask = compute_cmask(plane_no, c->interlaced, c->avctx->pix_fmt);
    /* packed mode: each slice has a control stream (3-bit group headers)
     * and a packed sample stream, both read LSB-first */
    for (slice = 0; slice < c->slices; slice++) {
        GetBitContext cbit, pbit;
        ret = init_get_bits8(&cbit, c->control_stream[plane_no][slice], c->control_stream_size[plane_no][slice]);
        ret = init_get_bits8(&pbit, c->packed_stream[plane_no][slice], c->packed_stream_size[plane_no][slice]);
        send = (height * (slice + 1) / c->slices) & cmask;
        dest = dst + sstart * stride;
        /* each 8-pixel group needs a 3-bit header; reject short streams */
        if (3 * ((dst + send * stride - dest + 7)/8) > get_bits_left(&cbit))
            return AVERROR_INVALIDDATA;
        /* process the slice in groups of 8 samples */
        for (p = dest; p < dst + send * stride; p += 8) {
            int bits = get_bits_le(&cbit, 3);
            /* sub is the sign bit of a (bits+1)-wide sample, used below
             * for sign extension of the packed values */
            uint32_t sub = 0x80 >> (8 - (bits + 1)), add;
            /* 8 samples of (bits+1) bits each must be available */
            if ((bits + 1) * 8 > get_bits_left(&pbit))
                return AVERROR_INVALIDDATA;
            for (k = 0; k < 8; k++) {
                p[k] = get_bits_le(&pbit, bits + 1);
                /* sign-extension correction for the narrow sample */
                add = (~p[k] & sub) << (8 - bits);
    /* Huffman mode: build the 256-symbol table from the plane header */
    if (build_huff(src, &vlc, &fsym)) {
        av_log(c->avctx, AV_LOG_ERROR, "Cannot build Huffman codes\n");
        return AVERROR_INVALIDDATA;
    if (fsym >= 0) { // build_huff reported a symbol to fill slices with
        /* degenerate plane: fill every slice with the single symbol */
        for (slice = 0; slice < c->slices; slice++) {
            send = (height * (slice + 1) / c->slices) & cmask;
            dest = dst + sstart * stride;
            for (j = sstart; j < send; j++) {
                for (i = 0; i < width; i++) {
    /* normal path: VLC-decode each slice independently */
    for (slice = 0; slice < c->slices; slice++) {
        int slice_data_start, slice_data_end, slice_size;
        send = (height * (slice + 1) / c->slices) & cmask;
        dest = dst + sstart * stride;
        // slice offset and size validation was done earlier
        /* offsets are cumulative little-endian 32-bit values in src */
        slice_data_start = slice ? AV_RL32(src + slice * 4 - 4) : 0;
        slice_data_end = AV_RL32(src + slice * 4);
        slice_size = slice_data_end - slice_data_start;
            av_log(c->avctx, AV_LOG_ERROR, "Plane has more than one symbol "
                   "yet a slice has a length of zero.\n");
        /* zero the padding so the bitstream reader never sees stale bytes */
        memset(c->slice_bits + slice_size, 0, AV_INPUT_BUFFER_PADDING_SIZE);
        /* byte-swap the 32-bit big-endian slice words into scratch */
        c->bdsp.bswap_buf((uint32_t *) c->slice_bits,
                          (uint32_t *)(src + slice_data_start + c->slices * 4),
                          (slice_data_end - slice_data_start + 3) >> 2);
        init_get_bits(&gb, c->slice_bits, slice_size * 8);
        for (j = sstart; j < send; j++) {
            for (i = 0; i < width; i++) {
                pix = get_vlc2(&gb, vlc.table, VLC_BITS, 3);
                    av_log(c->avctx, AV_LOG_ERROR, "Decoding error\n");
        /* overread means the slice was truncated or corrupt */
        if (get_bits_left(&gb) < 0) {
            av_log(c->avctx, AV_LOG_ERROR,
                   "Slice decoding ran out of bits\n");
        /* more than one spare word left is suspicious but not fatal */
        if (get_bits_left(&gb) > 32)
            av_log(c->avctx, AV_LOG_WARNING,
                   "%d bits left after decoding slice\n", get_bits_left(&gb));
    return AVERROR_INVALIDDATA;
/*
 * Undo median prediction on a progressive planar plane, slice by slice.
 *
 * Row 0 of each slice uses left prediction; row 1 uses top prediction for
 * the first pixel then median prediction; remaining rows use continuous
 * median prediction. rmode controls slice-boundary rounding via cmask.
 * NOTE(review): excerpt is incomplete — declarations of i, j, slice, A,
 * B, C, bsrc and some loop bodies are not visible.
 */
static void restore_median_planar(UtvideoContext *c, uint8_t *src, ptrdiff_t stride,
                                  int width, int height, int slices, int rmode)
    int slice_start, slice_height;
    const int cmask = ~rmode;
    for (slice = 0; slice < slices; slice++) {
        /* slice bounds rounded down to the cmask alignment */
        slice_start = ((slice * height) / slices) & cmask;
        slice_height = ((((slice + 1) * height) / slices) & cmask) -
        bsrc = src + slice_start * stride;
        // first line - left neighbour prediction
        c->llviddsp.add_left_pred(bsrc, bsrc, width, 0);
        if (slice_height <= 1)
        // second line - first element has top prediction, the rest uses median
        for (i = 1; i < FFMIN(width, 16); i++) { /* scalar loop (DSP need align 16) */
            B = bsrc[i - stride];
            bsrc[i] += mid_pred(A, B, (uint8_t)(A + B - C));
        /* DSP handles the aligned remainder of the row */
        c->llviddsp.add_median_pred(bsrc + 16, bsrc - stride + 16,
                                    bsrc + 16, width - 16, &A, &B);
        // the rest of lines use continuous median prediction
        for (j = 2; j < slice_height; j++) {
            c->llviddsp.add_median_pred(bsrc, bsrc - stride,
                                        bsrc, width, &A, &B);
/* UtVideo interlaced mode treats every two lines as a single one,
 * so restoring function should take care of possible padding between
 * two parts of the same "line". */
/*
 * Interlaced counterpart of restore_median_planar(): each field pair is
 * predicted as one logical line, so vertical neighbours are stride2
 * (= 2 * stride) apart.
 * NOTE(review): excerpt is incomplete — declarations of i, j, slice, A,
 * B, C, bsrc and some loop bodies are not visible.
 */
static void restore_median_planar_il(UtvideoContext *c, uint8_t *src, ptrdiff_t stride,
                                     int width, int height, int slices, int rmode)
    int slice_start, slice_height;
    const int cmask = ~(rmode ? 3 : 1);
    /* vertical step spanning a full field pair */
    const ptrdiff_t stride2 = stride << 1;
    for (slice = 0; slice < slices; slice++) {
        slice_start = ((slice * height) / slices) & cmask;
        slice_height = ((((slice + 1) * height) / slices) & cmask) -
        bsrc = src + slice_start * stride;
        // first line - left neighbour prediction
        A = c->llviddsp.add_left_pred(bsrc, bsrc, width, 0);
        /* second field row continues left prediction from A */
        c->llviddsp.add_left_pred(bsrc + stride, bsrc + stride, width, A);
        if (slice_height <= 1)
        // second line - first element has top prediction, the rest uses median
        for (i = 1; i < FFMIN(width, 16); i++) { /* scalar loop (DSP need align 16) */
            B = bsrc[i - stride2];
            bsrc[i] += mid_pred(A, B, (uint8_t)(A + B - C));
        /* DSP handles the aligned remainder; top neighbour is stride2 up */
        c->llviddsp.add_median_pred(bsrc + 16, bsrc - stride2 + 16,
                                    bsrc + 16, width - 16, &A, &B);
        c->llviddsp.add_median_pred(bsrc + stride, bsrc - stride,
                                    bsrc + stride, width, &A, &B);
        // the rest of lines use continuous median prediction
        for (j = 2; j < slice_height; j++) {
            c->llviddsp.add_median_pred(bsrc, bsrc - stride2,
                                        bsrc, width, &A, &B);
            c->llviddsp.add_median_pred(bsrc + stride, bsrc - stride,
                                        bsrc + stride, width, &A, &B);
/*
 * Undo gradient prediction on a progressive planar plane, slice by slice.
 *
 * Row 0 of each slice uses left prediction; every later row predicts each
 * pixel as A - B + C (top, top-left, left neighbours). The first
 * min(width, 32) pixels per row are done scalar; the DSP routine handles
 * the 32-aligned remainder.
 * NOTE(review): excerpt is incomplete — declarations of i, j, slice, A,
 * B, C, bsrc and the C assignment in the scalar loop are not visible.
 */
static void restore_gradient_planar(UtvideoContext *c, uint8_t *src, ptrdiff_t stride,
                                    int width, int height, int slices, int rmode)
    int slice_start, slice_height;
    const int cmask = ~rmode;
    int min_width = FFMIN(width, 32);
    for (slice = 0; slice < slices; slice++) {
        slice_start = ((slice * height) / slices) & cmask;
        slice_height = ((((slice + 1) * height) / slices) & cmask) -
        bsrc = src + slice_start * stride;
        // first line - left neighbour prediction
        c->llviddsp.add_left_pred(bsrc, bsrc, width, 0);
        if (slice_height <= 1)
        for (j = 1; j < slice_height; j++) {
            // second line - first element has top prediction, the rest uses gradient
            bsrc[0] = (bsrc[0] + bsrc[-stride]) & 0xFF;
            for (i = 1; i < min_width; i++) { /* dsp need align 32 */
                A = bsrc[i - stride];
                B = bsrc[i - (stride + 1)];
                /* gradient prediction: pred = A - B + C */
                bsrc[i] = (A - B + C + bsrc[i]) & 0xFF;
            c->llviddsp.add_gradient_pred(bsrc + 32, stride, width - 32);
/*
 * Interlaced counterpart of restore_gradient_planar(): field pairs are
 * treated as one logical line, so the "top" neighbour of the first field
 * row is stride2 (= 2 * stride) up, while the second field row wraps its
 * left neighbour from the end of the first field row.
 * NOTE(review): excerpt is incomplete — declarations of i, j, slice, A,
 * B, C, bsrc and parts of the second-field handling are not visible.
 */
static void restore_gradient_planar_il(UtvideoContext *c, uint8_t *src, ptrdiff_t stride,
                                       int width, int height, int slices, int rmode)
    int slice_start, slice_height;
    const int cmask = ~(rmode ? 3 : 1);
    /* vertical step spanning a full field pair */
    const ptrdiff_t stride2 = stride << 1;
    int min_width = FFMIN(width, 32);
    for (slice = 0; slice < slices; slice++) {
        slice_start = ((slice * height) / slices) & cmask;
        slice_height = ((((slice + 1) * height) / slices) & cmask) -
        bsrc = src + slice_start * stride;
        // first line - left neighbour prediction
        A = c->llviddsp.add_left_pred(bsrc, bsrc, width, 0);
        c->llviddsp.add_left_pred(bsrc + stride, bsrc + stride, width, A);
        if (slice_height <= 1)
        for (j = 1; j < slice_height; j++) {
            // second line - first element has top prediction, the rest uses gradient
            bsrc[0] = (bsrc[0] + bsrc[-stride2]) & 0xFF;
            for (i = 1; i < min_width; i++) { /* dsp need align 32 */
                A = bsrc[i - stride2];
                B = bsrc[i - (stride2 + 1)];
                /* gradient prediction: pred = A - B + C */
                bsrc[i] = (A - B + C + bsrc[i]) & 0xFF;
            c->llviddsp.add_gradient_pred(bsrc + 32, stride2, width - 32);
            /* second field row: top-left neighbour is the last pixel of
             * the logical line's first half */
            B = bsrc[-(1 + stride + stride - width)];
            bsrc[stride] = (A - B + C + bsrc[stride]) & 0xFF;
            for (i = 1; i < width; i++) {
                A = bsrc[i - stride];
                B = bsrc[i - (1 + stride)];
                C = bsrc[i - 1 + stride];
                bsrc[i + stride] = (A - B + C + bsrc[i + stride]) & 0xFF;
/*
 * Decode one Ut Video frame packet.
 *
 * Parses the frame header and per-plane slice tables (two layouts are
 * visible: a "pack" layout with separate packed/control streams, and the
 * classic layout with per-plane Huffman tables), allocates the slice
 * scratch buffer, then decodes and post-predicts every plane according
 * to the pixel format.
 *
 * Returns the number of consumed bytes (the whole packet) on success,
 * a negative AVERROR code on failure.
 * NOTE(review): excerpt is incomplete — declarations (ret, i, j, gb, pb,
 * nb_cbs, left), the branch selecting the pack layout, and several error
 * paths/break statements are not visible; comments are hedged.
 */
static int decode_frame(AVCodecContext *avctx, void *data, int *got_frame,
    const uint8_t *buf = avpkt->data;
    int buf_size = avpkt->size;
    UtvideoContext *c = avctx->priv_data;
    const uint8_t *plane_start[5];
    int plane_size, max_slice_size = 0, slice_start, slice_end, slice_size;
    ThreadFrame frame = { .f = data };
    if ((ret = ff_thread_get_buffer(avctx, &frame, 0)) < 0)
    /* parse plane structure to get frame flags and validate slice offsets */
    bytestream2_init(&gb, buf, buf_size);
        const uint8_t *packed_stream;
        const uint8_t *control_stream;
        /* pack layout implies gradient prediction */
        c->frame_info = PRED_GRADIENT << 8;
        if (bytestream2_get_byte(&gb) != 1)
            return AVERROR_INVALIDDATA;
        bytestream2_skip(&gb, 3);
        c->offset = bytestream2_get_le32(&gb);
        /* offset must leave room for the 8-byte header + size tables */
        if (buf_size <= c->offset + 8LL)
            return AVERROR_INVALIDDATA;
        bytestream2_init(&pb, buf + 8 + c->offset, buf_size - 8 - c->offset);
        /* total size of all control streams */
        nb_cbs = bytestream2_get_le32(&pb);
        if (nb_cbs > c->offset)
            return AVERROR_INVALIDDATA;
        packed_stream = buf + 8;
        control_stream = packed_stream + (c->offset - nb_cbs);
        left = control_stream - packed_stream;
        /* record per-plane, per-slice packed stream pointers and sizes,
         * validating that they fit in the packed region */
        for (i = 0; i < c->planes; i++) {
            for (j = 0; j < c->slices; j++) {
                c->packed_stream[i][j] = packed_stream;
                c->packed_stream_size[i][j] = bytestream2_get_le32(&pb);
                if (c->packed_stream_size[i][j] > left)
                    return AVERROR_INVALIDDATA;
                left -= c->packed_stream_size[i][j];
                packed_stream += c->packed_stream_size[i][j];
        left = buf + buf_size - control_stream;
        /* same bookkeeping for the control streams */
        for (i = 0; i < c->planes; i++) {
            for (j = 0; j < c->slices; j++) {
                c->control_stream[i][j] = control_stream;
                c->control_stream_size[i][j] = bytestream2_get_le32(&pb);
                if (c->control_stream_size[i][j] > left)
                    return AVERROR_INVALIDDATA;
                left -= c->control_stream_size[i][j];
                control_stream += c->control_stream_size[i][j];
    /* classic layout, 10-bit variant: frame info, then per-plane
     * 1024-byte Huffman table + slice offset table + slice data */
    if (bytestream2_get_bytes_left(&gb) < c->frame_info_size) {
        av_log(avctx, AV_LOG_ERROR, "Not enough data for frame information\n");
        return AVERROR_INVALIDDATA;
    c->frame_info = bytestream2_get_le32u(&gb);
    c->slices = ((c->frame_info >> 16) & 0xff) + 1;
    for (i = 0; i < c->planes; i++) {
        plane_start[i] = gb.buffer;
        if (bytestream2_get_bytes_left(&gb) < 1024 + 4 * c->slices) {
            av_log(avctx, AV_LOG_ERROR, "Insufficient data for a plane\n");
            return AVERROR_INVALIDDATA;
        /* slice offsets are cumulative; each must be in range and
         * non-decreasing */
        for (j = 0; j < c->slices; j++) {
            slice_end = bytestream2_get_le32u(&gb);
            if (slice_end < 0 || slice_end < slice_start ||
                bytestream2_get_bytes_left(&gb) < slice_end + 1024LL) {
                av_log(avctx, AV_LOG_ERROR, "Incorrect slice size\n");
                return AVERROR_INVALIDDATA;
            slice_size = slice_end - slice_start;
            slice_start = slice_end;
            max_slice_size = FFMAX(max_slice_size, slice_size);
        plane_size = slice_end;
        bytestream2_skipu(&gb, plane_size);
        /* skip the 1024-byte Huffman table trailing the plane */
        bytestream2_skipu(&gb, 1024);
    plane_start[c->planes] = gb.buffer;
    /* classic layout, 8-bit variant: 256-byte Huffman table precedes the
     * slice offset table */
    for (i = 0; i < c->planes; i++) {
        plane_start[i] = gb.buffer;
        if (bytestream2_get_bytes_left(&gb) < 256 + 4 * c->slices) {
            av_log(avctx, AV_LOG_ERROR, "Insufficient data for a plane\n");
            return AVERROR_INVALIDDATA;
        bytestream2_skipu(&gb, 256);
        for (j = 0; j < c->slices; j++) {
            slice_end = bytestream2_get_le32u(&gb);
            if (slice_end < 0 || slice_end < slice_start ||
                bytestream2_get_bytes_left(&gb) < slice_end) {
                av_log(avctx, AV_LOG_ERROR, "Incorrect slice size\n");
                return AVERROR_INVALIDDATA;
            slice_size = slice_end - slice_start;
            slice_start = slice_end;
            max_slice_size = FFMAX(max_slice_size, slice_size);
        plane_size = slice_end;
        bytestream2_skipu(&gb, plane_size);
    plane_start[c->planes] = gb.buffer;
    if (bytestream2_get_bytes_left(&gb) < c->frame_info_size) {
        av_log(avctx, AV_LOG_ERROR, "Not enough data for frame information\n");
        return AVERROR_INVALIDDATA;
    c->frame_info = bytestream2_get_le32u(&gb);
    av_log(avctx, AV_LOG_DEBUG, "frame information flags %"PRIX32"\n",
    c->frame_pred = (c->frame_info >> 8) & 3;
    /* extra slack for the scratch buffer */
    max_slice_size += 4*avctx->width;
    /* allocate the byte-swap scratch buffer (reused across frames) */
    av_fast_malloc(&c->slice_bits, &c->slice_bits_size,
                   max_slice_size + AV_INPUT_BUFFER_PADDING_SIZE);
    if (!c->slice_bits) {
        av_log(avctx, AV_LOG_ERROR, "Cannot allocate temporary buffer\n");
        return AVERROR(ENOMEM);
    /* decode planes and undo prediction per pixel format */
    switch (c->avctx->pix_fmt) {
    case AV_PIX_FMT_GBRP:
    case AV_PIX_FMT_GBRAP:
        for (i = 0; i < c->planes; i++) {
            ret = decode_plane(c, i, frame.f->data[i],
                               frame.f->linesize[i], avctx->width,
                               avctx->height, plane_start[i],
                               c->frame_pred == PRED_LEFT);
            if (c->frame_pred == PRED_MEDIAN) {
                if (!c->interlaced) {
                    restore_median_planar(c, frame.f->data[i],
                                          frame.f->linesize[i], avctx->width,
                                          avctx->height, c->slices, 0);
                    restore_median_planar_il(c, frame.f->data[i],
                                             frame.f->linesize[i],
                                             avctx->width, avctx->height, c->slices,
            } else if (c->frame_pred == PRED_GRADIENT) {
                if (!c->interlaced) {
                    restore_gradient_planar(c, frame.f->data[i],
                                            frame.f->linesize[i], avctx->width,
                                            avctx->height, c->slices, 0);
                    restore_gradient_planar_il(c, frame.f->data[i],
                                               frame.f->linesize[i],
                                               avctx->width, avctx->height, c->slices,
        /* convert G/B/R deltas back to absolute RGB planes */
        c->utdsp.restore_rgb_planes(frame.f->data[2], frame.f->data[0], frame.f->data[1],
                                    frame.f->linesize[2], frame.f->linesize[0], frame.f->linesize[1],
                                    avctx->width, avctx->height);
    case AV_PIX_FMT_GBRAP10:
    case AV_PIX_FMT_GBRP10:
        for (i = 0; i < c->planes; i++) {
            /* Huffman table sits in the last 1024 bytes of each plane */
            ret = decode_plane10(c, i, (uint16_t *)frame.f->data[i],
                                 frame.f->linesize[i] / 2, avctx->width,
                                 avctx->height, plane_start[i],
                                 plane_start[i + 1] - 1024,
                                 c->frame_pred == PRED_LEFT);
        c->utdsp.restore_rgb_planes10((uint16_t *)frame.f->data[2], (uint16_t *)frame.f->data[0], (uint16_t *)frame.f->data[1],
                                      frame.f->linesize[2] / 2, frame.f->linesize[0] / 2, frame.f->linesize[1] / 2,
                                      avctx->width, avctx->height);
    case AV_PIX_FMT_YUV420P:
        /* chroma planes (i > 0) are half width and half height */
        for (i = 0; i < 3; i++) {
            ret = decode_plane(c, i, frame.f->data[i], frame.f->linesize[i],
                               avctx->width >> !!i, avctx->height >> !!i,
                               plane_start[i], c->frame_pred == PRED_LEFT);
            if (c->frame_pred == PRED_MEDIAN) {
                if (!c->interlaced) {
                    restore_median_planar(c, frame.f->data[i], frame.f->linesize[i],
                                          avctx->width >> !!i, avctx->height >> !!i,
                    restore_median_planar_il(c, frame.f->data[i], frame.f->linesize[i],
                                             avctx->height >> !!i,
            } else if (c->frame_pred == PRED_GRADIENT) {
                if (!c->interlaced) {
                    restore_gradient_planar(c, frame.f->data[i], frame.f->linesize[i],
                                            avctx->width >> !!i, avctx->height >> !!i,
                    restore_gradient_planar_il(c, frame.f->data[i], frame.f->linesize[i],
                                               avctx->height >> !!i,
    case AV_PIX_FMT_YUV422P:
        /* chroma planes are half width, full height */
        for (i = 0; i < 3; i++) {
            ret = decode_plane(c, i, frame.f->data[i], frame.f->linesize[i],
                               avctx->width >> !!i, avctx->height,
                               plane_start[i], c->frame_pred == PRED_LEFT);
            if (c->frame_pred == PRED_MEDIAN) {
                if (!c->interlaced) {
                    restore_median_planar(c, frame.f->data[i], frame.f->linesize[i],
                                          avctx->width >> !!i, avctx->height,
                    restore_median_planar_il(c, frame.f->data[i], frame.f->linesize[i],
                                             avctx->width >> !!i, avctx->height,
            } else if (c->frame_pred == PRED_GRADIENT) {
                if (!c->interlaced) {
                    restore_gradient_planar(c, frame.f->data[i], frame.f->linesize[i],
                                            avctx->width >> !!i, avctx->height,
                    restore_gradient_planar_il(c, frame.f->data[i], frame.f->linesize[i],
                                               avctx->width >> !!i, avctx->height,
    case AV_PIX_FMT_YUV444P:
        /* all planes full size */
        for (i = 0; i < 3; i++) {
            ret = decode_plane(c, i, frame.f->data[i], frame.f->linesize[i],
                               avctx->width, avctx->height,
                               plane_start[i], c->frame_pred == PRED_LEFT);
            if (c->frame_pred == PRED_MEDIAN) {
                if (!c->interlaced) {
                    restore_median_planar(c, frame.f->data[i], frame.f->linesize[i],
                                          avctx->width, avctx->height,
                    restore_median_planar_il(c, frame.f->data[i], frame.f->linesize[i],
                                             avctx->width, avctx->height,
            } else if (c->frame_pred == PRED_GRADIENT) {
                if (!c->interlaced) {
                    restore_gradient_planar(c, frame.f->data[i], frame.f->linesize[i],
                                            avctx->width, avctx->height,
                    restore_gradient_planar_il(c, frame.f->data[i], frame.f->linesize[i],
                                               avctx->width, avctx->height,
    case AV_PIX_FMT_YUV422P10:
        for (i = 0; i < 3; i++) {
            ret = decode_plane10(c, i, (uint16_t *)frame.f->data[i], frame.f->linesize[i] / 2,
                                 avctx->width >> !!i, avctx->height,
                                 plane_start[i], plane_start[i + 1] - 1024, c->frame_pred == PRED_LEFT);
    /* every frame is an intra keyframe */
    frame.f->key_frame = 1;
    frame.f->pict_type = AV_PICTURE_TYPE_I;
    frame.f->interlaced_frame = !!c->interlaced;
    /* always report that the buffer was completely consumed */
/*
 * Decoder init: set up DSP contexts, map the FOURCC to a pixel format
 * and colorspace, reject unsupported odd dimensions, and parse extradata
 * (encoder version, frame info size, flags, slice count, interlacing).
 *
 * Returns 0 on success, a negative AVERROR code on failure.
 * NOTE(review): excerpt is incomplete — per-case `c->planes`/`c->pack`
 * assignments, `break` statements and the final `return 0` are not
 * visible; comments are hedged accordingly.
 */
static av_cold int decode_init(AVCodecContext *avctx)
    UtvideoContext * const c = avctx->priv_data;
    int h_shift, v_shift;
    ff_utvideodsp_init(&c->utdsp);
    ff_bswapdsp_init(&c->bdsp);
    ff_llviddsp_init(&c->llviddsp);
    c->slice_bits_size = 0;
    /* map the container FOURCC to pixel format and colorspace:
     * UL?? = classic 8-bit, UQ?? = 10-bit, UM?? = packed ("pro") variants;
     * ...Y0/Y2/Y4 select 420/422/444, H? variants use BT.709 */
    switch (avctx->codec_tag) {
    case MKTAG('U', 'L', 'R', 'G'):
        avctx->pix_fmt = AV_PIX_FMT_GBRP;
    case MKTAG('U', 'L', 'R', 'A'):
        avctx->pix_fmt = AV_PIX_FMT_GBRAP;
    case MKTAG('U', 'L', 'Y', '0'):
        avctx->pix_fmt = AV_PIX_FMT_YUV420P;
        avctx->colorspace = AVCOL_SPC_BT470BG;
    case MKTAG('U', 'L', 'Y', '2'):
        avctx->pix_fmt = AV_PIX_FMT_YUV422P;
        avctx->colorspace = AVCOL_SPC_BT470BG;
    case MKTAG('U', 'L', 'Y', '4'):
        avctx->pix_fmt = AV_PIX_FMT_YUV444P;
        avctx->colorspace = AVCOL_SPC_BT470BG;
    case MKTAG('U', 'Q', 'Y', '2'):
        avctx->pix_fmt = AV_PIX_FMT_YUV422P10;
    case MKTAG('U', 'Q', 'R', 'G'):
        avctx->pix_fmt = AV_PIX_FMT_GBRP10;
    case MKTAG('U', 'Q', 'R', 'A'):
        avctx->pix_fmt = AV_PIX_FMT_GBRAP10;
    case MKTAG('U', 'L', 'H', '0'):
        avctx->pix_fmt = AV_PIX_FMT_YUV420P;
        avctx->colorspace = AVCOL_SPC_BT709;
    case MKTAG('U', 'L', 'H', '2'):
        avctx->pix_fmt = AV_PIX_FMT_YUV422P;
        avctx->colorspace = AVCOL_SPC_BT709;
    case MKTAG('U', 'L', 'H', '4'):
        avctx->pix_fmt = AV_PIX_FMT_YUV444P;
        avctx->colorspace = AVCOL_SPC_BT709;
    case MKTAG('U', 'M', 'Y', '2'):
        avctx->pix_fmt = AV_PIX_FMT_YUV422P;
        avctx->colorspace = AVCOL_SPC_BT470BG;
    case MKTAG('U', 'M', 'H', '2'):
        avctx->pix_fmt = AV_PIX_FMT_YUV422P;
        avctx->colorspace = AVCOL_SPC_BT709;
    case MKTAG('U', 'M', 'Y', '4'):
        avctx->pix_fmt = AV_PIX_FMT_YUV444P;
        avctx->colorspace = AVCOL_SPC_BT470BG;
    case MKTAG('U', 'M', 'H', '4'):
        avctx->pix_fmt = AV_PIX_FMT_YUV444P;
        avctx->colorspace = AVCOL_SPC_BT709;
    case MKTAG('U', 'M', 'R', 'G'):
        avctx->pix_fmt = AV_PIX_FMT_GBRP;
    case MKTAG('U', 'M', 'R', 'A'):
        avctx->pix_fmt = AV_PIX_FMT_GBRAP;
        av_log(avctx, AV_LOG_ERROR, "Unknown Ut Video FOURCC provided (%08X)\n",
        return AVERROR_INVALIDDATA;
    /* subsampled formats require dimensions divisible by the subsampling */
    av_pix_fmt_get_chroma_sub_sample(avctx->pix_fmt, &h_shift, &v_shift);
    if ((avctx->width & ((1<<h_shift)-1)) ||
        (avctx->height & ((1<<v_shift)-1))) {
        avpriv_request_sample(avctx, "Odd dimensions");
        return AVERROR_PATCHWELCOME;
    /* pack variant: compression type and slice count come from extradata */
    if (c->pack && avctx->extradata_size >= 16) {
        av_log(avctx, AV_LOG_DEBUG, "Encoder version %d.%d.%d.%d\n",
               avctx->extradata[3], avctx->extradata[2],
               avctx->extradata[1], avctx->extradata[0]);
        av_log(avctx, AV_LOG_DEBUG, "Original format %"PRIX32"\n",
               AV_RB32(avctx->extradata + 4));
        c->compression = avctx->extradata[8];
        if (c->compression != 2)
            avpriv_request_sample(avctx, "Unknown compression type");
        c->slices = avctx->extradata[9] + 1;
    /* classic variant: frame info size and flags word */
    } else if (avctx->extradata_size >= 16) {
        av_log(avctx, AV_LOG_DEBUG, "Encoder version %d.%d.%d.%d\n",
               avctx->extradata[3], avctx->extradata[2],
               avctx->extradata[1], avctx->extradata[0]);
        av_log(avctx, AV_LOG_DEBUG, "Original format %"PRIX32"\n",
               AV_RB32(avctx->extradata + 4));
        c->frame_info_size = AV_RL32(avctx->extradata + 8);
        c->flags = AV_RL32(avctx->extradata + 12);
        if (c->frame_info_size != 4)
            avpriv_request_sample(avctx, "Frame info not 4 bytes");
        av_log(avctx, AV_LOG_DEBUG, "Encoding parameters %08"PRIX32"\n", c->flags);
        /* flags: bits 31..24 = slices-1, bit 11 = interlaced, bit 0 = compression */
        c->slices = (c->flags >> 24) + 1;
        c->compression = c->flags & 1;
        c->interlaced = c->flags & 0x800;
    /* minimal 8-byte extradata: version + original format only */
    } else if (avctx->extradata_size == 8) {
        av_log(avctx, AV_LOG_DEBUG, "Encoder version %d.%d.%d.%d\n",
               avctx->extradata[3], avctx->extradata[2],
               avctx->extradata[1], avctx->extradata[0]);
        av_log(avctx, AV_LOG_DEBUG, "Original format %"PRIX32"\n",
               AV_RB32(avctx->extradata + 4));
        c->frame_info_size = 4;
        av_log(avctx, AV_LOG_ERROR,
               "Insufficient extradata size %d, should be at least 16\n",
               avctx->extradata_size);
        return AVERROR_INVALIDDATA;
/*
 * Decoder teardown: release the per-frame slice scratch buffer.
 * av_freep() also NULLs the pointer, making repeated calls safe.
 */
static av_cold int decode_end(AVCodecContext *avctx)
    UtvideoContext * const c = avctx->priv_data;
    av_freep(&c->slice_bits);
1077 AVCodec ff_utvideo_decoder = {
1079 .long_name = NULL_IF_CONFIG_SMALL("Ut Video"),
1080 .type = AVMEDIA_TYPE_VIDEO,
1081 .id = AV_CODEC_ID_UTVIDEO,
1082 .priv_data_size = sizeof(UtvideoContext),
1083 .init = decode_init,
1084 .close = decode_end,
1085 .decode = decode_frame,
1086 .capabilities = AV_CODEC_CAP_DR1 | AV_CODEC_CAP_FRAME_THREADS,
1087 .caps_internal = FF_CODEC_CAP_INIT_THREADSAFE,