3 * Copyright (c) 2011 Konstantin Shishkov
5 * This file is part of FFmpeg.
7 * FFmpeg is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU Lesser General Public
9 * License as published by the Free Software Foundation; either
10 * version 2.1 of the License, or (at your option) any later version.
12 * FFmpeg is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * Lesser General Public License for more details.
17 * You should have received a copy of the GNU Lesser General Public
18 * License along with FFmpeg; if not, write to the Free Software
19 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
30 #define CACHED_BITSTREAM_READER !ARCH_X86_32
31 #define UNCHECKED_BITSTREAM_READER 1
33 #include "libavutil/intreadwrite.h"
34 #include "libavutil/pixdesc.h"
37 #include "bytestream.h"
/* Build a VLC decoding table from the per-symbol code-length table in src.
 * Entries are sorted by code length (ff_ut10_huff_cmp_len), trailing
 * entries with length 255 are treated as unused, and canonical codes are
 * assigned starting from the longest code.  *fsym presumably receives a
 * single "fill" symbol when the plane uses only one code — TODO(review):
 * confirm; the assignment is not visible in this excerpt. */
43 static int build_huff(const uint8_t *src, VLC *vlc, int *fsym, unsigned nb_elems)
54     for (i = 0; i < nb_elems; i++) {
58     qsort(he, nb_elems, sizeof(*he), ff_ut10_huff_cmp_len);
       /* drop trailing unused symbols (length 255), but keep at least one */
66     while (he[last].len == 255 && last)
       /* codes longer than 32 bits cannot be represented — reject */
69     if (he[last].len > 32) {
       /* canonical code assignment, longest codes first; `code` accumulates
        * in the top bits of a 32-bit word */
74     for (i = last; i >= 0; i--) {
75         codes[i] = code >> (32 - he[i].len);
78         code += 0x80000000u >> (he[i].len - 1);
81     return ff_init_vlc_sparse(vlc, VLC_BITS, last + 1,
82                               bits, sizeof(*bits), sizeof(*bits),
83                               codes, sizeof(*codes), sizeof(*codes),
84                               syms, sizeof(*syms), sizeof(*syms), 0);
/* Decode one 10-bit plane.
 * huff points at the 1024-entry code-length table for this plane; src
 * points at the slice-offset table (c->slices little-endian 32-bit end
 * offsets) followed by the bitstream data.  If build_huff() reports a
 * single fill symbol (fsym >= 0), slices are filled without reading any
 * bitstream; otherwise each slice is byte-swapped into c->slice_bits and
 * decoded pixel-by-pixel with get_vlc2().
 * Returns 0 on success — presumably; the success return is not visible in
 * this excerpt — or a negative AVERROR code. */
87 static int decode_plane10(UtvideoContext *c, int plane_no,
88                           uint16_t *dst, ptrdiff_t stride,
89                           int width, int height,
90                           const uint8_t *src, const uint8_t *huff,
93     int i, j, slice, pix, ret;
99     if ((ret = build_huff(huff, &vlc, &fsym, 1024)) < 0) {
100         av_log(c->avctx, AV_LOG_ERROR, "Cannot build Huffman codes\n");
103     if (fsym >= 0) { // build_huff reported a symbol to fill slices with
105         for (slice = 0; slice < c->slices; slice++) {
               /* slice rows: [height*slice/slices, height*(slice+1)/slices) */
109             send = (height * (slice + 1) / c->slices);
110             dest = dst + sstart * stride;
113             for (j = sstart; j < send; j++) {
114                 for (i = 0; i < width; i++) {
130     for (slice = 0; slice < c->slices; slice++) {
132         int slice_data_start, slice_data_end, slice_size;
135         send = (height * (slice + 1) / c->slices);
136         dest = dst + sstart * stride;
138         // slice offset and size validation was done earlier
139         slice_data_start = slice ? AV_RL32(src + slice * 4 - 4) : 0;
140         slice_data_end = AV_RL32(src + slice * 4);
141         slice_size = slice_data_end - slice_data_start;
144             av_log(c->avctx, AV_LOG_ERROR, "Plane has more than one symbol "
145                    "yet a slice has a length of zero.\n");
            /* zero the padding so the bit reader never sees stale bytes,
             * then byte-swap the slice payload into the scratch buffer */
149         memset(c->slice_bits + slice_size, 0, AV_INPUT_BUFFER_PADDING_SIZE);
150         c->bdsp.bswap_buf((uint32_t *) c->slice_bits,
151                           (uint32_t *)(src + slice_data_start + c->slices * 4),
152                           (slice_data_end - slice_data_start + 3) >> 2);
153         init_get_bits(&gb, c->slice_bits, slice_size * 8);
156         for (j = sstart; j < send; j++) {
157             for (i = 0; i < width; i++) {
158                 pix = get_vlc2(&gb, vlc.table, VLC_BITS, 3);
160                     av_log(c->avctx, AV_LOG_ERROR, "Decoding error\n");
            /* overread of the (unchecked) bit reader means corrupt input */
171         if (get_bits_left(&gb) < 0) {
172             av_log(c->avctx, AV_LOG_ERROR,
173                    "Slice decoding ran out of bits\n");
177         if (get_bits_left(&gb) > 32)
178             av_log(c->avctx, AV_LOG_WARNING,
179                    "%d bits left after decoding slice\n", get_bits_left(&gb));
187     return AVERROR_INVALIDDATA;
/* Compute the row-alignment mask applied to slice boundaries.
 * is_luma is set only for the luma plane of 4:2:0 content, which needs
 * coarser alignment than chroma.  NOTE(review): the visible return handles
 * one branch; the `interlaced` parameter is presumably tested on a line
 * not shown in this excerpt — confirm before relying on this mask. */
190 static int compute_cmask(int plane_no, int interlaced, enum AVPixelFormat pix_fmt)
192     const int is_luma = (pix_fmt == AV_PIX_FMT_YUV420P) && !plane_no;
195     return ~(1 + 2 * is_luma);
/* Decode one 8-bit plane.
 * Two modes are visible: a packed mode driven by per-slice control/packed
 * bitstreams (read little-endian; a 3-bit control code per group of 8
 * pixels selects the packed sample width), and a Huffman mode analogous
 * to decode_plane10() with a 256-entry code table.  use_pred selects
 * left prediction — presumably applied where `prev` accumulates; the
 * prediction lines are partially outside this excerpt.
 * Returns 0 on success (not visible here) or a negative AVERROR code. */
200 static int decode_plane(UtvideoContext *c, int plane_no,
201                         uint8_t *dst, ptrdiff_t stride,
202                         int width, int height,
203                         const uint8_t *src, int use_pred)
205     int i, j, slice, pix;
210     const int cmask = compute_cmask(plane_no, c->interlaced, c->avctx->pix_fmt);
        /* packed ("pro"/pack) path: separate control and packed streams
         * were validated and recorded per plane/slice in decode_frame() */
214         for (slice = 0; slice < c->slices; slice++) {
215             GetBitContext cbit, pbit;
218             ret = init_get_bits8_le(&cbit, c->control_stream[plane_no][slice], c->control_stream_size[plane_no][slice]);
222             ret = init_get_bits8_le(&pbit, c->packed_stream[plane_no][slice], c->packed_stream_size[plane_no][slice]);
227             send = (height * (slice + 1) / c->slices) & cmask;
228             dest = dst + sstart * stride;
                /* 3 control bits are consumed per 8 output pixels; bail out
                 * early if the control stream cannot cover the slice */
230             if (3 * ((dst + send * stride - dest + 7)/8) > get_bits_left(&cbit))
231                 return AVERROR_INVALIDDATA;
233             for (p = dest; p < dst + send * stride; p += 8) {
234                 int bits = get_bits_le(&cbit, 3);
                        /* sub/add implement sign handling of the (bits+1)-bit
                         * packed samples — TODO(review): confirm exact
                         * semantics, surrounding lines are not visible */
239                     uint32_t sub = 0x80 >> (8 - (bits + 1)), add;
242                     if ((bits + 1) * 8 > get_bits_left(&pbit))
243                         return AVERROR_INVALIDDATA;
245                     for (k = 0; k < 8; k++) {
247                         p[k] = get_bits_le(&pbit, bits + 1);
248                         add = (~p[k] & sub) << (8 - bits);
        /* Huffman path: 256-symbol table read from the plane header */
259     if (build_huff(src, &vlc, &fsym, 256)) {
260         av_log(c->avctx, AV_LOG_ERROR, "Cannot build Huffman codes\n");
261         return AVERROR_INVALIDDATA;
263     if (fsym >= 0) { // build_huff reported a symbol to fill slices with
265         for (slice = 0; slice < c->slices; slice++) {
269             send = (height * (slice + 1) / c->slices) & cmask;
270             dest = dst + sstart * stride;
273             for (j = sstart; j < send; j++) {
274                 for (i = 0; i < width; i++) {
                        /* unsigned add avoids signed-overflow UB in the
                         * left-prediction accumulator */
277                     prev += (unsigned)pix;
291     for (slice = 0; slice < c->slices; slice++) {
293         int slice_data_start, slice_data_end, slice_size;
296         send = (height * (slice + 1) / c->slices) & cmask;
297         dest = dst + sstart * stride;
299         // slice offset and size validation was done earlier
300         slice_data_start = slice ? AV_RL32(src + slice * 4 - 4) : 0;
301         slice_data_end = AV_RL32(src + slice * 4);
302         slice_size = slice_data_end - slice_data_start;
305             av_log(c->avctx, AV_LOG_ERROR, "Plane has more than one symbol "
306                    "yet a slice has a length of zero.\n");
        /* clear padding, byte-swap the slice into the scratch buffer, then
         * decode with the VLC exactly as in decode_plane10() */
310         memset(c->slice_bits + slice_size, 0, AV_INPUT_BUFFER_PADDING_SIZE);
311         c->bdsp.bswap_buf((uint32_t *) c->slice_bits,
312                           (uint32_t *)(src + slice_data_start + c->slices * 4),
313                           (slice_data_end - slice_data_start + 3) >> 2);
314         init_get_bits(&gb, c->slice_bits, slice_size * 8);
317         for (j = sstart; j < send; j++) {
318             for (i = 0; i < width; i++) {
319                 pix = get_vlc2(&gb, vlc.table, VLC_BITS, 3);
321                     av_log(c->avctx, AV_LOG_ERROR, "Decoding error\n");
330         if (get_bits_left(&gb) < 0) {
331             av_log(c->avctx, AV_LOG_ERROR,
332                    "Slice decoding ran out of bits\n");
337         if (get_bits_left(&gb) > 32)
338             av_log(c->avctx, AV_LOG_WARNING,
339                    "%d bits left after decoding slice\n", get_bits_left(&gb));
347     return AVERROR_INVALIDDATA;
/* Undo median prediction on a progressive planar buffer, slice by slice.
 * Row 0 of each slice uses left prediction; row 1 starts with top
 * prediction then median (scalar for the first up-to-16 pixels, DSP for
 * the aligned remainder); all later rows use the DSP median predictor
 * carrying A/B state across calls.  rmode folds into cmask to align
 * slice boundaries. */
354 static void restore_median_planar(UtvideoContext *c, uint8_t *src, ptrdiff_t stride,
355                                   int width, int height, int slices, int rmode)
360     int slice_start, slice_height;
361     const int cmask = ~rmode;
363     for (slice = 0; slice < slices; slice++) {
364         slice_start = ((slice * height) / slices) & cmask;
365         slice_height = ((((slice + 1) * height) / slices) & cmask) -
370         bsrc = src + slice_start * stride;
372         // first line - left neighbour prediction
374         c->llviddsp.add_left_pred(bsrc, bsrc, width, 0);
376         if (slice_height <= 1)
378         // second line - first element has top prediction, the rest uses median
382         for (i = 1; i < FFMIN(width, 16); i++) { /* scalar loop (DSP need align 16) */
383             B = bsrc[i - stride];
384             bsrc[i] += mid_pred(A, B, (uint8_t)(A + B - C));
389             c->llviddsp.add_median_pred(bsrc + 16, bsrc - stride + 16,
390                                         bsrc + 16, width - 16, &A, &B);
393         // the rest of lines use continuous median prediction
394         for (j = 2; j < slice_height; j++) {
395             c->llviddsp.add_median_pred(bsrc, bsrc - stride,
396                                         bsrc, width, &A, &B);
402 /* UtVideo interlaced mode treats every two lines as a single one,
403  * so the restoring function should take care of possible padding between
404  * two parts of the same "line". */
/* Interlaced variant of restore_median_planar(): each field pair is
 * treated as one logical line, so prediction steps by stride2 (two rows)
 * and the second row of each pair is restored with its own DSP call.
 * rmode selects a coarser alignment mask (3 vs 1). */
406 static void restore_median_planar_il(UtvideoContext *c, uint8_t *src, ptrdiff_t stride,
407                                      int width, int height, int slices, int rmode)
412     int slice_start, slice_height;
413     const int cmask = ~(rmode ? 3 : 1);
414     const ptrdiff_t stride2 = stride << 1;
416     for (slice = 0; slice < slices; slice++) {
417         slice_start = ((slice * height) / slices) & cmask;
418         slice_height = ((((slice + 1) * height) / slices) & cmask) -
424         bsrc = src + slice_start * stride;
426         // first line - left neighbour prediction
            /* left prediction continues from the first row into the second */
428         A = c->llviddsp.add_left_pred(bsrc, bsrc, width, 0);
429         c->llviddsp.add_left_pred(bsrc + stride, bsrc + stride, width, A);
431         if (slice_height <= 1)
433         // second line - first element has top prediction, the rest uses median
437         for (i = 1; i < FFMIN(width, 16); i++) { /* scalar loop (DSP need align 16) */
438             B = bsrc[i - stride2];
439             bsrc[i] += mid_pred(A, B, (uint8_t)(A + B - C));
444             c->llviddsp.add_median_pred(bsrc + 16, bsrc - stride2 + 16,
445                                         bsrc + 16, width - 16, &A, &B);
447         c->llviddsp.add_median_pred(bsrc + stride, bsrc - stride,
448                                     bsrc + stride, width, &A, &B);
450         // the rest of lines use continuous median prediction
451         for (j = 2; j < slice_height; j++) {
452             c->llviddsp.add_median_pred(bsrc, bsrc - stride2,
453                                         bsrc, width, &A, &B);
454             c->llviddsp.add_median_pred(bsrc + stride, bsrc - stride,
455                                         bsrc + stride, width, &A, &B);
/* Undo gradient (Paeth-like A - B + C) prediction on a progressive planar
 * buffer.  Row 0 of each slice uses left prediction; every later row
 * predicts each pixel from top (A), top-left (B) and left (C), scalar for
 * the first up-to-32 pixels and via add_gradient_pred() for the aligned
 * remainder.  All arithmetic is masked to 8 bits. */
461 static void restore_gradient_planar(UtvideoContext *c, uint8_t *src, ptrdiff_t stride,
462                                     int width, int height, int slices, int rmode)
467     int slice_start, slice_height;
468     const int cmask = ~rmode;
469     int min_width = FFMIN(width, 32);
471     for (slice = 0; slice < slices; slice++) {
472         slice_start = ((slice * height) / slices) & cmask;
473         slice_height = ((((slice + 1) * height) / slices) & cmask) -
478         bsrc = src + slice_start * stride;
480         // first line - left neighbour prediction
482         c->llviddsp.add_left_pred(bsrc, bsrc, width, 0);
484         if (slice_height <= 1)
486         for (j = 1; j < slice_height; j++) {
487             // second line - first element has top prediction, the rest uses gradient
488             bsrc[0] = (bsrc[0] + bsrc[-stride]) & 0xFF;
489             for (i = 1; i < min_width; i++) { /* dsp need align 32 */
490                 A = bsrc[i - stride];
491                 B = bsrc[i - (stride + 1)];
493                 bsrc[i] = (A - B + C + bsrc[i]) & 0xFF;
496             c->llviddsp.add_gradient_pred(bsrc + 32, stride, width - 32);
/* Interlaced variant of restore_gradient_planar(): fields are interleaved,
 * so the "previous line" of the first row of a pair lives at -stride2 and
 * the second row of each pair is restored scalar across the full width.
 * The odd-looking -(1 + stride + stride - width) index reaches the last
 * pixel of the previous logical line across the field boundary. */
502 static void restore_gradient_planar_il(UtvideoContext *c, uint8_t *src, ptrdiff_t stride,
503                                       int width, int height, int slices, int rmode)
508     int slice_start, slice_height;
509     const int cmask = ~(rmode ? 3 : 1);
510     const ptrdiff_t stride2 = stride << 1;
511     int min_width = FFMIN(width, 32);
513     for (slice = 0; slice < slices; slice++) {
514         slice_start = ((slice * height) / slices) & cmask;
515         slice_height = ((((slice + 1) * height) / slices) & cmask) -
521         bsrc = src + slice_start * stride;
523         // first line - left neighbour prediction
525         A = c->llviddsp.add_left_pred(bsrc, bsrc, width, 0);
526         c->llviddsp.add_left_pred(bsrc + stride, bsrc + stride, width, A);
528         if (slice_height <= 1)
530         for (j = 1; j < slice_height; j++) {
531             // second line - first element has top prediction, the rest uses gradient
532             bsrc[0] = (bsrc[0] + bsrc[-stride2]) & 0xFF;
533             for (i = 1; i < min_width; i++) { /* dsp need align 32 */
534                 A = bsrc[i - stride2];
535                 B = bsrc[i - (stride2 + 1)];
537                 bsrc[i] = (A - B + C + bsrc[i]) & 0xFF;
540             c->llviddsp.add_gradient_pred(bsrc + 32, stride2, width - 32);
                /* second row of the pair: gradient across the field boundary */
543             B = bsrc[-(1 + stride + stride - width)];
545             bsrc[stride] = (A - B + C + bsrc[stride]) & 0xFF;
546             for (i = 1; i < width; i++) {
547                 A = bsrc[i - stride];
548                 B = bsrc[i - (1 + stride)];
549                 C = bsrc[i - 1 + stride];
550                 bsrc[i + stride] = (A - B + C + bsrc[i + stride]) & 0xFF;
/* Top-level decode callback.
 * Parses the frame header, validating every plane's slice-offset table
 * before any decoding, then dispatches per pixel format: decode_plane()
 * for 8-bit, decode_plane10() for 10-bit, followed by the prediction
 * restore pass (median/gradient, progressive/interlaced) and, for RGB
 * variants, the G/B/R plane untangling.  Two header layouts are visible:
 * a "pack" layout with separate packed/control streams and the classic
 * layout with per-plane Huffman tables. */
557 static int decode_frame(AVCodecContext *avctx, void *data, int *got_frame,
560     const uint8_t *buf = avpkt->data;
561     int buf_size = avpkt->size;
562     UtvideoContext *c = avctx->priv_data;
564     const uint8_t *plane_start[5];
565     int plane_size, max_slice_size = 0, slice_start, slice_end, slice_size;
568     ThreadFrame frame = { .f = data };
570     if ((ret = ff_thread_get_buffer(avctx, &frame, 0)) < 0)
573     /* parse plane structure to get frame flags and validate slice offsets */
574     bytestream2_init(&gb, buf, buf_size);
        /* packed ("pack") layout: header -> offset -> packed + control data */
577         const uint8_t *packed_stream;
578         const uint8_t *control_stream;
583         c->frame_info = PRED_GRADIENT << 8;
585         if (bytestream2_get_byte(&gb) != 1)
586             return AVERROR_INVALIDDATA;
587         bytestream2_skip(&gb, 3);
588         c->offset = bytestream2_get_le32(&gb);
            /* 8LL keeps the comparison in 64 bits so a huge offset cannot
             * wrap and pass the check */
590         if (buf_size <= c->offset + 8LL)
591             return AVERROR_INVALIDDATA;
593         bytestream2_init(&pb, buf + 8 + c->offset, buf_size - 8 - c->offset);
595         nb_cbs = bytestream2_get_le32(&pb);
596         if (nb_cbs > c->offset)
597             return AVERROR_INVALIDDATA;
599         packed_stream = buf + 8;
600         control_stream = packed_stream + (c->offset - nb_cbs);
601         left = control_stream - packed_stream;
            /* record and bound-check every per-plane/per-slice packed stream */
603         for (i = 0; i < c->planes; i++) {
604             for (j = 0; j < c->slices; j++) {
605                 c->packed_stream[i][j] = packed_stream;
606                 c->packed_stream_size[i][j] = bytestream2_get_le32(&pb);
607                 if (c->packed_stream_size[i][j] > left)
608                     return AVERROR_INVALIDDATA;
609                 left -= c->packed_stream_size[i][j];
610                 packed_stream += c->packed_stream_size[i][j];
614         left = buf + buf_size - control_stream;
            /* same for the control streams */
616         for (i = 0; i < c->planes; i++) {
617             for (j = 0; j < c->slices; j++) {
618                 c->control_stream[i][j] = control_stream;
619                 c->control_stream_size[i][j] = bytestream2_get_le32(&pb);
620                 if (c->control_stream_size[i][j] > left)
621                     return AVERROR_INVALIDDATA;
622                 left -= c->control_stream_size[i][j];
623                 control_stream += c->control_stream_size[i][j];
        /* 10-bit ("pro") layout: frame info, then per-plane 1024-byte
         * Huffman length table + slice offsets */
627         if (bytestream2_get_bytes_left(&gb) < c->frame_info_size) {
628             av_log(avctx, AV_LOG_ERROR, "Not enough data for frame information\n");
629             return AVERROR_INVALIDDATA;
631         c->frame_info = bytestream2_get_le32u(&gb);
632         c->slices = ((c->frame_info >> 16) & 0xff) + 1;
633         for (i = 0; i < c->planes; i++) {
634             plane_start[i] = gb.buffer;
635             if (bytestream2_get_bytes_left(&gb) < 1024 + 4 * c->slices) {
636                 av_log(avctx, AV_LOG_ERROR, "Insufficient data for a plane\n");
637                 return AVERROR_INVALIDDATA;
                /* slice end offsets must be monotonically non-decreasing and
                 * inside the remaining buffer (1024LL avoids int overflow) */
641             for (j = 0; j < c->slices; j++) {
642                 slice_end = bytestream2_get_le32u(&gb);
643                 if (slice_end < 0 || slice_end < slice_start ||
644                     bytestream2_get_bytes_left(&gb) < slice_end + 1024LL) {
645                     av_log(avctx, AV_LOG_ERROR, "Incorrect slice size\n");
646                     return AVERROR_INVALIDDATA;
648                 slice_size = slice_end - slice_start;
649                 slice_start = slice_end;
650                 max_slice_size = FFMAX(max_slice_size, slice_size);
652             plane_size = slice_end;
653             bytestream2_skipu(&gb, plane_size);
654             bytestream2_skipu(&gb, 1024);
656         plane_start[c->planes] = gb.buffer;
        /* classic 8-bit layout: per-plane 256-byte table + slice offsets */
658         for (i = 0; i < c->planes; i++) {
659             plane_start[i] = gb.buffer;
660             if (bytestream2_get_bytes_left(&gb) < 256 + 4 * c->slices) {
661                 av_log(avctx, AV_LOG_ERROR, "Insufficient data for a plane\n");
662                 return AVERROR_INVALIDDATA;
664             bytestream2_skipu(&gb, 256);
667             for (j = 0; j < c->slices; j++) {
668                 slice_end = bytestream2_get_le32u(&gb);
669                 if (slice_end < 0 || slice_end < slice_start ||
670                     bytestream2_get_bytes_left(&gb) < slice_end) {
671                     av_log(avctx, AV_LOG_ERROR, "Incorrect slice size\n");
672                     return AVERROR_INVALIDDATA;
674                 slice_size = slice_end - slice_start;
675                 slice_start = slice_end;
676                 max_slice_size = FFMAX(max_slice_size, slice_size);
678             plane_size = slice_end;
679             bytestream2_skipu(&gb, plane_size);
681         plane_start[c->planes] = gb.buffer;
682         if (bytestream2_get_bytes_left(&gb) < c->frame_info_size) {
683             av_log(avctx, AV_LOG_ERROR, "Not enough data for frame information\n");
684             return AVERROR_INVALIDDATA;
686         c->frame_info = bytestream2_get_le32u(&gb);
688         av_log(avctx, AV_LOG_DEBUG, "frame information flags %"PRIX32"\n",
691     c->frame_pred = (c->frame_info >> 8) & 3;
        /* extra headroom so a slightly overreading slice decode stays in
         * the scratch buffer */
693         max_slice_size += 4*avctx->width;
696         av_fast_malloc(&c->slice_bits, &c->slice_bits_size,
697                        max_slice_size + AV_INPUT_BUFFER_PADDING_SIZE);
699         if (!c->slice_bits) {
700             av_log(avctx, AV_LOG_ERROR, "Cannot allocate temporary buffer\n");
701             return AVERROR(ENOMEM);
        /* per-format decode + prediction restore dispatch */
705     switch (c->avctx->pix_fmt) {
706     case AV_PIX_FMT_GBRP:
707     case AV_PIX_FMT_GBRAP:
708         for (i = 0; i < c->planes; i++) {
709             ret = decode_plane(c, i, frame.f->data[i],
710                                frame.f->linesize[i], avctx->width,
711                                avctx->height, plane_start[i],
712                                c->frame_pred == PRED_LEFT);
715             if (c->frame_pred == PRED_MEDIAN) {
716                 if (!c->interlaced) {
717                     restore_median_planar(c, frame.f->data[i],
718                                           frame.f->linesize[i], avctx->width,
719                                           avctx->height, c->slices, 0);
721                     restore_median_planar_il(c, frame.f->data[i],
722                                              frame.f->linesize[i],
723                                              avctx->width, avctx->height, c->slices,
726             } else if (c->frame_pred == PRED_GRADIENT) {
727                 if (!c->interlaced) {
728                     restore_gradient_planar(c, frame.f->data[i],
729                                             frame.f->linesize[i], avctx->width,
730                                             avctx->height, c->slices, 0);
732                     restore_gradient_planar_il(c, frame.f->data[i],
733                                                frame.f->linesize[i],
734                                                avctx->width, avctx->height, c->slices,
            /* planes are stored G, B, R — recover conventional ordering */
739         c->utdsp.restore_rgb_planes(frame.f->data[2], frame.f->data[0], frame.f->data[1],
740                                     frame.f->linesize[2], frame.f->linesize[0], frame.f->linesize[1],
741                                     avctx->width, avctx->height);
743     case AV_PIX_FMT_GBRAP10:
744     case AV_PIX_FMT_GBRP10:
745         for (i = 0; i < c->planes; i++) {
                /* huff table sits in the last 1024 bytes before the next plane */
746             ret = decode_plane10(c, i, (uint16_t *)frame.f->data[i],
747                                  frame.f->linesize[i] / 2, avctx->width,
748                                  avctx->height, plane_start[i],
749                                  plane_start[i + 1] - 1024,
750                                  c->frame_pred == PRED_LEFT);
754         c->utdsp.restore_rgb_planes10((uint16_t *)frame.f->data[2], (uint16_t *)frame.f->data[0], (uint16_t *)frame.f->data[1],
755                                       frame.f->linesize[2] / 2, frame.f->linesize[0] / 2, frame.f->linesize[1] / 2,
756                                       avctx->width, avctx->height);
758     case AV_PIX_FMT_YUV420P:
759         for (i = 0; i < 3; i++) {
                /* chroma planes are half width and half height (>> !!i) */
760             ret = decode_plane(c, i, frame.f->data[i], frame.f->linesize[i],
761                                avctx->width >> !!i, avctx->height >> !!i,
762                                plane_start[i], c->frame_pred == PRED_LEFT);
765             if (c->frame_pred == PRED_MEDIAN) {
766                 if (!c->interlaced) {
767                     restore_median_planar(c, frame.f->data[i], frame.f->linesize[i],
768                                           avctx->width >> !!i, avctx->height >> !!i,
771                     restore_median_planar_il(c, frame.f->data[i], frame.f->linesize[i],
773                                              avctx->height >> !!i,
776             } else if (c->frame_pred == PRED_GRADIENT) {
777                 if (!c->interlaced) {
778                     restore_gradient_planar(c, frame.f->data[i], frame.f->linesize[i],
779                                             avctx->width >> !!i, avctx->height >> !!i,
782                     restore_gradient_planar_il(c, frame.f->data[i], frame.f->linesize[i],
784                                                avctx->height >> !!i,
790     case AV_PIX_FMT_YUV422P:
791         for (i = 0; i < 3; i++) {
792             ret = decode_plane(c, i, frame.f->data[i], frame.f->linesize[i],
793                                avctx->width >> !!i, avctx->height,
794                                plane_start[i], c->frame_pred == PRED_LEFT);
797             if (c->frame_pred == PRED_MEDIAN) {
798                 if (!c->interlaced) {
799                     restore_median_planar(c, frame.f->data[i], frame.f->linesize[i],
800                                           avctx->width >> !!i, avctx->height,
803                     restore_median_planar_il(c, frame.f->data[i], frame.f->linesize[i],
804                                              avctx->width >> !!i, avctx->height,
807             } else if (c->frame_pred == PRED_GRADIENT) {
808                 if (!c->interlaced) {
809                     restore_gradient_planar(c, frame.f->data[i], frame.f->linesize[i],
810                                             avctx->width >> !!i, avctx->height,
813                     restore_gradient_planar_il(c, frame.f->data[i], frame.f->linesize[i],
814                                                avctx->width >> !!i, avctx->height,
820     case AV_PIX_FMT_YUV444P:
821         for (i = 0; i < 3; i++) {
822             ret = decode_plane(c, i, frame.f->data[i], frame.f->linesize[i],
823                                avctx->width, avctx->height,
824                                plane_start[i], c->frame_pred == PRED_LEFT);
827             if (c->frame_pred == PRED_MEDIAN) {
828                 if (!c->interlaced) {
829                     restore_median_planar(c, frame.f->data[i], frame.f->linesize[i],
830                                           avctx->width, avctx->height,
833                     restore_median_planar_il(c, frame.f->data[i], frame.f->linesize[i],
834                                              avctx->width, avctx->height,
837             } else if (c->frame_pred == PRED_GRADIENT) {
838                 if (!c->interlaced) {
839                     restore_gradient_planar(c, frame.f->data[i], frame.f->linesize[i],
840                                             avctx->width, avctx->height,
843                     restore_gradient_planar_il(c, frame.f->data[i], frame.f->linesize[i],
844                                                avctx->width, avctx->height,
850     case AV_PIX_FMT_YUV420P10:
851         for (i = 0; i < 3; i++) {
852             ret = decode_plane10(c, i, (uint16_t *)frame.f->data[i], frame.f->linesize[i] / 2,
853                                  avctx->width >> !!i, avctx->height >> !!i,
854                                  plane_start[i], plane_start[i + 1] - 1024, c->frame_pred == PRED_LEFT);
859     case AV_PIX_FMT_YUV422P10:
860         for (i = 0; i < 3; i++) {
861             ret = decode_plane10(c, i, (uint16_t *)frame.f->data[i], frame.f->linesize[i] / 2,
862                                  avctx->width >> !!i, avctx->height,
863                                  plane_start[i], plane_start[i + 1] - 1024, c->frame_pred == PRED_LEFT);
        /* every Ut Video frame is an intra frame */
870     frame.f->key_frame = 1;
871     frame.f->pict_type = AV_PICTURE_TYPE_I;
872     frame.f->interlaced_frame = !!c->interlaced;
876     /* always report that the buffer was completely consumed */
/* Decoder init: set up DSP contexts, map the codec FOURCC to pixel
 * format/colorspace (UL* = classic 8-bit, UQ* = 10-bit "pro",
 * UM* = packed variants), reject dimensions not divisible by the chroma
 * subsampling, and parse extradata for slice count, compression mode and
 * interlacing flags. */
880 static av_cold int decode_init(AVCodecContext *avctx)
882     UtvideoContext * const c = avctx->priv_data;
883     int h_shift, v_shift;
887     ff_utvideodsp_init(&c->utdsp);
888     ff_bswapdsp_init(&c->bdsp);
889     ff_llviddsp_init(&c->llviddsp);
891     c->slice_bits_size = 0;
893     switch (avctx->codec_tag) {
894     case MKTAG('U', 'L', 'R', 'G'):
896         avctx->pix_fmt = AV_PIX_FMT_GBRP;
898     case MKTAG('U', 'L', 'R', 'A'):
900         avctx->pix_fmt = AV_PIX_FMT_GBRAP;
902     case MKTAG('U', 'L', 'Y', '0'):
904         avctx->pix_fmt = AV_PIX_FMT_YUV420P;
905         avctx->colorspace = AVCOL_SPC_BT470BG;
907     case MKTAG('U', 'L', 'Y', '2'):
909         avctx->pix_fmt = AV_PIX_FMT_YUV422P;
910         avctx->colorspace = AVCOL_SPC_BT470BG;
912     case MKTAG('U', 'L', 'Y', '4'):
914         avctx->pix_fmt = AV_PIX_FMT_YUV444P;
915         avctx->colorspace = AVCOL_SPC_BT470BG;
917     case MKTAG('U', 'Q', 'Y', '0'):
920         avctx->pix_fmt = AV_PIX_FMT_YUV420P10;
922     case MKTAG('U', 'Q', 'Y', '2'):
925         avctx->pix_fmt = AV_PIX_FMT_YUV422P10;
927     case MKTAG('U', 'Q', 'R', 'G'):
930         avctx->pix_fmt = AV_PIX_FMT_GBRP10;
932     case MKTAG('U', 'Q', 'R', 'A'):
935         avctx->pix_fmt = AV_PIX_FMT_GBRAP10;
937     case MKTAG('U', 'L', 'H', '0'):
939         avctx->pix_fmt = AV_PIX_FMT_YUV420P;
940         avctx->colorspace = AVCOL_SPC_BT709;
942     case MKTAG('U', 'L', 'H', '2'):
944         avctx->pix_fmt = AV_PIX_FMT_YUV422P;
945         avctx->colorspace = AVCOL_SPC_BT709;
947     case MKTAG('U', 'L', 'H', '4'):
949         avctx->pix_fmt = AV_PIX_FMT_YUV444P;
950         avctx->colorspace = AVCOL_SPC_BT709;
952     case MKTAG('U', 'M', 'Y', '2'):
955         avctx->pix_fmt = AV_PIX_FMT_YUV422P;
956         avctx->colorspace = AVCOL_SPC_BT470BG;
958     case MKTAG('U', 'M', 'H', '2'):
961         avctx->pix_fmt = AV_PIX_FMT_YUV422P;
962         avctx->colorspace = AVCOL_SPC_BT709;
964     case MKTAG('U', 'M', 'Y', '4'):
967         avctx->pix_fmt = AV_PIX_FMT_YUV444P;
968         avctx->colorspace = AVCOL_SPC_BT470BG;
970     case MKTAG('U', 'M', 'H', '4'):
973         avctx->pix_fmt = AV_PIX_FMT_YUV444P;
974         avctx->colorspace = AVCOL_SPC_BT709;
976     case MKTAG('U', 'M', 'R', 'G'):
979         avctx->pix_fmt = AV_PIX_FMT_GBRP;
981     case MKTAG('U', 'M', 'R', 'A'):
984         avctx->pix_fmt = AV_PIX_FMT_GBRAP;
987         av_log(avctx, AV_LOG_ERROR, "Unknown Ut Video FOURCC provided (%08X)\n",
989         return AVERROR_INVALIDDATA;
        /* width/height must be divisible by the chroma subsampling factors */
992     av_pix_fmt_get_chroma_sub_sample(avctx->pix_fmt, &h_shift, &v_shift);
993     if ((avctx->width & ((1<<h_shift)-1)) ||
994         (avctx->height & ((1<<v_shift)-1))) {
995         avpriv_request_sample(avctx, "Odd dimensions");
996         return AVERROR_PATCHWELCOME;
        /* extradata layout differs between packed, classic and pro modes */
999     if (c->pack && avctx->extradata_size >= 16) {
1000         av_log(avctx, AV_LOG_DEBUG, "Encoder version %d.%d.%d.%d\n",
1001                avctx->extradata[3], avctx->extradata[2],
1002                avctx->extradata[1], avctx->extradata[0]);
1003         av_log(avctx, AV_LOG_DEBUG, "Original format %"PRIX32"\n",
1004                AV_RB32(avctx->extradata + 4));
1005         c->compression = avctx->extradata[8];
1006         if (c->compression != 2)
1007             avpriv_request_sample(avctx, "Unknown compression type");
1008         c->slices = avctx->extradata[9] + 1;
1009     } else if (!c->pro && avctx->extradata_size >= 16) {
1010         av_log(avctx, AV_LOG_DEBUG, "Encoder version %d.%d.%d.%d\n",
1011                avctx->extradata[3], avctx->extradata[2],
1012                avctx->extradata[1], avctx->extradata[0]);
1013         av_log(avctx, AV_LOG_DEBUG, "Original format %"PRIX32"\n",
1014                AV_RB32(avctx->extradata + 4));
1015         c->frame_info_size = AV_RL32(avctx->extradata + 8);
1016         c->flags = AV_RL32(avctx->extradata + 12);
1018         if (c->frame_info_size != 4)
1019             avpriv_request_sample(avctx, "Frame info not 4 bytes");
1020         av_log(avctx, AV_LOG_DEBUG, "Encoding parameters %08"PRIX32"\n", c->flags);
1021         c->slices = (c->flags >> 24) + 1;
1022         c->compression = c->flags & 1;
1023         c->interlaced = c->flags & 0x800;
1024     } else if (c->pro && avctx->extradata_size == 8) {
1025         av_log(avctx, AV_LOG_DEBUG, "Encoder version %d.%d.%d.%d\n",
1026                avctx->extradata[3], avctx->extradata[2],
1027                avctx->extradata[1], avctx->extradata[0]);
1028         av_log(avctx, AV_LOG_DEBUG, "Original format %"PRIX32"\n",
1029                AV_RB32(avctx->extradata + 4));
1031         c->frame_info_size = 4;
1033         av_log(avctx, AV_LOG_ERROR,
1034                "Insufficient extradata size %d, should be at least 16\n",
1035                avctx->extradata_size);
1036         return AVERROR_INVALIDDATA;
/* Decoder teardown: release the per-frame slice scratch buffer allocated
 * by av_fast_malloc() in decode_frame().  av_freep() also NULLs the
 * pointer, so repeated close is safe. */
1042 static av_cold int decode_end(AVCodecContext *avctx)
1044     UtvideoContext * const c = avctx->priv_data;
1046     av_freep(&c->slice_bits);
/* Codec registration: Ut Video decoder supports direct rendering and
 * frame-level threading; init is thread-safe so multiple contexts can be
 * created concurrently. */
1051 AVCodec ff_utvideo_decoder = {
1053     .long_name      = NULL_IF_CONFIG_SMALL("Ut Video"),
1054     .type           = AVMEDIA_TYPE_VIDEO,
1055     .id             = AV_CODEC_ID_UTVIDEO,
1056     .priv_data_size = sizeof(UtvideoContext),
1057     .init           = decode_init,
1058     .close          = decode_end,
1059     .decode         = decode_frame,
1060     .capabilities   = AV_CODEC_CAP_DR1 | AV_CODEC_CAP_FRAME_THREADS,
1061     .caps_internal  = FF_CODEC_CAP_INIT_THREADSAFE,