3 * Copyright (c) 2011 Konstantin Shishkov
5 * This file is part of Libav.
7 * Libav is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU Lesser General Public
9 * License as published by the Free Software Foundation; either
10 * version 2.1 of the License, or (at your option) any later version.
12 * Libav is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * Lesser General Public License for more details.
17 * You should have received a copy of the GNU Lesser General Public
18 * License along with Libav; if not, write to the Free Software
19 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
30 #include "libavutil/intreadwrite.h"
33 #include "bitstream.h"
35 #include "bytestream.h"
/*
 * Build the VLC decoding table for one 10-bit plane from 1024 per-symbol
 * code lengths in src.  A length of 255 marks an unused symbol; canonical
 * codes are assigned after sorting by the shared comparator.  Returns the
 * ff_init_vlc_sparse() result (<0 on error).
 * NOTE(review): several lines of this function are not visible in this
 * chunk (local declarations, the single-symbol/*fsym handling), so the
 * exact *fsym contract cannot be confirmed here.
 */
41 static int build_huff10(const uint8_t *src, VLC *vlc, int *fsym)
/* gather the 1024 (symbol, length) pairs from src */
52     for (i = 0; i < 1024; i++) {
/* order entries by code length (comparator declared elsewhere) */
56     qsort(he, 1024, sizeof(*he), ff_ut10_huff_cmp_len);
/* trim trailing unused entries (len == 255) */
64     while (he[last].len == 255 && last)
/* a canonical code longer than 32 bits cannot be represented below */
67     if (he[last].len > 32) {
/* assign canonical codes left-aligned in 32 bits */
72     for (i = last; i >= 0; i--) {
73         codes[i] = code >> (32 - he[i].len);
76         code += 0x80000000u >> (he[i].len - 1);
/* first-stage table depth is capped at 11 bits for the 10-bit variant */
79     return ff_init_vlc_sparse(vlc, FFMIN(he[last].len, 11), last + 1,
80                               bits, sizeof(*bits), sizeof(*bits),
81                               codes, sizeof(*codes), sizeof(*codes),
82                               syms, sizeof(*syms), sizeof(*syms), 0);
/*
 * 8-bit counterpart of build_huff10(): build the VLC table for one 8-bit
 * plane from 256 per-symbol code lengths in src (255 = unused symbol).
 * Returns the ff_init_vlc_sparse() result (<0 on error).
 * NOTE(review): lines declaring the locals and handling *fsym are not
 * visible in this chunk; the *fsym contract cannot be confirmed here.
 */
85 static int build_huff(const uint8_t *src, VLC *vlc, int *fsym)
/* gather the 256 (symbol, length) pairs from src */
96     for (i = 0; i < 256; i++) {
/* order entries by code length (comparator declared elsewhere) */
100     qsort(he, 256, sizeof(*he), ff_ut_huff_cmp_len);
/* trim trailing unused entries (len == 255) */
110     while (he[last].len == 255 && last)
/* assign canonical codes left-aligned in 32 bits */
114     for (i = last; i >= 0; i--) {
115         codes[i] = code >> (32 - he[i].len);
118         code += 0x80000000u >> (he[i].len - 1);
/* first-stage table depth is capped at 9 bits for the 8-bit variant */
121     return ff_init_vlc_sparse(vlc, FFMIN(he[last].len, 9), last + 1,
122                               bits, sizeof(*bits), sizeof(*bits),
123                               codes, sizeof(*codes), sizeof(*codes),
124                               syms, sizeof(*syms), sizeof(*syms), 0);
/*
 * Decode one 10-bit plane into dst.  src starts with c->slices
 * little-endian 32-bit slice end offsets followed by the byte-swapped
 * slice payloads; huff points at the 1024-byte code-length table for
 * this plane.  Rows are partitioned evenly across c->slices slices.
 * NOTE(review): this chunk omits many lines of the function (pixel
 * write-back, the use_pred path, the success return); comments below
 * describe only the visible control flow.
 */
127 static int decode_plane10(UtvideoContext *c, int plane_no,
128                           uint16_t *dst, int step, int stride,
129                           int width, int height,
130                           const uint8_t *src, const uint8_t *huff,
134     int i, j, slice, pix, ret;
139     if ((ret = build_huff10(huff, &vlc, &fsym)) < 0) {
140         av_log(c->avctx, AV_LOG_ERROR, "Cannot build Huffman codes\n");
/* fast path: the whole plane uses one symbol, just fill the slices */
143     if (fsym >= 0) { // build_huff reported a symbol to fill slices with
145         for (slice = 0; slice < c->slices; slice++) {
/* slice covers rows [sstart, send) of the plane */
149             send = (height * (slice + 1) / c->slices);
150             dest = dst + sstart * stride;
153             for (j = sstart; j < send; j++) {
154                 for (i = 0; i < width * step; i += step) {
/* general path: VLC-decode every slice */
170     for (slice = 0; slice < c->slices; slice++) {
172         int slice_data_start, slice_data_end, slice_size;
175         send = (height * (slice + 1) / c->slices);
176         dest = dst + sstart * stride;
178         // slice offset and size validation was done earlier
/* slice offsets are cumulative end positions; slice 0 starts at 0 */
179         slice_data_start = slice ? AV_RL32(src + slice * 4 - 4) : 0;
180         slice_data_end = AV_RL32(src + slice * 4);
181         slice_size = slice_data_end - slice_data_start;
184             av_log(c->avctx, AV_LOG_ERROR, "Plane has more than one symbol "
185                    "yet a slice has a length of zero.\n");
/* copy slice into the scratch buffer (skipping the offset table),
 * zero the padding, and undo the encoder's 32-bit byte swapping */
189         memcpy(c->slice_bits, src + slice_data_start + c->slices * 4,
191         memset(c->slice_bits + slice_size, 0, AV_INPUT_BUFFER_PADDING_SIZE);
192         c->bdsp.bswap_buf((uint32_t *) c->slice_bits,
193                           (uint32_t *) c->slice_bits,
194                           (slice_data_end - slice_data_start + 3) >> 2);
195         bitstream_init8(&bc, c->slice_bits, slice_size);
198         for (j = sstart; j < send; j++) {
199             for (i = 0; i < width * step; i += step) {
200                 if (bitstream_bits_left(&bc) <= 0) {
201                     av_log(c->avctx, AV_LOG_ERROR,
202                            "Slice decoding ran out of bits\n");
/* up to 3 nested table lookups for codes deeper than vlc.bits */
205                 pix = bitstream_read_vlc(&bc, vlc.table, vlc.bits, 3);
207                     av_log(c->avctx, AV_LOG_ERROR, "Decoding error\n");
/* >32 leftover bits means more than bswap padding remained unread */
219         if (bitstream_bits_left(&bc) > 32)
220             av_log(c->avctx, AV_LOG_WARNING,
221                    "%d bits left after decoding slice\n", bitstream_bits_left(&bc));
229     return AVERROR_INVALIDDATA;
/*
 * Compute the mask ANDed onto slice boundary rows so slices start and
 * end on legal row boundaries for the pixel format.  The luma plane of
 * YUV420P needs extra vertical alignment because chroma is vertically
 * subsampled.  NOTE(review): the branch that selects this return (the
 * 'interlaced' test and the non-interlaced return) is not visible in
 * this chunk.
 */
232 static int compute_cmask(int plane_no, int interlaced, int pix_fmt)
/* only plane 0 of 4:2:0 counts as luma here */
234     const int is_luma = (pix_fmt == AV_PIX_FMT_YUV420P) && !plane_no;
/* ~3 for 4:2:0 luma (4-row alignment), ~1 otherwise */
237         return ~(1 + 2 * is_luma);
/*
 * Decode one 8-bit plane into dst; the 8-bit sibling of
 * decode_plane10().  The Huffman length table (256 bytes) is read
 * directly from the start of src, and slice rows are additionally
 * aligned with compute_cmask().
 * NOTE(review): this chunk omits many lines of the function (pixel
 * write-back, the use_pred path, the success return); comments below
 * describe only the visible control flow.
 */
242 static int decode_plane(UtvideoContext *c, int plane_no,
243                         uint8_t *dst, int step, ptrdiff_t stride,
244                         int width, int height,
245                         const uint8_t *src, int use_pred)
247     int i, j, slice, pix;
/* row-alignment mask for slice boundaries (format/interlace dependent) */
252     const int cmask = compute_cmask(plane_no, c->interlaced, c->avctx->pix_fmt);
254     if (build_huff(src, &vlc, &fsym)) {
255         av_log(c->avctx, AV_LOG_ERROR, "Cannot build Huffman codes\n");
256         return AVERROR_INVALIDDATA;
/* fast path: the whole plane uses one symbol, just fill the slices */
258     if (fsym >= 0) { // build_huff reported a symbol to fill slices with
260         for (slice = 0; slice < c->slices; slice++) {
/* slice covers rows [sstart, send), aligned by cmask */
264             send = (height * (slice + 1) / c->slices) & cmask;
265             dest = dst + sstart * stride;
268             for (j = sstart; j < send; j++) {
269                 for (i = 0; i < width * step; i += step) {
/* general path: VLC-decode every slice */
286     for (slice = 0; slice < c->slices; slice++) {
288         int slice_data_start, slice_data_end, slice_size;
291         send = (height * (slice + 1) / c->slices) & cmask;
292         dest = dst + sstart * stride;
294         // slice offset and size validation was done earlier
/* slice offsets are cumulative end positions; slice 0 starts at 0 */
295         slice_data_start = slice ? AV_RL32(src + slice * 4 - 4) : 0;
296         slice_data_end = AV_RL32(src + slice * 4);
297         slice_size = slice_data_end - slice_data_start;
300             av_log(c->avctx, AV_LOG_ERROR, "Plane has more than one symbol "
301                    "yet a slice has a length of zero.\n");
/* copy slice into the scratch buffer (skipping the offset table),
 * zero the padding, and undo the encoder's 32-bit byte swapping */
305         memcpy(c->slice_bits, src + slice_data_start + c->slices * 4,
307         memset(c->slice_bits + slice_size, 0, AV_INPUT_BUFFER_PADDING_SIZE);
308         c->bdsp.bswap_buf((uint32_t *) c->slice_bits,
309                           (uint32_t *) c->slice_bits,
310                           (slice_data_end - slice_data_start + 3) >> 2);
311         bitstream_init8(&bc, c->slice_bits, slice_size);
314         for (j = sstart; j < send; j++) {
315             for (i = 0; i < width * step; i += step) {
316                 if (bitstream_bits_left(&bc) <= 0) {
317                     av_log(c->avctx, AV_LOG_ERROR,
318                            "Slice decoding ran out of bits\n");
/* up to 4 nested table lookups for codes deeper than vlc.bits */
321                 pix = bitstream_read_vlc(&bc, vlc.table, vlc.bits, 4);
323                     av_log(c->avctx, AV_LOG_ERROR, "Decoding error\n");
/* >32 leftover bits means more than bswap padding remained unread */
334         if (bitstream_bits_left(&bc) > 32)
335             av_log(c->avctx, AV_LOG_WARNING,
336                    "%d bits left after decoding slice\n", bitstream_bits_left(&bc));
344     return AVERROR_INVALIDDATA;
/*
 * Undo the packed-RGB colour decorrelation in place: G is stored as-is
 * while R and B are stored as offsets from G biased by 0x80.  step is
 * the bytes-per-pixel stride between samples of the same component.
 * NOTE(review): the lines loading r, g, b and advancing src by stride
 * per row are not visible in this chunk.
 */
347 static void restore_rgb_planes(uint8_t *src, int step, ptrdiff_t stride,
348                                int width, int height)
353     for (j = 0; j < height; j++) {
354         for (i = 0; i < width * step; i += step) {
/* remove the 0x80 bias and re-add the green component */
358             src[i] = r + g - 0x80;
359             src[i + 2] = b + g - 0x80;
/*
 * 10-bit variant of the RGB restoration for planar GBR(A) frames:
 * R and B are stored as offsets from G biased by 0x200 and the results
 * are wrapped to 10 bits.  Planes follow the GBR layout, i.e. G is
 * data[0], B is data[1], R is data[2].
 * NOTE(review): the lines loading r, g, b are not visible in this chunk.
 */
365 static void restore_rgb_planes10(AVFrame *frame, int width, int height)
367     uint16_t *src_r = (uint16_t *)frame->data[2];
368     uint16_t *src_g = (uint16_t *)frame->data[0];
369     uint16_t *src_b = (uint16_t *)frame->data[1];
373     for (j = 0; j < height; j++) {
374         for (i = 0; i < width; i++) {
/* remove the 0x200 bias, add green back, keep 10 bits */
378             src_r[i] = (r + g - 0x200) & 0x3FF;
379             src_b[i] = (b + g - 0x200) & 0x3FF;
/* linesize is in bytes; divide by 2 for uint16_t element strides */
381         src_r += frame->linesize[2] / 2;
382         src_g += frame->linesize[0] / 2;
383         src_b += frame->linesize[1] / 2;
/*
 * Reverse median prediction for one 8-bit planar plane, slice by slice,
 * progressive variant.  Per slice: row 0 uses left prediction; row 1
 * starts with a top-predicted pixel and continues with median
 * prediction; rows 2+ are restored with the huffyuvdsp median-prediction
 * routine.  rmode feeds the slice-row alignment mask.
 * NOTE(review): some lines (A/C setup, bsrc row advance) are not
 * visible in this chunk.
 */
387 static void restore_median_planar(UtvideoContext *c, uint8_t *src,
388                                   ptrdiff_t stride, int width, int height,
389                                   int slices, int rmode)
394     int slice_start, slice_height;
395     const int cmask = ~rmode;
397     for (slice = 0; slice < slices; slice++) {
/* slice rows are aligned down via cmask */
398         slice_start = ((slice * height) / slices) & cmask;
399         slice_height = ((((slice + 1) * height) / slices) & cmask) -
404         bsrc = src + slice_start * stride;
406         // first line - left neighbour prediction
408         c->hdspdec.add_hfyu_left_pred(bsrc, bsrc, width, 0);
/* a 1-row slice has no vertical neighbours; skip to next slice */
410         if (slice_height <= 1)
412         // second line - first element has top prediction, the rest uses median
416         for (i = 1; i < width; i++) {
417             B = bsrc[i - stride];
/* truncate the gradient to 8 bits before taking the median */
418             bsrc[i] += mid_pred(A, B, (uint8_t)(A + B - C));
423         // the rest of lines use continuous median prediction
424         for (j = 2; j < slice_height; j++) {
425             c->hdspdec.add_hfyu_median_pred(bsrc, bsrc - stride,
426                                             bsrc, width, &A, &B);
432 /* UtVideo interlaced mode treats every two lines as a single one,
433 * so the restoring function should take care of possible padding between
434 * two parts of the same "line".
/*
 * Interlaced variant of restore_median_planar(): field lines are
 * interleaved, so each logical "line" is a pair of rows and vertical
 * neighbours are stride2 (= 2 * stride) apart.
 * NOTE(review): some lines (A/C setup, bsrc advance by stride2) are
 * not visible in this chunk.
 */
436 static void restore_median_planar_il(UtvideoContext *c, uint8_t *src,
437                                      ptrdiff_t stride, int width, int height,
438                                      int slices, int rmode)
443     int slice_start, slice_height;
/* interlacing needs 2-row (or 4-row with rmode) slice alignment */
444     const int cmask = ~(rmode ? 3 : 1);
445     const int stride2 = stride << 1;
447     for (slice = 0; slice < slices; slice++) {
448         slice_start = ((slice * height) / slices) & cmask;
449         slice_height = ((((slice + 1) * height) / slices) & cmask) -
455         bsrc = src + slice_start * stride;
457         // first line - left neighbour prediction
/* the second field row continues left prediction from the first */
459         A = c->hdspdec.add_hfyu_left_pred(bsrc, bsrc, width, 0);
460         c->hdspdec.add_hfyu_left_pred(bsrc + stride, bsrc + stride, width, A);
462         if (slice_height <= 1)
464         // second line - first element has top prediction, the rest uses median
468         for (i = 1; i < width; i++) {
/* top neighbour is two rows up in interlaced layout */
469             B = bsrc[i - stride2];
470             bsrc[i] += mid_pred(A, B, (uint8_t)(A + B - C));
474         c->hdspdec.add_hfyu_median_pred(bsrc + stride, bsrc - stride,
475                                         bsrc + stride, width, &A, &B);
477         // the rest of lines use continuous median prediction
478         for (j = 2; j < slice_height; j++) {
/* restore both field rows of the logical line */
479             c->hdspdec.add_hfyu_median_pred(bsrc, bsrc - stride2,
480                                             bsrc, width, &A, &B);
481             c->hdspdec.add_hfyu_median_pred(bsrc + stride, bsrc - stride,
482                                             bsrc + stride, width, &A, &B);
/*
 * Reverse median prediction for one component of a packed-pixel frame
 * (progressive).  step is the distance in bytes between consecutive
 * samples of this component.  Per slice: row 0 uses left prediction,
 * row 1 starts with top prediction then median, rows 2+ use median
 * prediction throughout.
 * NOTE(review): the A/C bookkeeping between the visible lines is not
 * fully shown in this chunk.
 */
488 static void restore_median_packed(uint8_t *src, int step, ptrdiff_t stride,
489                                   int width, int height,
490                                   int slices, int rmode)
495     int slice_start, slice_height;
496     const int cmask = ~rmode;
498     for (slice = 0; slice < slices; slice++) {
499         slice_start = ((slice * height) / slices) & cmask;
500         slice_height = ((((slice + 1) * height) / slices) & cmask) -
505         bsrc = src + slice_start * stride;
507         // first line - left neighbour prediction
510         for (i = step; i < width * step; i += step) {
515         if (slice_height == 1)
517         // second line - first element has top prediction, the rest uses median
521         for (i = step; i < width * step; i += step) {
522             B = bsrc[i - stride];
/* truncate the gradient to 8 bits before taking the median */
523             bsrc[i] += mid_pred(A, B, (uint8_t)(A + B - C));
528         // the rest of lines use continuous median prediction
529         for (j = 2; j < slice_height; j++) {
530             for (i = 0; i < width * step; i += step) {
531                 B = bsrc[i - stride];
532                 bsrc[i] += mid_pred(A, B, (uint8_t)(A + B - C));
541 /* UtVideo interlaced mode treats every two lines as a single one,
542 * so the restoring function should take care of possible padding between
543 * two parts of the same "line".
/*
 * Interlaced variant of restore_median_packed(): each logical "line"
 * is a pair of field rows, so the top neighbour lives stride2 away and
 * both rows of the pair are restored together.
 * NOTE(review): the A/C bookkeeping between the visible lines is not
 * fully shown in this chunk.
 */
545 static void restore_median_packed_il(uint8_t *src, int step, ptrdiff_t stride,
546                                      int width, int height,
547                                      int slices, int rmode)
552     int slice_start, slice_height;
/* interlacing needs 2-row (or 4-row with rmode) slice alignment */
553     const int cmask = ~(rmode ? 3 : 1);
554     const ptrdiff_t stride2 = stride << 1;
556     for (slice = 0; slice < slices; slice++) {
557         slice_start = ((slice * height) / slices) & cmask;
558         slice_height = ((((slice + 1) * height) / slices) & cmask) -
564         bsrc = src + slice_start * stride;
566         // first line - left neighbour prediction
569         for (i = step; i < width * step; i += step) {
/* continue left prediction across the second field row */
573         for (i = 0; i < width * step; i += step) {
574             bsrc[stride + i] += A;
575             A = bsrc[stride + i];
578         if (slice_height == 1)
580         // second line - first element has top prediction, the rest uses median
584         for (i = step; i < width * step; i += step) {
/* top neighbour is two rows up in interlaced layout */
585             B = bsrc[i - stride2];
586             bsrc[i] += mid_pred(A, B, (uint8_t)(A + B - C));
/* second field row predicts from the row directly above it */
590         for (i = 0; i < width * step; i += step) {
591             B = bsrc[i - stride];
592             bsrc[stride + i] += mid_pred(A, B, (uint8_t)(A + B - C));
594             A = bsrc[stride + i];
597         // the rest of lines use continuous median prediction
598         for (j = 2; j < slice_height; j++) {
599             for (i = 0; i < width * step; i += step) {
600                 B = bsrc[i - stride2];
601                 bsrc[i] += mid_pred(A, B, (uint8_t)(A + B - C));
605             for (i = 0; i < width * step; i += step) {
606                 B = bsrc[i - stride];
607                 bsrc[i + stride] += mid_pred(A, B, (uint8_t)(A + B - C));
609                 A = bsrc[i + stride];
/*
 * Reverse gradient (left+top-topleft) prediction for one 8-bit planar
 * plane, progressive variant.  Row 0 of each slice uses left
 * prediction; all later rows add the gradient A - B + C where A is the
 * top neighbour, B the top-left and C the left, truncated to 8 bits.
 * NOTE(review): the C assignment and the bsrc row advance are not
 * visible in this chunk.
 */
616 static void restore_gradient_planar(UtvideoContext *c, uint8_t *src, ptrdiff_t stride,
617                                     int width, int height, int slices, int rmode)
622     int slice_start, slice_height;
623     const int cmask = ~rmode;
625     for (slice = 0; slice < slices; slice++) {
626         slice_start = ((slice * height) / slices) & cmask;
627         slice_height = ((((slice + 1) * height) / slices) & cmask) -
632         bsrc = src + slice_start * stride;
634         // first line - left neighbour prediction
636         c->hdspdec.add_hfyu_left_pred(bsrc, bsrc, width, 0);
638         if (slice_height <= 1)
640         for (j = 1; j < slice_height; j++) {
641             // second line - first element has top prediction, the rest uses gradient
642             bsrc[0] = (bsrc[0] + bsrc[-stride]) & 0xFF;
643             for (i = 1; i < width; i++) {
/* A = top, B = top-left; C (left) is set outside the visible lines */
644                 A = bsrc[i - stride];
645                 B = bsrc[i - (stride + 1)];
647                 bsrc[i] = (A - B + C + bsrc[i]) & 0xFF;
/*
 * Interlaced variant of restore_gradient_planar(): vertical neighbours
 * are stride2 apart and both field rows of each logical line are
 * restored per iteration.  The second row's first pixel takes its
 * "top-left" from the end of the previous logical line.
 * NOTE(review): the A/C assignments around the visible lines are not
 * fully shown in this chunk.
 */
654 static void restore_gradient_planar_il(UtvideoContext *c, uint8_t *src, ptrdiff_t stride,
655                                        int width, int height, int slices, int rmode)
660     int slice_start, slice_height;
661     const int cmask = ~(rmode ? 3 : 1);
662     const ptrdiff_t stride2 = stride << 1;
664     for (slice = 0; slice < slices; slice++) {
665         slice_start = ((slice * height) / slices) & cmask;
666         slice_height = ((((slice + 1) * height) / slices) & cmask) -
672         bsrc = src + slice_start * stride;
674         // first line - left neighbour prediction
/* left prediction continues from the first field row into the second */
676         A = c->hdspdec.add_hfyu_left_pred(bsrc, bsrc, width, 0);
677         c->hdspdec.add_hfyu_left_pred(bsrc + stride, bsrc + stride, width, A);
679         if (slice_height <= 1)
681         for (j = 1; j < slice_height; j++) {
682             // second line - first element has top prediction, the rest uses gradient
683             bsrc[0] = (bsrc[0] + bsrc[-stride2]) & 0xFF;
684             for (i = 1; i < width; i++) {
685                 A = bsrc[i - stride2];
686                 B = bsrc[i - (stride2 + 1)];
688                 bsrc[i] = (A - B + C + bsrc[i]) & 0xFF;
/* top-left of the second field row's first pixel wraps to the tail of
 * the previous logical line (hence the width term in the offset) */
691             B = bsrc[-(1 + stride + stride - width)];
693             bsrc[stride] = (A - B + C + bsrc[stride]) & 0xFF;
694             for (i = 1; i < width; i++) {
695                 A = bsrc[i - stride];
696                 B = bsrc[i - (1 + stride)];
697                 C = bsrc[i - 1 + stride];
698                 bsrc[i + stride] = (A - B + C + bsrc[i + stride]) & 0xFF;
/*
 * Reverse gradient prediction for one component of a packed-pixel
 * frame (progressive).  step is the byte distance between consecutive
 * samples of this component; neighbours are addressed with step and
 * stride accordingly.
 * NOTE(review): the C assignment and bsrc row advance are not visible
 * in this chunk.
 */
705 static void restore_gradient_packed(uint8_t *src, int step, ptrdiff_t stride,
706                                     int width, int height, int slices, int rmode)
711     int slice_start, slice_height;
712     const int cmask = ~rmode;
714     for (slice = 0; slice < slices; slice++) {
715         slice_start = ((slice * height) / slices) & cmask;
716         slice_height = ((((slice + 1) * height) / slices) & cmask) -
721         bsrc = src + slice_start * stride;
723         // first line - left neighbour prediction
726         for (i = step; i < width * step; i += step) {
731         if (slice_height <= 1)
733         for (j = 1; j < slice_height; j++) {
734             // second line - first element has top prediction, the rest uses gradient
737             for (i = step; i < width * step; i += step) {
/* A = top, B = top-left (one sample back), gradient added mod 256 */
738                 A = bsrc[i - stride];
739                 B = bsrc[i - (stride + step)];
741                 bsrc[i] = (A - B + C + bsrc[i]) & 0xFF;
/*
 * Interlaced variant of restore_gradient_packed(): vertical neighbours
 * are stride2 apart and both field rows of each logical line are
 * restored per iteration; the second row's first sample takes its
 * top-left from the end of the previous logical line.
 * NOTE(review): the A/C assignments around the visible lines are not
 * fully shown in this chunk.
 */
748 static void restore_gradient_packed_il(uint8_t *src, int step, ptrdiff_t stride,
749                                        int width, int height, int slices, int rmode)
754     int slice_start, slice_height;
755     const int cmask = ~(rmode ? 3 : 1);
756     const ptrdiff_t stride2 = stride << 1;
758     for (slice = 0; slice < slices; slice++) {
759         slice_start = ((slice * height) / slices) & cmask;
760         slice_height = ((((slice + 1) * height) / slices) & cmask) -
766         bsrc = src + slice_start * stride;
768         // first line - left neighbour prediction
771         for (i = step; i < width * step; i += step) {
/* continue left prediction across the second field row */
775         for (i = 0; i < width * step; i += step) {
776             bsrc[stride + i] += A;
777             A = bsrc[stride + i];
780         if (slice_height <= 1)
782         for (j = 1; j < slice_height; j++) {
783             // second line - first element has top prediction, the rest uses gradient
786             for (i = step; i < width * step; i += step) {
787                 A = bsrc[i - stride2];
788                 B = bsrc[i - (stride2 + step)];
790                 bsrc[i] = (A - B + C + bsrc[i]) & 0xFF;
/* top-left of the second field row's first sample wraps to the tail
 * of the previous logical line (hence the width * step term) */
793             B = bsrc[-(step + stride + stride - width * step)];
794             C = bsrc[width * step - step];
795             bsrc[stride] = (A - B + C + bsrc[stride]) & 0xFF;
796             for (i = step; i < width * step; i += step) {
797                 A = bsrc[i - stride];
798                 B = bsrc[i - (step + stride)];
799                 C = bsrc[i - step + stride];
800                 bsrc[i + stride] = (A - B + C + bsrc[i + stride]) & 0xFF;
/*
 * Decode one Ut Video frame: acquire the output buffer, parse the
 * frame-info word and per-plane slice tables (validating every slice
 * offset up front), size the shared slice bitstream scratch buffer,
 * decode each plane for the negotiated pixel format, and finally undo
 * the per-frame prediction (left / median / gradient) selected by
 * c->frame_pred.
 * NOTE(review): this chunk omits many lines of the function (several
 * error-path closers, the branch selecting between the two plane-table
 * layouts below, got_frame signalling and the final return), so
 * comments only annotate what is visible.
 */
807 static int decode_frame(AVCodecContext *avctx, void *data, int *got_frame,
810     const uint8_t *buf = avpkt->data;
811     int buf_size = avpkt->size;
812     UtvideoContext *c = avctx->priv_data;
814     const uint8_t *plane_start[5];
815     int plane_size, max_slice_size = 0, slice_start, slice_end, slice_size;
818     ThreadFrame frame = { .f = data };
820     if ((ret = ff_thread_get_buffer(avctx, &frame, 0)) < 0) {
821         av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
/* header parsing below is thread-independent; let other frame threads go */
825     ff_thread_finish_setup(avctx);
827     /* parse plane structure to get frame flags and validate slice offsets */
828     bytestream2_init(&gb, buf, buf_size);
830     if (bytestream2_get_bytes_left(&gb) < c->frame_info_size) {
831         av_log(avctx, AV_LOG_ERROR, "Not enough data for frame information\n");
832         return AVERROR_INVALIDDATA;
834     c->frame_info = bytestream2_get_le32u(&gb);
/* slice count is stored minus one in bits 16-23 */
835     c->slices = ((c->frame_info >> 16) & 0xff) + 1;
/* plane-table layout with trailing 1024-byte Huffman tables
 * (presumably the 10-bit formats — the selecting branch is not
 * visible in this chunk) */
836     for (i = 0; i < c->planes; i++) {
837         plane_start[i] = gb.buffer;
838         if (bytestream2_get_bytes_left(&gb) < 1024 + 4 * c->slices) {
839             av_log(avctx, AV_LOG_ERROR, "Insufficient data for a plane\n");
840             return AVERROR_INVALIDDATA;
/* slice offsets must be non-negative, monotonic, and in-buffer */
844         for (j = 0; j < c->slices; j++) {
845             slice_end = bytestream2_get_le32u(&gb);
846             if (slice_end < 0 || slice_end < slice_start ||
847                 bytestream2_get_bytes_left(&gb) < slice_end) {
848                 av_log(avctx, AV_LOG_ERROR, "Incorrect slice size\n");
849                 return AVERROR_INVALIDDATA;
851             slice_size = slice_end - slice_start;
852             slice_start = slice_end;
/* remember the biggest slice so one scratch buffer fits them all */
853             max_slice_size = FFMAX(max_slice_size, slice_size);
855         plane_size = slice_end;
856         bytestream2_skipu(&gb, plane_size);
857         bytestream2_skipu(&gb, 1024);
859     plane_start[c->planes] = gb.buffer;
/* alternative layout with a leading 256-byte Huffman table per plane */
861     for (i = 0; i < c->planes; i++) {
862         plane_start[i] = gb.buffer;
863         if (bytestream2_get_bytes_left(&gb) < 256 + 4 * c->slices) {
864             av_log(avctx, AV_LOG_ERROR, "Insufficient data for a plane\n");
865             return AVERROR_INVALIDDATA;
867         bytestream2_skipu(&gb, 256);
870         for (j = 0; j < c->slices; j++) {
871             slice_end = bytestream2_get_le32u(&gb);
872             if (slice_end < 0 || slice_end < slice_start ||
873                 bytestream2_get_bytes_left(&gb) < slice_end) {
874                 av_log(avctx, AV_LOG_ERROR, "Incorrect slice size\n");
875                 return AVERROR_INVALIDDATA;
877             slice_size = slice_end - slice_start;
878             slice_start = slice_end;
879             max_slice_size = FFMAX(max_slice_size, slice_size);
881         plane_size = slice_end;
882         bytestream2_skipu(&gb, plane_size);
884     plane_start[c->planes] = gb.buffer;
885     if (bytestream2_get_bytes_left(&gb) < c->frame_info_size) {
886         av_log(avctx, AV_LOG_ERROR, "Not enough data for frame information\n");
887         return AVERROR_INVALIDDATA;
889     c->frame_info = bytestream2_get_le32u(&gb);
891     av_log(avctx, AV_LOG_DEBUG, "frame information flags %"PRIX32"\n",
/* prediction mode lives in bits 8-9 of the frame-info word */
894     c->frame_pred = (c->frame_info >> 8) & 3;
/* shared scratch buffer for the largest slice plus bitstream padding */
896     av_fast_malloc(&c->slice_bits, &c->slice_bits_size,
897                    max_slice_size + AV_INPUT_BUFFER_PADDING_SIZE);
899     if (!c->slice_bits) {
900         av_log(avctx, AV_LOG_ERROR, "Cannot allocate temporary buffer\n");
901         return AVERROR(ENOMEM);
/* per-format decode: each case decodes all planes, then undoes the
 * median/gradient prediction when the left-prediction fast path was
 * not used */
904     switch (c->avctx->pix_fmt) {
905     case AV_PIX_FMT_RGB24:
906     case AV_PIX_FMT_RGBA:
/* packed RGB(A): components interleaved, reordered via ff_ut_rgb_order */
907         for (i = 0; i < c->planes; i++) {
908             ret = decode_plane(c, i, frame.f->data[0] + ff_ut_rgb_order[i],
909                                c->planes, frame.f->linesize[0], avctx->width,
910                                avctx->height, plane_start[i],
911                                c->frame_pred == PRED_LEFT);
914             if (c->frame_pred == PRED_MEDIAN) {
915                 if (!c->interlaced) {
916                     restore_median_packed(frame.f->data[0] + ff_ut_rgb_order[i],
917                                           c->planes, frame.f->linesize[0], avctx->width,
918                                           avctx->height, c->slices, 0);
920                     restore_median_packed_il(frame.f->data[0] + ff_ut_rgb_order[i],
921                                              c->planes, frame.f->linesize[0],
922                                              avctx->width, avctx->height, c->slices,
925             } else if (c->frame_pred == PRED_GRADIENT) {
926                 if (!c->interlaced) {
927                     restore_gradient_packed(frame.f->data[0] + ff_ut_rgb_order[i],
928                                             c->planes, frame.f->linesize[0],
929                                             avctx->width, avctx->height,
932                     restore_gradient_packed_il(frame.f->data[0] + ff_ut_rgb_order[i],
933                                                c->planes, frame.f->linesize[0],
934                                                avctx->width, avctx->height,
/* finally undo the R/B-from-G decorrelation */
939         restore_rgb_planes(frame.f->data[0], c->planes, frame.f->linesize[0],
940                            avctx->width, avctx->height);
942     case AV_PIX_FMT_GBRAP10:
943     case AV_PIX_FMT_GBRP10:
/* 10-bit planar GBR(A); the Huffman table sits 1024 bytes before the
 * next plane's start */
944         for (i = 0; i < c->planes; i++) {
945             ret = decode_plane10(c, i, (uint16_t *)frame.f->data[i], 1,
946                                  frame.f->linesize[i] / 2, avctx->width,
947                                  avctx->height, plane_start[i],
948                                  plane_start[i + 1] - 1024,
949                                  c->frame_pred == PRED_LEFT);
953         restore_rgb_planes10(frame.f, avctx->width, avctx->height);
955     case AV_PIX_FMT_YUV420P:
/* chroma planes are subsampled in both dimensions */
956         for (i = 0; i < 3; i++) {
957             ret = decode_plane(c, i, frame.f->data[i], 1, frame.f->linesize[i],
958                                avctx->width >> !!i, avctx->height >> !!i,
959                                plane_start[i], c->frame_pred == PRED_LEFT);
962             if (c->frame_pred == PRED_MEDIAN) {
963                 if (!c->interlaced) {
964                     restore_median_planar(c, frame.f->data[i], frame.f->linesize[i],
965                                           avctx->width >> !!i, avctx->height >> !!i,
968                     restore_median_planar_il(c, frame.f->data[i], frame.f->linesize[i],
970                                              avctx->height >> !!i,
973             } else if (c->frame_pred == PRED_GRADIENT) {
974                 if (!c->interlaced) {
975                     restore_gradient_planar(c, frame.f->data[i], frame.f->linesize[i],
977                                             avctx->height >> !!i,
980                     restore_gradient_planar_il(c, frame.f->data[i], frame.f->linesize[i],
982                                                avctx->height >> !!i,
988     case AV_PIX_FMT_YUV422P:
/* chroma planes are subsampled horizontally only */
989         for (i = 0; i < 3; i++) {
990             ret = decode_plane(c, i, frame.f->data[i], 1, frame.f->linesize[i],
991                                avctx->width >> !!i, avctx->height,
992                                plane_start[i], c->frame_pred == PRED_LEFT);
995             if (c->frame_pred == PRED_MEDIAN) {
996                 if (!c->interlaced) {
997                     restore_median_planar(c, frame.f->data[i], frame.f->linesize[i],
998                                           avctx->width >> !!i, avctx->height,
1001                     restore_median_planar_il(c, frame.f->data[i], frame.f->linesize[i],
1002                                              avctx->width >> !!i, avctx->height,
1005             } else if (c->frame_pred == PRED_GRADIENT) {
1006                 if (!c->interlaced) {
1007                     restore_gradient_planar(c, frame.f->data[i], frame.f->linesize[i],
1008                                             avctx->width >> !!i, avctx->height,
1011                     restore_gradient_planar_il(c, frame.f->data[i], frame.f->linesize[i],
1012                                                avctx->width >> !!i, avctx->height,
1018     case AV_PIX_FMT_YUV444P:
/* full-resolution chroma */
1019         for (i = 0; i < 3; i++) {
1020             ret = decode_plane(c, i, frame.f->data[i], 1, frame.f->linesize[i],
1021                                avctx->width, avctx->height,
1022                                plane_start[i], c->frame_pred == PRED_LEFT);
1025             if (c->frame_pred == PRED_MEDIAN) {
1026                 if (!c->interlaced) {
1027                     restore_median_planar(c, frame.f->data[i], frame.f->linesize[i],
1028                                           avctx->width, avctx->height,
1031                     restore_median_planar_il(c, frame.f->data[i], frame.f->linesize[i],
1032                                              avctx->width, avctx->height,
1035             } else if (c->frame_pred == PRED_GRADIENT) {
1036                 if (!c->interlaced) {
1037                     restore_gradient_planar(c, frame.f->data[i], frame.f->linesize[i],
1038                                             avctx->width, avctx->height,
1041                     restore_gradient_planar_il(c, frame.f->data[i], frame.f->linesize[i],
1042                                                avctx->width, avctx->height,
1048     case AV_PIX_FMT_YUV422P10:
/* 10-bit 4:2:2; linesize halved for uint16_t strides */
1049         for (i = 0; i < 3; i++) {
1050             ret = decode_plane10(c, i, (uint16_t *)frame.f->data[i], 1, frame.f->linesize[i] / 2,
1051                                  avctx->width >> !!i, avctx->height,
1052                                  plane_start[i], plane_start[i + 1] - 1024, c->frame_pred == PRED_LEFT);
/* every Ut Video frame is an intra frame */
1059     frame.f->key_frame = 1;
1060     frame.f->pict_type = AV_PICTURE_TYPE_I;
1061     frame.f->interlaced_frame = !!c->interlaced;
1065     /* always report that the buffer was completely consumed */
/*
 * Initialise the decoder: set up the byte-swap and HuffYUV DSP helpers,
 * parse the codec extradata (encoder version, frame-info size, flags
 * carrying slice count / compression / interlacing), and map the
 * container FOURCC to the output pixel format and colour space.
 * NOTE(review): some lines are missing from this chunk (fall-through /
 * break statements between cases, the final return), so the exact
 * control flow between the visible lines is partly inferred.
 */
1069 static av_cold int decode_init(AVCodecContext *avctx)
1071     UtvideoContext * const c = avctx->priv_data;
1075     ff_bswapdsp_init(&c->bdsp);
1076     ff_huffyuvdsp_init(&c->hdspdec);
/* full 16-byte extradata: version, original format, info size, flags */
1078     if (avctx->extradata_size >= 16) {
1079         av_log(avctx, AV_LOG_DEBUG, "Encoder version %d.%d.%d.%d\n",
1080                avctx->extradata[3], avctx->extradata[2],
1081                avctx->extradata[1], avctx->extradata[0]);
1082         av_log(avctx, AV_LOG_DEBUG, "Original format %"PRIX32"\n",
1083                AV_RB32(avctx->extradata + 4));
1084         c->frame_info_size = AV_RL32(avctx->extradata + 8);
1085         c->flags = AV_RL32(avctx->extradata + 12);
/* only 4-byte frame info has samples; ask for others */
1087         if (c->frame_info_size != 4)
1088             avpriv_request_sample(avctx, "Frame info not 4 bytes");
1089         av_log(avctx, AV_LOG_DEBUG, "Encoding parameters %08"PRIX32"\n", c->flags);
/* slice count is stored minus one in the top byte of the flags */
1090         c->slices = (c->flags >> 24) + 1;
1091         c->compression = c->flags & 1;
1092         c->interlaced = c->flags & 0x800;
/* 8-byte extradata: version + original format only; use defaults */
1093     } else if (avctx->extradata_size == 8) {
1094         av_log(avctx, AV_LOG_DEBUG, "Encoder version %d.%d.%d.%d\n",
1095                avctx->extradata[3], avctx->extradata[2],
1096                avctx->extradata[1], avctx->extradata[0]);
1097         av_log(avctx, AV_LOG_DEBUG, "Original format %"PRIX32"\n",
1098                AV_RB32(avctx->extradata + 4));
1101         c->frame_info_size = 4;
1103         av_log(avctx, AV_LOG_ERROR,
1104                "Insufficient extradata size %d, should be at least 16\n",
1105                avctx->extradata_size);
1106         return AVERROR_INVALIDDATA;
/* scratch buffer is (re)allocated lazily per frame */
1109     c->slice_bits_size = 0;
/* map the Ut Video FOURCC to pixel format (and BT.470/BT.709 matrix
 * for the ULxn / ULHn YUV variants) */
1111     switch (avctx->codec_tag) {
1112     case MKTAG('U', 'L', 'R', 'G'):
1114         avctx->pix_fmt = AV_PIX_FMT_RGB24;
1116     case MKTAG('U', 'L', 'R', 'A'):
1118         avctx->pix_fmt = AV_PIX_FMT_RGBA;
1120     case MKTAG('U', 'L', 'Y', '0'):
1122         avctx->pix_fmt = AV_PIX_FMT_YUV420P;
1123         avctx->colorspace = AVCOL_SPC_BT470BG;
1125     case MKTAG('U', 'L', 'Y', '2'):
1127         avctx->pix_fmt = AV_PIX_FMT_YUV422P;
1128         avctx->colorspace = AVCOL_SPC_BT470BG;
1130     case MKTAG('U', 'L', 'Y', '4'):
1132         avctx->pix_fmt = AV_PIX_FMT_YUV444P;
1133         avctx->colorspace = AVCOL_SPC_BT470BG;
1135     case MKTAG('U', 'Q', 'Y', '2'):
1137         avctx->pix_fmt = AV_PIX_FMT_YUV422P10;
1139     case MKTAG('U', 'Q', 'R', 'G'):
1141         avctx->pix_fmt = AV_PIX_FMT_GBRP10;
1143     case MKTAG('U', 'Q', 'R', 'A'):
1145         avctx->pix_fmt = AV_PIX_FMT_GBRAP10;
1147     case MKTAG('U', 'L', 'H', '0'):
1149         avctx->pix_fmt = AV_PIX_FMT_YUV420P;
1150         avctx->colorspace = AVCOL_SPC_BT709;
1152     case MKTAG('U', 'L', 'H', '2'):
1154         avctx->pix_fmt = AV_PIX_FMT_YUV422P;
1155         avctx->colorspace = AVCOL_SPC_BT709;
1157     case MKTAG('U', 'L', 'H', '4'):
1159         avctx->pix_fmt = AV_PIX_FMT_YUV444P;
1160         avctx->colorspace = AVCOL_SPC_BT709;
1163         av_log(avctx, AV_LOG_ERROR, "Unknown Ut Video FOURCC provided (%08X)\n",
1165         return AVERROR_INVALIDDATA;
/* Free the per-decoder slice bitstream scratch buffer. */
1171 static av_cold int decode_end(AVCodecContext *avctx)
1173     UtvideoContext * const c = avctx->priv_data;
1175     av_freep(&c->slice_bits);
/* Codec registration: intra-only, supports direct rendering and
 * frame-level threading; init is thread-safe. */
1180 AVCodec ff_utvideo_decoder = {
1182     .long_name = NULL_IF_CONFIG_SMALL("Ut Video"),
1183     .type = AVMEDIA_TYPE_VIDEO,
1184     .id = AV_CODEC_ID_UTVIDEO,
1185     .priv_data_size = sizeof(UtvideoContext),
1186     .init = decode_init,
1187     .close = decode_end,
1188     .decode = decode_frame,
1189     .capabilities = AV_CODEC_CAP_DR1 | AV_CODEC_CAP_FRAME_THREADS,
1190     .caps_internal = FF_CODEC_CAP_INIT_THREADSAFE,