3 * Copyright (c) 2001 Fabrice Bellard
5 * This file is part of FFmpeg.
7 * FFmpeg is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU Lesser General Public
9 * License as published by the Free Software Foundation; either
10 * version 2.1 of the License, or (at your option) any later version.
12 * FFmpeg is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * Lesser General Public License for more details.
17 * You should have received a copy of the GNU Lesser General Public
18 * License along with FFmpeg; if not, write to the Free Software
19 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
33 #include "libavutil/avassert.h"
34 #include "libavutil/buffer.h"
35 #include "libavutil/common.h"
36 #include "libavutil/intreadwrite.h"
37 #include "libavutil/imgutils.h"
38 #include "libavutil/opt.h"
40 typedef struct RawVideoContext {
/* Decoder private state for the raw video decoder.
 * NOTE(review): partial view — other members referenced by the functions
 * below (tff, palette, bitstream_buf, is_mono, is_pal8, is_nut_mono,
 * is_nut_pal8, is_yuv2, ...) sit on lines outside the visible range. */
43 int frame_size; /* size of the frame in bytes */
45 int is_1_2_4_8_bpp; // 1, 2, 4 and 8 bpp in avi/mov, 1 and 8 bpp in nut
51 int is_lt_16bpp; // 16bpp pixfmt and bits_per_coded_sample < 16
54 BswapDSPContext bbdsp; // byte-swap helpers used for packed <16bpp input
56 unsigned int bitstream_buf_size; // allocated size of the swap buffer
59 static const AVOption options[]={
// "top": field order override; -1 = unset (auto), 0 = bottom field first,
// 1 = top field first.  Consumed in raw_decode() via context->tff.
60 {"top", "top field first", offsetof(RawVideoContext, tff), AV_OPT_TYPE_BOOL, {.i64 = -1}, -1, 1, AV_OPT_FLAG_DECODING_PARAM|AV_OPT_FLAG_VIDEO_PARAM},
/* AVClass exposing the "top" option on the decoder's private context. */
64 static const AVClass rawdec_class = {
65 .class_name = "rawdec",
// NOTE(review): .item_name/.option entries are on lines outside this view.
67 .version = LIBAVUTIL_VERSION_INT,
/**
 * One-time decoder setup: resolve avctx->pix_fmt from the container codec
 * tag and/or bits_per_coded_sample, pre-allocate a palette for paletted
 * formats, and record layout flags (mono / pal8 / nut variants / yuv2)
 * that raw_decode() consumes.
 */
70 static av_cold int raw_init_decoder(AVCodecContext *avctx)
72 RawVideoContext *context = avctx->priv_data;
73 const AVPixFmtDescriptor *desc;
75 ff_bswapdsp_init(&context->bbdsp);
// Pixel-format selection: mov-style tags use the bps->fmt mov table,
// 'WRAW' uses the avi table, other tags (except the 'BIT\0'-prefixed
// family) are looked up directly, and as a last resort the bps alone
// is mapped through the avi table.
77 if ( avctx->codec_tag == MKTAG('r','a','w',' ')
78 || avctx->codec_tag == MKTAG('N','O','1','6'))
79 avctx->pix_fmt = avpriv_find_pix_fmt(avpriv_pix_fmt_bps_mov,
80 avctx->bits_per_coded_sample);
81 else if (avctx->codec_tag == MKTAG('W', 'R', 'A', 'W'))
82 avctx->pix_fmt = avpriv_find_pix_fmt(avpriv_pix_fmt_bps_avi,
83 avctx->bits_per_coded_sample);
84 else if (avctx->codec_tag && (avctx->codec_tag & 0xFFFFFF) != MKTAG('B','I','T', 0))
85 avctx->pix_fmt = avpriv_find_pix_fmt(ff_raw_pix_fmt_tags, avctx->codec_tag);
86 else if (avctx->pix_fmt == AV_PIX_FMT_NONE && avctx->bits_per_coded_sample)
87 avctx->pix_fmt = avpriv_find_pix_fmt(avpriv_pix_fmt_bps_avi,
88 avctx->bits_per_coded_sample);
90 desc = av_pix_fmt_desc_get(avctx->pix_fmt);
// Fail early when no usable pixel format could be determined.
92 av_log(avctx, AV_LOG_ERROR, "Invalid pixel format.\n");
93 return AVERROR(EINVAL);
// Paletted output: allocate a zeroed palette; 1 bpp input additionally
// gets its first 32-bit entry set to 0xFFFFFFFF.
96 if (desc->flags & AV_PIX_FMT_FLAG_PAL) {
97 context->palette = av_buffer_alloc(AVPALETTE_SIZE);
98 if (!context->palette)
99 return AVERROR(ENOMEM);
100 memset(context->palette->data, 0, AVPALETTE_SIZE);
101 if (avctx->bits_per_coded_sample == 1)
102 memset(context->palette->data, 0xff, 4);
// Bottom-up sources: extradata "BottomUp" marker, 'cyuv', biCompression 3,
// or 'WRAW'.  NOTE(review): the flag assignment that this condition guards
// is on a line outside the visible range.
105 if ((avctx->extradata_size >= 9 &&
106 !memcmp(avctx->extradata + avctx->extradata_size - 9, "BottomUp", 9)) ||
107 avctx->codec_tag == MKTAG('c','y','u','v') ||
108 avctx->codec_tag == MKTAG(3, 0, 0, 0) ||
109 avctx->codec_tag == MKTAG('W','R','A','W'))
// Classify the pixel format for the repacking paths in raw_decode().
112 if (avctx->pix_fmt == AV_PIX_FMT_MONOWHITE ||
113 avctx->pix_fmt == AV_PIX_FMT_MONOBLACK)
114 context->is_mono = 1;
115 else if (avctx->pix_fmt == AV_PIX_FMT_PAL8)
116 context->is_pal8 = 1;
// NUT-specific tags: packed mono ('B1W0'/'B0W1') and pal8 with the palette
// appended after the image data ('PAL\10').
118 if (avctx->codec_tag == MKTAG('B','1','W','0') ||
119 avctx->codec_tag == MKTAG('B','0','W','1'))
120 context->is_nut_mono = 1;
121 else if (avctx->codec_tag == MKTAG('P','A','L',8))
122 context->is_nut_pal8 = 1;
// 'yuv2' YUYV422 stores chroma bytes offset by 0x80; raw_decode() fixes
// them up by XORing 0x80 into every odd byte.
124 if (avctx->codec_tag == AV_RL32("yuv2") &&
125 avctx->pix_fmt == AV_PIX_FMT_YUYV422)
126 context->is_yuv2 = 1;
/* Present a vertically flipped view of the frame without copying pixels:
 * point data[0] at the last row and negate the stride. */
131 static void flip(AVCodecContext *avctx, AVFrame *frame)
133 frame->data[0] += frame->linesize[0] * (avctx->height - 1);
134 frame->linesize[0] *= -1;
138  * Scale sample to 16-bit resolution
// SCALE16 shifts an n-bit sample to the top of 16 bits and replicates its
// high bits into the freed low bits (standard bit-depth expansion so that
// full-scale input maps to full-scale 16-bit output).
140 #define SCALE16(x, bits) (((x) << (16 - (bits))) | ((x) >> (2 * (bits) - 16)))
143  * Scale buffer to 16 bits per coded sample resolution
// MKSCALE16 generates scale16be/scale16le below: for word-aligned input
// each 16-bit word is scaled in place; for packed input ("BIT\0"-tagged
// streams) width*height samples are extracted with a bit reader and each
// written out as a full 16-bit word.  r16/w16 select the endianness of
// the reads/writes.
145 #define MKSCALE16(name, r16, w16) \
146 static void name(AVCodecContext *avctx, uint8_t * dst, const uint8_t *buf, int buf_size, int packed) \
150 for (i = 0; i + 1 < buf_size; i += 2) \
151 w16(dst + i, SCALE16(r16(buf + i), avctx->bits_per_coded_sample)); \
154 init_get_bits(&gb, buf, buf_size * 8); \
155 for (i = 0; i < avctx->width * avctx->height; i++) { \
156 int sample = get_bits(&gb, avctx->bits_per_coded_sample); \
157 w16(dst + i*2, SCALE16(sample, avctx->bits_per_coded_sample)); \
162 MKSCALE16(scale16be, AV_RB16, AV_WB16)
163 MKSCALE16(scale16le, AV_RL16, AV_WL16)
/**
 * Decode one packet of raw pixels into an AVFrame.  Zero-copy (a ref on
 * the packet buffer) when the data already matches the target layout;
 * otherwise the data is copied and repacked (1/2/4/8 bpp -> 8 bpp,
 * 9..15 bpp -> 16 bpp).  Also handles palette attachment, linesize
 * re-alignment quirks of various writers, plane swapping for YV12-family
 * fourccs, and interlacing metadata.
 */
165 static int raw_decode(AVCodecContext *avctx, void *data, int *got_frame,
168 const AVPixFmtDescriptor *desc;
169 RawVideoContext *context = avctx->priv_data;
170 const uint8_t *buf = avpkt->data;
171 int buf_size = avpkt->size;
172 int linesize_align = 4;
177 AVFrame *frame = data;
// Reject unset/invalid dimensions before any size arithmetic.
179 if (avctx->width <= 0) {
180 av_log(avctx, AV_LOG_ERROR, "width is not set\n");
181 return AVERROR_INVALIDDATA;
183 if (avctx->height <= 0) {
184 av_log(avctx, AV_LOG_ERROR, "height is not set\n");
185 return AVERROR_INVALIDDATA;
// Input bytes per row: fixed for the NUT mono/pal8 layouts, otherwise
// derived from the packet size.
188 if (context->is_nut_mono)
189 stride = avctx->width / 8 + (avctx->width & 7 ? 1 : 0);
190 else if (context->is_nut_pal8)
191 stride = avctx->width;
193 stride = avpkt->size / avctx->height;
195 av_log(avctx, AV_LOG_DEBUG, "PACKET SIZE: %d, STRIDE: %d\n", avpkt->size, stride);
197 if (stride == 0 || avpkt->size < stride * avctx->height) {
198 av_log(avctx, AV_LOG_ERROR, "Packet too small (%d)\n", avpkt->size);
199 return AVERROR_INVALIDDATA;
202 desc = av_pix_fmt_desc_get(avctx->pix_fmt);
// Low-bit-depth mono/pal8 input is unpacked to one byte per pixel; size
// the destination for a 16-(byte|pixel)-aligned width.
204 if ((avctx->bits_per_coded_sample == 8 || avctx->bits_per_coded_sample == 4 ||
205 avctx->bits_per_coded_sample == 2 || avctx->bits_per_coded_sample == 1 ||
206 (avctx->bits_per_coded_sample == 0 && (context->is_nut_pal8 || context->is_mono)) ) &&
207 (context->is_mono || context->is_pal8) &&
208 (!avctx->codec_tag || avctx->codec_tag == MKTAG('r','a','w',' ') ||
209 context->is_nut_mono || context->is_nut_pal8)) {
210 context->is_1_2_4_8_bpp = 1;
211 if (context->is_mono) {
212 int row_bytes = avctx->width / 8 + (avctx->width & 7 ? 1 : 0);
213 context->frame_size = av_image_get_buffer_size(avctx->pix_fmt,
214 FFALIGN(row_bytes, 16) * 8,
217 context->frame_size = av_image_get_buffer_size(avctx->pix_fmt,
218 FFALIGN(avctx->width, 16),
// 9..15 bpp samples stored in a 16 bpp pixfmt need per-sample scaling.
221 context->is_lt_16bpp = av_get_bits_per_pixel(desc) == 16 && avctx->bits_per_coded_sample > 8 && avctx->bits_per_coded_sample < 16;
222 context->frame_size = av_image_get_buffer_size(avctx->pix_fmt, avctx->width,
225 if (context->frame_size < 0)
226 return context->frame_size;
// A private copy is needed when the packet is not refcounted or the data
// must be repacked/fixed up.
228 need_copy = !avpkt->buf || context->is_1_2_4_8_bpp || context->is_yuv2 || context->is_lt_16bpp;
230 frame->pict_type = AV_PICTURE_TYPE_I;
231 frame->key_frame = 1;
233 res = ff_decode_frame_props(avctx, frame);
237 frame->pkt_pos = avctx->internal->last_pkt_props->pos;
238 frame->pkt_duration = avctx->internal->last_pkt_props->duration;
// Honour the user-supplied "top" option for interlacing metadata.
240 if (context->tff >= 0) {
241 frame->interlaced_frame = 1;
242 frame->top_field_first = context->tff;
245 if ((res = av_image_check_size(avctx->width, avctx->height, 0, avctx)) < 0)
// Copy path allocates a fresh buffer; zero-copy path just refs the
// packet's buffer.
249 frame->buf[0] = av_buffer_alloc(FFMAX(context->frame_size, buf_size));
251 frame->buf[0] = av_buffer_ref(avpkt->buf);
253 return AVERROR(ENOMEM);
255 // 1, 2, 4 and 8 bpp in avi/mov, 1 and 8 bpp in nut
256 if (context->is_1_2_4_8_bpp) {
257 int i, j, row_pix = 0;
258 uint8_t *dst = frame->buf[0]->data;
259 buf_size = context->frame_size - (context->is_pal8 ? AVPALETTE_SIZE : 0);
// Expand to 8 bpp.  At each row end the input index skips to the next
// stride boundary and the output index realigns to the padded width.
260 if (avctx->bits_per_coded_sample == 8 || context->is_nut_pal8 || context->is_mono) {
261 int pix_per_byte = context->is_mono ? 8 : 1;
262 for (i = 0, j = 0; j < buf_size && i<avpkt->size; i++, j++) {
264 row_pix += pix_per_byte;
265 if (row_pix >= avctx->width) {
266 i += stride - (i % stride) - 1;
267 j += 16 - (j % 16) - 1;
271 } else if (avctx->bits_per_coded_sample == 4) {
272 for (i = 0, j = 0; 2 * j + 1 < buf_size && i<avpkt->size; i++, j++) {
273 dst[2 * j + 0] = buf[i] >> 4;
274 dst[2 * j + 1] = buf[i] & 15;
276 if (row_pix >= avctx->width) {
277 i += stride - (i % stride) - 1;
278 j += 8 - (j % 8) - 1;
282 } else if (avctx->bits_per_coded_sample == 2) {
283 for (i = 0, j = 0; 4 * j + 3 < buf_size && i<avpkt->size; i++, j++) {
284 dst[4 * j + 0] = buf[i] >> 6;
285 dst[4 * j + 1] = buf[i] >> 4 & 3;
286 dst[4 * j + 2] = buf[i] >> 2 & 3;
287 dst[4 * j + 3] = buf[i] & 3;
289 if (row_pix >= avctx->width) {
290 i += stride - (i % stride) - 1;
291 j += 4 - (j % 4) - 1;
296 av_assert0(avctx->bits_per_coded_sample == 1);
297 for (i = 0, j = 0; 8 * j + 7 < buf_size && i<avpkt->size; i++, j++) {
298 dst[8 * j + 0] = buf[i] >> 7;
299 dst[8 * j + 1] = buf[i] >> 6 & 1;
300 dst[8 * j + 2] = buf[i] >> 5 & 1;
301 dst[8 * j + 3] = buf[i] >> 4 & 1;
302 dst[8 * j + 4] = buf[i] >> 3 & 1;
303 dst[8 * j + 5] = buf[i] >> 2 & 1;
304 dst[8 * j + 6] = buf[i] >> 1 & 1;
305 dst[8 * j + 7] = buf[i] & 1;
307 if (row_pix >= avctx->width) {
308 i += stride - (i % stride) - 1;
309 j += 2 - (j % 2) - 1;
// <16 bpp samples in a 16 bpp pixfmt: the codec tag's 4th byte requests a
// byte swap (16-bit for packed "BIT\0" data, 32-bit otherwise), then every
// sample is scaled up to full 16-bit range via scale16be/scale16le.
316 } else if (context->is_lt_16bpp) {
317 uint8_t *dst = frame->buf[0]->data;
318 int packed = (avctx->codec_tag & 0xFFFFFF) == MKTAG('B','I','T', 0);
319 int swap = avctx->codec_tag >> 24;
321 if (packed && swap) {
322 av_fast_padded_malloc(&context->bitstream_buf, &context->bitstream_buf_size, buf_size);
323 if (!context->bitstream_buf)
324 return AVERROR(ENOMEM);
326 context->bbdsp.bswap16_buf(context->bitstream_buf, (const uint16_t*)buf, buf_size / 2);
328 context->bbdsp.bswap_buf(context->bitstream_buf, (const uint32_t*)buf, buf_size / 4);
330 return AVERROR_INVALIDDATA;
331 buf = context->bitstream_buf;
334 if (desc->flags & AV_PIX_FMT_FLAG_BE)
335 scale16be(avctx, dst, buf, buf_size, packed);
337 scale16le(avctx, dst, buf, buf_size, packed);
340 } else if (need_copy) {
341 memcpy(frame->buf[0]->data, buf, buf_size);
342 buf = frame->buf[0]->data;
// 'AV1x'/'AVup' packets carry the image at the end of the packet.
345 if (avctx->codec_tag == MKTAG('A', 'V', '1', 'x') ||
346 avctx->codec_tag == MKTAG('A', 'V', 'u', 'p'))
347 buf += buf_size - context->frame_size;
349 len = context->frame_size - (avctx->pix_fmt==AV_PIX_FMT_PAL8 ? AVPALETTE_SIZE : 0);
350 if (buf_size < len && ((avctx->codec_tag & 0xFFFFFF) != MKTAG('B','I','T', 0) || !need_copy)) {
351 av_log(avctx, AV_LOG_ERROR, "Invalid buffer size, packet size %d < expected frame_size %d\n", buf_size, len);
352 av_buffer_unref(&frame->buf[0]);
353 return AVERROR(EINVAL);
// Point the frame's data/linesize at the (possibly repacked) buffer.
356 if ((res = av_image_fill_arrays(frame->data, frame->linesize,
358 avctx->width, avctx->height, 1)) < 0) {
359 av_buffer_unref(&frame->buf[0]);
// Palette: prefer packet side data (ff_copy_palette); for NUT pal8, fall
// back to the palette bytes appended after the image in the packet.
363 if (avctx->pix_fmt == AV_PIX_FMT_PAL8) {
366 if (!context->palette)
367 context->palette = av_buffer_alloc(AVPALETTE_SIZE);
368 if (!context->palette) {
369 av_buffer_unref(&frame->buf[0]);
370 return AVERROR(ENOMEM);
372 ret = av_buffer_make_writable(&context->palette);
374 av_buffer_unref(&frame->buf[0]);
378 if (ff_copy_palette(context->palette->data, avpkt, avctx)) {
379 frame->palette_has_changed = 1;
380 } else if (context->is_nut_pal8) {
381 int vid_size = avctx->width * avctx->height;
382 int pal_size = avpkt->size - vid_size;
384 if (avpkt->size > vid_size && pal_size <= AVPALETTE_SIZE) {
385 const uint8_t *pal = avpkt->data + vid_size;
386 memcpy(context->palette->data, pal, pal_size);
387 frame->palette_has_changed = 1;
// Some writers pad rows: widen linesize only when the packet is large
// enough to prove the padding is actually present.
392 if ((avctx->pix_fmt==AV_PIX_FMT_RGB24 ||
393 avctx->pix_fmt==AV_PIX_FMT_BGR24 ||
394 avctx->pix_fmt==AV_PIX_FMT_GRAY8 ||
395 avctx->pix_fmt==AV_PIX_FMT_RGB555LE ||
396 avctx->pix_fmt==AV_PIX_FMT_RGB555BE ||
397 avctx->pix_fmt==AV_PIX_FMT_RGB565LE ||
398 avctx->pix_fmt==AV_PIX_FMT_MONOWHITE ||
399 avctx->pix_fmt==AV_PIX_FMT_MONOBLACK ||
400 avctx->pix_fmt==AV_PIX_FMT_PAL8) &&
401 FFALIGN(frame->linesize[0], linesize_align) * avctx->height <= buf_size)
402 frame->linesize[0] = FFALIGN(frame->linesize[0], linesize_align);
// Same padding fix-up for tagged NV12, including moving the chroma plane
// pointer past the padded luma plane.
404 if (avctx->pix_fmt == AV_PIX_FMT_NV12 && avctx->codec_tag == MKTAG('N', 'V', '1', '2') &&
405 FFALIGN(frame->linesize[0], linesize_align) * avctx->height +
406 FFALIGN(frame->linesize[1], linesize_align) * ((avctx->height + 1) / 2) <= buf_size) {
407 int la0 = FFALIGN(frame->linesize[0], linesize_align);
408 frame->data[1] += (la0 - frame->linesize[0]) * avctx->height;
409 frame->linesize[0] = la0;
410 frame->linesize[1] = FFALIGN(frame->linesize[1], linesize_align);
// Packet had no room for an in-buffer palette: attach the cached one as a
// second, refcounted plane buffer.
413 if (avctx->pix_fmt == AV_PIX_FMT_PAL8 && buf_size < context->frame_size) {
414 frame->buf[1] = av_buffer_ref(context->palette);
415 if (!frame->buf[1]) {
416 av_buffer_unref(&frame->buf[0]);
417 return AVERROR(ENOMEM);
419 frame->data[1] = frame->buf[1]->data;
422 if (avctx->pix_fmt == AV_PIX_FMT_BGR24 &&
423 ((frame->linesize[0] + 3) & ~3) * avctx->height <= buf_size)
424 frame->linesize[0] = (frame->linesize[0] + 3) & ~3;
// YV12-family fourccs store the chroma planes in V,U order: swap them.
429 if (avctx->codec_tag == MKTAG('Y', 'V', '1', '2') ||
430 avctx->codec_tag == MKTAG('Y', 'V', '1', '6') ||
431 avctx->codec_tag == MKTAG('Y', 'V', '2', '4') ||
432 avctx->codec_tag == MKTAG('Y', 'V', 'U', '9'))
433 FFSWAP(uint8_t *, frame->data[1], frame->data[2]);
// I420 packets sized for (w+1)x(h+1) planes (odd dimensions rounded up):
// re-derive the chroma plane pointers from the actual plane sizes.
435 if (avctx->codec_tag == AV_RL32("I420") && (avctx->width+1)*(avctx->height+1) * 3/2 == buf_size) {
436 frame->data[1] = frame->data[1] + (avctx->width+1)*(avctx->height+1) -avctx->width*avctx->height;
437 frame->data[2] = frame->data[2] + ((avctx->width+1)*(avctx->height+1) -avctx->width*avctx->height)*5/4;
// 'yuv2' YUYV422: chroma (odd) bytes are stored offset by 0x80 — flip the
// top bit of each back.
440 if (avctx->codec_tag == AV_RL32("yuv2") &&
441 avctx->pix_fmt == AV_PIX_FMT_YUYV422) {
443 uint8_t *line = frame->data[0];
444 for (y = 0; y < avctx->height; y++) {
445 for (x = 0; x < avctx->width; x++)
446 line[2 * x + 1] ^= 0x80;
447 line += frame->linesize[0];
// 'b64a': rotate each 64-bit pixel left by 16 bits (presumably moving the
// leading alpha component to the end to match RGBA64BE).
451 if (avctx->codec_tag == AV_RL32("b64a") &&
452 avctx->pix_fmt == AV_PIX_FMT_RGBA64BE) {
453 uint8_t *dst = frame->data[0];
456 for (y = 0; y < avctx->height; y++) {
457 for (x = 0; x >> 3 < avctx->width; x += 8) {
458 v = AV_RB64(&dst[x]);
459 AV_WB64(&dst[x], v << 16 | v >> 48);
461 dst += frame->linesize[0];
465 if (avctx->field_order > AV_FIELD_PROGRESSIVE) { /* we have interlaced material flagged in container */
466 frame->interlaced_frame = 1;
467 if (avctx->field_order == AV_FIELD_TT || avctx->field_order == AV_FIELD_TB)
468 frame->top_field_first = 1;
/* Free per-stream allocations: the cached palette buffer and the
 * byte-swap scratch buffer. */
475 static av_cold int raw_close_decoder(AVCodecContext *avctx)
477 RawVideoContext *context = avctx->priv_data;
479 av_buffer_unref(&context->palette);
480 av_freep(&context->bitstream_buf);
/* Public codec registration for the raw video decoder.
 * NOTE(review): the .name entry sits on a line outside the visible range. */
484 const AVCodec ff_rawvideo_decoder = {
486 .long_name = NULL_IF_CONFIG_SMALL("raw video"),
487 .type = AVMEDIA_TYPE_VIDEO,
488 .id = AV_CODEC_ID_RAWVIDEO,
489 .priv_data_size = sizeof(RawVideoContext),
490 .init = raw_init_decoder,
491 .close = raw_close_decoder,
492 .decode = raw_decode,
493 .priv_class = &rawdec_class,
// PARAM_CHANGE: the decoder tolerates mid-stream width/height/format
// parameter-change side data.
494 .capabilities = AV_CODEC_CAP_PARAM_CHANGE,