/* JPEG decoding for DNG */
AVCodecContext *avctx_mjpeg; // wrapper context for MJPEG
+ AVPacket *jpkt; // encoded JPEG tile
AVFrame *jpgframe; // decoded JPEG tile
int get_subimage;
int fill_order;
uint32_t res[4];
int is_thumbnail;
+ unsigned last_tag;
int is_bayer;
uint8_t pattern[4];
unsigned black_level;
unsigned white_level;
- const uint16_t *dng_lut; // Pointer to DNG linearization table
+ uint16_t dng_lut[65536];
uint32_t sub_ifd;
uint16_t cur_page;
int deinvert_buf_size;
uint8_t *yuv_line;
unsigned int yuv_line_size;
- uint8_t *fax_buffer;
- unsigned int fax_buffer_size;
int geotag_count;
TiffGeoTag *geotags;
#define RET_GEOKEY(TYPE, array, element)\
if (key >= TIFF_##TYPE##_KEY_ID_OFFSET &&\
- key - TIFF_##TYPE##_KEY_ID_OFFSET < FF_ARRAY_ELEMS(ff_tiff_##array##_name_type_map))\
- return ff_tiff_##array##_name_type_map[key - TIFF_##TYPE##_KEY_ID_OFFSET].element;
+ key - TIFF_##TYPE##_KEY_ID_OFFSET < FF_ARRAY_ELEMS(tiff_##array##_name_type_map))\
+ return tiff_##array##_name_type_map[key - TIFF_##TYPE##_KEY_ID_OFFSET].element;
static const char *get_geokey_name(int key)
{
#define RET_GEOKEY_VAL(TYPE, array)\
if (val >= TIFF_##TYPE##_OFFSET &&\
- val - TIFF_##TYPE##_OFFSET < FF_ARRAY_ELEMS(ff_tiff_##array##_codes))\
- return av_strdup(ff_tiff_##array##_codes[val - TIFF_##TYPE##_OFFSET]);
+ val - TIFF_##TYPE##_OFFSET < FF_ARRAY_ELEMS(tiff_##array##_codes))\
+ return av_strdup(tiff_##array##_codes[val - TIFF_##TYPE##_OFFSET]);
switch (key) {
case TIFF_GT_MODEL_TYPE_GEOKEY:
RET_GEOKEY_VAL(PRIME_MERIDIAN, prime_meridian);
break;
case TIFF_PROJECTED_CS_TYPE_GEOKEY:
- ap = av_strdup(search_keyval(ff_tiff_proj_cs_type_codes, FF_ARRAY_ELEMS(ff_tiff_proj_cs_type_codes), val));
+ ap = av_strdup(search_keyval(tiff_proj_cs_type_codes, FF_ARRAY_ELEMS(tiff_proj_cs_type_codes), val));
if(ap) return ap;
break;
case TIFF_PROJECTION_GEOKEY:
- ap = av_strdup(search_keyval(ff_tiff_projection_codes, FF_ARRAY_ELEMS(ff_tiff_projection_codes), val));
+ ap = av_strdup(search_keyval(tiff_projection_codes, FF_ARRAY_ELEMS(tiff_projection_codes), val));
if(ap) return ap;
break;
case TIFF_PROJ_COORD_TRANS_GEOKEY:
};
}
+/**
+ * Map stored raw sensor values into linear reference values (see: DNG Specification - Chapter 5)
+ */
+static uint16_t av_always_inline dng_process_color16(uint16_t value,
+ const uint16_t *lut,
+ uint16_t black_level,
+ float scale_factor)
+{
+ float value_norm;
+
+ // Lookup table lookup
+ if (lut)
+ value = lut[value];
+
+ // Black level subtraction
+ value = av_clip_uint16_c((unsigned)value - black_level);
+
+ // Color scaling
+ value_norm = (float)value * scale_factor;
+
+ value = av_clip_uint16_c(value_norm * 65535);
+
+ return value;
+}
+
+static uint16_t av_always_inline dng_process_color8(uint16_t value,
+ const uint16_t *lut,
+ uint16_t black_level,
+ float scale_factor)
+{
+ return dng_process_color16(value, lut, black_level, scale_factor) >> 8;
+}
+
static void av_always_inline dng_blit(TiffContext *s, uint8_t *dst, int dst_stride,
                                      const uint8_t *src, int src_stride, int width, int height,
                                      int is_single_comp, int is_u16)
{
    const float scale_factor = 1.0f / (s->white_level - s->black_level);
    int line, col;

    if (is_single_comp) {
        if (!is_u16)
            return; /* <= 8bpp unsupported */

        /* The source is twice as wide and half as tall as the destination:
           each input row carries two consecutive output rows back to back
           (the image is split vertically in the middle). */
        for (line = 0; line < height / 2; line++) {
            const uint16_t *src_u16 = (const uint16_t *)src;
            int half;

            /* Emit both output rows from this single input row; the source
               pointer simply keeps running across the wide input row. */
            for (half = 0; half < 2; half++) {
                uint16_t *dst_u16 = (uint16_t *)dst;

                for (col = 0; col < width; col++)
                    *dst_u16++ = dng_process_color16(*src_u16++, s->dng_lut,
                                                     s->black_level, scale_factor);

                dst += dst_stride * sizeof(uint16_t);
            }

            src += src_stride * sizeof(uint16_t);
        }
    } else if (is_u16) {
        /* Input and output are the same size and the MJpeg decoder has done
           per-component deinterleaving, so this is a plain per-pixel blit. */
        for (line = 0; line < height; line++) {
            const uint16_t *src_u16 = (const uint16_t *)src;
            uint16_t *dst_u16 = (uint16_t *)dst;

            for (col = 0; col < width; col++)
                *dst_u16++ = dng_process_color16(*src_u16++, s->dng_lut,
                                                 s->black_level, scale_factor);

            dst += dst_stride * sizeof(uint16_t);
            src += src_stride * sizeof(uint16_t);
        }
    } else {
        /* Same as above, 8-bit samples */
        for (line = 0; line < height; line++) {
            const uint8_t *src_u8 = src;
            uint8_t *dst_u8 = dst;

            for (col = 0; col < width; col++)
                *dst_u8++ = dng_process_color8(*src_u8++, s->dng_lut,
                                               s->black_level, scale_factor);

            dst += dst_stride;
            src += src_stride;
        }
    }
}
static void av_always_inline horizontal_fill(TiffContext *s,
unsigned int bpp, uint8_t* dst,
z_stream zstream = { 0 };
int zret;
- zstream.next_in = (uint8_t *)src;
+ zstream.next_in = src;
zstream.avail_in = size;
zstream.next_out = dst;
zstream.avail_out = *len;
static int tiff_unpack_fax(TiffContext *s, uint8_t *dst, int stride,
const uint8_t *src, int size, int width, int lines)
{
- int i, ret = 0;
int line;
- uint8_t *src2;
-
- av_fast_padded_malloc(&s->fax_buffer, &s->fax_buffer_size, size);
- src2 = s->fax_buffer;
-
- if (!src2) {
- av_log(s->avctx, AV_LOG_ERROR,
- "Error allocating temporary buffer\n");
- return AVERROR(ENOMEM);
- }
+ int ret;
- if (!s->fill_order) {
- memcpy(src2, src, size);
- } else {
- for (i = 0; i < size; i++)
- src2[i] = ff_reverse[src[i]];
+ if (s->fill_order) {
+ if ((ret = deinvert_buffer(s, src, size)) < 0)
+ return ret;
+ src = s->deinvert_buf;
}
- memset(src2 + size, 0, AV_INPUT_BUFFER_PADDING_SIZE);
- ret = ff_ccitt_unpack(s->avctx, src2, size, dst, lines, stride,
+ ret = ff_ccitt_unpack(s->avctx, src, size, dst, lines, stride,
s->compr, s->fax_opts);
if (s->bpp < 8 && s->avctx->pix_fmt == AV_PIX_FMT_PAL8)
for (line = 0; line < lines; line++) {
return ret;
}
-static int dng_decode_strip(AVCodecContext *avctx, AVFrame *frame);
+static int dng_decode_jpeg(AVCodecContext *avctx, AVFrame *frame,
+ int tile_byte_count, int dst_x, int dst_y, int w, int h)
+{
+ TiffContext *s = avctx->priv_data;
+ uint8_t *dst_data, *src_data;
+ uint32_t dst_offset; /* offset from dst buffer in pixels */
+ int is_single_comp, is_u16, pixel_size;
+ int ret;
+
+ if (tile_byte_count < 0 || tile_byte_count > bytestream2_get_bytes_left(&s->gb))
+ return AVERROR_INVALIDDATA;
+
+ /* Prepare a packet and send to the MJPEG decoder */
+ av_packet_unref(s->jpkt);
+ s->jpkt->data = (uint8_t*)s->gb.buffer;
+ s->jpkt->size = tile_byte_count;
+
+ if (s->is_bayer) {
+ MJpegDecodeContext *mjpegdecctx = s->avctx_mjpeg->priv_data;
+ /* We have to set this information here, there is no way to know if a given JPEG is a DNG-embedded
+ image or not from its own data (and we need that information when decoding it). */
+ mjpegdecctx->bayer = 1;
+ }
+
+ ret = avcodec_send_packet(s->avctx_mjpeg, s->jpkt);
+ if (ret < 0) {
+ av_log(avctx, AV_LOG_ERROR, "Error submitting a packet for decoding\n");
+ return ret;
+ }
+
+ ret = avcodec_receive_frame(s->avctx_mjpeg, s->jpgframe);
+ if (ret < 0) {
+ av_log(avctx, AV_LOG_ERROR, "JPEG decoding error: %s.\n", av_err2str(ret));
+
+ /* Normally skip, error if explode */
+ if (avctx->err_recognition & AV_EF_EXPLODE)
+ return AVERROR_INVALIDDATA;
+ else
+ return 0;
+ }
+
+ is_u16 = (s->bpp > 8);
+
+ /* Copy the outputted tile's pixels from 'jpgframe' to 'frame' (final buffer) */
+
+ if (s->jpgframe->width != s->avctx_mjpeg->width ||
+ s->jpgframe->height != s->avctx_mjpeg->height ||
+ s->jpgframe->format != s->avctx_mjpeg->pix_fmt)
+ return AVERROR_INVALIDDATA;
+
+ /* See dng_blit for explanation */
+ if (s->avctx_mjpeg->width == w * 2 &&
+ s->avctx_mjpeg->height == h / 2 &&
+ s->avctx_mjpeg->pix_fmt == AV_PIX_FMT_GRAY16LE) {
+ is_single_comp = 1;
+ } else if (s->avctx_mjpeg->width >= w &&
+ s->avctx_mjpeg->height >= h &&
+ s->avctx_mjpeg->pix_fmt == (is_u16 ? AV_PIX_FMT_GRAY16 : AV_PIX_FMT_GRAY8)
+ ) {
+ is_single_comp = 0;
+ } else
+ return AVERROR_INVALIDDATA;
+
+ pixel_size = (is_u16 ? sizeof(uint16_t) : sizeof(uint8_t));
+
+ if (is_single_comp && !is_u16) {
+ av_log(s->avctx, AV_LOG_ERROR, "DNGs with bpp <= 8 and 1 component are unsupported\n");
+ av_frame_unref(s->jpgframe);
+ return AVERROR_PATCHWELCOME;
+ }
+
+ dst_offset = dst_x + frame->linesize[0] * dst_y / pixel_size;
+ dst_data = frame->data[0] + dst_offset * pixel_size;
+ src_data = s->jpgframe->data[0];
+
+ dng_blit(s,
+ dst_data,
+ frame->linesize[0] / pixel_size,
+ src_data,
+ s->jpgframe->linesize[0] / pixel_size,
+ w,
+ h,
+ is_single_comp,
+ is_u16);
+
+ av_frame_unref(s->jpgframe);
+
+ return 0;
+}
+
+static int dng_decode_strip(AVCodecContext *avctx, AVFrame *frame)
+{
+ TiffContext *s = avctx->priv_data;
+
+ s->jpgframe->width = s->width;
+ s->jpgframe->height = s->height;
+
+ s->avctx_mjpeg->width = s->width;
+ s->avctx_mjpeg->height = s->height;
+
+ return dng_decode_jpeg(avctx, frame, s->stripsize, 0, 0, s->width, s->height);
+}
static int tiff_unpack_strip(TiffContext *s, AVFrame *p, uint8_t *dst, int stride,
const uint8_t *src, int size, int strip_start, int lines)
av_assert0(s->bpp == 24);
}
if (s->is_bayer) {
- width = (s->bpp * s->width + 7) >> 3;
+ av_assert0(width == (s->bpp * s->width + 7) >> 3);
}
if (p->format == AV_PIX_FMT_GRAY12) {
av_fast_padded_malloc(&s->yuv_line, &s->yuv_line_size, width);
return 0;
}
+ if (is_dng && stride == 0)
+ return AVERROR_INVALIDDATA;
+
for (line = 0; line < lines; line++) {
if (src - ssrc > size) {
av_log(s->avctx, AV_LOG_ERROR, "Source data overread\n");
/* Color processing for DNG images with uncompressed strips (non-tiled) */
if (is_dng) {
- int is_u16, pixel_size_bytes, pixel_size_bits;
+ int is_u16, pixel_size_bytes, pixel_size_bits, elements;
- is_u16 = (s->bpp > 8);
+ is_u16 = (s->bpp / s->bppcount > 8);
pixel_size_bits = (is_u16 ? 16 : 8);
pixel_size_bytes = (is_u16 ? sizeof(uint16_t) : sizeof(uint8_t));
+ elements = width / pixel_size_bytes * pixel_size_bits / s->bpp * s->bppcount; // need to account for [1, 16] bpp
+ av_assert0 (elements * pixel_size_bytes <= FFABS(stride));
dng_blit(s,
dst,
0, // no stride, only 1 line
dst,
0, // no stride, only 1 line
- width / pixel_size_bytes * pixel_size_bits / s->bpp * s->bppcount, // need to account for [1, 16] bpp
+ elements,
1,
0, // single-component variation is only preset in JPEG-encoded DNGs
is_u16);
return 0;
}
-/**
- * Map stored raw sensor values into linear reference values (see: DNG Specification - Chapter 5)
- */
-static uint16_t av_always_inline dng_process_color16(uint16_t value,
- const uint16_t *lut,
- uint16_t black_level,
- float scale_factor) {
- float value_norm;
-
- // Lookup table lookup
- if (lut)
- value = lut[value];
-
- // Black level subtraction
- value = av_clip_uint16_c((unsigned)value - black_level);
-
- // Color scaling
- value_norm = (float)value * scale_factor;
-
- value = av_clip_uint16_c(value_norm * 65535);
-
- return value;
-}
-
-static uint16_t av_always_inline dng_process_color8(uint16_t value,
- const uint16_t *lut,
- uint16_t black_level,
- float scale_factor) {
- return dng_process_color16(value, lut, black_level, scale_factor) >> 8;
-}
-
-static void dng_blit(TiffContext *s, uint8_t *dst, int dst_stride,
- const uint8_t *src, int src_stride,
- int width, int height, int is_single_comp, int is_u16)
-{
- int line, col;
- float scale_factor;
-
- scale_factor = 1.0f / (s->white_level - s->black_level);
-
- if (is_single_comp) {
- if (!is_u16)
- return; /* <= 8bpp unsupported */
-
- /* Image is double the width and half the height we need, each row comprises 2 rows of the output
- (split vertically in the middle). */
- for (line = 0; line < height / 2; line++) {
- uint16_t *dst_u16 = (uint16_t *)dst;
- uint16_t *src_u16 = (uint16_t *)src;
-
- /* Blit first half of input row row to initial row of output */
- for (col = 0; col < width; col++)
- *dst_u16++ = dng_process_color16(*src_u16++, s->dng_lut, s->black_level, scale_factor);
-
- /* Advance the destination pointer by a row (source pointer remains in the same place) */
- dst += dst_stride * sizeof(uint16_t);
- dst_u16 = (uint16_t *)dst;
-
- /* Blit second half of input row row to next row of output */
- for (col = 0; col < width; col++)
- *dst_u16++ = dng_process_color16(*src_u16++, s->dng_lut, s->black_level, scale_factor);
-
- dst += dst_stride * sizeof(uint16_t);
- src += src_stride * sizeof(uint16_t);
- }
- } else {
- /* Input and output image are the same size and the MJpeg decoder has done per-component
- deinterleaving, so blitting here is straightforward. */
- if (is_u16) {
- for (line = 0; line < height; line++) {
- uint16_t *dst_u16 = (uint16_t *)dst;
- uint16_t *src_u16 = (uint16_t *)src;
-
- for (col = 0; col < width; col++)
- *dst_u16++ = dng_process_color16(*src_u16++, s->dng_lut, s->black_level, scale_factor);
-
- dst += dst_stride * sizeof(uint16_t);
- src += src_stride * sizeof(uint16_t);
- }
- } else {
- for (line = 0; line < height; line++) {
- for (col = 0; col < width; col++)
- *dst++ = dng_process_color8(*src++, s->dng_lut, s->black_level, scale_factor);
-
- dst += dst_stride;
- src += src_stride;
- }
- }
- }
-}
-
-static int dng_decode_jpeg(AVCodecContext *avctx, AVFrame *frame,
- int tile_byte_count, int dst_x, int dst_y, int w, int h)
-{
- TiffContext *s = avctx->priv_data;
- AVPacket jpkt;
- uint8_t *dst_data, *src_data;
- uint32_t dst_offset; /* offset from dst buffer in pixels */
- int is_single_comp, is_u16, pixel_size;
- int ret;
-
- /* Prepare a packet and send to the MJPEG decoder */
- av_init_packet(&jpkt);
- jpkt.data = (uint8_t*)s->gb.buffer;
- jpkt.size = tile_byte_count;
-
- if (s->is_bayer) {
- MJpegDecodeContext *mjpegdecctx = s->avctx_mjpeg->priv_data;
- /* We have to set this information here, there is no way to know if a given JPEG is a DNG-embedded
- image or not from its own data (and we need that information when decoding it). */
- mjpegdecctx->bayer = 1;
- }
-
- ret = avcodec_send_packet(s->avctx_mjpeg, &jpkt);
- if (ret < 0) {
- av_log(avctx, AV_LOG_ERROR, "Error submitting a packet for decoding\n");
- return ret;
- }
-
- ret = avcodec_receive_frame(s->avctx_mjpeg, s->jpgframe);
- if (ret < 0) {
- av_log(avctx, AV_LOG_ERROR, "JPEG decoding error: %s.\n", av_err2str(ret));
-
- /* Normally skip, error if explode */
- if (avctx->err_recognition & AV_EF_EXPLODE)
- return AVERROR_INVALIDDATA;
- else
- return 0;
- }
-
- /* Copy the outputted tile's pixels from 'jpgframe' to 'frame' (final buffer) */
-
- /* See dng_blit for explanation */
- is_single_comp = (s->avctx_mjpeg->width == w * 2 && s->avctx_mjpeg->height == h / 2);
-
- is_u16 = (s->bpp > 8);
- pixel_size = (is_u16 ? sizeof(uint16_t) : sizeof(uint8_t));
-
- if (is_single_comp && !is_u16) {
- av_log(s->avctx, AV_LOG_ERROR, "DNGs with bpp <= 8 and 1 component are unsupported\n");
- av_frame_unref(s->jpgframe);
- return AVERROR_PATCHWELCOME;
- }
-
- dst_offset = dst_x + frame->linesize[0] * dst_y / pixel_size;
- dst_data = frame->data[0] + dst_offset * pixel_size;
- src_data = s->jpgframe->data[0];
-
- dng_blit(s,
- dst_data,
- frame->linesize[0] / pixel_size,
- src_data,
- s->jpgframe->linesize[0] / pixel_size,
- w,
- h,
- is_single_comp,
- is_u16);
-
- av_frame_unref(s->jpgframe);
-
- return 0;
-}
-
-static int dng_decode_tiles(AVCodecContext *avctx, AVFrame *frame, AVPacket *avpkt)
+static int dng_decode_tiles(AVCodecContext *avctx, AVFrame *frame,
+ const AVPacket *avpkt)
{
TiffContext *s = avctx->priv_data;
int tile_idx;
return avpkt->size;
}
-static int dng_decode_strip(AVCodecContext *avctx, AVFrame *frame)
-{
- TiffContext *s = avctx->priv_data;
-
- s->jpgframe->width = s->width;
- s->jpgframe->height = s->height;
-
- s->avctx_mjpeg->width = s->width;
- s->avctx_mjpeg->height = s->height;
-
- return dng_decode_jpeg(avctx, frame, s->stripsize, 0, 0, s->width, s->height);
-}
-
static int init_image(TiffContext *s, ThreadFrame *frame)
{
int ret;
case 10101:
case 10121:
case 10141:
- switch (AV_RL32(s->pattern)) {
- case 0x02010100:
- s->avctx->pix_fmt = s->le ? AV_PIX_FMT_BAYER_RGGB16LE : AV_PIX_FMT_BAYER_RGGB16BE;
- break;
- case 0x00010102:
- s->avctx->pix_fmt = s->le ? AV_PIX_FMT_BAYER_BGGR16LE : AV_PIX_FMT_BAYER_BGGR16BE;
- break;
- case 0x01000201:
- s->avctx->pix_fmt = s->le ? AV_PIX_FMT_BAYER_GBRG16LE : AV_PIX_FMT_BAYER_GBRG16BE;
- break;
- case 0x01020001:
- s->avctx->pix_fmt = s->le ? AV_PIX_FMT_BAYER_GRBG16LE : AV_PIX_FMT_BAYER_GRBG16BE;
- break;
- default:
- av_log(s->avctx, AV_LOG_ERROR, "Unsupported Bayer pattern: 0x%X\n",
- AV_RL32(s->pattern));
- return AVERROR_PATCHWELCOME;
- }
- /* Force endianness as mentioned in 'DNG Specification: Chapter 3: BitsPerSample'
- NOTE: The spec actually specifies big-endian, not sure why we need little-endian, but
- such images don't work otherwise. Examples are images produced by Zenmuse X7. */
- if ((s->tiff_type == TIFF_TYPE_DNG || s->tiff_type == TIFF_TYPE_CINEMADNG)
- && (s->bpp != 8 && s->bpp != 16 && s->bpp != 32)) {
- switch (s->avctx->pix_fmt) {
- case AV_PIX_FMT_BAYER_RGGB16BE: s->avctx->pix_fmt = AV_PIX_FMT_BAYER_RGGB16LE; break;
- case AV_PIX_FMT_BAYER_BGGR16BE: s->avctx->pix_fmt = AV_PIX_FMT_BAYER_BGGR16LE; break;
- case AV_PIX_FMT_BAYER_GBRG16BE: s->avctx->pix_fmt = AV_PIX_FMT_BAYER_GBRG16LE; break;
- case AV_PIX_FMT_BAYER_GRBG16BE: s->avctx->pix_fmt = AV_PIX_FMT_BAYER_GRBG16LE; break;
- }
- }
- break;
case 10161:
switch (AV_RL32(s->pattern)) {
case 0x02010100:
- s->avctx->pix_fmt = s->le ? AV_PIX_FMT_BAYER_RGGB16LE : AV_PIX_FMT_BAYER_RGGB16BE;
+ s->avctx->pix_fmt = AV_PIX_FMT_BAYER_RGGB16;
break;
case 0x00010102:
- s->avctx->pix_fmt = s->le ? AV_PIX_FMT_BAYER_BGGR16LE : AV_PIX_FMT_BAYER_BGGR16BE;
+ s->avctx->pix_fmt = AV_PIX_FMT_BAYER_BGGR16;
break;
case 0x01000201:
- s->avctx->pix_fmt = s->le ? AV_PIX_FMT_BAYER_GBRG16LE : AV_PIX_FMT_BAYER_GBRG16BE;
+ s->avctx->pix_fmt = AV_PIX_FMT_BAYER_GBRG16;
break;
case 0x01020001:
- s->avctx->pix_fmt = s->le ? AV_PIX_FMT_BAYER_GRBG16LE : AV_PIX_FMT_BAYER_GRBG16BE;
+ s->avctx->pix_fmt = AV_PIX_FMT_BAYER_GRBG16;
break;
default:
av_log(s->avctx, AV_LOG_ERROR, "Unsupported Bayer pattern: 0x%X\n",
static int tiff_decode_tag(TiffContext *s, AVFrame *frame)
{
+ AVFrameSideData *sd;
+ GetByteContext gb_temp;
unsigned tag, type, count, off, value = 0, value2 = 1; // value2 is a denominator so init. to 1
int i, start;
int pos;
if (ret < 0) {
goto end;
}
+ if (tag <= s->last_tag)
+ return AVERROR_INVALIDDATA;
+
+ // We ignore TIFF_STRIP_SIZE as it is sometimes in the logic but wrong order around TIFF_STRIP_OFFS
+ if (tag != TIFF_STRIP_SIZE)
+ s->last_tag = tag;
off = bytestream2_tell(&s->gb);
if (count == 1) {
case TIFF_RATIONAL:
value = ff_tget(&s->gb, TIFF_LONG, s->le);
value2 = ff_tget(&s->gb, TIFF_LONG, s->le);
+ if (!value2) {
+ av_log(s->avctx, AV_LOG_ERROR, "Invalid denominator in rational\n");
+ return AVERROR_INVALIDDATA;
+ }
+
break;
case TIFF_STRING:
if (count <= 4) {
s->height = value;
break;
case TIFF_BPP:
- if (count > 5U) {
+ if (count > 5 || count <= 0) {
av_log(s->avctx, AV_LOG_ERROR,
"This format is not supported (bpp=%d, %d components)\n",
value, count);
"Samples per pixel requires a single value, many provided\n");
return AVERROR_INVALIDDATA;
}
- if (value > 5U) {
+ if (value > 5 || value <= 0) {
av_log(s->avctx, AV_LOG_ERROR,
- "Samples per pixel %d is too large\n", value);
+ "Invalid samples per pixel %d\n", value);
return AVERROR_INVALIDDATA;
}
if (s->bppcount == 1)
else if (count > 1)
s->sub_ifd = ff_tget(&s->gb, TIFF_LONG, s->le); /** Only get the first SubIFD */
break;
- case DNG_LINEARIZATION_TABLE: {
- uint32_t lut_offset = value;
- uint32_t lut_size = count;
- uint32_t lut_wanted_size = 1 << s->bpp;
- if (lut_wanted_size != lut_size)
- av_log(s->avctx, AV_LOG_WARNING, "DNG contains LUT with invalid size (%"PRIu32"), disabling LUT\n", lut_size);
- else if (lut_offset >= bytestream2_size(&s->gb))
- av_log(s->avctx, AV_LOG_WARNING, "DNG contains LUT with invalid offset (%"PRIu32"), disabling LUT\n", lut_offset);
- else
- s->dng_lut = (uint16_t*)(s->gb.buffer + lut_offset);
+ case DNG_LINEARIZATION_TABLE:
+ if (count > FF_ARRAY_ELEMS(s->dng_lut))
+ return AVERROR_INVALIDDATA;
+ for (int i = 0; i < count; i++)
+ s->dng_lut[i] = ff_tget(&s->gb, type, s->le);
break;
- }
case DNG_BLACK_LEVEL:
if (count > 1) { /* Use the first value in the pattern (assume they're all the same) */
if (type == TIFF_RATIONAL) {
value = ff_tget(&s->gb, TIFF_LONG, s->le);
value2 = ff_tget(&s->gb, TIFF_LONG, s->le);
+ if (!value2) {
+ av_log(s->avctx, AV_LOG_ERROR, "Invalid black level denominator\n");
+ return AVERROR_INVALIDDATA;
+ }
s->black_level = value / value2;
} else
break;
case TIFF_GEO_KEY_DIRECTORY:
if (s->geotag_count) {
- avpriv_request_sample(s->avctx, "Multiple geo key directories\n");
+ avpriv_request_sample(s->avctx, "Multiple geo key directories");
return AVERROR_INVALIDDATA;
}
ADD_METADATA(1, "GeoTIFF_Version", NULL);
}
}
break;
+ case TIFF_ICC_PROFILE:
+ gb_temp = s->gb;
+ bytestream2_seek(&gb_temp, SEEK_SET, off);
+
+ if (bytestream2_get_bytes_left(&gb_temp) < count)
+ return AVERROR_INVALIDDATA;
+
+ sd = av_frame_new_side_data(frame, AV_FRAME_DATA_ICC_PROFILE, count);
+ if (!sd)
+ return AVERROR(ENOMEM);
+
+ bytestream2_get_bufferu(&gb_temp, sd->data, count);
+ break;
case TIFF_ARTIST:
ADD_METADATA(count, "artist", NULL);
break;
GetByteContext stripdata;
int retry_for_subifd, retry_for_page;
int is_dng;
+ int has_tile_bits, has_strip_bits;
bytestream2_init(&s->gb, avpkt->data, avpkt->size);
s->is_tiled = 0;
s->is_jpeg = 0;
s->cur_page = 0;
- s->dng_lut = NULL;
+ s->last_tag = 0;
+
+ for (i = 0; i < 65536; i++)
+ s->dng_lut[i] = i;
+
free_geotags(s);
// Reset these offsets so we can tell if they were set this frame
return AVERROR_INVALIDDATA;
}
if (off <= last_off) {
- avpriv_request_sample(s->avctx, "non increasing IFD offset\n");
+ avpriv_request_sample(s->avctx, "non increasing IFD offset");
return AVERROR_INVALIDDATA;
}
if (off >= UINT_MAX - 14 || avpkt->size < off + 14) {
}
if (is_dng) {
+ int bps;
+
+ if (s->bpp % s->bppcount)
+ return AVERROR_INVALIDDATA;
+ bps = s->bpp / s->bppcount;
+ if (bps < 8 || bps > 32)
+ return AVERROR_INVALIDDATA;
+
if (s->white_level == 0)
- s->white_level = (1 << s->bpp) - 1; /* Default value as per the spec */
+ s->white_level = (1LL << bps) - 1; /* Default value as per the spec */
if (s->white_level <= s->black_level) {
av_log(avctx, AV_LOG_ERROR, "BlackLevel (%"PRId32") must be less than WhiteLevel (%"PRId32")\n",
s->black_level, s->white_level);
return AVERROR_INVALIDDATA;
}
+
+ if (s->planar)
+ return AVERROR_PATCHWELCOME;
}
if (!s->is_tiled && !s->strippos && !s->stripoff) {
return AVERROR_INVALIDDATA;
}
+ has_tile_bits = s->is_tiled || s->tile_byte_counts_offset || s->tile_offsets_offset || s->tile_width || s->tile_length || s->tile_count;
+ has_strip_bits = s->strippos || s->strips || s->stripoff || s->rps || s->sot || s->sstype || s->stripsize || s->stripsizesoff;
+
+ if (has_tile_bits && has_strip_bits) {
+ int tiled_dng = s->is_tiled && is_dng;
+ av_log(avctx, tiled_dng ? AV_LOG_WARNING : AV_LOG_ERROR, "Tiled TIFF is not allowed to strip\n");
+ if (!tiled_dng)
+ return AVERROR_INVALIDDATA;
+ }
+
/* now we have the data and may start decoding */
if ((ret = init_image(s, &frame)) < 0)
return ret;
- if (!s->is_tiled) {
+ if (!s->is_tiled || has_strip_bits) {
if (s->strips == 1 && !s->stripsize) {
av_log(avctx, AV_LOG_WARNING, "Image data size missing\n");
s->stripsize = avpkt->size - s->stripoff;
/* Allocate JPEG frame */
s->jpgframe = av_frame_alloc();
- if (!s->jpgframe)
+ s->jpkt = av_packet_alloc();
+ if (!s->jpgframe || !s->jpkt)
return AVERROR(ENOMEM);
/* Prepare everything needed for JPEG decoding */
s->avctx_mjpeg->flags2 = avctx->flags2;
s->avctx_mjpeg->dct_algo = avctx->dct_algo;
s->avctx_mjpeg->idct_algo = avctx->idct_algo;
- ret = ff_codec_open2_recursive(s->avctx_mjpeg, codec, NULL);
+ ret = avcodec_open2(s->avctx_mjpeg, codec, NULL);
if (ret < 0) {
- av_frame_free(&s->jpgframe);
- avcodec_free_context(&s->avctx_mjpeg);
return ret;
}
s->deinvert_buf_size = 0;
av_freep(&s->yuv_line);
s->yuv_line_size = 0;
- av_freep(&s->fax_buffer);
- s->fax_buffer_size = 0;
av_frame_free(&s->jpgframe);
+ av_packet_free(&s->jpkt);
avcodec_free_context(&s->avctx_mjpeg);
return 0;
}
.version = LIBAVUTIL_VERSION_INT,
};
-AVCodec ff_tiff_decoder = {
+const AVCodec ff_tiff_decoder = {
.name = "tiff",
.long_name = NULL_IF_CONFIG_SMALL("TIFF image"),
.type = AVMEDIA_TYPE_VIDEO,
.init = tiff_init,
.close = tiff_end,
.decode = decode_frame,
- .init_thread_copy = ONLY_IF_THREADS_ENABLED(tiff_init),
.capabilities = AV_CODEC_CAP_DR1 | AV_CODEC_CAP_FRAME_THREADS,
+ .caps_internal = FF_CODEC_CAP_INIT_THREADSAFE | FF_CODEC_CAP_INIT_CLEANUP,
.priv_class = &tiff_decoder_class,
};