uint8_t pattern[4];
unsigned black_level;
unsigned white_level;
- const uint16_t *dng_lut; // Pointer to DNG linearization table
+ uint16_t dng_lut[65536];
uint32_t sub_ifd;
uint16_t cur_page;
}
static void av_always_inline dng_blit(TiffContext *s, uint8_t *dst, int dst_stride,
- const uint8_t *src, int src_stride, int width, int height, int is_u16);
+ const uint8_t *src, int src_stride, int width, int height,
+ int is_single_comp, int is_u16);
static void av_always_inline horizontal_fill(TiffContext *s,
unsigned int bpp, uint8_t* dst,
dst[(width+offset)*2+0] = (usePtr ? src[width] : c) >> 4;
}
break;
- case 12: {
- uint16_t *dst16 = (uint16_t *)dst;
- GetBitContext gb;
- init_get_bits8(&gb, src, width);
- for (int i = 0; i < s->width; i++) {
- dst16[i] = get_bits(&gb, 12) << 4;
- }
- }
+ case 10:
+ case 12:
+ case 14: {
+ uint16_t *dst16 = (uint16_t *)dst;
+ int is_dng = (s->tiff_type == TIFF_TYPE_DNG || s->tiff_type == TIFF_TYPE_CINEMADNG);
+ uint8_t shift = is_dng ? 0 : 16 - bpp;
+ GetBitContext gb;
+
+ init_get_bits8(&gb, src, width);
+ for (int i = 0; i < s->width; i++) {
+ dst16[i] = get_bits(&gb, bpp) << shift;
+ }
+ }
break;
default:
if (usePtr) {
return ret;
}
+static int dng_decode_strip(AVCodecContext *avctx, AVFrame *frame);
+
static int tiff_unpack_strip(TiffContext *s, AVFrame *p, uint8_t *dst, int stride,
const uint8_t *src, int size, int strip_start, int lines)
{
is_dng = (s->tiff_type == TIFF_TYPE_DNG || s->tiff_type == TIFF_TYPE_CINEMADNG);
+ /* Decode JPEG-encoded DNGs with strips */
+ if (s->compr == TIFF_NEWJPEG && is_dng) {
+ if (s->strips > 1) {
+ av_log(s->avctx, AV_LOG_ERROR, "More than one DNG JPEG strips unsupported\n");
+ return AVERROR_PATCHWELCOME;
+ }
+ if ((ret = dng_decode_strip(s->avctx, p)) < 0)
+ return ret;
+ return 0;
+ }
+
for (line = 0; line < lines; line++) {
if (src - ssrc > size) {
av_log(s->avctx, AV_LOG_ERROR, "Source data overread\n");
0, // no stride, only 1 line
width / pixel_size_bytes * pixel_size_bits / s->bpp * s->bppcount, // need to account for [1, 16] bpp
1,
+ 0, // single-component variation is only present in JPEG-encoded DNGs
is_u16);
}
return 0;
}
-static float av_always_inline linear_to_srgb(float value) {
- if (value <= 0.0031308f)
- return value * 12.92f;
- else
- return powf(value * 1.055f, 1.0f / 2.4f) - 0.055f;
-}
-
/**
* Map stored raw sensor values into linear reference values (see: DNG Specification - Chapter 5)
- * Then convert to sRGB color space.
*/
static uint16_t av_always_inline dng_process_color16(uint16_t value,
const uint16_t *lut,
// Color scaling
value_norm = (float)value * scale_factor;
- // Color space conversion (sRGB)
- value = av_clip_uint16_c((uint16_t)(linear_to_srgb(value_norm) * 0xFFFF));
+ value = av_clip_uint16_c(value_norm * 65535);
return value;
}
static void dng_blit(TiffContext *s, uint8_t *dst, int dst_stride,
const uint8_t *src, int src_stride,
- int width, int height, int is_u16)
+ int width, int height, int is_single_comp, int is_u16)
{
int line, col;
float scale_factor;
scale_factor = 1.0f / (s->white_level - s->black_level);
- if (is_u16) {
- for (line = 0; line < height; line++) {
+ if (is_single_comp) {
+ if (!is_u16)
+ return; /* <= 8bpp unsupported */
+
+ /* Image is double the width and half the height we need, each row comprises 2 rows of the output
+ (split vertically in the middle). */
+ for (line = 0; line < height / 2; line++) {
uint16_t *dst_u16 = (uint16_t *)dst;
uint16_t *src_u16 = (uint16_t *)src;
+ /* Blit first half of input row to initial row of output */
+ for (col = 0; col < width; col++)
+ *dst_u16++ = dng_process_color16(*src_u16++, s->dng_lut, s->black_level, scale_factor);
+
+ /* Advance the destination pointer by a row (source pointer remains in the same place) */
+ dst += dst_stride * sizeof(uint16_t);
+ dst_u16 = (uint16_t *)dst;
+
+ /* Blit second half of input row to next row of output */
for (col = 0; col < width; col++)
*dst_u16++ = dng_process_color16(*src_u16++, s->dng_lut, s->black_level, scale_factor);
src += src_stride * sizeof(uint16_t);
}
} else {
- for (line = 0; line < height; line++) {
- for (col = 0; col < width; col++)
- *dst++ = dng_process_color8(*src++, s->dng_lut, s->black_level, scale_factor);
+ /* Input and output image are the same size and the MJpeg decoder has done per-component
+ deinterleaving, so blitting here is straightforward. */
+ if (is_u16) {
+ for (line = 0; line < height; line++) {
+ uint16_t *dst_u16 = (uint16_t *)dst;
+ uint16_t *src_u16 = (uint16_t *)src;
+
+ for (col = 0; col < width; col++)
+ *dst_u16++ = dng_process_color16(*src_u16++, s->dng_lut, s->black_level, scale_factor);
+
+ dst += dst_stride * sizeof(uint16_t);
+ src += src_stride * sizeof(uint16_t);
+ }
+ } else {
+ for (line = 0; line < height; line++) {
+ for (col = 0; col < width; col++)
+ *dst++ = dng_process_color8(*src++, s->dng_lut, s->black_level, scale_factor);
- dst += dst_stride;
- src += src_stride;
+ dst += dst_stride;
+ src += src_stride;
+ }
}
}
}
-static int dng_decode_jpeg_tile(AVCodecContext *avctx, AVFrame *frame,
- int tile_byte_count, int x, int y, int w, int h)
+static int dng_decode_jpeg(AVCodecContext *avctx, AVFrame *frame,
+ int tile_byte_count, int dst_x, int dst_y, int w, int h)
{
TiffContext *s = avctx->priv_data;
AVPacket jpkt;
uint8_t *dst_data, *src_data;
uint32_t dst_offset; /* offset from dst buffer in pixels */
- int is_u16, pixel_size;
+ int is_single_comp, is_u16, pixel_size;
int ret;
/* Prepare a packet and send to the MJPEG decoder */
jpkt.data = (uint8_t*)s->gb.buffer;
jpkt.size = tile_byte_count;
+ if (s->is_bayer) {
+ MJpegDecodeContext *mjpegdecctx = s->avctx_mjpeg->priv_data;
+ /* We have to set this information here, there is no way to know if a given JPEG is a DNG-embedded
+ image or not from its own data (and we need that information when decoding it). */
+ mjpegdecctx->bayer = 1;
+ }
+
ret = avcodec_send_packet(s->avctx_mjpeg, &jpkt);
if (ret < 0) {
av_log(avctx, AV_LOG_ERROR, "Error submitting a packet for decoding\n");
/* Copy the outputted tile's pixels from 'jpgframe' to 'frame' (final buffer) */
+ /* See dng_blit for explanation */
+ is_single_comp = (s->avctx_mjpeg->width == w * 2 && s->avctx_mjpeg->height == h / 2);
+
is_u16 = (s->bpp > 8);
pixel_size = (is_u16 ? sizeof(uint16_t) : sizeof(uint8_t));
- dst_offset = x + frame->linesize[0] * y / pixel_size;
+ if (is_single_comp && !is_u16) {
+ av_log(s->avctx, AV_LOG_ERROR, "DNGs with bpp <= 8 and 1 component are unsupported\n");
+ av_frame_unref(s->jpgframe);
+ return AVERROR_PATCHWELCOME;
+ }
+
+ dst_offset = dst_x + frame->linesize[0] * dst_y / pixel_size;
dst_data = frame->data[0] + dst_offset * pixel_size;
src_data = s->jpgframe->data[0];
s->jpgframe->linesize[0] / pixel_size,
w,
h,
+ is_single_comp,
is_u16);
av_frame_unref(s->jpgframe);
return 0;
}
-static int dng_decode_tiles(AVCodecContext *avctx, AVFrame *frame)
+static int dng_decode_tiles(AVCodecContext *avctx, AVFrame *frame, AVPacket *avpkt)
{
TiffContext *s = avctx->priv_data;
int tile_idx;
int pos_x = 0, pos_y = 0;
int ret;
+ s->jpgframe->width = s->tile_width;
+ s->jpgframe->height = s->tile_length;
+
+ s->avctx_mjpeg->width = s->tile_width;
+ s->avctx_mjpeg->height = s->tile_length;
+
has_width_leftover = (s->width % s->tile_width != 0);
has_height_leftover = (s->height % s->tile_length != 0);
bytestream2_seek(&s->gb, tile_offset, SEEK_SET);
/* Decode JPEG tile and copy it in the reference frame */
- ret = dng_decode_jpeg_tile(avctx, frame, tile_byte_count, pos_x, pos_y, tile_width, tile_length);
+ ret = dng_decode_jpeg(avctx, frame, tile_byte_count, pos_x, pos_y, tile_width, tile_length);
if (ret < 0)
return ret;
}
}
- return 0;
-}
+ /* Frame is ready to be output */
+ frame->pict_type = AV_PICTURE_TYPE_I;
+ frame->key_frame = 1;
-static int dng_decode(AVCodecContext *avctx, AVFrame *frame, AVPacket *avpkt) {
- int ret;
+ return avpkt->size;
+}
+static int dng_decode_strip(AVCodecContext *avctx, AVFrame *frame)
+{
TiffContext *s = avctx->priv_data;
- s->jpgframe->width = s->tile_width;
- s->jpgframe->height = s->tile_length;
-
- s->avctx_mjpeg->width = s->tile_width;
- s->avctx_mjpeg->height = s->tile_length;
-
- /* Decode all tiles in a frame */
- ret = dng_decode_tiles(avctx, frame);
- if (ret < 0)
- return ret;
+ s->jpgframe->width = s->width;
+ s->jpgframe->height = s->height;
- /* Frame is ready to be output */
- frame->pict_type = AV_PICTURE_TYPE_I;
- frame->key_frame = 1;
+ s->avctx_mjpeg->width = s->width;
+ s->avctx_mjpeg->height = s->height;
- return avpkt->size;
+ return dng_decode_jpeg(avctx, frame, s->stripsize, 0, 0, s->width, s->height);
}
static int init_image(TiffContext *s, ThreadFrame *frame)
return AVERROR_PATCHWELCOME;
}
break;
+ case 10101:
case 10121:
- switch (AV_RL32(s->pattern)) {
- case 0x02010100:
- s->avctx->pix_fmt = s->le ? AV_PIX_FMT_BAYER_RGGB16LE : AV_PIX_FMT_BAYER_RGGB16BE;
- break;
- case 0x00010102:
- s->avctx->pix_fmt = s->le ? AV_PIX_FMT_BAYER_BGGR16LE : AV_PIX_FMT_BAYER_BGGR16BE;
- break;
- case 0x01000201:
- s->avctx->pix_fmt = s->le ? AV_PIX_FMT_BAYER_GBRG16LE : AV_PIX_FMT_BAYER_GBRG16BE;
- break;
- case 0x01020001:
- s->avctx->pix_fmt = s->le ? AV_PIX_FMT_BAYER_GRBG16LE : AV_PIX_FMT_BAYER_GRBG16BE;
- break;
- default:
- av_log(s->avctx, AV_LOG_ERROR, "Unsupported Bayer pattern: 0x%X\n",
- AV_RL32(s->pattern));
- return AVERROR_PATCHWELCOME;
- }
- /* Force endianness as mentioned in 'DNG Specification: Chapter 3: BitsPerSample'
- NOTE: The spec actually specifies big-endian, not sure why we need little-endian, but
- such images don't work otherwise. Examples are images produced by Zenmuse X7. */
- if ((s->tiff_type == TIFF_TYPE_DNG || s->tiff_type == TIFF_TYPE_CINEMADNG)
- && (s->bpp != 8 && s->bpp != 16 && s->bpp != 32)) {
- switch (s->avctx->pix_fmt) {
- case AV_PIX_FMT_BAYER_RGGB16BE: s->avctx->pix_fmt = AV_PIX_FMT_BAYER_RGGB16LE; break;
- case AV_PIX_FMT_BAYER_BGGR16BE: s->avctx->pix_fmt = AV_PIX_FMT_BAYER_BGGR16LE; break;
- case AV_PIX_FMT_BAYER_GBRG16BE: s->avctx->pix_fmt = AV_PIX_FMT_BAYER_GBRG16LE; break;
- case AV_PIX_FMT_BAYER_GRBG16BE: s->avctx->pix_fmt = AV_PIX_FMT_BAYER_GRBG16LE; break;
- }
- }
- break;
+ case 10141:
case 10161:
switch (AV_RL32(s->pattern)) {
case 0x02010100:
- s->avctx->pix_fmt = s->le ? AV_PIX_FMT_BAYER_RGGB16LE : AV_PIX_FMT_BAYER_RGGB16BE;
+ s->avctx->pix_fmt = AV_PIX_FMT_BAYER_RGGB16;
break;
case 0x00010102:
- s->avctx->pix_fmt = s->le ? AV_PIX_FMT_BAYER_BGGR16LE : AV_PIX_FMT_BAYER_BGGR16BE;
+ s->avctx->pix_fmt = AV_PIX_FMT_BAYER_BGGR16;
break;
case 0x01000201:
- s->avctx->pix_fmt = s->le ? AV_PIX_FMT_BAYER_GBRG16LE : AV_PIX_FMT_BAYER_GBRG16BE;
+ s->avctx->pix_fmt = AV_PIX_FMT_BAYER_GBRG16;
break;
case 0x01020001:
- s->avctx->pix_fmt = s->le ? AV_PIX_FMT_BAYER_GRBG16LE : AV_PIX_FMT_BAYER_GRBG16BE;
+ s->avctx->pix_fmt = AV_PIX_FMT_BAYER_GRBG16;
break;
default:
av_log(s->avctx, AV_LOG_ERROR, "Unsupported Bayer pattern: 0x%X\n",
case TIFF_RATIONAL:
value = ff_tget(&s->gb, TIFF_LONG, s->le);
value2 = ff_tget(&s->gb, TIFF_LONG, s->le);
+ if (!value2) {
+ av_log(s->avctx, AV_LOG_ERROR, "Invalid denominator in rational\n");
+ return AVERROR_INVALIDDATA;
+ }
+
break;
case TIFF_STRING:
if (count <= 4) {
else if (count > 1)
s->sub_ifd = ff_tget(&s->gb, TIFF_LONG, s->le); /** Only get the first SubIFD */
break;
- case DNG_LINEARIZATION_TABLE: {
- uint32_t lut_offset = value;
- uint32_t lut_size = count;
- uint32_t lut_wanted_size = 1 << s->bpp;
- if (lut_wanted_size != lut_size)
- av_log(s->avctx, AV_LOG_WARNING, "DNG contains LUT with invalid size (%"PRIu32"), disabling LUT\n", lut_size);
- else if (lut_offset >= bytestream2_size(&s->gb))
- av_log(s->avctx, AV_LOG_WARNING, "DNG contains LUT with invalid offset (%"PRIu32"), disabling LUT\n", lut_offset);
- else
- s->dng_lut = (uint16_t*)(s->gb.buffer + lut_offset);
+ case DNG_LINEARIZATION_TABLE:
+ for (int i = 0; i < FFMIN(count, 1 << s->bpp); i++)
+ s->dng_lut[i] = ff_tget(&s->gb, type, s->le);
break;
- }
case DNG_BLACK_LEVEL:
if (count > 1) { /* Use the first value in the pattern (assume they're all the same) */
if (type == TIFF_RATIONAL) {
value = ff_tget(&s->gb, TIFF_LONG, s->le);
value2 = ff_tget(&s->gb, TIFF_LONG, s->le);
+ if (!value2) {
+ av_log(s->avctx, AV_LOG_ERROR, "Invalid black level denominator\n");
+ return AVERROR_INVALIDDATA;
+ }
s->black_level = value / value2;
} else
case TIFF_PHOTOMETRIC_SEPARATED:
case TIFF_PHOTOMETRIC_YCBCR:
case TIFF_PHOTOMETRIC_CFA:
+ case TIFF_PHOTOMETRIC_LINEAR_RAW: // Used by DNG images
s->photometric = value;
break;
case TIFF_PHOTOMETRIC_ALPHA_MASK:
case TIFF_PHOTOMETRIC_ITU_LAB:
case TIFF_PHOTOMETRIC_LOG_L:
case TIFF_PHOTOMETRIC_LOG_LUV:
- case TIFF_PHOTOMETRIC_LINEAR_RAW:
avpriv_report_missing_feature(s->avctx,
"PhotometricInterpretation 0x%04X",
value);
GetByteContext stripsizes;
GetByteContext stripdata;
int retry_for_subifd, retry_for_page;
+ int is_dng;
bytestream2_init(&s->gb, avpkt->data, avpkt->size);
s->is_tiled = 0;
s->is_jpeg = 0;
s->cur_page = 0;
- s->dng_lut = NULL;
+
+ for (i = 0; i < 65536; i++)
+ s->dng_lut[i] = i;
+
free_geotags(s);
// Reset these offsets so we can tell if they were set this frame
goto again;
}
+ /* At this point we've decided on which (Sub)IFD to process */
+
+ is_dng = (s->tiff_type == TIFF_TYPE_DNG || s->tiff_type == TIFF_TYPE_CINEMADNG);
+
for (i = 0; i<s->geotag_count; i++) {
const char *keyname = get_geokey_name(s->geotags[i].key);
if (!keyname) {
}
}
+ if (is_dng) {
+ if (s->white_level == 0)
+ s->white_level = (1 << s->bpp) - 1; /* Default value as per the spec */
+
+ if (s->white_level <= s->black_level) {
+ av_log(avctx, AV_LOG_ERROR, "BlackLevel (%"PRId32") must be less than WhiteLevel (%"PRId32")\n",
+ s->black_level, s->white_level);
+ return AVERROR_INVALIDDATA;
+ }
+ }
+
if (!s->is_tiled && !s->strippos && !s->stripoff) {
av_log(avctx, AV_LOG_ERROR, "Image data is missing\n");
return AVERROR_INVALIDDATA;
}
+
/* now we have the data and may start decoding */
if ((ret = init_image(s, &frame)) < 0)
return ret;
}
}
+ if (s->photometric == TIFF_PHOTOMETRIC_LINEAR_RAW ||
+ s->photometric == TIFF_PHOTOMETRIC_CFA) {
+ p->color_trc = AVCOL_TRC_LINEAR;
+ } else if (s->photometric == TIFF_PHOTOMETRIC_BLACK_IS_ZERO) {
+ p->color_trc = AVCOL_TRC_GAMMA22;
+ }
+
/* Handle DNG images with JPEG-compressed tiles */
- if ((s->tiff_type == TIFF_TYPE_DNG || s->tiff_type == TIFF_TYPE_CINEMADNG) && s->is_tiled) {
+ if (is_dng && s->is_tiled) {
if (!s->is_jpeg) {
avpriv_report_missing_feature(avctx, "DNG uncompressed tiled images");
return AVERROR_PATCHWELCOME;
avpriv_report_missing_feature(avctx, "DNG JPG-compressed tiled non-bayer-encoded images");
return AVERROR_PATCHWELCOME;
} else {
- if ((ret = dng_decode(avctx, (AVFrame*)data, avpkt)) > 0)
+ if ((ret = dng_decode_tiles(avctx, (AVFrame*)data, avpkt)) > 0)
*got_frame = 1;
return ret;
}
FFSWAP(int, p->linesize[0], p->linesize[1]);
}
- if (s->is_bayer && s->white_level && s->bpp == 16 &&
- !(s->tiff_type == TIFF_TYPE_DNG || s->tiff_type == TIFF_TYPE_CINEMADNG)) {
+ if (s->is_bayer && s->white_level && s->bpp == 16 && !is_dng) {
uint16_t *dst = (uint16_t *)p->data[0];
for (i = 0; i < s->height; i++) {
for (j = 0; j < s->width; j++)
s->avctx_mjpeg->idct_algo = avctx->idct_algo;
ret = ff_codec_open2_recursive(s->avctx_mjpeg, codec, NULL);
if (ret < 0) {
- av_frame_free(&s->jpgframe);
- avcodec_free_context(&s->avctx_mjpeg);
return ret;
}
.decode = decode_frame,
.init_thread_copy = ONLY_IF_THREADS_ENABLED(tiff_init),
.capabilities = AV_CODEC_CAP_DR1 | AV_CODEC_CAP_FRAME_THREADS,
+ .caps_internal = FF_CODEC_CAP_INIT_CLEANUP,
.priv_class = &tiff_decoder_class,
};