X-Git-Url: https://git.sesse.net/?a=blobdiff_plain;f=libavcodec%2Ftiff.c;h=112f5b52f472b236f41f2dd03b13de4ed6a7a9c9;hb=ebdc5c419aef0d9eed8c1ec57b30238194c1db0a;hp=cfe0c2771471de05433f03b5facbd0da2467b070;hpb=27e30c73d722ec13e59753dea91be00859c72bf2;p=ffmpeg

diff --git a/libavcodec/tiff.c b/libavcodec/tiff.c
index cfe0c277147..112f5b52f47 100644
--- a/libavcodec/tiff.c
+++ b/libavcodec/tiff.c
@@ -37,6 +37,7 @@
 #include "libavutil/avstring.h"
 #include "libavutil/intreadwrite.h"
 #include "libavutil/imgutils.h"
+#include "libavutil/opt.h"
 #include "avcodec.h"
 #include "bytestream.h"
 #include "faxcompr.h"
@@ -46,11 +47,15 @@
 #include "tiff.h"
 #include "tiff_data.h"
 #include "thread.h"
+#include "get_bits.h"
 
 typedef struct TiffContext {
+    AVClass *class;
     AVCodecContext *avctx;
     GetByteContext gb;
 
+    int get_subimage;
+
     int width, height;
     unsigned int bpp, bppcount;
     uint32_t palette[256];
@@ -65,6 +70,12 @@ typedef struct TiffContext {
     int fill_order;
     uint32_t res[4];
 
+    int is_bayer;
+    uint8_t pattern[4];
+    unsigned white_level;
+
+    uint32_t sub_ifd;
+
     int strips, rps, sstype;
     int sot;
     int stripsizesoff, stripsize, stripoff, strippos;
@@ -236,7 +247,8 @@ static int add_metadata(int count, int type,
     };
 }
 
-static void av_always_inline horizontal_fill(unsigned int bpp, uint8_t* dst,
+static void av_always_inline horizontal_fill(TiffContext *s,
+                                             unsigned int bpp, uint8_t* dst,
                                              int usePtr, const uint8_t *src,
                                              uint8_t c, int width, int offset)
 {
@@ -267,6 +279,15 @@ static void av_always_inline horizontal_fill(unsigned int bpp, uint8_t* dst,
             dst[(width+offset)*2+0] = (usePtr ? src[width] : c) >> 4;
         }
         break;
+    case 12: {
+        uint16_t *dst16 = (uint16_t *)dst;
+        GetBitContext gb;
+        init_get_bits8(&gb, src, width);
+        for (int i = 0; i < s->width; i++) {
+            dst16[i] = get_bits(&gb, 12) << 4;
+        }
+    }
+    break;
     default:
         if (usePtr) {
             memcpy(dst + offset, src, width);
@@ -289,6 +310,19 @@ static int deinvert_buffer(TiffContext *s, const uint8_t *src, int size)
     return 0;
 }
 
+static void unpack_gray(TiffContext *s, AVFrame *p,
+                        const uint8_t *src, int lnum, int width, int bpp)
+{
+    GetBitContext gb;
+    uint16_t *dst = (uint16_t *)(p->data[0] + lnum * p->linesize[0]);
+
+    init_get_bits8(&gb, src, width);
+
+    for (int i = 0; i < s->width; i++) {
+        dst[i] = get_bits(&gb, bpp);
+    }
+}
+
 static void unpack_yuv(TiffContext *s, AVFrame *p,
                        const uint8_t *src, int lnum)
 {
@@ -368,7 +402,7 @@ static int tiff_unpack_zlib(TiffContext *s, AVFrame *p, uint8_t *dst, int stride
     src = zbuf;
     for (line = 0; line < lines; line++) {
         if (s->bpp < 8 && s->avctx->pix_fmt == AV_PIX_FMT_PAL8) {
-            horizontal_fill(s->bpp, dst, 1, src, 0, width, 0);
+            horizontal_fill(s, s->bpp, dst, 1, src, 0, width, 0);
         } else {
             memcpy(dst, src, width);
         }
@@ -433,7 +467,7 @@ static int tiff_unpack_lzma(TiffContext *s, AVFrame *p, uint8_t *dst, int stride
     src = buf;
     for (line = 0; line < lines; line++) {
         if (s->bpp < 8 && s->avctx->pix_fmt == AV_PIX_FMT_PAL8) {
-            horizontal_fill(s->bpp, dst, 1, src, 0, width, 0);
+            horizontal_fill(s, s->bpp, dst, 1, src, 0, width, 0);
         } else {
             memcpy(dst, src, width);
         }
@@ -476,7 +510,7 @@ static int tiff_unpack_fax(TiffContext *s, uint8_t *dst, int stride,
                            s->compr, s->fax_opts);
     if (s->bpp < 8 && s->avctx->pix_fmt == AV_PIX_FMT_PAL8)
         for (line = 0; line < lines; line++) {
-            horizontal_fill(s->bpp, dst, 1, dst, 0, width, 0);
+            horizontal_fill(s, s->bpp, dst, 1, dst, 0, width, 0);
             dst += stride;
         }
     return ret;
@@ -516,6 +550,18 @@ static int tiff_unpack_strip(TiffContext *s, AVFrame *p, uint8_t *dst, int strid
         av_assert0(width <= bytes_per_row);
         av_assert0(s->bpp == 24);
     }
+    if (s->is_bayer) {
+        width = (s->bpp * s->width + 7) >> 3;
+    }
+    if (p->format == AV_PIX_FMT_GRAY12) {
+        av_fast_padded_malloc(&s->yuv_line, &s->yuv_line_size, width);
+        if (s->yuv_line == NULL) {
+            av_log(s->avctx, AV_LOG_ERROR, "Not enough memory\n");
+            return AVERROR(ENOMEM);
+        }
+        dst = s->yuv_line;
+        stride = 0;
+    }
 
     if (s->compr == TIFF_DEFLATE || s->compr == TIFF_ADOBE_DEFLATE) {
 #if CONFIG_ZLIB
@@ -559,10 +605,12 @@ static int tiff_unpack_strip(TiffContext *s, AVFrame *p, uint8_t *dst, int strid
             return AVERROR_INVALIDDATA;
         }
         if (s->bpp < 8 && s->avctx->pix_fmt == AV_PIX_FMT_PAL8)
-            horizontal_fill(s->bpp, dst, 1, dst, 0, width, 0);
+            horizontal_fill(s, s->bpp, dst, 1, dst, 0, width, 0);
         if (is_yuv) {
             unpack_yuv(s, p, dst, strip_start + line);
             line += s->subsampling[1] - 1;
+        } else if (p->format == AV_PIX_FMT_GRAY12) {
+            unpack_gray(s, p, dst, strip_start + line, width, s->bpp);
         }
         dst += stride;
     }
@@ -571,7 +619,7 @@ static int tiff_unpack_strip(TiffContext *s, AVFrame *p, uint8_t *dst, int strid
     if (s->compr == TIFF_CCITT_RLE ||
         s->compr == TIFF_G3 ||
         s->compr == TIFF_G4) {
-        if (is_yuv)
+        if (is_yuv || p->format == AV_PIX_FMT_GRAY12)
             return AVERROR_INVALIDDATA;
 
         return tiff_unpack_fax(s, dst, stride, src, size, width, lines);
@@ -595,7 +643,7 @@ static int tiff_unpack_strip(TiffContext *s, AVFrame *p, uint8_t *dst, int strid
                 return AVERROR_INVALIDDATA;
             if (!s->fill_order) {
-                horizontal_fill(s->bpp * (s->avctx->pix_fmt == AV_PIX_FMT_PAL8),
+                horizontal_fill(s, s->bpp * (s->avctx->pix_fmt == AV_PIX_FMT_PAL8 || s->is_bayer),
                                 dst, 1, src, 0, width, 0);
             } else {
                 int i;
@@ -619,7 +667,7 @@ static int tiff_unpack_strip(TiffContext *s, AVFrame *p, uint8_t *dst, int strid
                            "Copy went out of bounds\n");
                     return AVERROR_INVALIDDATA;
                 }
-                horizontal_fill(s->bpp * (s->avctx->pix_fmt == AV_PIX_FMT_PAL8),
+                horizontal_fill(s, s->bpp * (s->avctx->pix_fmt == AV_PIX_FMT_PAL8),
                                 dst, 1, src, 0, code, pixels);
                 src    += code;
                 pixels += code;
@@ -631,7 +679,7 @@ static int tiff_unpack_strip(TiffContext *s, AVFrame *p, uint8_t *dst, int strid
                     return AVERROR_INVALIDDATA;
                 }
                 c = *src++;
-                horizontal_fill(s->bpp * (s->avctx->pix_fmt == AV_PIX_FMT_PAL8),
+                horizontal_fill(s, s->bpp * (s->avctx->pix_fmt == AV_PIX_FMT_PAL8),
                                 dst, 0, NULL, c, code, pixels);
                 pixels += code;
             }
@@ -646,6 +694,8 @@ static int tiff_unpack_strip(TiffContext *s, AVFrame *p, uint8_t *dst, int strid
         if (is_yuv) {
             unpack_yuv(s, p, dst, strip_start + line);
             line += s->subsampling[1] - 1;
+        } else if (p->format == AV_PIX_FMT_GRAY12) {
+            unpack_gray(s, p, dst, strip_start + line, width, s->bpp);
         }
         dst += stride;
     }
@@ -665,7 +715,7 @@ static int init_image(TiffContext *s, ThreadFrame *frame)
         return AVERROR_INVALIDDATA;
     }
 
-    switch (s->planar * 1000 + s->bpp * 10 + s->bppcount) {
+    switch (s->planar * 1000 + s->bpp * 10 + s->bppcount + s->is_bayer * 10000) {
    case 11:
         if (!s->palette_is_set) {
             s->avctx->pix_fmt = AV_PIX_FMT_MONOBLACK;
@@ -681,6 +731,69 @@ static int init_image(TiffContext *s, ThreadFrame *frame)
     case 81:
         s->avctx->pix_fmt = s->palette_is_set ? AV_PIX_FMT_PAL8 : AV_PIX_FMT_GRAY8;
         break;
+    case 121:
+        s->avctx->pix_fmt = AV_PIX_FMT_GRAY12;
+        break;
+    case 10081:
+        switch (AV_RL32(s->pattern)) {
+        case 0x02010100:
+            s->avctx->pix_fmt = AV_PIX_FMT_BAYER_RGGB8;
+            break;
+        case 0x00010102:
+            s->avctx->pix_fmt = AV_PIX_FMT_BAYER_BGGR8;
+            break;
+        case 0x01000201:
+            s->avctx->pix_fmt = AV_PIX_FMT_BAYER_GBRG8;
+            break;
+        case 0x01020001:
+            s->avctx->pix_fmt = AV_PIX_FMT_BAYER_GRBG8;
+            break;
+        default:
+            av_log(s->avctx, AV_LOG_ERROR, "Unsupported Bayer pattern: 0x%X\n",
+                   AV_RL32(s->pattern));
+            return AVERROR_PATCHWELCOME;
+        }
+        break;
+    case 10121:
+        switch (AV_RL32(s->pattern)) {
+        case 0x02010100:
+            s->avctx->pix_fmt = s->le ? AV_PIX_FMT_BAYER_RGGB16LE : AV_PIX_FMT_BAYER_RGGB16BE;
+            break;
+        case 0x00010102:
+            s->avctx->pix_fmt = s->le ? AV_PIX_FMT_BAYER_BGGR16LE : AV_PIX_FMT_BAYER_BGGR16BE;
+            break;
+        case 0x01000201:
+            s->avctx->pix_fmt = s->le ? AV_PIX_FMT_BAYER_GBRG16LE : AV_PIX_FMT_BAYER_GBRG16BE;
+            break;
+        case 0x01020001:
+            s->avctx->pix_fmt = s->le ? AV_PIX_FMT_BAYER_GRBG16LE : AV_PIX_FMT_BAYER_GRBG16BE;
+            break;
+        default:
+            av_log(s->avctx, AV_LOG_ERROR, "Unsupported Bayer pattern: 0x%X\n",
+                   AV_RL32(s->pattern));
+            return AVERROR_PATCHWELCOME;
+        }
+        break;
+    case 10161:
+        switch (AV_RL32(s->pattern)) {
+        case 0x02010100:
+            s->avctx->pix_fmt = s->le ? AV_PIX_FMT_BAYER_RGGB16LE : AV_PIX_FMT_BAYER_RGGB16BE;
+            break;
+        case 0x00010102:
+            s->avctx->pix_fmt = s->le ? AV_PIX_FMT_BAYER_BGGR16LE : AV_PIX_FMT_BAYER_BGGR16BE;
+            break;
+        case 0x01000201:
+            s->avctx->pix_fmt = s->le ? AV_PIX_FMT_BAYER_GBRG16LE : AV_PIX_FMT_BAYER_GBRG16BE;
+            break;
+        case 0x01020001:
+            s->avctx->pix_fmt = s->le ? AV_PIX_FMT_BAYER_GRBG16LE : AV_PIX_FMT_BAYER_GRBG16BE;
+            break;
+        default:
+            av_log(s->avctx, AV_LOG_ERROR, "Unsupported Bayer pattern: 0x%X\n",
+                   AV_RL32(s->pattern));
+            return AVERROR_PATCHWELCOME;
+        }
+        break;
     case 243:
         if (s->photometric == TIFF_PHOTOMETRIC_YCBCR) {
             if (s->subsampling[0] == 1 && s->subsampling[1] == 1) {
@@ -712,7 +825,7 @@ static int init_image(TiffContext *s, ThreadFrame *frame)
         s->avctx->pix_fmt = s->le ? AV_PIX_FMT_YA16LE : AV_PIX_FMT_YA16BE;
         break;
     case 324:
-        s->avctx->pix_fmt = AV_PIX_FMT_RGBA;
+        s->avctx->pix_fmt = s->photometric == TIFF_PHOTOMETRIC_SEPARATED ? AV_PIX_FMT_RGB0 : AV_PIX_FMT_RGBA;
         break;
     case 483:
         s->avctx->pix_fmt = s->le ? AV_PIX_FMT_RGB48LE : AV_PIX_FMT_RGB48BE;
@@ -961,21 +1074,41 @@
     case TIFF_PREDICTOR:
         s->predictor = value;
         break;
+    case TIFF_SUB_IFDS:
+        s->sub_ifd = value;
+        break;
+    case TIFF_WHITE_LEVEL:
+        s->white_level = value;
+        break;
+    case TIFF_CFA_PATTERN_DIM:
+        if (count != 2 || (ff_tget(&s->gb, type, s->le) != 2 &&
+                           ff_tget(&s->gb, type, s->le) != 2)) {
+            av_log(s->avctx, AV_LOG_ERROR, "CFA Pattern dimensions are not 2x2\n");
+            return AVERROR_INVALIDDATA;
+        }
+        break;
+    case TIFF_CFA_PATTERN:
+        s->is_bayer = 1;
+        s->pattern[0] = ff_tget(&s->gb, type, s->le);
+        s->pattern[1] = ff_tget(&s->gb, type, s->le);
+        s->pattern[2] = ff_tget(&s->gb, type, s->le);
+        s->pattern[3] = ff_tget(&s->gb, type, s->le);
+        break;
     case TIFF_PHOTOMETRIC:
         switch (value) {
         case TIFF_PHOTOMETRIC_WHITE_IS_ZERO:
         case TIFF_PHOTOMETRIC_BLACK_IS_ZERO:
         case TIFF_PHOTOMETRIC_RGB:
         case TIFF_PHOTOMETRIC_PALETTE:
+        case TIFF_PHOTOMETRIC_SEPARATED:
         case TIFF_PHOTOMETRIC_YCBCR:
+        case TIFF_PHOTOMETRIC_CFA:
             s->photometric = value;
             break;
         case TIFF_PHOTOMETRIC_ALPHA_MASK:
-        case TIFF_PHOTOMETRIC_SEPARATED:
         case TIFF_PHOTOMETRIC_CIE_LAB:
         case TIFF_PHOTOMETRIC_ICC_LAB:
        case TIFF_PHOTOMETRIC_ITU_LAB:
-        case TIFF_PHOTOMETRIC_CFA:
         case TIFF_PHOTOMETRIC_LOG_L:
         case TIFF_PHOTOMETRIC_LOG_LUV:
         case TIFF_PHOTOMETRIC_LINEAR_RAW:
@@ -1192,7 +1325,7 @@
     default:
         if (s->avctx->err_recognition & AV_EF_EXPLODE) {
             av_log(s->avctx, AV_LOG_ERROR,
-                   "Unknown or unsupported tag %d/0X%0X\n",
+                   "Unknown or unsupported tag %d/0x%0X\n",
                    tag, tag);
             return AVERROR_INVALIDDATA;
         }
@@ -1235,10 +1368,13 @@ static int decode_frame(AVCodecContext *avctx,
     }
     s->le          = le;
     // TIFF_BPP is not a required tag and defaults to 1
+again:
     s->bppcount    = s->bpp = 1;
     s->photometric = TIFF_PHOTOMETRIC_NONE;
     s->compr       = TIFF_RAW;
     s->fill_order  = 0;
+    s->white_level = 0;
+    s->is_bayer    = 0;
     free_geotags(s);
 
     // Reset these offsets so we can tell if they were set this frame
@@ -1253,6 +1389,16 @@
         return ret;
     }
 
+    if (s->sub_ifd && s->get_subimage) {
+        off = s->sub_ifd;
+        if (off >= UINT_MAX - 14 || avpkt->size < off + 14) {
+            av_log(avctx, AV_LOG_ERROR, "IFD offset is greater than image size\n");
+            return AVERROR_INVALIDDATA;
+        }
+        s->sub_ifd = 0;
+        goto again;
+    }
+
     for (i = 0; i < s->geotag_count; i++) {
         const char *keyname = get_geokey_name(s->geotags[i].key);
         if (!keyname) {
@@ -1303,6 +1449,8 @@
     planes = s->planar ? s->bppcount : 1;
 
     for (plane = 0; plane < planes; plane++) {
+        int remaining = avpkt->size;
+        int decoded_height;
         stride = p->linesize[plane];
         dst = p->data[plane];
         for (i = 0; i < s->height; i += s->rps) {
@@ -1318,10 +1466,11 @@
             else
                 soff = s->stripoff;
 
-            if (soff > avpkt->size || ssize > avpkt->size - soff) {
+            if (soff > avpkt->size || ssize > avpkt->size - soff || ssize > remaining) {
                 av_log(avctx, AV_LOG_ERROR, "Invalid strip size/offset\n");
                 return AVERROR_INVALIDDATA;
             }
+            remaining -= ssize;
             if ((ret = tiff_unpack_strip(s, p, dst, stride, avpkt->data + soff, ssize,
                                          i, FFMIN(s->rps, s->height - i))) < 0) {
                 if (avctx->err_recognition & AV_EF_EXPLODE)
@@ -1329,6 +1478,8 @@
                 break;
             }
         }
+        decoded_height = FFMIN(i, s->height);
+
         if (s->predictor == 2) {
             if (s->photometric == TIFF_PHOTOMETRIC_YCBCR) {
                 av_log(s->avctx, AV_LOG_ERROR, "predictor == 2 with YUV is unsupported");
@@ -1345,7 +1496,7 @@
                 s->avctx->pix_fmt == AV_PIX_FMT_YA16LE ||
                 s->avctx->pix_fmt == AV_PIX_FMT_GBRP16LE ||
                 s->avctx->pix_fmt == AV_PIX_FMT_GBRAP16LE) {
-                for (i = 0; i < s->height; i++) {
+                for (i = 0; i < decoded_height; i++) {
                     for (j = soff; j < ssize; j += 2)
                         AV_WL16(dst + j, AV_RL16(dst + j) + AV_RL16(dst + j - soff));
                     dst += stride;
@@ -1356,13 +1507,13 @@
                        s->avctx->pix_fmt == AV_PIX_FMT_YA16BE ||
                        s->avctx->pix_fmt == AV_PIX_FMT_GBRP16BE ||
                        s->avctx->pix_fmt == AV_PIX_FMT_GBRAP16BE) {
-                for (i = 0; i < s->height; i++) {
+                for (i = 0; i < decoded_height; i++) {
                     for (j = soff; j < ssize; j += 2)
                         AV_WB16(dst + j, AV_RB16(dst + j) + AV_RB16(dst + j - soff));
                     dst += stride;
                 }
             } else {
-                for (i = 0; i < s->height; i++) {
+                for (i = 0; i < decoded_height; i++) {
                     for (j = soff; j < ssize; j++)
                         dst[j] += dst[j - soff];
                     dst += stride;
@@ -1379,6 +1530,24 @@
                 dst += stride;
             }
         }
+
+        if (s->photometric == TIFF_PHOTOMETRIC_SEPARATED &&
+            s->avctx->pix_fmt == AV_PIX_FMT_RGB0) {
+            dst = p->data[plane];
+            for (i = 0; i < s->height; i++) {
+                for (j = 0; j < s->width; j++) {
+                    int k =  255 - dst[4 * j + 3];
+                    int r = (255 - dst[4 * j    ]) * k;
+                    int g = (255 - dst[4 * j + 1]) * k;
+                    int b = (255 - dst[4 * j + 2]) * k;
+                    dst[4 * j    ] = r * 257 >> 16;
+                    dst[4 * j + 1] = g * 257 >> 16;
+                    dst[4 * j + 2] = b * 257 >> 16;
+                    dst[4 * j + 3] = 255;
+                }
+                dst += p->linesize[plane];
+            }
+        }
     }
 
     if (s->planar && s->bppcount > 2) {
@@ -1388,6 +1557,15 @@
         FFSWAP(int, p->linesize[0], p->linesize[1]);
     }
 
+    if (s->is_bayer && s->white_level && s->bpp == 16) {
+        uint16_t *dst = (uint16_t *)p->data[0];
+        for (i = 0; i < s->height; i++) {
+            for (j = 0; j < s->width; j++)
+                dst[j] = FFMIN((dst[j] / (float)s->white_level) * 65535, 65535);
+            dst += stride / 2;
+        }
+    }
+
     *got_frame = 1;
 
     return avpkt->size;
@@ -1419,11 +1597,26 @@ static av_cold int tiff_end(AVCodecContext *avctx)
     ff_lzw_decode_close(&s->lzw);
     av_freep(&s->deinvert_buf);
     s->deinvert_buf_size = 0;
+    av_freep(&s->yuv_line);
+    s->yuv_line_size = 0;
     av_freep(&s->fax_buffer);
     s->fax_buffer_size = 0;
     return 0;
 }
 
+#define OFFSET(x) offsetof(TiffContext, x)
+static const AVOption tiff_options[] = {
+    { "subimage", "decode subimage instead if available", OFFSET(get_subimage), AV_OPT_TYPE_BOOL, {.i64=0}, 0, 1, AV_OPT_FLAG_DECODING_PARAM | AV_OPT_FLAG_VIDEO_PARAM },
+    { NULL },
+};
+
+static const AVClass tiff_decoder_class = {
+    .class_name = "TIFF decoder",
+    .item_name  = av_default_item_name,
+    .option     = tiff_options,
+    .version    = LIBAVUTIL_VERSION_INT,
+};
+
 AVCodec ff_tiff_decoder = {
     .name           = "tiff",
     .long_name      = NULL_IF_CONFIG_SMALL("TIFF image"),
@@ -1435,4 +1628,5 @@ AVCodec ff_tiff_decoder = {
     .decode         = decode_frame,
     .init_thread_copy = ONLY_IF_THREADS_ENABLED(tiff_init),
     .capabilities   = AV_CODEC_CAP_DR1 | AV_CODEC_CAP_FRAME_THREADS,
+    .priv_class     = &tiff_decoder_class,
 };
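
Editor's note, not part of the patch: the hunks above register a private "subimage" AVOption on the TIFF decoder, so a caller can ask for the SubIFD image (sub_ifd / get_subimage path in decode_frame) instead of the main IFD. A minimal caller-side sketch, assuming the usual libavcodec setup; the file handling and error checks are omitted and only illustrative:

    /* hedged sketch: the option name "subimage" comes from tiff_options[] in
     * this patch; everything else here is assumed boilerplate. */
    AVCodec *codec        = avcodec_find_decoder(AV_CODEC_ID_TIFF);
    AVCodecContext *avctx = avcodec_alloc_context3(codec);
    /* the option lives in the decoder's private class, so search children */
    av_opt_set_int(avctx, "subimage", 1, AV_OPT_SEARCH_CHILDREN);
    avcodec_open2(avctx, codec, NULL);

From the ffmpeg command line the same option would be passed as an input option, e.g. "ffmpeg -subimage 1 -i input.tif output.png".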