typedef struct TiffEncoderContext {
AVClass *class; ///< for private options
AVCodecContext *avctx;
- AVFrame picture;
int width; ///< picture width
int height; ///< picture height
unsigned int bpp; ///< bits per pixel
int compr; ///< compression level
int bpp_tab_size; ///< bpp_tab size
- int photometric_interpretation; ///< photometric interpretation
+ enum TiffPhotometric photometric_interpretation; ///< photometric interpretation
int strips; ///< number of strips
int rps; ///< row per strip
uint8_t entries[TIFF_MAX_ENTRY * 12]; ///< entries in header
* @param count The number of values
* @param ptr_val Pointer to values
*/
-static void add_entry(TiffEncoderContext *s, enum TiffTags tag,
- enum TiffTypes type, int count, const void *ptr_val)
+static int add_entry(TiffEncoderContext *s, enum TiffTags tag,
+ enum TiffTypes type, int count, const void *ptr_val)
{
uint8_t *entries_ptr = s->entries + 12 * s->num_entries;
tnput(&entries_ptr, count, ptr_val, type, 0);
} else {
bytestream_put_le32(&entries_ptr, *s->buf - s->buf_start);
- check_size(s, count * type_sizes2[type]);
+ if (check_size(s, count * type_sizes2[type]))
+ return AVERROR_INVALIDDATA;
tnput(s->buf, count, ptr_val, type, 0);
}
s->num_entries++;
+ return 0;
}
-static void add_entry1(TiffEncoderContext *s,
-                       enum TiffTags tag, enum TiffTypes type, int val)
+/**
+ * Add a single-value IFD entry, storing val in a temporary of the
+ * correct on-disk width (uint16_t for TIFF_SHORT, uint32_t otherwise).
+ *
+ * Changed from void to int so that buffer-overflow errors detected by
+ * add_entry() propagate to the caller instead of being silently dropped.
+ *
+ * @return 0 on success, a negative error code from add_entry() on failure
+ */
+static int add_entry1(TiffEncoderContext *s,
+                      enum TiffTags tag, enum TiffTypes type, int val)
{
uint16_t w = val;
uint32_t dw = val;
-    add_entry(s, tag, type, 1, type == TIFF_SHORT ? (void *)&w : (void *)&dw);
+    return add_entry(s, tag, type, 1,
+                     type == TIFF_SHORT ? (void *)&w : (void *)&dw);
}
/**
}
}
-static void pack_yuv(TiffEncoderContext *s, uint8_t *dst, int lnum)
+static void pack_yuv(TiffEncoderContext *s, const AVFrame *p,
+ uint8_t *dst, int lnum)
{
- AVFrame *p = &s->picture;
int i, j, k;
int w = (s->width - 1) / s->subsampling[0] + 1;
uint8_t *pu = &p->data[1][lnum / s->subsampling[1] * p->linesize[1]];
}
}
+#define ADD_ENTRY(s, tag, type, count, ptr_val) \
+ do { \
+ ret = add_entry(s, tag, type, count, ptr_val); \
+ if (ret < 0) \
+ goto fail; \
+ } while(0);
+
+#define ADD_ENTRY1(s, tag, type, val) \
+ do { \
+ ret = add_entry1(s, tag, type, val); \
+ if (ret < 0) \
+ goto fail; \
+ } while(0);
+
static int encode_frame(AVCodecContext *avctx, AVPacket *pkt,
const AVFrame *pict, int *got_packet)
{
TiffEncoderContext *s = avctx->priv_data;
- AVFrame *const p = &s->picture;
+ const AVFrame *const p = pict;
int i;
uint8_t *ptr;
uint8_t *offset;
int bytes_per_row;
uint32_t res[2] = { 72, 1 }; // image resolution (72/1)
uint16_t bpp_tab[] = { 8, 8, 8, 8 };
- int ret;
+ int ret = 0;
int is_yuv = 0;
uint8_t *yuv_line = NULL;
int shift_h, shift_v;
+ int packet_size;
const AVPixFmtDescriptor *pfd;
s->avctx = avctx;
- *p = *pict;
- p->pict_type = AV_PICTURE_TYPE_I;
- p->key_frame = 1;
- avctx->coded_frame = &s->picture;
-
s->width = avctx->width;
s->height = avctx->height;
s->subsampling[0] = 1;
s->subsampling[1] = 1;
switch (avctx->pix_fmt) {
+ case AV_PIX_FMT_RGBA64LE:
case AV_PIX_FMT_RGB48LE:
case AV_PIX_FMT_GRAY16LE:
+ case AV_PIX_FMT_RGBA:
case AV_PIX_FMT_RGB24:
case AV_PIX_FMT_GRAY8:
case AV_PIX_FMT_PAL8:
pfd = av_pix_fmt_desc_get(avctx->pix_fmt);
s->bpp = av_get_bits_per_pixel(pfd);
if (pfd->flags & AV_PIX_FMT_FLAG_PAL)
- s->photometric_interpretation = 3;
+ s->photometric_interpretation = TIFF_PHOTOMETRIC_PALETTE;
else if (pfd->flags & AV_PIX_FMT_FLAG_RGB)
- s->photometric_interpretation = 2;
+ s->photometric_interpretation = TIFF_PHOTOMETRIC_RGB;
else
- s->photometric_interpretation = 1;
+ s->photometric_interpretation = TIFF_PHOTOMETRIC_BLACK_IS_ZERO;
s->bpp_tab_size = pfd->nb_components;
for (i = 0; i < s->bpp_tab_size; i++)
bpp_tab[i] = s->bpp / s->bpp_tab_size;
break;
case AV_PIX_FMT_MONOBLACK:
s->bpp = 1;
- s->photometric_interpretation = 1;
+ s->photometric_interpretation = TIFF_PHOTOMETRIC_BLACK_IS_ZERO;
s->bpp_tab_size = 0;
break;
case AV_PIX_FMT_MONOWHITE:
s->bpp = 1;
- s->photometric_interpretation = 0;
+ s->photometric_interpretation = TIFF_PHOTOMETRIC_WHITE_IS_ZERO;
s->bpp_tab_size = 0;
break;
case AV_PIX_FMT_YUV420P:
case AV_PIX_FMT_YUV410P:
case AV_PIX_FMT_YUV411P:
av_pix_fmt_get_chroma_sub_sample(avctx->pix_fmt, &shift_h, &shift_v);
- s->photometric_interpretation = 6;
+ s->photometric_interpretation = TIFF_PHOTOMETRIC_YCBCR;
s->bpp = 8 + (16 >> (shift_h + shift_v));
s->subsampling[0] = 1 << shift_h;
s->subsampling[1] = 1 << shift_v;
strips = (s->height - 1) / s->rps + 1;
+ packet_size = avctx->height * ((avctx->width * s->bpp + 7) >> 3) * 2 +
+ avctx->height * 4 + FF_MIN_BUFFER_SIZE;
+
if (!pkt->data &&
- (ret = av_new_packet(pkt,
- avctx->width * avctx->height * s->bpp * 2 +
- avctx->height * 4 + FF_MIN_BUFFER_SIZE)) < 0) {
+ (ret = av_new_packet(pkt, packet_size)) < 0) {
av_log(avctx, AV_LOG_ERROR, "Error getting output packet.\n");
return ret;
}
s->buf = &ptr;
s->buf_size = pkt->size;
- if (check_size(s, 8))
+ if (check_size(s, 8)) {
+ ret = AVERROR(EINVAL);
goto fail;
+ }
// write header
bytestream_put_le16(&ptr, 0x4949);
offset = ptr;
bytestream_put_le32(&ptr, 0);
- strip_sizes = av_mallocz(sizeof(*strip_sizes) * strips);
- strip_offsets = av_mallocz(sizeof(*strip_offsets) * strips);
+ strip_sizes = av_mallocz_array(strips, sizeof(*strip_sizes));
+ strip_offsets = av_mallocz_array(strips, sizeof(*strip_offsets));
if (!strip_sizes || !strip_offsets) {
ret = AVERROR(ENOMEM);
goto fail;
s->subsampling[0] * s->subsampling[1] + 7) >> 3;
if (is_yuv) {
yuv_line = av_malloc(bytes_per_row);
- if (yuv_line == NULL) {
+ if (!yuv_line) {
av_log(s->avctx, AV_LOG_ERROR, "Not enough memory\n");
ret = AVERROR(ENOMEM);
goto fail;
zn = 0;
for (j = 0; j < s->rps; j++) {
if (is_yuv) {
- pack_yuv(s, yuv_line, j);
+ pack_yuv(s, p, yuv_line, j);
memcpy(zbuf + zn, yuv_line, bytes_per_row);
j += s->subsampling[1] - 1;
} else
strip_offsets[i / s->rps] = ptr - pkt->data;
}
if (is_yuv) {
- pack_yuv(s, yuv_line, i);
+ pack_yuv(s, p, yuv_line, i);
ret = encode_strip(s, yuv_line, ptr, bytes_per_row, s->compr);
i += s->subsampling[1] - 1;
} else
s->num_entries = 0;
- add_entry1(s, TIFF_SUBFILE, TIFF_LONG, 0);
- add_entry1(s, TIFF_WIDTH, TIFF_LONG, s->width);
- add_entry1(s, TIFF_HEIGHT, TIFF_LONG, s->height);
+ ADD_ENTRY1(s, TIFF_SUBFILE, TIFF_LONG, 0);
+ ADD_ENTRY1(s, TIFF_WIDTH, TIFF_LONG, s->width);
+ ADD_ENTRY1(s, TIFF_HEIGHT, TIFF_LONG, s->height);
if (s->bpp_tab_size)
- add_entry(s, TIFF_BPP, TIFF_SHORT, s->bpp_tab_size, bpp_tab);
+ ADD_ENTRY(s, TIFF_BPP, TIFF_SHORT, s->bpp_tab_size, bpp_tab);
- add_entry1(s, TIFF_COMPR, TIFF_SHORT, s->compr);
- add_entry1(s, TIFF_INVERT, TIFF_SHORT, s->photometric_interpretation);
- add_entry(s, TIFF_STRIP_OFFS, TIFF_LONG, strips, strip_offsets);
+ ADD_ENTRY1(s, TIFF_COMPR, TIFF_SHORT, s->compr);
+ ADD_ENTRY1(s, TIFF_PHOTOMETRIC, TIFF_SHORT, s->photometric_interpretation);
+ ADD_ENTRY(s, TIFF_STRIP_OFFS, TIFF_LONG, strips, strip_offsets);
if (s->bpp_tab_size)
- add_entry1(s, TIFF_SAMPLES_PER_PIXEL, TIFF_SHORT, s->bpp_tab_size);
+ ADD_ENTRY1(s, TIFF_SAMPLES_PER_PIXEL, TIFF_SHORT, s->bpp_tab_size);
- add_entry1(s, TIFF_ROWSPERSTRIP, TIFF_LONG, s->rps);
- add_entry(s, TIFF_STRIP_SIZE, TIFF_LONG, strips, strip_sizes);
- add_entry(s, TIFF_XRES, TIFF_RATIONAL, 1, res);
- add_entry(s, TIFF_YRES, TIFF_RATIONAL, 1, res);
- add_entry1(s, TIFF_RES_UNIT, TIFF_SHORT, 2);
+ ADD_ENTRY1(s, TIFF_ROWSPERSTRIP, TIFF_LONG, s->rps);
+ ADD_ENTRY(s, TIFF_STRIP_SIZE, TIFF_LONG, strips, strip_sizes);
+ ADD_ENTRY(s, TIFF_XRES, TIFF_RATIONAL, 1, res);
+ ADD_ENTRY(s, TIFF_YRES, TIFF_RATIONAL, 1, res);
+ ADD_ENTRY1(s, TIFF_RES_UNIT, TIFF_SHORT, 2);
if (!(avctx->flags & CODEC_FLAG_BITEXACT))
- add_entry(s, TIFF_SOFTWARE_NAME, TIFF_STRING,
+ ADD_ENTRY(s, TIFF_SOFTWARE_NAME, TIFF_STRING,
strlen(LIBAVCODEC_IDENT) + 1, LIBAVCODEC_IDENT);
if (avctx->pix_fmt == AV_PIX_FMT_PAL8) {
pal[i + 256] = ((rgb >> 8) & 0xff) * 257;
pal[i + 512] = (rgb & 0xff) * 257;
}
- add_entry(s, TIFF_PAL, TIFF_SHORT, 256 * 3, pal);
+ ADD_ENTRY(s, TIFF_PAL, TIFF_SHORT, 256 * 3, pal);
}
if (is_yuv) {
/** according to CCIR Recommendation 601.1 */
uint32_t refbw[12] = { 15, 1, 235, 1, 128, 1, 240, 1, 128, 1, 240, 1 };
- add_entry(s, TIFF_YCBCR_SUBSAMPLING, TIFF_SHORT, 2, s->subsampling);
- add_entry(s, TIFF_REFERENCE_BW, TIFF_RATIONAL, 6, refbw);
+ ADD_ENTRY(s, TIFF_YCBCR_SUBSAMPLING, TIFF_SHORT, 2, s->subsampling);
+ ADD_ENTRY(s, TIFF_REFERENCE_BW, TIFF_RATIONAL, 6, refbw);
}
// write offset to dir
bytestream_put_le32(&offset, ptr - pkt->data);
return ret;
}
+/**
+ * Encoder init: allocate the persistent coded_frame (replaces the old
+ * per-context AVFrame copy that encode_frame() used to make from pict).
+ *
+ * @return 0 on success, AVERROR(ENOMEM) if the frame cannot be allocated
+ */
+static av_cold int encode_init(AVCodecContext *avctx)
+{
+    avctx->coded_frame = av_frame_alloc();
+    if (!avctx->coded_frame)
+        return AVERROR(ENOMEM);
+
+    /* every TIFF output frame is a standalone intra picture */
+    avctx->coded_frame->pict_type = AV_PICTURE_TYPE_I;
+    avctx->coded_frame->key_frame = 1;
+
+    return 0;
+}
+
+/** Encoder teardown: release the coded_frame allocated in encode_init(). */
+static av_cold int encode_close(AVCodecContext *avctx)
+{
+    av_frame_free(&avctx->coded_frame);
+    return 0;
+}
+
#define OFFSET(x) offsetof(TiffEncoderContext, x)
#define VE AV_OPT_FLAG_VIDEO_PARAM | AV_OPT_FLAG_ENCODING_PARAM
static const AVOption options[] = {
.type = AVMEDIA_TYPE_VIDEO,
.id = AV_CODEC_ID_TIFF,
.priv_data_size = sizeof(TiffEncoderContext),
+ .init = encode_init,
+ .close = encode_close,
.encode2 = encode_frame,
.pix_fmts = (const enum AVPixelFormat[]) {
AV_PIX_FMT_RGB24, AV_PIX_FMT_RGB48LE, AV_PIX_FMT_PAL8,
+ AV_PIX_FMT_RGBA, AV_PIX_FMT_RGBA64LE,
AV_PIX_FMT_GRAY8, AV_PIX_FMT_GRAY16LE,
AV_PIX_FMT_MONOBLACK, AV_PIX_FMT_MONOWHITE,
AV_PIX_FMT_YUV420P, AV_PIX_FMT_YUV422P, AV_PIX_FMT_YUV444P,