uint8_t *stats;        // buffered 2-pass rate-control data exchanged with libtheora
int stats_size;        // allocated size of 'stats' — TODO confirm, allocation is in an elided hunk
int stats_offset;      // bytes of 'stats' filled (pass 1) / consumed (pass 2) so far
+ int uv_hshift;       // chroma horizontal subsample shift, filled by avcodec_get_chroma_sub_sample()
+ int uv_vshift;       // chroma vertical subsample shift, filled by avcodec_get_chroma_sub_sample()
+ int keyframe_mask;   // (1 << keyframe_granule_shift) - 1; granulepos & mask == 0 => keyframe
} TheoraContext;
/*!
/*
 * Drain buffered first-pass rate-control stats from libtheora and
 * base64-encode them into avctx->stats_out for the caller.
 * NOTE(review): this is a diff hunk — context lines are elided, so the
 * visible body is incomplete; 'buf', 'bytes' and 'b64_size' are filled in
 * by code not shown here (presumably a th_encode_ctl(TH_ENCCTL_2PASS_OUT)
 * loop) — verify against the full file.
 * Returns 0 on success, or -1 when libtheora predates TH_ENCCTL_2PASS_OUT.
 */
static int get_stats(AVCodecContext *avctx, int eos)
{
+#ifdef TH_ENCCTL_2PASS_OUT
TheoraContext *h = avctx->priv_data;
uint8_t *buf;
int bytes;
// (elided hunk) presumably grows h->stats and allocates avctx->stats_out
// of b64_size before this encode — TODO confirm
av_base64_encode(avctx->stats_out, b64_size, h->stats, h->stats_offset);
}
return 0;
+#else
+ av_log(avctx, AV_LOG_ERROR, "libtheora too old to support 2pass\n");
+ return -1;
+#endif
}
// libtheora won't read the entire buffer we give it at once, so we have to
// repeatedly submit it...
/*
 * Feed first-pass stats back into libtheora for the second pass.
 * libtheora consumes the buffer incrementally (see comment above), so the
 * elided body presumably loops th_encode_ctl(TH_ENCCTL_2PASS_IN) and
 * advances h->stats_offset by the bytes accepted each call — TODO confirm;
 * 'bytes' is assigned in lines elided from this diff hunk.
 * Returns 0 on success, or -1 when libtheora predates TH_ENCCTL_2PASS_IN.
 */
static int submit_stats(AVCodecContext *avctx)
{
+#ifdef TH_ENCCTL_2PASS_IN
TheoraContext *h = avctx->priv_data;
int bytes;
// (elided hunk) presumably loads avctx->stats_in into h->stats on first call
if (!h->stats) {
// (elided hunk) the th_encode_ctl() call that sets 'bytes' is not visible here
h->stats_offset += bytes;
}
return 0;
+#else
+ av_log(avctx, AV_LOG_ERROR, "libtheora too old to support 2pass\n");
+ return -1;
+#endif
}
/*
 * Encoder init: populate th_info from the AVCodecContext and set up the
 * libtheora encoder state.
 * NOTE(review): diff fragment — most of the body (t_info width/height,
 * 'h' declaration, th_encode_alloc, extradata writing) is elided between
 * hunks. The visible changes map AVCOL_PRI_* to TH_CS_* and extend
 * supported pixel formats from 4:2:0-only to 4:2:0/4:2:2/4:4:4.
 */
static av_cold int encode_init(AVCodecContext* avc_context)
t_info.aspect_numerator = 1;
t_info.aspect_denominator = 1;
}
- t_info.colorspace = TH_CS_UNSPECIFIED;
- t_info.pixel_fmt = TH_PF_420;
+
+ if (avc_context->color_primaries == AVCOL_PRI_BT470M)
+ t_info.colorspace = TH_CS_ITU_REC_470M;
+ else if (avc_context->color_primaries == AVCOL_PRI_BT470BG)
+ t_info.colorspace = TH_CS_ITU_REC_470BG;
+ else
+ t_info.colorspace = TH_CS_UNSPECIFIED;
+
+ if (avc_context->pix_fmt == PIX_FMT_YUV420P)
+ t_info.pixel_fmt = TH_PF_420;
+ else if (avc_context->pix_fmt == PIX_FMT_YUV422P)
+ t_info.pixel_fmt = TH_PF_422;
+ else if (avc_context->pix_fmt == PIX_FMT_YUV444P)
+ t_info.pixel_fmt = TH_PF_444;
+ else {
+ av_log(avc_context, AV_LOG_ERROR, "Unsupported pix_fmt\n");
+ return -1;
+ }
+ // cache the chroma shifts so encode_frame() can size the U/V planes
+ avcodec_get_chroma_sub_sample(avc_context->pix_fmt, &h->uv_hshift, &h->uv_vshift);
if (avc_context->flags & CODEC_FLAG_QSCALE) {
/* to be consistent with the libvorbis implementation, clip global_quality to 0 - 10
return -1;
}
+ // low granulepos bits are the frame offset since the last keyframe;
+ // a zero remainder therefore marks a keyframe (used in encode_frame)
+ h->keyframe_mask = (1 << t_info.keyframe_granule_shift) - 1;
/* Clean up the theora_info struct */
th_info_clear(&t_info);
// NOTE(review): body fragment of encode_frame() — the signature and several
// statements (th_encode_ycbcr_in, th_encode_packetout for normal frames,
// the declarations of 'frame'/'outbuf'/'t_yuv_buffer') are elided from this
// diff hunk. The visible change drops the 4:2:0-only assert and sizes the
// chroma planes from the cached uv_hshift/uv_vshift instead.
ogg_packet o_packet;
int result, i;
- assert(avc_context->pix_fmt == PIX_FMT_YUV420P);
-
// EOS, finish and get 1st pass stats if applicable
if (!frame) {
th_encode_packetout(h->t_state, 1, &o_packet);
/* Copy planes to the theora yuv_buffer */
for (i = 0; i < 3; i++) {
- t_yuv_buffer[i].width = FFALIGN(avc_context->width, 16) >> !!i;
- t_yuv_buffer[i].height = FFALIGN(avc_context->height, 16) >> !!i;
+ t_yuv_buffer[i].width = FFALIGN(avc_context->width, 16) >> (i && h->uv_hshift);
+ t_yuv_buffer[i].height = FFALIGN(avc_context->height, 16) >> (i && h->uv_vshift);
t_yuv_buffer[i].stride = frame->linesize[i];
t_yuv_buffer[i].data = frame->data[i];
}
}
memcpy(outbuf, o_packet.packet, o_packet.bytes);
- // HACK: does not take codec delay into account (neither does the decoder though)
+ // HACK: assumes no encoder delay, this is true until libtheora becomes
+ // multithreaded (which will be disabled unless explicitly requested)
avc_context->coded_frame->pts = frame->pts;
+ avc_context->coded_frame->key_frame = !(o_packet.granulepos & h->keyframe_mask);
return o_packet.bytes;
}
return 0;
}
-static const enum PixelFormat supported_pixel_formats[] = { PIX_FMT_YUV420P, PIX_FMT_NONE };
-
/*! AVCodec struct exposed to libavcodec */
AVCodec libtheora_encoder = {
.name = "libtheora",
.type = AVMEDIA_TYPE_VIDEO,
.id = CODEC_ID_THEORA,
.priv_data_size = sizeof(TheoraContext),
.init = encode_init,
.close = encode_close,
.encode = encode_frame,
.capabilities = CODEC_CAP_DELAY, // needed to get the statsfile summary
// now advertises 4:2:2 and 4:4:4 in addition to 4:2:0 (inline compound
// literal replaces the removed file-scope supported_pixel_formats table)
.pix_fmts= (const enum PixelFormat[]){PIX_FMT_YUV420P, PIX_FMT_YUV422P, PIX_FMT_YUV444P, PIX_FMT_NONE},
.long_name = NULL_IF_CONFIG_SMALL("libtheora Theora"),
};