int ff_alloc_packet2(AVCodecContext *avctx, AVPacket *avpkt, int64_t size, int64_t min_size)
{
- if (avpkt->size < 0) {
- av_log(avctx, AV_LOG_ERROR, "Invalid negative user packet size %d\n", avpkt->size);
- return AVERROR(EINVAL);
- }
if (size < 0 || size > INT_MAX - AV_INPUT_BUFFER_PADDING_SIZE) {
av_log(avctx, AV_LOG_ERROR, "Invalid minimum required packet size %"PRId64" (max allowed is %d)\n",
size, INT_MAX - AV_INPUT_BUFFER_PADDING_SIZE);
return AVERROR(EINVAL);
}
+ /* Caller-supplied packet buffers are no longer accepted: the packet must
+  * arrive empty.  This makes the removed negative-size check above dead
+  * code, since avpkt->size is only ever set by this function now. */
+ av_assert0(!avpkt->data);
+
if (avctx && 2*min_size < size) { // FIXME The factor needs to be finetuned
- av_assert0(!avpkt->data || avpkt->data != avctx->internal->byte_buffer);
- if (!avpkt->data || avpkt->size < size) {
- av_fast_padded_malloc(&avctx->internal->byte_buffer, &avctx->internal->byte_buffer_size, size);
- avpkt->data = avctx->internal->byte_buffer;
- avpkt->size = avctx->internal->byte_buffer_size;
- }
+ /* Worst-case size is much larger than the expected size: reuse the
+  * context-owned scratch buffer (av_fast_padded_malloc() only grows it
+  * when needed) instead of allocating a fresh packet every call. */
+ av_fast_padded_malloc(&avctx->internal->byte_buffer, &avctx->internal->byte_buffer_size, size);
+ avpkt->data = avctx->internal->byte_buffer;
+ avpkt->size = size;
}
- if (avpkt->data) {
- AVBufferRef *buf = avpkt->buf;
-
- if (avpkt->size < size) {
- av_log(avctx, AV_LOG_ERROR, "User packet is too small (%d < %"PRId64")\n", avpkt->size, size);
- return AVERROR(EINVAL);
- }
-
- av_init_packet(avpkt);
- avpkt->buf = buf;
- avpkt->size = size;
- return 0;
- } else {
+ /* data is still NULL if the scratch-buffer path was not taken, or —
+  * presumably — if av_fast_padded_malloc() failed (it would leave the
+  * buffer pointer NULL); fall back to a normal packet allocation. */
+ if (!avpkt->data) {
int ret = av_new_packet(avpkt, size);
if (ret < 0)
av_log(avctx, AV_LOG_ERROR, "Failed to allocate packet of size %"PRId64"\n", size);
return ret;
}
+
+ return 0;
+}
+
+/* Default implementation of AVCodecContext.get_encode_buffer: allocates a
+ * refcounted buffer of avpkt->size bytes plus input padding.  The caller
+ * must have set avpkt->size and must pass the packet in empty (no data/buf),
+ * as this function creates and owns the new buffer reference. */
+int avcodec_default_get_encode_buffer(AVCodecContext *avctx, AVPacket *avpkt, int flags)
+{
+ int ret;
+
+ if (avpkt->size < 0 || avpkt->size > INT_MAX - AV_INPUT_BUFFER_PADDING_SIZE)
+ return AVERROR(EINVAL);
+
+ if (avpkt->data || avpkt->buf) {
+ av_log(avctx, AV_LOG_ERROR, "avpkt->{data,buf} != NULL in avcodec_default_get_encode_buffer()\n");
+ return AVERROR(EINVAL);
+ }
+
+ ret = av_buffer_realloc(&avpkt->buf, avpkt->size + AV_INPUT_BUFFER_PADDING_SIZE);
+ if (ret < 0) {
+ av_log(avctx, AV_LOG_ERROR, "Failed to allocate packet of size %d\n", avpkt->size);
+ return ret;
+ }
+ avpkt->data = avpkt->buf->data;
+ /* Zero the trailing padding so consumers never read uninitialized bytes. */
+ memset(avpkt->data + avpkt->size, 0, AV_INPUT_BUFFER_PADDING_SIZE);
+
+ return 0;
+}
+
+/* Internal helper used by encoders to obtain an output buffer of the given
+ * size via the user-settable get_encode_buffer() callback.  Validates the
+ * size, invokes the callback, verifies it actually returned a refcounted
+ * buffer, and unrefs the packet on any failure so the caller never sees a
+ * half-initialized packet. */
+int ff_get_encode_buffer(AVCodecContext *avctx, AVPacket *avpkt, int64_t size, int flags)
+{
+ int ret;
+
+ if (size < 0 || size > INT_MAX - AV_INPUT_BUFFER_PADDING_SIZE)
+ return AVERROR(EINVAL);
+
+ /* The packet must be empty on entry — same contract as ff_alloc_packet2(). */
+ av_assert0(!avpkt->data && !avpkt->buf);
+
+ avpkt->size = size;
+ ret = avctx->get_encode_buffer(avctx, avpkt, flags);
+ if (ret < 0)
+ goto fail;
+
+ if (!avpkt->data || !avpkt->buf) {
+ av_log(avctx, AV_LOG_ERROR, "No buffer returned by get_encode_buffer()\n");
+ ret = AVERROR(EINVAL);
+ goto fail;
+ }
+
+ ret = 0;
+fail:
+ if (ret < 0) {
+ av_log(avctx, AV_LOG_ERROR, "get_encode_buffer() failed\n");
+ av_packet_unref(avpkt);
+ }
+
+ return ret;
}
/**
return ret;
}
+/* The monolithic avcodec_encode_audio2()/avcodec_encode_video2()
+ * implementations are removed here; deprecated compatibility wrappers built
+ * on top of avcodec_send_frame()/avcodec_receive_packet() are provided
+ * under FF_API_OLD_ENCDEC elsewhere in this file. */
-int attribute_align_arg avcodec_encode_audio2(AVCodecContext *avctx,
-                                              AVPacket *avpkt,
-                                              const AVFrame *frame,
-                                              int *got_packet_ptr)
-{
- AVFrame *extended_frame = NULL;
- AVFrame *padded_frame = NULL;
- int ret;
- AVPacket user_pkt = *avpkt;
- int needs_realloc = !user_pkt.data;
-
- *got_packet_ptr = 0;
-
- if (!avctx->codec->encode2) {
- av_log(avctx, AV_LOG_ERROR, "This encoder requires using the avcodec_send_frame() API.\n");
- return AVERROR(ENOSYS);
- }
-
- if (!(avctx->codec->capabilities & AV_CODEC_CAP_DELAY) && !frame) {
- av_packet_unref(avpkt);
- return 0;
- }
-
- /* ensure that extended_data is properly set */
- if (frame && !frame->extended_data) {
- if (av_sample_fmt_is_planar(avctx->sample_fmt) &&
- avctx->channels > AV_NUM_DATA_POINTERS) {
- av_log(avctx, AV_LOG_ERROR, "Encoding to a planar sample format, "
- "with more than %d channels, but extended_data is not set.\n",
- AV_NUM_DATA_POINTERS);
- return AVERROR(EINVAL);
- }
- av_log(avctx, AV_LOG_WARNING, "extended_data is not set.\n");
-
- extended_frame = av_frame_alloc();
- if (!extended_frame)
- return AVERROR(ENOMEM);
-
- memcpy(extended_frame, frame, sizeof(AVFrame));
- extended_frame->extended_data = extended_frame->data;
- frame = extended_frame;
- }
-
- /* extract audio service type metadata */
- if (frame) {
- AVFrameSideData *sd = av_frame_get_side_data(frame, AV_FRAME_DATA_AUDIO_SERVICE_TYPE);
- if (sd && sd->size >= sizeof(enum AVAudioServiceType))
- avctx->audio_service_type = *(enum AVAudioServiceType*)sd->data;
- }
-
- /* check for valid frame size */
- if (frame) {
- if (avctx->codec->capabilities & AV_CODEC_CAP_SMALL_LAST_FRAME) {
- if (frame->nb_samples > avctx->frame_size) {
- av_log(avctx, AV_LOG_ERROR, "more samples than frame size (avcodec_encode_audio2)\n");
- ret = AVERROR(EINVAL);
- goto end;
- }
- } else if (!(avctx->codec->capabilities & AV_CODEC_CAP_VARIABLE_FRAME_SIZE)) {
- /* if we already got an undersized frame, that must have been the last */
- if (avctx->internal->last_audio_frame) {
- av_log(avctx, AV_LOG_ERROR, "frame_size (%d) was not respected for a non-last frame (avcodec_encode_audio2)\n", avctx->frame_size);
- ret = AVERROR(EINVAL);
- goto end;
- }
-
- if (frame->nb_samples < avctx->frame_size) {
- if (!(padded_frame = av_frame_alloc())) {
- ret = AVERROR(ENOMEM);
- goto end;
- }
- ret = pad_last_frame(avctx, padded_frame, frame);
- if (ret < 0)
- goto end;
-
- frame = padded_frame;
- avctx->internal->last_audio_frame = 1;
- }
-
- if (frame->nb_samples != avctx->frame_size) {
- av_log(avctx, AV_LOG_ERROR, "nb_samples (%d) != frame_size (%d) (avcodec_encode_audio2)\n", frame->nb_samples, avctx->frame_size);
- ret = AVERROR(EINVAL);
- goto end;
- }
- }
- }
-
- av_assert0(avctx->codec->encode2);
-
- ret = avctx->codec->encode2(avctx, avpkt, frame, got_packet_ptr);
- if (!ret) {
- if (*got_packet_ptr) {
- if (!(avctx->codec->capabilities & AV_CODEC_CAP_DELAY)) {
- if (avpkt->pts == AV_NOPTS_VALUE)
- avpkt->pts = frame->pts;
- if (!avpkt->duration)
- avpkt->duration = ff_samples_to_time_base(avctx,
- frame->nb_samples);
- }
- avpkt->dts = avpkt->pts;
- } else {
- avpkt->size = 0;
- }
- }
- if (avpkt->data && avpkt->data == avctx->internal->byte_buffer) {
- needs_realloc = 0;
- if (user_pkt.data) {
- if (user_pkt.size >= avpkt->size) {
- memcpy(user_pkt.data, avpkt->data, avpkt->size);
- } else {
- av_log(avctx, AV_LOG_ERROR, "Provided packet is too small, needs to be %d\n", avpkt->size);
- avpkt->size = user_pkt.size;
- ret = -1;
- }
- avpkt->buf = user_pkt.buf;
- avpkt->data = user_pkt.data;
- } else if (!avpkt->buf) {
- ret = av_packet_make_refcounted(avpkt);
- if (ret < 0)
- goto end;
- }
- }
-
- if (!ret) {
- if (needs_realloc && avpkt->data) {
- ret = av_buffer_realloc(&avpkt->buf, avpkt->size + AV_INPUT_BUFFER_PADDING_SIZE);
- if (ret >= 0)
- avpkt->data = avpkt->buf->data;
- }
- if (frame)
- avctx->frame_number++;
- }
-
- if (ret < 0 || !*got_packet_ptr) {
- av_packet_unref(avpkt);
- goto end;
- }
-
- /* NOTE: if we add any audio encoders which output non-keyframe packets,
- * this needs to be moved to the encoders, but for now we can do it
- * here to simplify things */
- avpkt->flags |= AV_PKT_FLAG_KEY;
-
-end:
- av_frame_free(&padded_frame);
- av_free(extended_frame);
-
- return ret;
-}
-
-int attribute_align_arg avcodec_encode_video2(AVCodecContext *avctx,
-                                              AVPacket *avpkt,
-                                              const AVFrame *frame,
-                                              int *got_packet_ptr)
-{
- int ret;
- AVPacket user_pkt = *avpkt;
- int needs_realloc = !user_pkt.data;
-
- *got_packet_ptr = 0;
-
- if (!avctx->codec->encode2) {
- av_log(avctx, AV_LOG_ERROR, "This encoder requires using the avcodec_send_frame() API.\n");
- return AVERROR(ENOSYS);
- }
-
- if ((avctx->flags&AV_CODEC_FLAG_PASS1) && avctx->stats_out)
- avctx->stats_out[0] = '\0';
-
- if (!frame &&
- !(avctx->codec->capabilities & AV_CODEC_CAP_DELAY ||
- (avctx->internal->frame_thread_encoder && avctx->active_thread_type & FF_THREAD_FRAME))) {
- av_packet_unref(avpkt);
- return 0;
- }
-
- if (av_image_check_size2(avctx->width, avctx->height, avctx->max_pixels, AV_PIX_FMT_NONE, 0, avctx))
- return AVERROR(EINVAL);
-
- if (frame && frame->format == AV_PIX_FMT_NONE)
- av_log(avctx, AV_LOG_WARNING, "AVFrame.format is not set\n");
- if (frame && (frame->width == 0 || frame->height == 0))
- av_log(avctx, AV_LOG_WARNING, "AVFrame.width or height is not set\n");
-
- av_assert0(avctx->codec->encode2);
-
-
- if (CONFIG_FRAME_THREAD_ENCODER &&
- avctx->internal->frame_thread_encoder && (avctx->active_thread_type & FF_THREAD_FRAME))
- ret = ff_thread_video_encode_frame(avctx, avpkt, frame, got_packet_ptr);
- else {
- ret = avctx->codec->encode2(avctx, avpkt, frame, got_packet_ptr);
- if (*got_packet_ptr && !(avctx->codec->capabilities & AV_CODEC_CAP_DELAY))
- avpkt->pts = avpkt->dts = frame->pts;
- }
- av_assert0(ret <= 0);
-
- emms_c();
-
- if (avpkt->data && avpkt->data == avctx->internal->byte_buffer) {
- needs_realloc = 0;
- if (user_pkt.data) {
- if (user_pkt.size >= avpkt->size) {
- memcpy(user_pkt.data, avpkt->data, avpkt->size);
- } else {
- av_log(avctx, AV_LOG_ERROR, "Provided packet is too small, needs to be %d\n", avpkt->size);
- avpkt->size = user_pkt.size;
- ret = -1;
- }
- avpkt->buf = user_pkt.buf;
- avpkt->data = user_pkt.data;
- } else if (!avpkt->buf) {
- ret = av_packet_make_refcounted(avpkt);
- if (ret < 0)
- return ret;
- }
- }
-
- if (!ret) {
- if (!*got_packet_ptr)
- avpkt->size = 0;
-
- if (needs_realloc && avpkt->data) {
- ret = av_buffer_realloc(&avpkt->buf, avpkt->size + AV_INPUT_BUFFER_PADDING_SIZE);
- if (ret >= 0)
- avpkt->data = avpkt->buf->data;
- }
-
- if (frame)
- avctx->frame_number++;
- }
-
- if (ret < 0 || !*got_packet_ptr)
- av_packet_unref(avpkt);
-
- return ret;
-}
-
-
int avcodec_encode_subtitle(AVCodecContext *avctx, uint8_t *buf, int buf_size,
const AVSubtitle *sub)
{
if (CONFIG_FRAME_THREAD_ENCODER &&
avci->frame_thread_encoder && (avctx->active_thread_type & FF_THREAD_FRAME))
+ /* This might modify frame, but it doesn't matter, because
+ * the frame properties used below are not used for video
+ * (due to the delay inherent in frame threaded encoding, it makes
+ * no sense to use the properties of the current frame anyway). */
ret = ff_thread_video_encode_frame(avctx, avpkt, frame, &got_packet);
else {
ret = avctx->codec->encode2(avctx, avpkt, frame, &got_packet);
if (avctx->codec->receive_packet) {
ret = avctx->codec->receive_packet(avctx, avpkt);
- if (!ret)
+ if (ret < 0)
+ av_packet_unref(avpkt);
+ else
// Encoders must always return ref-counted buffers.
// Side-data only packets have no data and can be not ref-counted.
av_assert0(!avpkt->data || avpkt->buf);
return 0;
}
+
+#if FF_API_OLD_ENCDEC
+/* Compatibility shim implementing the deprecated avcodec_encode_audio2/
+ * avcodec_encode_video2 semantics on top of the send/receive API.  Each
+ * call sends one frame and drains every packet the encoder produces; only
+ * the first packet can be returned through the old API, so any further
+ * packets are dropped with a one-time warning. */
+static int compat_encode(AVCodecContext *avctx, AVPacket *avpkt,
+                         int *got_packet, const AVFrame *frame)
+{
+ AVCodecInternal *avci = avctx->internal;
+ AVPacket user_pkt;
+ int ret;
+
+ *got_packet = 0;
+
+ if (frame && avctx->codec->type == AVMEDIA_TYPE_VIDEO) {
+ if (frame->format == AV_PIX_FMT_NONE)
+ av_log(avctx, AV_LOG_WARNING, "AVFrame.format is not set\n");
+ if (frame->width == 0 || frame->height == 0)
+ av_log(avctx, AV_LOG_WARNING, "AVFrame.width or height is not set\n");
+ }
+
+ /* DR1 encoders allocate via get_encode_buffer(), which the old API's
+  * caller-owned packet model cannot support. */
+ if (avctx->codec->capabilities & AV_CODEC_CAP_DR1) {
+ av_log(avctx, AV_LOG_WARNING, "The deprecated avcodec_encode_* API does not support "
+ "AV_CODEC_CAP_DR1 encoders\n");
+ return AVERROR(ENOSYS);
+ }
+
+ ret = avcodec_send_frame(avctx, frame);
+ if (ret == AVERROR_EOF)
+ ret = 0;
+ else if (ret == AVERROR(EAGAIN)) {
+ /* we fully drain all the output in each encode call, so this should not
+ * ever happen */
+ return AVERROR_BUG;
+ } else if (ret < 0)
+ return ret;
+
+ /* Stash the caller's (possibly preallocated) packet so the first decoded
+  * packet can be copied into the user's buffer if one was provided. */
+ av_packet_move_ref(&user_pkt, avpkt);
+ while (ret >= 0) {
+ ret = avcodec_receive_packet(avctx, avpkt);
+ if (ret < 0) {
+ if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF)
+ ret = 0;
+ goto finish;
+ }
+
+ if (avpkt != avci->compat_encode_packet) {
+ if (avpkt->data && user_pkt.data) {
+ if (user_pkt.size >= avpkt->size) {
+ memcpy(user_pkt.data, avpkt->data, avpkt->size);
+ av_buffer_unref(&avpkt->buf);
+ avpkt->buf = user_pkt.buf;
+ avpkt->data = user_pkt.data;
+FF_DISABLE_DEPRECATION_WARNINGS
+ av_init_packet(&user_pkt);
+FF_ENABLE_DEPRECATION_WARNINGS
+ } else {
+ av_log(avctx, AV_LOG_ERROR, "Provided packet is too small, needs to be %d\n", avpkt->size);
+ av_packet_unref(avpkt);
+ ret = AVERROR(EINVAL);
+ goto finish;
+ }
+ }
+
+ *got_packet = 1;
+ /* Further packets land in the internal scratch packet and are dropped. */
+ avpkt = avci->compat_encode_packet;
+ } else {
+ if (!avci->compat_decode_warned) {
+ av_log(avctx, AV_LOG_WARNING, "The deprecated avcodec_encode_* "
+ "API cannot return all the packets for this encoder. "
+ "Some packets will be dropped. Update your code to the "
+ "new encoding API to fix this.\n");
+ avci->compat_decode_warned = 1;
+ av_packet_unref(avpkt);
+ }
+ }
+
+ if (avci->draining)
+ break;
+ }
+
+finish:
+ /* On error, release the user's buffer reference unless ownership was
+  * already handed back to avpkt above. */
+ if (ret < 0)
+ av_packet_unref(&user_pkt);
+
+ return ret;
+}
+
+/* Deprecated public entry point; thin wrapper around compat_encode() that
+ * guarantees the packet is returned blank on failure. */
+int attribute_align_arg avcodec_encode_audio2(AVCodecContext *avctx,
+                                              AVPacket *avpkt,
+                                              const AVFrame *frame,
+                                              int *got_packet_ptr)
+{
+ int ret = compat_encode(avctx, avpkt, got_packet_ptr, frame);
+
+ if (ret < 0)
+ av_packet_unref(avpkt);
+
+ return ret;
+}
+
+/* Deprecated public entry point; thin wrapper around compat_encode() that
+ * guarantees the packet is returned blank on failure. */
+int attribute_align_arg avcodec_encode_video2(AVCodecContext *avctx,
+                                              AVPacket *avpkt,
+                                              const AVFrame *frame,
+                                              int *got_packet_ptr)
+{
+ int ret = compat_encode(avctx, avpkt, got_packet_ptr, frame);
+
+ if (ret < 0)
+ av_packet_unref(avpkt);
+
+ return ret;
+}
+#endif
+
+/* Encoder-specific part of avcodec_open2(): validates user-supplied
+ * parameters (timebase, sample/pixel format, sample rate, channel layout,
+ * dimensions) against the codec's advertised capabilities and fills in
+ * derived defaults.  Returns 0 on success or AVERROR(EINVAL)/ENOMEM. */
+int ff_encode_preinit(AVCodecContext *avctx)
+{
+ int i;
+#if FF_API_CODED_FRAME
+FF_DISABLE_DEPRECATION_WARNINGS
+ avctx->coded_frame = av_frame_alloc();
+ if (!avctx->coded_frame) {
+ return AVERROR(ENOMEM);
+ }
+FF_ENABLE_DEPRECATION_WARNINGS
+#endif
+
+ if (avctx->time_base.num <= 0 || avctx->time_base.den <= 0) {
+ av_log(avctx, AV_LOG_ERROR, "The encoder timebase is not set.\n");
+ return AVERROR(EINVAL);
+ }
+
+ /* Validate the requested sample format; for mono audio, also accept the
+  * planar/packed counterpart of a supported format and silently switch. */
+ if (avctx->codec->sample_fmts) {
+ for (i = 0; avctx->codec->sample_fmts[i] != AV_SAMPLE_FMT_NONE; i++) {
+ if (avctx->sample_fmt == avctx->codec->sample_fmts[i])
+ break;
+ if (avctx->channels == 1 &&
+ av_get_planar_sample_fmt(avctx->sample_fmt) ==
+ av_get_planar_sample_fmt(avctx->codec->sample_fmts[i])) {
+ avctx->sample_fmt = avctx->codec->sample_fmts[i];
+ break;
+ }
+ }
+ if (avctx->codec->sample_fmts[i] == AV_SAMPLE_FMT_NONE) {
+ char buf[128];
+ snprintf(buf, sizeof(buf), "%d", avctx->sample_fmt);
+ av_log(avctx, AV_LOG_ERROR, "Specified sample format %s is invalid or not supported\n",
+ (char *)av_x_if_null(av_get_sample_fmt_name(avctx->sample_fmt), buf));
+ return AVERROR(EINVAL);
+ }
+ }
+ /* Validate the pixel format; (L)JPEG in non-strict mode is exempt since
+  * it accepts formats outside its advertised list. */
+ if (avctx->codec->pix_fmts) {
+ for (i = 0; avctx->codec->pix_fmts[i] != AV_PIX_FMT_NONE; i++)
+ if (avctx->pix_fmt == avctx->codec->pix_fmts[i])
+ break;
+ if (avctx->codec->pix_fmts[i] == AV_PIX_FMT_NONE
+ && !((avctx->codec_id == AV_CODEC_ID_MJPEG || avctx->codec_id == AV_CODEC_ID_LJPEG)
+ && avctx->strict_std_compliance <= FF_COMPLIANCE_UNOFFICIAL)) {
+ char buf[128];
+ snprintf(buf, sizeof(buf), "%d", avctx->pix_fmt);
+ av_log(avctx, AV_LOG_ERROR, "Specified pixel format %s is invalid or not supported\n",
+ (char *)av_x_if_null(av_get_pix_fmt_name(avctx->pix_fmt), buf));
+ return AVERROR(EINVAL);
+ }
+ /* The matched format being a full-range YUVJ variant implies JPEG
+  * (full) color range. */
+ if (avctx->codec->pix_fmts[i] == AV_PIX_FMT_YUVJ420P ||
+ avctx->codec->pix_fmts[i] == AV_PIX_FMT_YUVJ411P ||
+ avctx->codec->pix_fmts[i] == AV_PIX_FMT_YUVJ422P ||
+ avctx->codec->pix_fmts[i] == AV_PIX_FMT_YUVJ440P ||
+ avctx->codec->pix_fmts[i] == AV_PIX_FMT_YUVJ444P)
+ avctx->color_range = AVCOL_RANGE_JPEG;
+ }
+ if (avctx->codec->supported_samplerates) {
+ for (i = 0; avctx->codec->supported_samplerates[i] != 0; i++)
+ if (avctx->sample_rate == avctx->codec->supported_samplerates[i])
+ break;
+ if (avctx->codec->supported_samplerates[i] == 0) {
+ av_log(avctx, AV_LOG_ERROR, "Specified sample rate %d is not supported\n",
+ avctx->sample_rate);
+ return AVERROR(EINVAL);
+ }
+ }
+ if (avctx->sample_rate < 0) {
+ av_log(avctx, AV_LOG_ERROR, "Specified sample rate %d is not supported\n",
+ avctx->sample_rate);
+ return AVERROR(EINVAL);
+ }
+ if (avctx->codec->channel_layouts) {
+ if (!avctx->channel_layout) {
+ av_log(avctx, AV_LOG_WARNING, "Channel layout not specified\n");
+ } else {
+ for (i = 0; avctx->codec->channel_layouts[i] != 0; i++)
+ if (avctx->channel_layout == avctx->codec->channel_layouts[i])
+ break;
+ if (avctx->codec->channel_layouts[i] == 0) {
+ char buf[512];
+ av_get_channel_layout_string(buf, sizeof(buf), -1, avctx->channel_layout);
+ av_log(avctx, AV_LOG_ERROR, "Specified channel layout '%s' is not supported\n", buf);
+ return AVERROR(EINVAL);
+ }
+ }
+ }
+ /* Cross-check channel count against the layout, or derive it when only
+  * the layout was given. */
+ if (avctx->channel_layout && avctx->channels) {
+ int channels = av_get_channel_layout_nb_channels(avctx->channel_layout);
+ if (channels != avctx->channels) {
+ char buf[512];
+ av_get_channel_layout_string(buf, sizeof(buf), -1, avctx->channel_layout);
+ av_log(avctx, AV_LOG_ERROR,
+ "Channel layout '%s' with %d channels does not match number of specified channels %d\n",
+ buf, channels, avctx->channels);
+ return AVERROR(EINVAL);
+ }
+ } else if (avctx->channel_layout) {
+ avctx->channels = av_get_channel_layout_nb_channels(avctx->channel_layout);
+ }
+ if (avctx->channels < 0) {
+ av_log(avctx, AV_LOG_ERROR, "Specified number of channels %d is not supported\n",
+ avctx->channels);
+ return AVERROR(EINVAL);
+ }
+ if(avctx->codec_type == AVMEDIA_TYPE_VIDEO) {
+ const AVPixFmtDescriptor *pixdesc = av_pix_fmt_desc_get(avctx->pix_fmt);
+ /* Clamp an impossible bit depth to what the pixel format can hold. */
+ if ( avctx->bits_per_raw_sample < 0
+ || (avctx->bits_per_raw_sample > 8 && pixdesc->comp[0].depth <= 8)) {
+ av_log(avctx, AV_LOG_WARNING, "Specified bit depth %d not possible with the specified pixel formats depth %d\n",
+ avctx->bits_per_raw_sample, pixdesc->comp[0].depth);
+ avctx->bits_per_raw_sample = pixdesc->comp[0].depth;
+ }
+ if (avctx->width <= 0 || avctx->height <= 0) {
+ av_log(avctx, AV_LOG_ERROR, "dimensions not set\n");
+ return AVERROR(EINVAL);
+ }
+ }
+ /* A sub-1000 bitrate is almost certainly a bits/kilobits mixup. */
+ if ( (avctx->codec_type == AVMEDIA_TYPE_VIDEO || avctx->codec_type == AVMEDIA_TYPE_AUDIO)
+ && avctx->bit_rate>0 && avctx->bit_rate<1000) {
+ av_log(avctx, AV_LOG_WARNING, "Bitrate %"PRId64" is extremely low, maybe you mean %"PRId64"k\n", avctx->bit_rate, avctx->bit_rate);
+ }
+
+ /* Default VBV initial occupancy to 3/4 of the buffer (64-bit math to
+  * avoid int overflow on large buffer sizes). */
+ if (!avctx->rc_initial_buffer_occupancy)
+ avctx->rc_initial_buffer_occupancy = avctx->rc_buffer_size * 3LL / 4;
+
+ if (avctx->ticks_per_frame && avctx->time_base.num &&
+ avctx->ticks_per_frame > INT_MAX / avctx->time_base.num) {
+ av_log(avctx, AV_LOG_ERROR,
+ "ticks_per_frame %d too large for the timebase %d/%d.",
+ avctx->ticks_per_frame,
+ avctx->time_base.num,
+ avctx->time_base.den);
+ return AVERROR(EINVAL);
+ }
+
+ /* A supplied hw frames context must agree with pix_fmt/sw_pix_fmt. */
+ if (avctx->hw_frames_ctx) {
+ AVHWFramesContext *frames_ctx = (AVHWFramesContext*)avctx->hw_frames_ctx->data;
+ if (frames_ctx->format != avctx->pix_fmt) {
+ av_log(avctx, AV_LOG_ERROR,
+ "Mismatching AVCodecContext.pix_fmt and AVHWFramesContext.format\n");
+ return AVERROR(EINVAL);
+ }
+ if (avctx->sw_pix_fmt != AV_PIX_FMT_NONE &&
+ avctx->sw_pix_fmt != frames_ctx->sw_format) {
+ av_log(avctx, AV_LOG_ERROR,
+ "Mismatching AVCodecContext.sw_pix_fmt (%s) "
+ "and AVHWFramesContext.sw_format (%s)\n",
+ av_get_pix_fmt_name(avctx->sw_pix_fmt),
+ av_get_pix_fmt_name(frames_ctx->sw_format));
+ return AVERROR(EINVAL);
+ }
+ avctx->sw_pix_fmt = frames_ctx->sw_format;
+ }
+
+ return 0;
+}