static void write_frame(AVFormatContext *s, AVPacket *pkt, OutputStream *ost)
{
AVBitStreamFilterContext *bsfc = ost->bitstream_filters;
- AVCodecContext *avctx = ost->enc_ctx;
+ AVCodecContext *avctx = ost->st->codec;
int ret;
if ((avctx->codec_type == AVMEDIA_TYPE_VIDEO && video_sync_method == VSYNC_DROP) ||
update_benchmark("encode_audio %d.%d", ost->file_index, ost->index);
if (got_packet) {
- if (pkt.pts != AV_NOPTS_VALUE)
- pkt.pts = av_rescale_q(pkt.pts, enc->time_base, ost->st->time_base);
- if (pkt.dts != AV_NOPTS_VALUE)
- pkt.dts = av_rescale_q(pkt.dts, enc->time_base, ost->st->time_base);
- if (pkt.duration > 0)
- pkt.duration = av_rescale_q(pkt.duration, enc->time_base, ost->st->time_base);
+ av_packet_rescale_ts(&pkt, enc->time_base, ost->st->time_base);
if (debug_ts) {
av_log(NULL, AV_LOG_INFO, "encoder -> type:audio "
if (pkt.pts == AV_NOPTS_VALUE && !(enc->codec->capabilities & CODEC_CAP_DELAY))
pkt.pts = ost->sync_opts;
- if (pkt.pts != AV_NOPTS_VALUE)
- pkt.pts = av_rescale_q(pkt.pts, enc->time_base, ost->st->time_base);
- if (pkt.dts != AV_NOPTS_VALUE)
- pkt.dts = av_rescale_q(pkt.dts, enc->time_base, ost->st->time_base);
+ av_packet_rescale_ts(&pkt, enc->time_base, ost->st->time_base);
if (debug_ts) {
av_log(NULL, AV_LOG_INFO, "encoder -> type:video "
}
}
+/*
+ * Mark an output stream as finished for both the encoder and the muxer.
+ *
+ * If the owning output file was created with the -shortest option set
+ * (of->shortest), every stream belonging to that file is marked finished
+ * as well, so that muxing of the whole file stops together with the
+ * shortest stream.
+ *
+ * NOTE(review): unlike close_output_stream(), this sets MUXER_FINISHED
+ * immediately instead of waiting for buffered packets — presumably the
+ * caller wants a hard stop; confirm against the call site at the
+ * stream-copy/subtitle handling below.
+ */
+static void finish_output_stream(OutputStream *ost)
+{
+ OutputFile *of = output_files[ost->file_index];
+ int i;
+
+ ost->finished = ENCODER_FINISHED | MUXER_FINISHED;
+
+ if (of->shortest) {
+ for (i = 0; i < of->ctx->nb_streams; i++)
+ output_streams[of->ost_index + i]->finished = ENCODER_FINISHED | MUXER_FINISHED;
+ }
+}
+
/**
* Get and encode new output from any of the filtergraphs, without causing
* activity.
av_free_packet(&pkt);
continue;
}
- if (pkt.pts != AV_NOPTS_VALUE)
- pkt.pts = av_rescale_q(pkt.pts, enc->time_base, ost->st->time_base);
- if (pkt.dts != AV_NOPTS_VALUE)
- pkt.dts = av_rescale_q(pkt.dts, enc->time_base, ost->st->time_base);
- if (pkt.duration > 0)
- pkt.duration = av_rescale_q(pkt.duration, enc->time_base, ost->st->time_base);
+ av_packet_rescale_ts(&pkt, enc->time_base, ost->st->time_base);
pkt_size = pkt.size;
write_frame(os, &pkt, ost);
if (ost->enc_ctx->codec_type == AVMEDIA_TYPE_VIDEO && vstats_filename) {
// The following line may be required in some cases where there is no parser
// or the parser does not set has_b_frames correctly
-// ist->st->codec->has_b_frames = ist->dec_ctx->has_b_frames;
if (ist->st->codec->has_b_frames < ist->dec_ctx->has_b_frames) {
- av_log_ask_for_sample(ist->dec_ctx, "has_b_frames is larger in decoder than demuxer");
+ if (ist->dec_ctx->codec_id == AV_CODEC_ID_H264) {
+ ist->st->codec->has_b_frames = ist->dec_ctx->has_b_frames;
+ } else
+ av_log_ask_for_sample(
+ ist->dec_ctx,
+ "has_b_frames is larger in decoder than demuxer %d > %d ",
+ ist->dec_ctx->has_b_frames,
+ ist->st->codec->has_b_frames
+ );
}
if (*got_output || ret<0 || pkt->size)
if (out_codec) {
encoder_name = out_codec->name;
out_codec_name = avcodec_descriptor_get(out_codec->id)->name;
- if (!strcmp(encoder_name, in_codec_name))
+ if (!strcmp(encoder_name, out_codec_name))
encoder_name = "native";
}
if (ost->source_index == ifile->ist_index + i &&
(ost->stream_copy || ost->enc->type == AVMEDIA_TYPE_SUBTITLE))
- close_output_stream(ost);
+ finish_output_stream(ost);
}
}