static int run_as_daemon = 0;
static int nb_frames_dup = 0;
+static unsigned dup_warning = 1000;
static int nb_frames_drop = 0;
static int64_t decode_error_stat[2];
+static int want_sdp = 1;
+
static int current_time;
AVIOContext *progress_avio = NULL;
void term_init(void)
{
#if HAVE_TERMIOS_H
- if(!run_as_daemon){
+ if (!run_as_daemon && stdin_interaction) {
struct termios tty;
if (tcgetattr (0, &tty) == 0) {
oldtty = tty;
FilterGraph *fg = filtergraphs[i];
avfilter_graph_free(&fg->graph);
for (j = 0; j < fg->nb_inputs; j++) {
+ av_buffer_unref(&fg->inputs[j]->hw_frames_ctx);
av_freep(&fg->inputs[j]->name);
av_freep(&fg->inputs[j]);
}
av_freep(&fg->inputs);
for (j = 0; j < fg->nb_outputs; j++) {
av_freep(&fg->outputs[j]->name);
+ av_freep(&fg->outputs[j]->formats);
+ av_freep(&fg->outputs[j]->channel_layouts);
+ av_freep(&fg->outputs[j]->sample_rates);
av_freep(&fg->outputs[j]);
}
av_freep(&fg->outputs);
avcodec_free_context(&ost->enc_ctx);
avcodec_parameters_free(&ost->ref_par);
+ while (ost->muxing_queue && av_fifo_size(ost->muxing_queue)) {
+ AVPacket pkt;
+ av_fifo_generic_read(ost->muxing_queue, &pkt, sizeof(pkt), NULL);
+ av_packet_unref(&pkt);
+ }
+ av_fifo_freep(&ost->muxing_queue);
+
av_freep(&output_streams[i]);
}
#if HAVE_PTHREADS
av_frame_free(&ist->sub2video.frame);
av_freep(&ist->filters);
av_freep(&ist->hwaccel_device);
+ av_freep(&ist->dts_buffer);
avcodec_free_context(&ist->dec_ctx);
}
}
-static void write_packet(AVFormatContext *s, AVPacket *pkt, OutputStream *ost)
+static void write_packet(OutputFile *of, AVPacket *pkt, OutputStream *ost)
{
+ AVFormatContext *s = of->ctx;
AVStream *st = ost->st;
int ret;
+ if (!of->header_written) {
+ AVPacket tmp_pkt;
+ /* the muxer is not initialized yet, buffer the packet */
+ if (!av_fifo_space(ost->muxing_queue)) {
+ int new_size = FFMIN(2 * av_fifo_size(ost->muxing_queue),
+ ost->max_muxing_queue_size);
+ if (new_size <= av_fifo_size(ost->muxing_queue)) {
+ av_log(NULL, AV_LOG_ERROR,
+ "Too many packets buffered for output stream %d:%d.\n",
+ ost->file_index, ost->st->index);
+ exit_program(1);
+ }
+ ret = av_fifo_realloc2(ost->muxing_queue, new_size);
+ if (ret < 0)
+ exit_program(1);
+ }
+ av_packet_move_ref(&tmp_pkt, pkt);
+ av_fifo_generic_write(ost->muxing_queue, &tmp_pkt, sizeof(tmp_pkt), NULL);
+ return;
+ }
+
if ((st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO && video_sync_method == VSYNC_DROP) ||
(st->codecpar->codec_type == AVMEDIA_TYPE_AUDIO && audio_sync_method < 0))
pkt->pts = pkt->dts = AV_NOPTS_VALUE;
}
}
-static void output_packet(AVFormatContext *s, AVPacket *pkt, OutputStream *ost)
+static void output_packet(OutputFile *of, AVPacket *pkt, OutputStream *ost)
{
int ret = 0;
if (ost->nb_bitstream_filters) {
int idx;
+ av_packet_split_side_data(pkt);
ret = av_bsf_send_packet(ost->bsf_ctx[0], pkt);
if (ret < 0)
goto finish;
while (idx) {
/* get a packet from the previous filter up the chain */
ret = av_bsf_receive_packet(ost->bsf_ctx[idx - 1], pkt);
+ if (ret == AVERROR(EAGAIN)) {
+ ret = 0;
+ idx--;
+ continue;
+ } else if (ret < 0)
+ goto finish;
/* HACK! - aac_adtstoasc updates extradata after filtering the first frame when
* the api states this shouldn't happen after init(). Propagate it here to the
* muxer and to the next filters in the chain to workaround this.
goto finish;
ost->bsf_extradata_updated[idx - 1] |= 1;
}
- if (ret == AVERROR(EAGAIN)) {
- ret = 0;
- idx--;
- continue;
- } else if (ret < 0)
- goto finish;
/* send it to the next filter down the chain or to the muxer */
if (idx < ost->nb_bitstream_filters) {
goto finish;
idx++;
} else
- write_packet(s, pkt, ost);
+ write_packet(of, pkt, ost);
}
} else
- write_packet(s, pkt, ost);
+ write_packet(of, pkt, ost);
finish:
if (ret < 0 && ret != AVERROR_EOF) {
return 1;
}
-static void do_audio_out(AVFormatContext *s, OutputStream *ost,
+static void do_audio_out(OutputFile *of, OutputStream *ost,
AVFrame *frame)
{
AVCodecContext *enc = ost->enc_ctx;
AVPacket pkt;
- int got_packet = 0;
+ int ret;
av_init_packet(&pkt);
pkt.data = NULL;
enc->time_base.num, enc->time_base.den);
}
- if (avcodec_encode_audio2(enc, &pkt, frame, &got_packet) < 0) {
- av_log(NULL, AV_LOG_FATAL, "Audio encoding failed (avcodec_encode_audio2)\n");
- exit_program(1);
- }
- update_benchmark("encode_audio %d.%d", ost->file_index, ost->index);
+ ret = avcodec_send_frame(enc, frame);
+ if (ret < 0)
+ goto error;
+
+ while (1) {
+ ret = avcodec_receive_packet(enc, &pkt);
+ if (ret == AVERROR(EAGAIN))
+ break;
+ if (ret < 0)
+ goto error;
+
+ update_benchmark("encode_audio %d.%d", ost->file_index, ost->index);
- if (got_packet) {
av_packet_rescale_ts(&pkt, enc->time_base, ost->st->time_base);
if (debug_ts) {
av_ts2str(pkt.dts), av_ts2timestr(pkt.dts, &ost->st->time_base));
}
- output_packet(s, &pkt, ost);
+ output_packet(of, &pkt, ost);
}
+
+ return;
+error:
+ av_log(NULL, AV_LOG_FATAL, "Audio encoding failed\n");
+ exit_program(1);
}
-static void do_subtitle_out(AVFormatContext *s,
+static void do_subtitle_out(OutputFile *of,
OutputStream *ost,
- InputStream *ist,
AVSubtitle *sub)
{
int subtitle_out_max_size = 1024 * 1024;
pkt.pts += 90 * sub->end_display_time;
}
pkt.dts = pkt.pts;
- output_packet(s, &pkt, ost);
+ output_packet(of, &pkt, ost);
}
}
-static void do_video_out(AVFormatContext *s,
+static void do_video_out(OutputFile *of,
OutputStream *ost,
AVFrame *next_picture,
double sync_ipts)
format_video_sync = video_sync_method;
if (format_video_sync == VSYNC_AUTO) {
- if(!strcmp(s->oformat->name, "avi")) {
+ if(!strcmp(of->ctx->oformat->name, "avi")) {
format_video_sync = VSYNC_VFR;
} else
- format_video_sync = (s->oformat->flags & AVFMT_VARIABLE_FPS) ? ((s->oformat->flags & AVFMT_NOTIMESTAMPS) ? VSYNC_PASSTHROUGH : VSYNC_VFR) : VSYNC_CFR;
+ format_video_sync = (of->ctx->oformat->flags & AVFMT_VARIABLE_FPS) ? ((of->ctx->oformat->flags & AVFMT_NOTIMESTAMPS) ? VSYNC_PASSTHROUGH : VSYNC_VFR) : VSYNC_CFR;
if ( ist
&& format_video_sync == VSYNC_CFR
&& input_files[ist->file_index]->ctx->nb_streams == 1
}
nb_frames_dup += nb_frames - (nb0_frames && ost->last_dropped) - (nb_frames > nb0_frames);
av_log(NULL, AV_LOG_VERBOSE, "*** %d dup!\n", nb_frames - 1);
+ if (nb_frames_dup > dup_warning) {
+ av_log(NULL, AV_LOG_WARNING, "More than %d frames duplicated\n", dup_warning);
+ dup_warning *= 10;
+ }
}
ost->last_dropped = nb_frames == nb0_frames && next_picture;
return;
#if FF_API_LAVF_FMT_RAWPICTURE
- if (s->oformat->flags & AVFMT_RAWPICTURE &&
+ if (of->ctx->oformat->flags & AVFMT_RAWPICTURE &&
enc->codec->id == AV_CODEC_ID_RAWVIDEO) {
/* raw pictures are written as AVPicture structure to
avoid any copies. We support temporarily the older
pkt.pts = av_rescale_q(in_picture->pts, enc->time_base, ost->st->time_base);
pkt.flags |= AV_PKT_FLAG_KEY;
- output_packet(s, &pkt, ost);
+ output_packet(of, &pkt, ost);
} else
#endif
{
- int got_packet, forced_keyframe = 0;
+ int forced_keyframe = 0;
double pts_time;
if (enc->flags & (AV_CODEC_FLAG_INTERLACED_DCT | AV_CODEC_FLAG_INTERLACED_ME) &&
ost->frames_encoded++;
- ret = avcodec_encode_video2(enc, &pkt, in_picture, &got_packet);
- update_benchmark("encode_video %d.%d", ost->file_index, ost->index);
- if (ret < 0) {
- av_log(NULL, AV_LOG_FATAL, "Video encoding failed\n");
- exit_program(1);
- }
+ ret = avcodec_send_frame(enc, in_picture);
+ if (ret < 0)
+ goto error;
+
+ while (1) {
+ ret = avcodec_receive_packet(enc, &pkt);
+ update_benchmark("encode_video %d.%d", ost->file_index, ost->index);
+ if (ret == AVERROR(EAGAIN))
+ break;
+ if (ret < 0)
+ goto error;
- if (got_packet) {
if (debug_ts) {
av_log(NULL, AV_LOG_INFO, "encoder -> type:video "
"pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s\n",
}
frame_size = pkt.size;
- output_packet(s, &pkt, ost);
+ output_packet(of, &pkt, ost);
/* if two pass, output log */
if (ost->logfile && enc->stats_out) {
av_frame_ref(ost->last_frame, next_picture);
else
av_frame_free(&ost->last_frame);
+
+ return;
+error:
+ av_log(NULL, AV_LOG_FATAL, "Video encoding failed\n");
+ exit_program(1);
}
static double psnr(double d)
"Error in av_buffersink_get_frame_flags(): %s\n", av_err2str(ret));
} else if (flush && ret == AVERROR_EOF) {
if (filter->inputs[0]->type == AVMEDIA_TYPE_VIDEO)
- do_video_out(of->ctx, ost, NULL, AV_NOPTS_VALUE);
+ do_video_out(of, ost, NULL, AV_NOPTS_VALUE);
}
break;
}
enc->time_base.num, enc->time_base.den);
}
- do_video_out(of->ctx, ost, filtered_frame, float_pts);
+ do_video_out(of, ost, filtered_frame, float_pts);
break;
case AVMEDIA_TYPE_AUDIO:
if (!(enc->codec->capabilities & AV_CODEC_CAP_PARAM_CHANGE) &&
"Audio filter graph output is not normalized and encoder does not support parameter changes\n");
break;
}
- do_audio_out(of->ctx, ost, filtered_frame);
+ do_audio_out(of, ost, filtered_frame);
break;
default:
// TODO support subtitle filters
for (i = 0; i < nb_output_streams; i++) {
OutputStream *ost = output_streams[i];
AVCodecContext *enc = ost->enc_ctx;
- AVFormatContext *os = output_files[ost->file_index]->ctx;
+ OutputFile *of = output_files[ost->file_index];
int stop_encoding = 0;
if (!ost->encoding_needed)
if (enc->codec_type == AVMEDIA_TYPE_AUDIO && enc->frame_size <= 1)
continue;
#if FF_API_LAVF_FMT_RAWPICTURE
- if (enc->codec_type == AVMEDIA_TYPE_VIDEO && (os->oformat->flags & AVFMT_RAWPICTURE) && enc->codec->id == AV_CODEC_ID_RAWVIDEO)
+ if (enc->codec_type == AVMEDIA_TYPE_VIDEO && (of->ctx->oformat->flags & AVFMT_RAWPICTURE) && enc->codec->id == AV_CODEC_ID_RAWVIDEO)
continue;
#endif
+ if (enc->codec_type != AVMEDIA_TYPE_VIDEO && enc->codec_type != AVMEDIA_TYPE_AUDIO)
+ continue;
+
+ avcodec_send_frame(enc, NULL);
+
for (;;) {
- int (*encode)(AVCodecContext*, AVPacket*, const AVFrame*, int*) = NULL;
- const char *desc;
+ const char *desc = NULL;
switch (enc->codec_type) {
case AVMEDIA_TYPE_AUDIO:
- encode = avcodec_encode_audio2;
desc = "audio";
break;
case AVMEDIA_TYPE_VIDEO:
- encode = avcodec_encode_video2;
desc = "video";
break;
default:
- stop_encoding = 1;
+ av_assert0(0);
}
- if (encode) {
+ if (1) {
AVPacket pkt;
int pkt_size;
- int got_packet;
av_init_packet(&pkt);
pkt.data = NULL;
pkt.size = 0;
update_benchmark(NULL);
- ret = encode(enc, &pkt, NULL, &got_packet);
+ ret = avcodec_receive_packet(enc, &pkt);
update_benchmark("flush_%s %d.%d", desc, ost->file_index, ost->index);
- if (ret < 0) {
+ if (ret < 0 && ret != AVERROR_EOF) {
av_log(NULL, AV_LOG_FATAL, "%s encoding failed: %s\n",
desc,
av_err2str(ret));
if (ost->logfile && enc->stats_out) {
fprintf(ost->logfile, "%s", enc->stats_out);
}
- if (!got_packet) {
+ if (ret == AVERROR_EOF) {
stop_encoding = 1;
break;
}
}
av_packet_rescale_ts(&pkt, enc->time_base, ost->st->time_base);
pkt_size = pkt.size;
- output_packet(os, &pkt, ost);
+ output_packet(of, &pkt, ost);
if (ost->enc_ctx->codec_type == AVMEDIA_TYPE_VIDEO && vstats_filename) {
do_video_stats(ost, pkt_size);
}
}
#endif
- output_packet(of->ctx, &opkt, ost);
+ output_packet(of, &opkt, ost);
}
int guess_input_channel_layout(InputStream *ist)
}
}
+// This does not quite work like avcodec_decode_audio4/avcodec_decode_video2.
+// There is the following difference: if you got a frame, you must call
+// it again with pkt=NULL. pkt==NULL is treated differently from pkt.size==0
+// (pkt==NULL means get more output, pkt.size==0 is a flush/drain packet)
+//
+// Returns 0 on success with *got_frame telling whether a decoded frame was
+// output, or a negative AVERROR code from send/receive on failure. Each
+// call sends the packet (if any) at most once and retrieves at most one frame.
+static int decode(AVCodecContext *avctx, AVFrame *frame, int *got_frame, AVPacket *pkt)
+{
+    int ret;
+
+    *got_frame = 0;
+
+    if (pkt) {
+        ret = avcodec_send_packet(avctx, pkt);
+        // In particular, we don't expect AVERROR(EAGAIN), because we read all
+        // decoded frames with avcodec_receive_frame() until done.
+        if (ret < 0 && ret != AVERROR_EOF)
+            return ret;
+    }
+
+    ret = avcodec_receive_frame(avctx, frame);
+    // EAGAIN just means "no frame available yet" -- not an error for the caller
+    if (ret < 0 && ret != AVERROR(EAGAIN))
+        return ret;
+    if (ret >= 0)
+        *got_frame = 1;
+
+    return 0;
+}
+
static int decode_audio(InputStream *ist, AVPacket *pkt, int *got_output)
{
AVFrame *decoded_frame, *f;
decoded_frame = ist->decoded_frame;
update_benchmark(NULL);
- ret = avcodec_decode_audio4(avctx, decoded_frame, got_output, pkt);
+ ret = decode(avctx, decoded_frame, got_output, pkt);
update_benchmark("decode_audio %d.%d", ist->file_index, ist->st->index);
if (ret >= 0 && avctx->sample_rate <= 0) {
ret = AVERROR_INVALIDDATA;
}
- check_decode_result(ist, got_output, ret);
+ if (ret != AVERROR_EOF)
+ check_decode_result(ist, got_output, ret);
if (!*got_output || ret < 0)
return ret;
ist->resample_channel_layout = decoded_frame->channel_layout;
ist->resample_channels = avctx->channels;
+ for (i = 0; i < ist->nb_filters; i++) {
+ err = ifilter_parameters_from_frame(ist->filters[i], decoded_frame);
+ if (err < 0) {
+ av_log(NULL, AV_LOG_ERROR,
+ "Error reconfiguring input stream %d:%d filter %d\n",
+ ist->file_index, ist->st->index, i);
+ goto fail;
+ }
+ }
+
for (i = 0; i < nb_filtergraphs; i++)
if (ist_in_filtergraph(filtergraphs[i], ist)) {
FilterGraph *fg = filtergraphs[i];
}
}
- /* if the decoder provides a pts, use it instead of the last packet pts.
- the decoder could be delaying output by a packet or more. */
if (decoded_frame->pts != AV_NOPTS_VALUE) {
- ist->dts = ist->next_dts = ist->pts = ist->next_pts = av_rescale_q(decoded_frame->pts, avctx->time_base, AV_TIME_BASE_Q);
- decoded_frame_tb = avctx->time_base;
- } else if (decoded_frame->pkt_pts != AV_NOPTS_VALUE) {
- decoded_frame->pts = decoded_frame->pkt_pts;
decoded_frame_tb = ist->st->time_base;
- } else if (pkt->pts != AV_NOPTS_VALUE) {
+ } else if (pkt && pkt->pts != AV_NOPTS_VALUE) {
decoded_frame->pts = pkt->pts;
decoded_frame_tb = ist->st->time_base;
}else {
decoded_frame->pts = ist->dts;
decoded_frame_tb = AV_TIME_BASE_Q;
}
- pkt->pts = AV_NOPTS_VALUE;
if (decoded_frame->pts != AV_NOPTS_VALUE)
decoded_frame->pts = av_rescale_delta(decoded_frame_tb, decoded_frame->pts,
(AVRational){1, avctx->sample_rate}, decoded_frame->nb_samples, &ist->filter_in_rescale_delta_last,
}
decoded_frame->pts = AV_NOPTS_VALUE;
+fail:
av_frame_unref(ist->filter_frame);
av_frame_unref(decoded_frame);
return err < 0 ? err : ret;
}
-static int decode_video(InputStream *ist, AVPacket *pkt, int *got_output)
+static int decode_video(InputStream *ist, AVPacket *pkt, int *got_output, int eof)
{
AVFrame *decoded_frame, *f;
int i, ret = 0, err = 0, resample_changed;
int64_t best_effort_timestamp;
+ int64_t dts = AV_NOPTS_VALUE;
AVRational *frame_sample_aspect;
+ AVPacket avpkt;
+
+ // With fate-indeo3-2, we're getting 0-sized packets before EOF for some
+ // reason. This seems like a semi-critical bug. Don't trigger EOF, and
+ // skip the packet.
+ if (!eof && pkt && pkt->size == 0)
+ return 0;
if (!ist->decoded_frame && !(ist->decoded_frame = av_frame_alloc()))
return AVERROR(ENOMEM);
if (!ist->filter_frame && !(ist->filter_frame = av_frame_alloc()))
return AVERROR(ENOMEM);
decoded_frame = ist->decoded_frame;
- pkt->dts = av_rescale_q(ist->dts, AV_TIME_BASE_Q, ist->st->time_base);
+ if (ist->dts != AV_NOPTS_VALUE)
+ dts = av_rescale_q(ist->dts, AV_TIME_BASE_Q, ist->st->time_base);
+ if (pkt) {
+ avpkt = *pkt;
+ avpkt.dts = dts; // ffmpeg.c probably shouldn't do this
+ }
+
+ // The old code used to set dts on the drain packet, which does not work
+ // with the new API anymore.
+ if (eof) {
+ void *new = av_realloc_array(ist->dts_buffer, ist->nb_dts_buffer + 1, sizeof(ist->dts_buffer[0]));
+ if (!new)
+ return AVERROR(ENOMEM);
+ ist->dts_buffer = new;
+ ist->dts_buffer[ist->nb_dts_buffer++] = dts;
+ }
update_benchmark(NULL);
- ret = avcodec_decode_video2(ist->dec_ctx,
- decoded_frame, got_output, pkt);
+ ret = decode(ist->dec_ctx, decoded_frame, got_output, pkt ? &avpkt : NULL);
update_benchmark("decode_video %d.%d", ist->file_index, ist->st->index);
// The following line may be required in some cases where there is no parser
ist->st->codecpar->video_delay);
}
- check_decode_result(ist, got_output, ret);
+ if (ret != AVERROR_EOF)
+ check_decode_result(ist, got_output, ret);
if (*got_output && ret >= 0) {
if (ist->dec_ctx->width != decoded_frame->width ||
ist->hwaccel_retrieved_pix_fmt = decoded_frame->format;
best_effort_timestamp= av_frame_get_best_effort_timestamp(decoded_frame);
+
+ if (eof && best_effort_timestamp == AV_NOPTS_VALUE && ist->nb_dts_buffer > 0) {
+ best_effort_timestamp = ist->dts_buffer[0];
+
+ for (i = 0; i < ist->nb_dts_buffer - 1; i++)
+ ist->dts_buffer[i] = ist->dts_buffer[i + 1];
+ ist->nb_dts_buffer--;
+ }
+
if(best_effort_timestamp != AV_NOPTS_VALUE) {
int64_t ts = av_rescale_q(decoded_frame->pts = best_effort_timestamp, ist->st->time_base, AV_TIME_BASE_Q);
ist->st->time_base.num, ist->st->time_base.den);
}
- pkt->size = 0;
-
if (ist->st->sample_aspect_ratio.num)
decoded_frame->sample_aspect_ratio = ist->st->sample_aspect_ratio;
ist->resample_height = decoded_frame->height;
ist->resample_pix_fmt = decoded_frame->format;
+ for (i = 0; i < ist->nb_filters; i++) {
+ err = ifilter_parameters_from_frame(ist->filters[i], decoded_frame);
+ if (err < 0) {
+ av_log(NULL, AV_LOG_ERROR,
+ "Error reconfiguring input stream %d:%d filter %d\n",
+ ist->file_index, ist->st->index, i);
+ goto fail;
+ }
+ }
+
for (i = 0; i < nb_filtergraphs; i++) {
if (ist_in_filtergraph(filtergraphs[i], ist) && ist->reinit_filters &&
configure_filtergraph(filtergraphs[i]) < 0) {
break;
} else
f = decoded_frame;
- ret = av_buffersrc_add_frame_flags(ist->filters[i]->filter, f, AV_BUFFERSRC_FLAG_PUSH);
- if (ret == AVERROR_EOF) {
- ret = 0; /* ignore */
- } else if (ret < 0) {
+ err = av_buffersrc_add_frame_flags(ist->filters[i]->filter, f, AV_BUFFERSRC_FLAG_PUSH);
+ if (err == AVERROR_EOF) {
+ err = 0; /* ignore */
+ } else if (err < 0) {
av_log(NULL, AV_LOG_FATAL,
- "Failed to inject frame into filter network: %s\n", av_err2str(ret));
+ "Failed to inject frame into filter network: %s\n", av_err2str(err));
exit_program(1);
}
}
|| ost->enc->type != AVMEDIA_TYPE_SUBTITLE)
continue;
- do_subtitle_out(output_files[ost->file_index]->ctx, ost, ist, &subtitle);
+ do_subtitle_out(output_files[ost->file_index], ost, &subtitle);
}
out:
static int process_input_packet(InputStream *ist, const AVPacket *pkt, int no_eof)
{
int ret = 0, i;
- int got_output = 0;
+ int repeating = 0;
+ int eof_reached = 0;
AVPacket avpkt;
if (!ist->saw_first_ts) {
av_init_packet(&avpkt);
avpkt.data = NULL;
avpkt.size = 0;
- goto handle_eof;
} else {
avpkt = *pkt;
}
- if (pkt->dts != AV_NOPTS_VALUE) {
+ if (pkt && pkt->dts != AV_NOPTS_VALUE) {
ist->next_dts = ist->dts = av_rescale_q(pkt->dts, ist->st->time_base, AV_TIME_BASE_Q);
if (ist->dec_ctx->codec_type != AVMEDIA_TYPE_VIDEO || !ist->decoding_needed)
ist->next_pts = ist->pts = ist->dts;
}
// while we have more to decode or while the decoder did output something on EOF
- while (ist->decoding_needed && (avpkt.size > 0 || (!pkt && got_output))) {
- int duration;
- handle_eof:
+ while (ist->decoding_needed) {
+ int duration = 0;
+ int got_output = 0;
ist->pts = ist->next_pts;
ist->dts = ist->next_dts;
- if (avpkt.size && avpkt.size != pkt->size &&
- !(ist->dec->capabilities & AV_CODEC_CAP_SUBFRAMES)) {
- av_log(NULL, ist->showed_multi_packet_warning ? AV_LOG_VERBOSE : AV_LOG_WARNING,
- "Multiple frames in a packet from stream %d\n", pkt->stream_index);
- ist->showed_multi_packet_warning = 1;
- }
-
switch (ist->dec_ctx->codec_type) {
case AVMEDIA_TYPE_AUDIO:
- ret = decode_audio (ist, &avpkt, &got_output);
+ ret = decode_audio (ist, repeating ? NULL : &avpkt, &got_output);
break;
case AVMEDIA_TYPE_VIDEO:
- ret = decode_video (ist, &avpkt, &got_output);
- if (avpkt.duration) {
- duration = av_rescale_q(avpkt.duration, ist->st->time_base, AV_TIME_BASE_Q);
- } else if(ist->dec_ctx->framerate.num != 0 && ist->dec_ctx->framerate.den != 0) {
- int ticks= av_stream_get_parser(ist->st) ? av_stream_get_parser(ist->st)->repeat_pict+1 : ist->dec_ctx->ticks_per_frame;
- duration = ((int64_t)AV_TIME_BASE *
- ist->dec_ctx->framerate.den * ticks) /
- ist->dec_ctx->framerate.num / ist->dec_ctx->ticks_per_frame;
- } else
- duration = 0;
+ ret = decode_video (ist, repeating ? NULL : &avpkt, &got_output, !pkt);
+ if (!repeating || !pkt || got_output) {
+ if (pkt && pkt->duration) {
+ duration = av_rescale_q(pkt->duration, ist->st->time_base, AV_TIME_BASE_Q);
+ } else if(ist->dec_ctx->framerate.num != 0 && ist->dec_ctx->framerate.den != 0) {
+ int ticks= av_stream_get_parser(ist->st) ? av_stream_get_parser(ist->st)->repeat_pict+1 : ist->dec_ctx->ticks_per_frame;
+ duration = ((int64_t)AV_TIME_BASE *
+ ist->dec_ctx->framerate.den * ticks) /
+ ist->dec_ctx->framerate.num / ist->dec_ctx->ticks_per_frame;
+ }
- if(ist->dts != AV_NOPTS_VALUE && duration) {
- ist->next_dts += duration;
- }else
- ist->next_dts = AV_NOPTS_VALUE;
+ if(ist->dts != AV_NOPTS_VALUE && duration) {
+ ist->next_dts += duration;
+ }else
+ ist->next_dts = AV_NOPTS_VALUE;
+ }
if (got_output)
ist->next_pts += duration; //FIXME the duration is not correct in some cases
break;
case AVMEDIA_TYPE_SUBTITLE:
+ if (repeating)
+ break;
ret = transcode_subtitles(ist, &avpkt, &got_output);
+ if (!pkt && ret >= 0)
+ ret = AVERROR_EOF;
break;
default:
return -1;
}
+ if (ret == AVERROR_EOF) {
+ eof_reached = 1;
+ break;
+ }
+
if (ret < 0) {
av_log(NULL, AV_LOG_ERROR, "Error while decoding stream #%d:%d: %s\n",
ist->file_index, ist->st->index, av_err2str(ret));
if (exit_on_error)
exit_program(1);
+ // Decoding might not terminate if we're draining the decoder, and
+ // the decoder keeps returning an error.
+ // This should probably be considered a libavcodec issue.
+ // Sample: fate-vsynth1-dnxhd-720p-hr-lb
+ if (!pkt)
+ eof_reached = 1;
break;
}
- avpkt.dts=
- avpkt.pts= AV_NOPTS_VALUE;
+ if (!got_output)
+ break;
- // touch data and size only if not EOF
- if (pkt) {
- if(ist->dec_ctx->codec_type != AVMEDIA_TYPE_AUDIO)
- ret = avpkt.size;
- avpkt.data += ret;
- avpkt.size -= ret;
- }
- if (!got_output) {
- continue;
- }
- if (got_output && !pkt)
+ // During draining, we might get multiple output frames in this loop.
+ // ffmpeg.c does not drain the filter chain on configuration changes,
+ // which means if we send multiple frames at once to the filters, and
+ // one of those frames changes configuration, the buffered frames will
+ // be lost. This can upset certain FATE tests.
+ // Decode only 1 frame per call on EOF to appease these FATE tests.
+ // The ideal solution would be to rewrite decoding to use the new
+ // decoding API in a better way.
+ if (!pkt)
break;
+
+ repeating = 1;
}
/* after flushing, send an EOF on all the filter inputs attached to the stream */
/* except when looping we need to flush but not to send an EOF */
- if (!pkt && ist->decoding_needed && !got_output && !no_eof) {
+ if (!pkt && ist->decoding_needed && eof_reached && !no_eof) {
int ret = send_filter_eof(ist);
if (ret < 0) {
av_log(NULL, AV_LOG_FATAL, "Error marking filters as finished\n");
do_streamcopy(ist, ost, pkt);
}
- return got_output;
+ return !eof_reached;
}
static void print_sdp(void)
int i;
int j;
AVIOContext *sdp_pb;
- AVFormatContext **avc = av_malloc_array(nb_output_files, sizeof(*avc));
+ AVFormatContext **avc;
+ for (i = 0; i < nb_output_files; i++) {
+ if (!output_files[i]->header_written)
+ return;
+ }
+
+ avc = av_malloc_array(nb_output_files, sizeof(*avc));
if (!avc)
exit_program(1);
for (i = 0, j = 0; i < nb_output_files; i++) {
}
continue;
}
+
+ if (ist->hw_frames_ctx) {
+ s->hw_frames_ctx = av_buffer_ref(ist->hw_frames_ctx);
+ if (!s->hw_frames_ctx)
+ return AV_PIX_FMT_NONE;
+ }
+
ist->active_hwaccel_id = hwaccel->id;
ist->hwaccel_pix_fmt = *p;
break;
static int init_input_stream(int ist_index, char *error, int error_len)
{
- int ret;
+ int i, ret;
InputStream *ist = input_streams[ist_index];
+ for (i = 0; i < ist->nb_filters; i++) {
+ ret = ifilter_parameters_from_decoder(ist->filters[i], ist->dec_ctx);
+ if (ret < 0) {
+ av_log(NULL, AV_LOG_FATAL, "Error initializing filter input\n");
+ return ret;
+ }
+ }
+
if (ist->decoding_needed) {
AVCodec *codec = ist->dec;
if (!codec) {
return FFDIFFSIGN(*(const int64_t *)a, *(const int64_t *)b);
}
+/* open the muxer when all the streams are initialized */
+// Writes the output file header once every stream of 'of' is initialized,
+// then flushes any packets that were buffered in the per-stream muxing
+// queues while the muxer was not yet started. Returns 0 (also when streams
+// are still pending) or a negative AVERROR from avformat_write_header().
+static int check_init_output_file(OutputFile *of, int file_index)
+{
+    int ret, i;
+
+    for (i = 0; i < of->ctx->nb_streams; i++) {
+        OutputStream *ost = output_streams[of->ost_index + i];
+        if (!ost->initialized)
+            // not all streams ready yet; write_packet() keeps buffering
+            // packets until this function succeeds on a later call
+            return 0;
+    }
+
+    of->ctx->interrupt_callback = int_cb;
+
+    ret = avformat_write_header(of->ctx, &of->opts);
+    if (ret < 0) {
+        av_log(NULL, AV_LOG_ERROR,
+               "Could not write header for output file #%d "
+               "(incorrect codec parameters ?): %s\n",
+               file_index, av_err2str(ret));
+        return ret;
+    }
+    //assert_avoptions(of->opts);
+    of->header_written = 1;
+
+    av_dump_format(of->ctx, file_index, of->ctx->filename, 1);
+
+    // print_sdp() itself waits until every output file has its header written
+    if (sdp_filename || want_sdp)
+        print_sdp();
+
+    /* flush the muxing queues */
+    for (i = 0; i < of->ctx->nb_streams; i++) {
+        OutputStream *ost = output_streams[of->ost_index + i];
+
+        while (av_fifo_size(ost->muxing_queue)) {
+            AVPacket pkt;
+            av_fifo_generic_read(ost->muxing_queue, &pkt, sizeof(pkt), NULL);
+            write_packet(of, &pkt, ost);
+        }
+    }
+
+    return 0;
+}
+
static int init_output_bsfs(OutputStream *ost)
{
AVBSFContext *ctx;
// copy timebase while removing common factors
ost->st->time_base = av_add_q(av_stream_get_codec_timebase(ost->st), (AVRational){0, 1});
+ // copy disposition
+ ost->st->disposition = ist->st->disposition;
+
if (ist->st->nb_side_data) {
ost->st->side_data = av_realloc_array(NULL, ist->st->nb_side_data,
sizeof(*ist->st->side_data));
return 0;
}
+/* Set the "encoder" metadata tag on the output stream to the libavcodec
+ * ident plus the encoder name, unless the user already supplied one.
+ * When bitexact output is requested (via -fflags or codec flags), the
+ * version-bearing LIBAVCODEC_IDENT is replaced by plain "Lavc" so the
+ * output stays reproducible across builds. */
+static void set_encoder_id(OutputFile *of, OutputStream *ost)
+{
+    AVDictionaryEntry *e;
+
+    uint8_t *encoder_string;
+    int encoder_string_len;
+    int format_flags = 0;
+    int codec_flags = 0;
+
+    // respect an explicitly user-set encoder tag
+    if (av_dict_get(ost->st->metadata, "encoder", NULL, 0))
+        return;
+
+    // evaluate the muxer "fflags" option string to detect +bitexact
+    e = av_dict_get(of->opts, "fflags", NULL, 0);
+    if (e) {
+        const AVOption *o = av_opt_find(of->ctx, "fflags", NULL, 0, 0);
+        if (!o)
+            return;
+        av_opt_eval_flags(of->ctx, o, e->value, &format_flags);
+    }
+    // same for the per-stream encoder "flags" option
+    e = av_dict_get(ost->encoder_opts, "flags", NULL, 0);
+    if (e) {
+        const AVOption *o = av_opt_find(ost->enc_ctx, "flags", NULL, 0, 0);
+        if (!o)
+            return;
+        av_opt_eval_flags(ost->enc_ctx, o, e->value, &codec_flags);
+    }
+
+    // "<ident> " or "Lavc " + encoder name + NUL
+    encoder_string_len = sizeof(LIBAVCODEC_IDENT) + strlen(ost->enc->name) + 2;
+    encoder_string = av_mallocz(encoder_string_len);
+    if (!encoder_string)
+        exit_program(1);
+
+    if (!(format_flags & AVFMT_FLAG_BITEXACT) && !(codec_flags & AV_CODEC_FLAG_BITEXACT))
+        av_strlcpy(encoder_string, LIBAVCODEC_IDENT " ", encoder_string_len);
+    else
+        av_strlcpy(encoder_string, "Lavc ", encoder_string_len);
+    av_strlcat(encoder_string, ost->enc->name, encoder_string_len);
+    av_dict_set(&ost->st->metadata, "encoder", encoder_string,
+                AV_DICT_DONT_STRDUP_VAL | AV_DICT_DONT_OVERWRITE);
+}
+
+/* Parse the -force_key_frames value 'kf': a comma-separated list of times,
+ * where an entry "chapters[delta]" expands to one keyframe per chapter
+ * start (shifted by the optional delta). The resulting timestamps are
+ * rescaled to the encoder time base, sorted, and stored in
+ * ost->forced_kf_pts / ost->forced_kf_count (ownership stays with ost).
+ * NOTE: 'kf' is modified in place -- commas are overwritten with NULs.
+ * Exits the program on allocation failure. */
+static void parse_forced_key_frames(char *kf, OutputStream *ost,
+                                    AVCodecContext *avctx)
+{
+    char *p;
+    int n = 1, i, size, index = 0;
+    int64_t t, *pts;
+
+    // number of entries = number of commas + 1
+    for (p = kf; *p; p++)
+        if (*p == ',')
+            n++;
+    size = n;
+    pts = av_malloc_array(size, sizeof(*pts));
+    if (!pts) {
+        av_log(NULL, AV_LOG_FATAL, "Could not allocate forced key frames array.\n");
+        exit_program(1);
+    }
+
+    p = kf;
+    for (i = 0; i < n; i++) {
+        char *next = strchr(p, ',');
+
+        if (next)
+            *next++ = 0;
+
+        if (!memcmp(p, "chapters", 8)) {
+
+            AVFormatContext *avf = output_files[ost->file_index]->ctx;
+            int j;
+
+            // grow the array: this one entry becomes nb_chapters entries
+            if (avf->nb_chapters > INT_MAX - size ||
+                !(pts = av_realloc_f(pts, size += avf->nb_chapters - 1,
+                                     sizeof(*pts)))) {
+                av_log(NULL, AV_LOG_FATAL,
+                       "Could not allocate forced key frames array.\n");
+                exit_program(1);
+            }
+            // optional offset after the "chapters" keyword
+            t = p[8] ? parse_time_or_die("force_key_frames", p + 8, 1) : 0;
+            t = av_rescale_q(t, AV_TIME_BASE_Q, avctx->time_base);
+
+            for (j = 0; j < avf->nb_chapters; j++) {
+                AVChapter *c = avf->chapters[j];
+                av_assert1(index < size);
+                pts[index++] = av_rescale_q(c->start, c->time_base,
+                                            avctx->time_base) + t;
+            }
+
+        } else {
+
+            t = parse_time_or_die("force_key_frames", p, 1);
+            av_assert1(index < size);
+            pts[index++] = av_rescale_q(t, AV_TIME_BASE_Q, avctx->time_base);
+
+        }
+
+        p = next;
+    }
+
+    av_assert0(index == size);
+    qsort(pts, size, sizeof(*pts), compare_int64);
+    ost->forced_kf_count = size;
+    ost->forced_kf_pts = pts;
+}
+
+/* Prepare ost->enc_ctx (and related AVStream fields) for encoding:
+ * configures simple filtergraphs, then derives frame rate, dimensions,
+ * pixel/sample format, sample rate, channel layout and time base from the
+ * filter graph output, the corresponding input stream (if any) and user
+ * options; also sets up forced keyframes for video.
+ * Returns 0 on success or a negative AVERROR (currently only from
+ * av_expr_parse on a bad force_key_frames expression); exits the program
+ * on filter configuration failure. */
+static int init_output_stream_encode(OutputStream *ost)
+{
+    InputStream *ist = get_input_stream(ost);
+    AVCodecContext *enc_ctx = ost->enc_ctx;
+    AVCodecContext *dec_ctx = NULL;
+    AVFormatContext *oc = output_files[ost->file_index]->ctx;
+    int j, ret;
+
+    set_encoder_id(output_files[ost->file_index], ost);
+
+    if (ist) {
+        // inherit disposition from the mapped input stream
+        ost->st->disposition = ist->st->disposition;
+
+        dec_ctx = ist->dec_ctx;
+
+        enc_ctx->chroma_sample_location = dec_ctx->chroma_sample_location;
+    } else {
+        // no input stream: if this is the only stream of its media type in
+        // the output, mark audio/video streams as default
+        for (j = 0; j < oc->nb_streams; j++) {
+            AVStream *st = oc->streams[j];
+            if (st != ost->st && st->codecpar->codec_type == ost->st->codecpar->codec_type)
+                break;
+        }
+        if (j == oc->nb_streams)
+            if (ost->st->codecpar->codec_type == AVMEDIA_TYPE_AUDIO ||
+                ost->st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO)
+                ost->st->disposition = AV_DISPOSITION_DEFAULT;
+    }
+
+    if ((enc_ctx->codec_type == AVMEDIA_TYPE_VIDEO ||
+        enc_ctx->codec_type == AVMEDIA_TYPE_AUDIO) &&
+        filtergraph_is_simple(ost->filter->graph)) {
+        FilterGraph *fg = ost->filter->graph;
+
+        if (configure_filtergraph(fg)) {
+            av_log(NULL, AV_LOG_FATAL, "Error opening filters!\n");
+            exit_program(1);
+        }
+    }
+
+    if (enc_ctx->codec_type == AVMEDIA_TYPE_VIDEO) {
+        // frame rate precedence: already-set value (user -r), filter graph,
+        // input -r, input stream r_frame_rate, then a 25 fps fallback
+        if (!ost->frame_rate.num)
+            ost->frame_rate = av_buffersink_get_frame_rate(ost->filter->filter);
+        if (ist && !ost->frame_rate.num)
+            ost->frame_rate = ist->framerate;
+        if (ist && !ost->frame_rate.num)
+            ost->frame_rate = ist->st->r_frame_rate;
+        if (ist && !ost->frame_rate.num) {
+            ost->frame_rate = (AVRational){25, 1};
+            av_log(NULL, AV_LOG_WARNING,
+                   "No information "
+                   "about the input framerate is available. Falling "
+                   "back to a default value of 25fps for output stream #%d:%d. Use the -r option "
+                   "if you want a different framerate.\n",
+                   ost->file_index, ost->index);
+        }
+// ost->frame_rate = ist->st->avg_frame_rate.num ? ist->st->avg_frame_rate : (AVRational){25, 1};
+        // snap to the nearest frame rate the encoder supports, unless -force_fps
+        if (ost->enc && ost->enc->supported_framerates && !ost->force_fps) {
+            int idx = av_find_nearest_q_idx(ost->frame_rate, ost->enc->supported_framerates);
+            ost->frame_rate = ost->enc->supported_framerates[idx];
+        }
+        // reduce frame rate for mpeg4 to be within the spec limits
+        if (enc_ctx->codec_id == AV_CODEC_ID_MPEG4) {
+            av_reduce(&ost->frame_rate.num, &ost->frame_rate.den,
+                      ost->frame_rate.num, ost->frame_rate.den, 65535);
+        }
+    }
+
+    switch (enc_ctx->codec_type) {
+    case AVMEDIA_TYPE_AUDIO:
+        // audio parameters come from the filter graph output link
+        enc_ctx->sample_fmt = ost->filter->filter->inputs[0]->format;
+        if (dec_ctx)
+            enc_ctx->bits_per_raw_sample = FFMIN(dec_ctx->bits_per_raw_sample,
+                                                 av_get_bytes_per_sample(enc_ctx->sample_fmt) << 3);
+        enc_ctx->sample_rate = ost->filter->filter->inputs[0]->sample_rate;
+        enc_ctx->channel_layout = ost->filter->filter->inputs[0]->channel_layout;
+        enc_ctx->channels = avfilter_link_get_channels(ost->filter->filter->inputs[0]);
+        enc_ctx->time_base = (AVRational){ 1, enc_ctx->sample_rate };
+        break;
+    case AVMEDIA_TYPE_VIDEO:
+        enc_ctx->time_base = av_inv_q(ost->frame_rate);
+        if (!(enc_ctx->time_base.num && enc_ctx->time_base.den))
+            enc_ctx->time_base = ost->filter->filter->inputs[0]->time_base;
+        if ( av_q2d(enc_ctx->time_base) < 0.001 && video_sync_method != VSYNC_PASSTHROUGH
+           && (video_sync_method == VSYNC_CFR || video_sync_method == VSYNC_VSCFR || (video_sync_method == VSYNC_AUTO && !(oc->oformat->flags & AVFMT_VARIABLE_FPS)))){
+            av_log(oc, AV_LOG_WARNING, "Frame rate very high for a muxer not efficiently supporting it.\n"
+                                       "Please consider specifying a lower framerate, a different muxer or -vsync 2\n");
+        }
+        // forced keyframe times were parsed in AV_TIME_BASE_Q; move them
+        // to the now-known encoder time base
+        for (j = 0; j < ost->forced_kf_count; j++)
+            ost->forced_kf_pts[j] = av_rescale_q(ost->forced_kf_pts[j],
+                                                 AV_TIME_BASE_Q,
+                                                 enc_ctx->time_base);
+
+        enc_ctx->width = ost->filter->filter->inputs[0]->w;
+        enc_ctx->height = ost->filter->filter->inputs[0]->h;
+        enc_ctx->sample_aspect_ratio = ost->st->sample_aspect_ratio =
+            ost->frame_aspect_ratio.num ? // overridden by the -aspect cli option
+            av_mul_q(ost->frame_aspect_ratio, (AVRational){ enc_ctx->height, enc_ctx->width }) :
+            ost->filter->filter->inputs[0]->sample_aspect_ratio;
+        if (!strncmp(ost->enc->name, "libx264", 7) &&
+            enc_ctx->pix_fmt == AV_PIX_FMT_NONE &&
+            ost->filter->filter->inputs[0]->format != AV_PIX_FMT_YUV420P)
+            av_log(NULL, AV_LOG_WARNING,
+                   "No pixel format specified, %s for H.264 encoding chosen.\n"
+                   "Use -pix_fmt yuv420p for compatibility with outdated media players.\n",
+                   av_get_pix_fmt_name(ost->filter->filter->inputs[0]->format));
+        if (!strncmp(ost->enc->name, "mpeg2video", 10) &&
+            enc_ctx->pix_fmt == AV_PIX_FMT_NONE &&
+            ost->filter->filter->inputs[0]->format != AV_PIX_FMT_YUV420P)
+            av_log(NULL, AV_LOG_WARNING,
+                   "No pixel format specified, %s for MPEG-2 encoding chosen.\n"
+                   "Use -pix_fmt yuv420p for compatibility with outdated media players.\n",
+                   av_get_pix_fmt_name(ost->filter->filter->inputs[0]->format));
+        enc_ctx->pix_fmt = ost->filter->filter->inputs[0]->format;
+        if (dec_ctx)
+            enc_ctx->bits_per_raw_sample = FFMIN(dec_ctx->bits_per_raw_sample,
+                                                 av_pix_fmt_desc_get(enc_ctx->pix_fmt)->comp[0].depth);
+
+        ost->st->avg_frame_rate = ost->frame_rate;
+
+        if (!dec_ctx ||
+            enc_ctx->width != dec_ctx->width ||
+            enc_ctx->height != dec_ctx->height ||
+            enc_ctx->pix_fmt != dec_ctx->pix_fmt) {
+            enc_ctx->bits_per_raw_sample = frame_bits_per_raw_sample;
+        }
+
+        if (ost->forced_keyframes) {
+            if (!strncmp(ost->forced_keyframes, "expr:", 5)) {
+                ret = av_expr_parse(&ost->forced_keyframes_pexpr, ost->forced_keyframes+5,
+                                    forced_keyframes_const_names, NULL, NULL, NULL, NULL, 0, NULL);
+                if (ret < 0) {
+                    av_log(NULL, AV_LOG_ERROR,
+                           "Invalid force_key_frames expression '%s'\n", ost->forced_keyframes+5);
+                    return ret;
+                }
+                ost->forced_keyframes_expr_const_values[FKF_N] = 0;
+                ost->forced_keyframes_expr_const_values[FKF_N_FORCED] = 0;
+                ost->forced_keyframes_expr_const_values[FKF_PREV_FORCED_N] = NAN;
+                ost->forced_keyframes_expr_const_values[FKF_PREV_FORCED_T] = NAN;
+
+                // Don't parse the 'forced_keyframes' in case of 'keep-source-keyframes',
+                // parse it only for static kf timings
+            } else if(strncmp(ost->forced_keyframes, "source", 6)) {
+                parse_forced_key_frames(ost->forced_keyframes, ost, ost->enc_ctx);
+            }
+        }
+        break;
+    case AVMEDIA_TYPE_SUBTITLE:
+        enc_ctx->time_base = (AVRational){1, 1000};
+        if (!enc_ctx->width) {
+            // fall back to the input video dimensions for bitmap subtitles
+            enc_ctx->width = input_streams[ost->source_index]->st->codecpar->width;
+            enc_ctx->height = input_streams[ost->source_index]->st->codecpar->height;
+        }
+        break;
+    case AVMEDIA_TYPE_DATA:
+        break;
+    default:
+        abort();
+        break;
+    }
+
+    return 0;
+}
+
static int init_output_stream(OutputStream *ost, char *error, int error_len)
{
int ret = 0;
AVCodecContext *dec = NULL;
InputStream *ist;
+ ret = init_output_stream_encode(ost);
+ if (ret < 0)
+ return ret;
+
if ((ist = get_input_stream(ost)))
dec = ist->dec_ctx;
if (dec && dec->subtitle_header) {
return ret;
}
+ // Parse the user-provided disposition string and update the stream's disposition flags.
+ if (ost->disposition) {
+ static const AVOption opts[] = {
+ { "disposition" , NULL, 0, AV_OPT_TYPE_FLAGS, { .i64 = 0 }, INT64_MIN, INT64_MAX, .unit = "flags" },
+ { "default" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_DEFAULT }, .unit = "flags" },
+ { "dub" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_DUB }, .unit = "flags" },
+ { "original" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_ORIGINAL }, .unit = "flags" },
+ { "comment" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_COMMENT }, .unit = "flags" },
+ { "lyrics" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_LYRICS }, .unit = "flags" },
+ { "karaoke" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_KARAOKE }, .unit = "flags" },
+ { "forced" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_FORCED }, .unit = "flags" },
+ { "hearing_impaired" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_HEARING_IMPAIRED }, .unit = "flags" },
+ { "visual_impaired" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_VISUAL_IMPAIRED }, .unit = "flags" },
+ { "clean_effects" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_CLEAN_EFFECTS }, .unit = "flags" },
+ { "captions" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_CAPTIONS }, .unit = "flags" },
+ { "descriptions" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_DESCRIPTIONS }, .unit = "flags" },
+ { "metadata" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_METADATA }, .unit = "flags" },
+ { NULL },
+ };
+ static const AVClass class = {
+ .class_name = "",
+ .item_name = av_default_item_name,
+ .option = opts,
+ .version = LIBAVUTIL_VERSION_INT,
+ };
+ const AVClass *pclass = &class;
+
+ ret = av_opt_eval_flags(&pclass, &opts[0], ost->disposition, &ost->st->disposition);
+ if (ret < 0)
+ return ret;
+ }
+
/* initialize bitstream filters for the output stream
* needs to be done here, because the codec id for streamcopy is not
* known until now */
if (ret < 0)
return ret;
- return ret;
-}
-
-static void parse_forced_key_frames(char *kf, OutputStream *ost,
- AVCodecContext *avctx)
-{
- char *p;
- int n = 1, i, size, index = 0;
- int64_t t, *pts;
-
- for (p = kf; *p; p++)
- if (*p == ',')
- n++;
- size = n;
- pts = av_malloc_array(size, sizeof(*pts));
- if (!pts) {
- av_log(NULL, AV_LOG_FATAL, "Could not allocate forced key frames array.\n");
- exit_program(1);
- }
-
- p = kf;
- for (i = 0; i < n; i++) {
- char *next = strchr(p, ',');
-
- if (next)
- *next++ = 0;
-
- if (!memcmp(p, "chapters", 8)) {
-
- AVFormatContext *avf = output_files[ost->file_index]->ctx;
- int j;
-
- if (avf->nb_chapters > INT_MAX - size ||
- !(pts = av_realloc_f(pts, size += avf->nb_chapters - 1,
- sizeof(*pts)))) {
- av_log(NULL, AV_LOG_FATAL,
- "Could not allocate forced key frames array.\n");
- exit_program(1);
- }
- t = p[8] ? parse_time_or_die("force_key_frames", p + 8, 1) : 0;
- t = av_rescale_q(t, AV_TIME_BASE_Q, avctx->time_base);
-
- for (j = 0; j < avf->nb_chapters; j++) {
- AVChapter *c = avf->chapters[j];
- av_assert1(index < size);
- pts[index++] = av_rescale_q(c->start, c->time_base,
- avctx->time_base) + t;
- }
-
- } else {
-
- t = parse_time_or_die("force_key_frames", p, 1);
- av_assert1(index < size);
- pts[index++] = av_rescale_q(t, AV_TIME_BASE_Q, avctx->time_base);
-
- }
+ ost->initialized = 1;
- p = next;
- }
+ ret = check_init_output_file(output_files[ost->file_index], ost->file_index);
+ if (ret < 0)
+ return ret;
- av_assert0(index == size);
- qsort(pts, size, sizeof(*pts), compare_int64);
- ost->forced_kf_count = size;
- ost->forced_kf_pts = pts;
+ return ret;
}
static void report_new_stream(int input_index, AVPacket *pkt)
file->nb_streams_warn = pkt->stream_index + 1;
}
-static void set_encoder_id(OutputFile *of, OutputStream *ost)
-{
- AVDictionaryEntry *e;
-
- uint8_t *encoder_string;
- int encoder_string_len;
- int format_flags = 0;
- int codec_flags = 0;
-
- if (av_dict_get(ost->st->metadata, "encoder", NULL, 0))
- return;
-
- e = av_dict_get(of->opts, "fflags", NULL, 0);
- if (e) {
- const AVOption *o = av_opt_find(of->ctx, "fflags", NULL, 0, 0);
- if (!o)
- return;
- av_opt_eval_flags(of->ctx, o, e->value, &format_flags);
- }
- e = av_dict_get(ost->encoder_opts, "flags", NULL, 0);
- if (e) {
- const AVOption *o = av_opt_find(ost->enc_ctx, "flags", NULL, 0, 0);
- if (!o)
- return;
- av_opt_eval_flags(ost->enc_ctx, o, e->value, &codec_flags);
- }
-
- encoder_string_len = sizeof(LIBAVCODEC_IDENT) + strlen(ost->enc->name) + 2;
- encoder_string = av_mallocz(encoder_string_len);
- if (!encoder_string)
- exit_program(1);
-
- if (!(format_flags & AVFMT_FLAG_BITEXACT) && !(codec_flags & AV_CODEC_FLAG_BITEXACT))
- av_strlcpy(encoder_string, LIBAVCODEC_IDENT " ", encoder_string_len);
- else
- av_strlcpy(encoder_string, "Lavc ", encoder_string_len);
- av_strlcat(encoder_string, ost->enc->name, encoder_string_len);
- av_dict_set(&ost->st->metadata, "encoder", encoder_string,
- AV_DICT_DONT_STRDUP_VAL | AV_DICT_DONT_OVERWRITE);
-}
-
static int transcode_init(void)
{
int ret = 0, i, j, k;
OutputStream *ost;
InputStream *ist;
char error[1024] = {0};
- int want_sdp = 1;
for (i = 0; i < nb_filtergraphs; i++) {
FilterGraph *fg = filtergraphs[i];
input_streams[j + ifile->ist_index]->start = av_gettime_relative();
}
- /* for each output stream, we compute the right encoding parameters */
+ /* hwaccel transcoding */
for (i = 0; i < nb_output_streams; i++) {
ost = output_streams[i];
- oc = output_files[ost->file_index]->ctx;
- ist = get_input_stream(ost);
-
- if (ost->attachment_filename)
- continue;
-
- if (ist) {
- ost->st->disposition = ist->st->disposition;
- } else {
- for (j=0; j<oc->nb_streams; j++) {
- AVStream *st = oc->streams[j];
- if (st != ost->st && st->codecpar->codec_type == ost->st->codecpar->codec_type)
- break;
- }
- if (j == oc->nb_streams)
- if (ost->st->codecpar->codec_type == AVMEDIA_TYPE_AUDIO ||
- ost->st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO)
- ost->st->disposition = AV_DISPOSITION_DEFAULT;
- }
if (!ost->stream_copy) {
- AVCodecContext *enc_ctx = ost->enc_ctx;
- AVCodecContext *dec_ctx = NULL;
-
- set_encoder_id(output_files[ost->file_index], ost);
-
- if (ist) {
- dec_ctx = ist->dec_ctx;
-
- enc_ctx->chroma_sample_location = dec_ctx->chroma_sample_location;
- }
-
#if CONFIG_LIBMFX
if (qsv_transcode_init(ost))
exit_program(1);
if (cuvid_transcode_init(ost))
exit_program(1);
#endif
-
- if ((enc_ctx->codec_type == AVMEDIA_TYPE_VIDEO ||
- enc_ctx->codec_type == AVMEDIA_TYPE_AUDIO) &&
- filtergraph_is_simple(ost->filter->graph)) {
- FilterGraph *fg = ost->filter->graph;
- if (configure_filtergraph(fg)) {
- av_log(NULL, AV_LOG_FATAL, "Error opening filters!\n");
- exit_program(1);
- }
- }
-
- if (enc_ctx->codec_type == AVMEDIA_TYPE_VIDEO) {
- if (!ost->frame_rate.num)
- ost->frame_rate = av_buffersink_get_frame_rate(ost->filter->filter);
- if (ist && !ost->frame_rate.num)
- ost->frame_rate = ist->framerate;
- if (ist && !ost->frame_rate.num)
- ost->frame_rate = ist->st->r_frame_rate;
- if (ist && !ost->frame_rate.num) {
- ost->frame_rate = (AVRational){25, 1};
- av_log(NULL, AV_LOG_WARNING,
- "No information "
- "about the input framerate is available. Falling "
- "back to a default value of 25fps for output stream #%d:%d. Use the -r option "
- "if you want a different framerate.\n",
- ost->file_index, ost->index);
- }
-// ost->frame_rate = ist->st->avg_frame_rate.num ? ist->st->avg_frame_rate : (AVRational){25, 1};
- if (ost->enc && ost->enc->supported_framerates && !ost->force_fps) {
- int idx = av_find_nearest_q_idx(ost->frame_rate, ost->enc->supported_framerates);
- ost->frame_rate = ost->enc->supported_framerates[idx];
- }
- // reduce frame rate for mpeg4 to be within the spec limits
- if (enc_ctx->codec_id == AV_CODEC_ID_MPEG4) {
- av_reduce(&ost->frame_rate.num, &ost->frame_rate.den,
- ost->frame_rate.num, ost->frame_rate.den, 65535);
- }
- }
-
- switch (enc_ctx->codec_type) {
- case AVMEDIA_TYPE_AUDIO:
- enc_ctx->sample_fmt = ost->filter->filter->inputs[0]->format;
- if (dec_ctx)
- enc_ctx->bits_per_raw_sample = FFMIN(dec_ctx->bits_per_raw_sample,
- av_get_bytes_per_sample(enc_ctx->sample_fmt) << 3);
- enc_ctx->sample_rate = ost->filter->filter->inputs[0]->sample_rate;
- enc_ctx->channel_layout = ost->filter->filter->inputs[0]->channel_layout;
- enc_ctx->channels = avfilter_link_get_channels(ost->filter->filter->inputs[0]);
- enc_ctx->time_base = (AVRational){ 1, enc_ctx->sample_rate };
- break;
- case AVMEDIA_TYPE_VIDEO:
- enc_ctx->time_base = av_inv_q(ost->frame_rate);
- if (!(enc_ctx->time_base.num && enc_ctx->time_base.den))
- enc_ctx->time_base = ost->filter->filter->inputs[0]->time_base;
- if ( av_q2d(enc_ctx->time_base) < 0.001 && video_sync_method != VSYNC_PASSTHROUGH
- && (video_sync_method == VSYNC_CFR || video_sync_method == VSYNC_VSCFR || (video_sync_method == VSYNC_AUTO && !(oc->oformat->flags & AVFMT_VARIABLE_FPS)))){
- av_log(oc, AV_LOG_WARNING, "Frame rate very high for a muxer not efficiently supporting it.\n"
- "Please consider specifying a lower framerate, a different muxer or -vsync 2\n");
- }
- for (j = 0; j < ost->forced_kf_count; j++)
- ost->forced_kf_pts[j] = av_rescale_q(ost->forced_kf_pts[j],
- AV_TIME_BASE_Q,
- enc_ctx->time_base);
-
- enc_ctx->width = ost->filter->filter->inputs[0]->w;
- enc_ctx->height = ost->filter->filter->inputs[0]->h;
- enc_ctx->sample_aspect_ratio = ost->st->sample_aspect_ratio =
- ost->frame_aspect_ratio.num ? // overridden by the -aspect cli option
- av_mul_q(ost->frame_aspect_ratio, (AVRational){ enc_ctx->height, enc_ctx->width }) :
- ost->filter->filter->inputs[0]->sample_aspect_ratio;
- if (!strncmp(ost->enc->name, "libx264", 7) &&
- enc_ctx->pix_fmt == AV_PIX_FMT_NONE &&
- ost->filter->filter->inputs[0]->format != AV_PIX_FMT_YUV420P)
- av_log(NULL, AV_LOG_WARNING,
- "No pixel format specified, %s for H.264 encoding chosen.\n"
- "Use -pix_fmt yuv420p for compatibility with outdated media players.\n",
- av_get_pix_fmt_name(ost->filter->filter->inputs[0]->format));
- if (!strncmp(ost->enc->name, "mpeg2video", 10) &&
- enc_ctx->pix_fmt == AV_PIX_FMT_NONE &&
- ost->filter->filter->inputs[0]->format != AV_PIX_FMT_YUV420P)
- av_log(NULL, AV_LOG_WARNING,
- "No pixel format specified, %s for MPEG-2 encoding chosen.\n"
- "Use -pix_fmt yuv420p for compatibility with outdated media players.\n",
- av_get_pix_fmt_name(ost->filter->filter->inputs[0]->format));
- enc_ctx->pix_fmt = ost->filter->filter->inputs[0]->format;
- if (dec_ctx)
- enc_ctx->bits_per_raw_sample = FFMIN(dec_ctx->bits_per_raw_sample,
- av_pix_fmt_desc_get(enc_ctx->pix_fmt)->comp[0].depth);
-
- ost->st->avg_frame_rate = ost->frame_rate;
-
- if (!dec_ctx ||
- enc_ctx->width != dec_ctx->width ||
- enc_ctx->height != dec_ctx->height ||
- enc_ctx->pix_fmt != dec_ctx->pix_fmt) {
- enc_ctx->bits_per_raw_sample = frame_bits_per_raw_sample;
- }
-
- if (ost->forced_keyframes) {
- if (!strncmp(ost->forced_keyframes, "expr:", 5)) {
- ret = av_expr_parse(&ost->forced_keyframes_pexpr, ost->forced_keyframes+5,
- forced_keyframes_const_names, NULL, NULL, NULL, NULL, 0, NULL);
- if (ret < 0) {
- av_log(NULL, AV_LOG_ERROR,
- "Invalid force_key_frames expression '%s'\n", ost->forced_keyframes+5);
- return ret;
- }
- ost->forced_keyframes_expr_const_values[FKF_N] = 0;
- ost->forced_keyframes_expr_const_values[FKF_N_FORCED] = 0;
- ost->forced_keyframes_expr_const_values[FKF_PREV_FORCED_N] = NAN;
- ost->forced_keyframes_expr_const_values[FKF_PREV_FORCED_T] = NAN;
-
- // Don't parse the 'forced_keyframes' in case of 'keep-source-keyframes',
- // parse it only for static kf timings
- } else if(strncmp(ost->forced_keyframes, "source", 6)) {
- parse_forced_key_frames(ost->forced_keyframes, ost, ost->enc_ctx);
- }
- }
- break;
- case AVMEDIA_TYPE_SUBTITLE:
- enc_ctx->time_base = (AVRational){1, 1000};
- if (!enc_ctx->width) {
- enc_ctx->width = input_streams[ost->source_index]->st->codecpar->width;
- enc_ctx->height = input_streams[ost->source_index]->st->codecpar->height;
- }
- break;
- case AVMEDIA_TYPE_DATA:
- break;
- default:
- abort();
- break;
- }
- }
-
- if (ost->disposition) {
- static const AVOption opts[] = {
- { "disposition" , NULL, 0, AV_OPT_TYPE_FLAGS, { .i64 = 0 }, INT64_MIN, INT64_MAX, .unit = "flags" },
- { "default" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_DEFAULT }, .unit = "flags" },
- { "dub" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_DUB }, .unit = "flags" },
- { "original" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_ORIGINAL }, .unit = "flags" },
- { "comment" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_COMMENT }, .unit = "flags" },
- { "lyrics" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_LYRICS }, .unit = "flags" },
- { "karaoke" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_KARAOKE }, .unit = "flags" },
- { "forced" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_FORCED }, .unit = "flags" },
- { "hearing_impaired" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_HEARING_IMPAIRED }, .unit = "flags" },
- { "visual_impaired" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_VISUAL_IMPAIRED }, .unit = "flags" },
- { "clean_effects" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_CLEAN_EFFECTS }, .unit = "flags" },
- { "captions" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_CAPTIONS }, .unit = "flags" },
- { "descriptions" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_DESCRIPTIONS }, .unit = "flags" },
- { "metadata" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_METADATA }, .unit = "flags" },
- { NULL },
- };
- static const AVClass class = {
- .class_name = "",
- .item_name = av_default_item_name,
- .option = opts,
- .version = LIBAVUTIL_VERSION_INT,
- };
- const AVClass *pclass = &class;
-
- ret = av_opt_eval_flags(&pclass, &opts[0], ost->disposition, &ost->st->disposition);
- if (ret < 0)
- goto dump_format;
}
}
}
}
- /* open files and write file headers */
+ /* write headers for files with no streams */
for (i = 0; i < nb_output_files; i++) {
oc = output_files[i]->ctx;
- oc->interrupt_callback = int_cb;
- if ((ret = avformat_write_header(oc, &output_files[i]->opts)) < 0) {
- snprintf(error, sizeof(error),
- "Could not write header for output file #%d "
- "(incorrect codec parameters ?): %s",
- i, av_err2str(ret));
- ret = AVERROR(EINVAL);
- goto dump_format;
- }
-// assert_avoptions(output_files[i]->opts);
- if (strcmp(oc->oformat->name, "rtp")) {
- want_sdp = 0;
+ if (oc->oformat->flags & AVFMT_NOSTREAMS && oc->nb_streams == 0) {
+ ret = check_init_output_file(output_files[i], i);
+ if (ret < 0)
+ goto dump_format;
}
}
dump_format:
- /* dump the file output parameters - cannot be done before in case
- of stream copy */
- for (i = 0; i < nb_output_files; i++) {
- av_dump_format(output_files[i]->ctx, i, output_files[i]->ctx->filename, 1);
- }
-
/* dump the stream mapping */
av_log(NULL, AV_LOG_INFO, "Stream mapping:\n");
for (i = 0; i < nb_input_streams; i++) {
return ret;
}
- if (sdp_filename || want_sdp) {
- print_sdp();
- }
-
transcode_init_done = 1;
return 0;
"+ increase verbosity\n"
"- decrease verbosity\n"
"c Send command to first matching filter supporting it\n"
- "C Send/Que command to all matching filters\n"
+ "C Send/Queue command to all matching filters\n"
"D cycle through available debug modes\n"
"h dump packets/hex press to cycle through the 3 states\n"
"q quit\n"
/* write the trailer if needed and close file */
for (i = 0; i < nb_output_files; i++) {
os = output_files[i]->ctx;
+ if (!output_files[i]->header_written) {
+ av_log(NULL, AV_LOG_ERROR,
+ "Nothing was written into output file %d (%s), because "
+ "at least one of its streams received no packets.\n",
+ i, os->filename);
+ continue;
+ }
if ((ret = av_write_trailer(os)) < 0) {
av_log(NULL, AV_LOG_ERROR, "Error writing trailer of %s: %s", os->filename, av_err2str(ret));
if (exit_on_error)
int main(int argc, char **argv)
{
- int ret;
+ int i, ret;
int64_t ti;
init_dynload();
show_banner(argc, argv, options);
- term_init();
-
/* parse options and open all input/output files */
ret = ffmpeg_parse_options(argc, argv);
if (ret < 0)
// exit_program(1);
// }
+ for (i = 0; i < nb_output_files; i++) {
+ if (strcmp(output_files[i]->ctx->oformat->name, "rtp"))
+ want_sdp = 0;
+ }
+
current_time = ti = getutime();
if (transcode() < 0)
exit_program(1);