X-Git-Url: https://git.sesse.net/?a=blobdiff_plain;f=fftools%2Fffmpeg.c;h=3ad11452da530db60a3a4efc87f74585903c3771;hb=3a370868dc33061a20d1fd99274e65167d7a78ac;hp=ad95a0e4179918ac0942249d290866f02e104fbd;hpb=f30a41a6086eb8c10f66090739a2a4f8491c3c7a;p=ffmpeg diff --git a/fftools/ffmpeg.c b/fftools/ffmpeg.c index ad95a0e4179..3ad11452da5 100644 --- a/fftools/ffmpeg.c +++ b/fftools/ffmpeg.c @@ -136,6 +136,7 @@ static int nb_frames_dup = 0; static unsigned dup_warning = 1000; static int nb_frames_drop = 0; static int64_t decode_error_stat[2]; +static unsigned nb_output_dumped = 0; static int want_sdp = 1; @@ -344,6 +345,7 @@ static volatile int received_nb_signals = 0; static atomic_int transcode_init_done = ATOMIC_VAR_INIT(0); static volatile int ffmpeg_exited = 0; static int main_return_code = 0; +static int64_t copy_ts_first_pts = AV_NOPTS_VALUE; static void sigterm_handler(int sig) @@ -392,8 +394,30 @@ static BOOL WINAPI CtrlHandler(DWORD fdwCtrlType) } #endif +#ifdef __linux__ +#define SIGNAL(sig, func) \ + do { \ + action.sa_handler = func; \ + sigaction(sig, &action, NULL); \ + } while (0) +#else +#define SIGNAL(sig, func) \ + signal(sig, func) +#endif + void term_init(void) { +#if defined __linux__ + struct sigaction action = {0}; + action.sa_handler = sigterm_handler; + + /* block other interrupts while processing this one */ + sigfillset(&action.sa_mask); + + /* restart interruptible functions (i.e. don't fail with EINTR) */ + action.sa_flags = SA_RESTART; +#endif + #if HAVE_TERMIOS_H if (!run_as_daemon && stdin_interaction) { struct termios tty; @@ -412,14 +436,14 @@ void term_init(void) tcsetattr (0, TCSANOW, &tty); } - signal(SIGQUIT, sigterm_handler); /* Quit (POSIX). */ + SIGNAL(SIGQUIT, sigterm_handler); /* Quit (POSIX). */ } #endif - signal(SIGINT , sigterm_handler); /* Interrupt (ANSI). */ - signal(SIGTERM, sigterm_handler); /* Termination (ANSI). */ + SIGNAL(SIGINT , sigterm_handler); /* Interrupt (ANSI). */ + SIGNAL(SIGTERM, sigterm_handler); /* Termination (ANSI). */ #ifdef SIGXCPU - signal(SIGXCPU, sigterm_handler); + SIGNAL(SIGXCPU, sigterm_handler); #endif #ifdef SIGPIPE signal(SIGPIPE, SIG_IGN); /* Broken pipe (POSIX). 
*/ @@ -528,6 +552,7 @@ static void ffmpeg_cleanup(int ret) for (j = 0; j < fg->nb_outputs; j++) { OutputFilter *ofilter = fg->outputs[j]; + avfilter_inout_free(&ofilter->out_tmp); av_freep(&ofilter->name); av_freep(&ofilter->formats); av_freep(&ofilter->channel_layouts); @@ -567,6 +592,7 @@ static void ffmpeg_cleanup(int ret) av_frame_free(&ost->filtered_frame); av_frame_free(&ost->last_frame); + av_packet_free(&ost->pkt); av_dict_free(&ost->encoder_opts); av_freep(&ost->forced_keyframes); @@ -585,9 +611,9 @@ static void ffmpeg_cleanup(int ret) if (ost->muxing_queue) { while (av_fifo_size(ost->muxing_queue)) { - AVPacket pkt; + AVPacket *pkt; av_fifo_generic_read(ost->muxing_queue, &pkt, sizeof(pkt), NULL); - av_packet_unref(&pkt); + av_packet_free(&pkt); } av_fifo_freep(&ost->muxing_queue); } @@ -599,6 +625,7 @@ static void ffmpeg_cleanup(int ret) #endif for (i = 0; i < nb_input_files; i++) { avformat_close_input(&input_files[i]->ctx); + av_packet_free(&input_files[i]->pkt); av_freep(&input_files[i]); } for (i = 0; i < nb_input_streams; i++) { @@ -606,6 +633,7 @@ static void ffmpeg_cleanup(int ret) av_frame_free(&ist->decoded_frame); av_frame_free(&ist->filter_frame); + av_packet_free(&ist->pkt); av_dict_free(&ist->decoder_opts); avsubtitle_free(&ist->prev_sub.subtitle); av_frame_free(&ist->sub2video.frame); @@ -663,7 +691,7 @@ void assert_avoptions(AVDictionary *m) } } -static void abort_codec_experimental(AVCodec *c, int encoder) +static void abort_codec_experimental(const AVCodec *c, int encoder) { exit_program(1); } @@ -721,11 +749,16 @@ static void write_packet(OutputFile *of, AVPacket *pkt, OutputStream *ost, int u } if (!of->header_written) { - AVPacket tmp_pkt = {0}; + AVPacket *tmp_pkt; /* the muxer is not initialized yet, buffer the packet */ if (!av_fifo_space(ost->muxing_queue)) { - int new_size = FFMIN(2 * av_fifo_size(ost->muxing_queue), - ost->max_muxing_queue_size); + unsigned int are_we_over_size = + (ost->muxing_queue_data_size + pkt->size) > ost->muxing_queue_data_threshold; + int new_size = are_we_over_size ? + FFMIN(2 * av_fifo_size(ost->muxing_queue), + ost->max_muxing_queue_size) : + 2 * av_fifo_size(ost->muxing_queue); + if (new_size <= av_fifo_size(ost->muxing_queue)) { av_log(NULL, AV_LOG_ERROR, "Too many packets buffered for output stream %d:%d.\n", @@ -739,7 +772,11 @@ static void write_packet(OutputFile *of, AVPacket *pkt, OutputStream *ost, int u ret = av_packet_make_refcounted(pkt); if (ret < 0) exit_program(1); - av_packet_move_ref(&tmp_pkt, pkt); + tmp_pkt = av_packet_alloc(); + if (!tmp_pkt) + exit_program(1); + av_packet_move_ref(tmp_pkt, pkt); + ost->muxing_queue_data_size += tmp_pkt->size; av_fifo_generic_write(ost->muxing_queue, &tmp_pkt, sizeof(tmp_pkt), NULL); return; } @@ -896,16 +933,82 @@ static int check_recording_time(OutputStream *ost) return 1; } +static double adjust_frame_pts_to_encoder_tb(OutputFile *of, OutputStream *ost, + AVFrame *frame) +{ + double float_pts = AV_NOPTS_VALUE; // this is identical to frame.pts but with higher precision + AVCodecContext *enc = ost->enc_ctx; + if (!frame || frame->pts == AV_NOPTS_VALUE || + !enc || !ost->filter || !ost->filter->graph->graph) + goto early_exit; + + { + AVFilterContext *filter = ost->filter->filter; + + int64_t start_time = (of->start_time == AV_NOPTS_VALUE) ? 
0 : of->start_time; + AVRational filter_tb = av_buffersink_get_time_base(filter); + AVRational tb = enc->time_base; + int extra_bits = av_clip(29 - av_log2(tb.den), 0, 16); + + tb.den <<= extra_bits; + float_pts = + av_rescale_q(frame->pts, filter_tb, tb) - + av_rescale_q(start_time, AV_TIME_BASE_Q, tb); + float_pts /= 1 << extra_bits; + // avoid exact midoints to reduce the chance of rounding differences, this can be removed in case the fps code is changed to work with integers + float_pts += FFSIGN(float_pts) * 1.0 / (1<<17); + + frame->pts = + av_rescale_q(frame->pts, filter_tb, enc->time_base) - + av_rescale_q(start_time, AV_TIME_BASE_Q, enc->time_base); + } + +early_exit: + + if (debug_ts) { + av_log(NULL, AV_LOG_INFO, "filter -> pts:%s pts_time:%s exact:%f time_base:%d/%d\n", + frame ? av_ts2str(frame->pts) : "NULL", + frame ? av_ts2timestr(frame->pts, &enc->time_base) : "NULL", + float_pts, + enc ? enc->time_base.num : -1, + enc ? enc->time_base.den : -1); + } + + return float_pts; +} + +static int init_output_stream(OutputStream *ost, AVFrame *frame, + char *error, int error_len); + +static int init_output_stream_wrapper(OutputStream *ost, AVFrame *frame, + unsigned int fatal) +{ + int ret = AVERROR_BUG; + char error[1024] = {0}; + + if (ost->initialized) + return 0; + + ret = init_output_stream(ost, frame, error, sizeof(error)); + if (ret < 0) { + av_log(NULL, AV_LOG_ERROR, "Error initializing output stream %d:%d -- %s\n", + ost->file_index, ost->index, error); + + if (fatal) + exit_program(1); + } + + return ret; +} + static void do_audio_out(OutputFile *of, OutputStream *ost, AVFrame *frame) { AVCodecContext *enc = ost->enc_ctx; - AVPacket pkt; + AVPacket *pkt = ost->pkt; int ret; - av_init_packet(&pkt); - pkt.data = NULL; - pkt.size = 0; + adjust_frame_pts_to_encoder_tb(of, ost, frame); if (!check_recording_time(ost)) return; @@ -916,7 +1019,6 @@ static void do_audio_out(OutputFile *of, OutputStream *ost, ost->samples_encoded += frame->nb_samples; ost->frames_encoded++; - av_assert0(pkt.size || !pkt.data); update_benchmark(NULL); if (debug_ts) { av_log(NULL, AV_LOG_INFO, "encoder <- type:audio " @@ -930,7 +1032,8 @@ static void do_audio_out(OutputFile *of, OutputStream *ost, goto error; while (1) { - ret = avcodec_receive_packet(enc, &pkt); + av_packet_unref(pkt); + ret = avcodec_receive_packet(enc, pkt); if (ret == AVERROR(EAGAIN)) break; if (ret < 0) @@ -938,16 +1041,16 @@ static void do_audio_out(OutputFile *of, OutputStream *ost, update_benchmark("encode_audio %d.%d", ost->file_index, ost->index); - av_packet_rescale_ts(&pkt, enc->time_base, ost->mux_timebase); + av_packet_rescale_ts(pkt, enc->time_base, ost->mux_timebase); if (debug_ts) { av_log(NULL, AV_LOG_INFO, "encoder -> type:audio " "pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s\n", - av_ts2str(pkt.pts), av_ts2timestr(pkt.pts, &enc->time_base), - av_ts2str(pkt.dts), av_ts2timestr(pkt.dts, &enc->time_base)); + av_ts2str(pkt->pts), av_ts2timestr(pkt->pts, &enc->time_base), + av_ts2str(pkt->dts), av_ts2timestr(pkt->dts, &enc->time_base)); } - output_packet(of, &pkt, ost, 0); + output_packet(of, pkt, ost, 0); } return; @@ -963,7 +1066,7 @@ static void do_subtitle_out(OutputFile *of, int subtitle_out_max_size = 1024 * 1024; int subtitle_out_size, nb, i; AVCodecContext *enc; - AVPacket pkt; + AVPacket *pkt = ost->pkt; int64_t pts; if (sub->pts == AV_NOPTS_VALUE) { @@ -1021,41 +1124,43 @@ static void do_subtitle_out(OutputFile *of, exit_program(1); } - av_init_packet(&pkt); - pkt.data = subtitle_out; - pkt.size = 
subtitle_out_size; - pkt.pts = av_rescale_q(sub->pts, AV_TIME_BASE_Q, ost->mux_timebase); - pkt.duration = av_rescale_q(sub->end_display_time, (AVRational){ 1, 1000 }, ost->mux_timebase); + av_packet_unref(pkt); + pkt->data = subtitle_out; + pkt->size = subtitle_out_size; + pkt->pts = av_rescale_q(sub->pts, AV_TIME_BASE_Q, ost->mux_timebase); + pkt->duration = av_rescale_q(sub->end_display_time, (AVRational){ 1, 1000 }, ost->mux_timebase); if (enc->codec_id == AV_CODEC_ID_DVB_SUBTITLE) { /* XXX: the pts correction is handled here. Maybe handling it in the codec would be better */ if (i == 0) - pkt.pts += av_rescale_q(sub->start_display_time, (AVRational){ 1, 1000 }, ost->mux_timebase); + pkt->pts += av_rescale_q(sub->start_display_time, (AVRational){ 1, 1000 }, ost->mux_timebase); else - pkt.pts += av_rescale_q(sub->end_display_time, (AVRational){ 1, 1000 }, ost->mux_timebase); + pkt->pts += av_rescale_q(sub->end_display_time, (AVRational){ 1, 1000 }, ost->mux_timebase); } - pkt.dts = pkt.pts; - output_packet(of, &pkt, ost, 0); + pkt->dts = pkt->pts; + output_packet(of, pkt, ost, 0); } } static void do_video_out(OutputFile *of, OutputStream *ost, - AVFrame *next_picture, - double sync_ipts) + AVFrame *next_picture) { int ret, format_video_sync; - AVPacket pkt; + AVPacket *pkt = ost->pkt; AVCodecContext *enc = ost->enc_ctx; - AVCodecParameters *mux_par = ost->st->codecpar; AVRational frame_rate; int nb_frames, nb0_frames, i; double delta, delta0; double duration = 0; + double sync_ipts = AV_NOPTS_VALUE; int frame_size = 0; InputStream *ist = NULL; AVFilterContext *filter = ost->filter->filter; + init_output_stream_wrapper(ost, next_picture, 1); + sync_ipts = adjust_frame_pts_to_encoder_tb(of, ost, next_picture); + if (ost->source_index >= 0) ist = input_streams[ost->source_index]; @@ -1188,9 +1293,6 @@ static void do_video_out(OutputFile *of, AVFrame *in_picture; int forced_keyframe = 0; double pts_time; - av_init_packet(&pkt); - pkt.data = NULL; - pkt.size = 0; if (i < nb0_frames && ost->last_frame) { in_picture = ost->last_frame; @@ -1205,18 +1307,6 @@ static void do_video_out(OutputFile *of, if (!check_recording_time(ost)) return; - if (enc->flags & (AV_CODEC_FLAG_INTERLACED_DCT | AV_CODEC_FLAG_INTERLACED_ME) && - ost->top_field_first >= 0) - in_picture->top_field_first = !!ost->top_field_first; - - if (in_picture->interlaced_frame) { - if (enc->codec->id == AV_CODEC_ID_MJPEG) - mux_par->field_order = in_picture->top_field_first ? AV_FIELD_TT:AV_FIELD_BB; - else - mux_par->field_order = in_picture->top_field_first ? 
AV_FIELD_TB:AV_FIELD_BT; - } else - mux_par->field_order = AV_FIELD_PROGRESSIVE; - in_picture->quality = enc->global_quality; in_picture->pict_type = 0; @@ -1281,7 +1371,8 @@ static void do_video_out(OutputFile *of, av_frame_remove_side_data(in_picture, AV_FRAME_DATA_A53_CC); while (1) { - ret = avcodec_receive_packet(enc, &pkt); + av_packet_unref(pkt); + ret = avcodec_receive_packet(enc, pkt); update_benchmark("encode_video %d.%d", ost->file_index, ost->index); if (ret == AVERROR(EAGAIN)) break; @@ -1291,24 +1382,24 @@ static void do_video_out(OutputFile *of, if (debug_ts) { av_log(NULL, AV_LOG_INFO, "encoder -> type:video " "pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s\n", - av_ts2str(pkt.pts), av_ts2timestr(pkt.pts, &enc->time_base), - av_ts2str(pkt.dts), av_ts2timestr(pkt.dts, &enc->time_base)); + av_ts2str(pkt->pts), av_ts2timestr(pkt->pts, &enc->time_base), + av_ts2str(pkt->dts), av_ts2timestr(pkt->dts, &enc->time_base)); } - if (pkt.pts == AV_NOPTS_VALUE && !(enc->codec->capabilities & AV_CODEC_CAP_DELAY)) - pkt.pts = ost->sync_opts; + if (pkt->pts == AV_NOPTS_VALUE && !(enc->codec->capabilities & AV_CODEC_CAP_DELAY)) + pkt->pts = ost->sync_opts; - av_packet_rescale_ts(&pkt, enc->time_base, ost->mux_timebase); + av_packet_rescale_ts(pkt, enc->time_base, ost->mux_timebase); if (debug_ts) { av_log(NULL, AV_LOG_INFO, "encoder -> type:video " "pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s\n", - av_ts2str(pkt.pts), av_ts2timestr(pkt.pts, &ost->mux_timebase), - av_ts2str(pkt.dts), av_ts2timestr(pkt.dts, &ost->mux_timebase)); + av_ts2str(pkt->pts), av_ts2timestr(pkt->pts, &ost->mux_timebase), + av_ts2str(pkt->dts), av_ts2timestr(pkt->dts, &ost->mux_timebase)); } - frame_size = pkt.size; - output_packet(of, &pkt, ost, 0); + frame_size = pkt->size; + output_packet(of, pkt, ost, 0); /* if two pass, output log */ if (ost->logfile && enc->stats_out) { @@ -1389,8 +1480,6 @@ static void do_video_stats(OutputStream *ost, int frame_size) } } -static int init_output_stream(OutputStream *ost, char *error, int error_len); - static void finish_output_stream(OutputStream *ost) { OutputFile *of = output_files[ost->file_index]; @@ -1427,23 +1516,27 @@ static int reap_filters(int flush) continue; filter = ost->filter->filter; - if (!ost->initialized) { - char error[1024] = ""; - ret = init_output_stream(ost, error, sizeof(error)); - if (ret < 0) { - av_log(NULL, AV_LOG_ERROR, "Error initializing output stream %d:%d -- %s\n", - ost->file_index, ost->index, error); - exit_program(1); - } - } + /* + * Unlike video, with audio the audio frame size matters. + * Currently we are fully reliant on the lavfi filter chain to + * do the buffering deed for us, and thus the frame size parameter + * needs to be set accordingly. Where does one get the required + * frame size? From the initialized AVCodecContext of an audio + * encoder. Thus, if we have gotten to an audio stream, initialize + * the encoder earlier than receiving the first AVFrame. 
+ */ + if (av_buffersink_get_type(filter) == AVMEDIA_TYPE_AUDIO) + init_output_stream_wrapper(ost, NULL, 1); + if (!ost->pkt && !(ost->pkt = av_packet_alloc())) { + return AVERROR(ENOMEM); + } if (!ost->filtered_frame && !(ost->filtered_frame = av_frame_alloc())) { return AVERROR(ENOMEM); } filtered_frame = ost->filtered_frame; while (1) { - double float_pts = AV_NOPTS_VALUE; // this is identical to filtered_frame.pts but with higher precision ret = av_buffersink_get_frame_flags(filter, filtered_frame, AV_BUFFERSINK_FLAG_NO_REQUEST); if (ret < 0) { @@ -1452,7 +1545,7 @@ static int reap_filters(int flush) "Error in av_buffersink_get_frame_flags(): %s\n", av_err2str(ret)); } else if (flush && ret == AVERROR_EOF) { if (av_buffersink_get_type(filter) == AVMEDIA_TYPE_VIDEO) - do_video_out(of, ost, NULL, AV_NOPTS_VALUE); + do_video_out(of, ost, NULL); } break; } @@ -1460,38 +1553,13 @@ static int reap_filters(int flush) av_frame_unref(filtered_frame); continue; } - if (filtered_frame->pts != AV_NOPTS_VALUE) { - int64_t start_time = (of->start_time == AV_NOPTS_VALUE) ? 0 : of->start_time; - AVRational filter_tb = av_buffersink_get_time_base(filter); - AVRational tb = enc->time_base; - int extra_bits = av_clip(29 - av_log2(tb.den), 0, 16); - - tb.den <<= extra_bits; - float_pts = - av_rescale_q(filtered_frame->pts, filter_tb, tb) - - av_rescale_q(start_time, AV_TIME_BASE_Q, tb); - float_pts /= 1 << extra_bits; - // avoid exact midoints to reduce the chance of rounding differences, this can be removed in case the fps code is changed to work with integers - float_pts += FFSIGN(float_pts) * 1.0 / (1<<17); - - filtered_frame->pts = - av_rescale_q(filtered_frame->pts, filter_tb, enc->time_base) - - av_rescale_q(start_time, AV_TIME_BASE_Q, enc->time_base); - } switch (av_buffersink_get_type(filter)) { case AVMEDIA_TYPE_VIDEO: if (!ost->frame_aspect_ratio.num) enc->sample_aspect_ratio = filtered_frame->sample_aspect_ratio; - if (debug_ts) { - av_log(NULL, AV_LOG_INFO, "filter -> pts:%s pts_time:%s exact:%f time_base:%d/%d\n", - av_ts2str(filtered_frame->pts), av_ts2timestr(filtered_frame->pts, &enc->time_base), - float_pts, - enc->time_base.num, enc->time_base.den); - } - - do_video_out(of, ost, filtered_frame, float_pts); + do_video_out(of, ost, filtered_frame); break; case AVMEDIA_TYPE_AUDIO: if (!(enc->codec->capabilities & AV_CODEC_CAP_PARAM_CHANGE) && @@ -1643,6 +1711,7 @@ static void print_report(int is_last_report, int64_t timer_start, int64_t cur_ti double speed; int64_t pts = INT64_MIN + 1; static int64_t last_time = -1; + static int first_report = 1; static int qp_histogram[52]; int hours, mins, secs, us; const char *hours_sign; @@ -1655,9 +1724,9 @@ static void print_report(int is_last_report, int64_t timer_start, int64_t cur_ti if (!is_last_report) { if (last_time == -1) { last_time = cur_time; - return; } - if ((cur_time - last_time) < 500000) + if (((cur_time - last_time) < stats_period && !first_report) || + (first_report && nb_output_dumped < nb_output_files)) return; last_time = cur_time; } @@ -1740,9 +1809,17 @@ static void print_report(int is_last_report, int64_t timer_start, int64_t cur_ti vid = 1; } /* compute min output value */ - if (av_stream_get_end_pts(ost->st) != AV_NOPTS_VALUE) + if (av_stream_get_end_pts(ost->st) != AV_NOPTS_VALUE) { pts = FFMAX(pts, av_rescale_q(av_stream_get_end_pts(ost->st), ost->st->time_base, AV_TIME_BASE_Q)); + if (copy_ts) { + if (copy_ts_first_pts == AV_NOPTS_VALUE && pts > 1) + copy_ts_first_pts = pts; + if (copy_ts_first_pts != AV_NOPTS_VALUE) + pts 
-= copy_ts_first_pts; + } + } + if (is_last_report) nb_frames_drop += ost->last_dropped; } @@ -1826,6 +1903,8 @@ static void print_report(int is_last_report, int64_t timer_start, int64_t cur_ti } } + first_report = 0; + if (is_last_report) print_final_stats(total_size); } @@ -1859,7 +1938,6 @@ static void flush_encoders(void) // Maybe we should just let encoding fail instead. if (!ost->initialized) { FilterGraph *fg = ost->filter->graph; - char error[1024] = ""; av_log(NULL, AV_LOG_WARNING, "Finishing stream %d:%d without any data written to it.\n", @@ -1885,12 +1963,7 @@ static void flush_encoders(void) finish_output_stream(ost); } - ret = init_output_stream(ost, error, sizeof(error)); - if (ret < 0) { - av_log(NULL, AV_LOG_ERROR, "Error initializing output stream %d:%d -- %s\n", - ost->file_index, ost->index, error); - exit_program(1); - } + init_output_stream_wrapper(ost, NULL, 1); } if (enc->codec_type != AVMEDIA_TYPE_VIDEO && enc->codec_type != AVMEDIA_TYPE_AUDIO) @@ -1898,7 +1971,7 @@ static void flush_encoders(void) for (;;) { const char *desc = NULL; - AVPacket pkt; + AVPacket *pkt = ost->pkt; int pkt_size; switch (enc->codec_type) { @@ -1912,13 +1985,10 @@ static void flush_encoders(void) av_assert0(0); } - av_init_packet(&pkt); - pkt.data = NULL; - pkt.size = 0; - update_benchmark(NULL); - while ((ret = avcodec_receive_packet(enc, &pkt)) == AVERROR(EAGAIN)) { + av_packet_unref(pkt); + while ((ret = avcodec_receive_packet(enc, pkt)) == AVERROR(EAGAIN)) { ret = avcodec_send_frame(enc, NULL); if (ret < 0) { av_log(NULL, AV_LOG_FATAL, "%s encoding failed: %s\n", @@ -1939,16 +2009,16 @@ static void flush_encoders(void) fprintf(ost->logfile, "%s", enc->stats_out); } if (ret == AVERROR_EOF) { - output_packet(of, &pkt, ost, 1); + output_packet(of, pkt, ost, 1); break; } if (ost->finished & MUXER_FINISHED) { - av_packet_unref(&pkt); + av_packet_unref(pkt); continue; } - av_packet_rescale_ts(&pkt, enc->time_base, ost->mux_timebase); - pkt_size = pkt.size; - output_packet(of, &pkt, ost, 0); + av_packet_rescale_ts(pkt, enc->time_base, ost->mux_timebase); + pkt_size = pkt->size; + output_packet(of, pkt, ost, 0); if (ost->enc_ctx->codec_type == AVMEDIA_TYPE_VIDEO && vstats_filename) { do_video_stats(ost, pkt_size); } @@ -1982,14 +2052,12 @@ static void do_streamcopy(InputStream *ist, OutputStream *ost, const AVPacket *p InputFile *f = input_files [ist->file_index]; int64_t start_time = (of->start_time == AV_NOPTS_VALUE) ? 0 : of->start_time; int64_t ost_tb_start_time = av_rescale_q(start_time, AV_TIME_BASE_Q, ost->mux_timebase); - AVPacket opkt; + AVPacket *opkt = ost->pkt; + av_packet_unref(opkt); // EOF: flush output bitstream filters. 
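The flush_encoders() hunk above switches from a stack AVPacket to the stream's persistent ost->pkt and drains the encoder by sending a NULL frame. A minimal, self-contained sketch of that drain pattern follows; enc is assumed to be an already opened encoder context and sink() is a hypothetical consumer standing in for output_packet().

#include <libavcodec/avcodec.h>

/* Drain all buffered packets from an opened encoder, reusing one
 * heap-allocated AVPacket, in the spirit of the patched flush_encoders(). */
static int drain_encoder(AVCodecContext *enc, void (*sink)(AVPacket *pkt))
{
    AVPacket *pkt = av_packet_alloc();
    int ret;

    if (!pkt)
        return AVERROR(ENOMEM);

    ret = avcodec_send_frame(enc, NULL);   /* signal end of stream */
    if (ret < 0)
        goto end;

    while ((ret = avcodec_receive_packet(enc, pkt)) >= 0) {
        sink(pkt);                         /* e.g. hand the packet to the muxer */
        av_packet_unref(pkt);              /* make pkt reusable for the next call */
    }
    if (ret == AVERROR_EOF)
        ret = 0;                           /* normal end of the drain */

end:
    av_packet_free(&pkt);
    return ret;
}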
if (!pkt) { - av_init_packet(&opkt); - opkt.data = NULL; - opkt.size = 0; - output_packet(of, &opkt, ost, 1); + output_packet(of, opkt, ost, 1); return; } @@ -2027,30 +2095,30 @@ static void do_streamcopy(InputStream *ist, OutputStream *ost, const AVPacket *p if (ost->enc_ctx->codec_type == AVMEDIA_TYPE_VIDEO) ost->sync_opts++; - if (av_packet_ref(&opkt, pkt) < 0) + if (av_packet_ref(opkt, pkt) < 0) exit_program(1); if (pkt->pts != AV_NOPTS_VALUE) - opkt.pts = av_rescale_q(pkt->pts, ist->st->time_base, ost->mux_timebase) - ost_tb_start_time; + opkt->pts = av_rescale_q(pkt->pts, ist->st->time_base, ost->mux_timebase) - ost_tb_start_time; if (pkt->dts == AV_NOPTS_VALUE) { - opkt.dts = av_rescale_q(ist->dts, AV_TIME_BASE_Q, ost->mux_timebase); + opkt->dts = av_rescale_q(ist->dts, AV_TIME_BASE_Q, ost->mux_timebase); } else if (ost->st->codecpar->codec_type == AVMEDIA_TYPE_AUDIO) { int duration = av_get_audio_frame_duration(ist->dec_ctx, pkt->size); if(!duration) duration = ist->dec_ctx->frame_size; - opkt.dts = av_rescale_delta(ist->st->time_base, pkt->dts, + opkt->dts = av_rescale_delta(ist->st->time_base, pkt->dts, (AVRational){1, ist->dec_ctx->sample_rate}, duration, &ist->filter_in_rescale_delta_last, ost->mux_timebase); /* dts will be set immediately afterwards to what pts is now */ - opkt.pts = opkt.dts - ost_tb_start_time; + opkt->pts = opkt->dts - ost_tb_start_time; } else - opkt.dts = av_rescale_q(pkt->dts, ist->st->time_base, ost->mux_timebase); - opkt.dts -= ost_tb_start_time; + opkt->dts = av_rescale_q(pkt->dts, ist->st->time_base, ost->mux_timebase); + opkt->dts -= ost_tb_start_time; - opkt.duration = av_rescale_q(pkt->duration, ist->st->time_base, ost->mux_timebase); + opkt->duration = av_rescale_q(pkt->duration, ist->st->time_base, ost->mux_timebase); - output_packet(of, &opkt, ost, 0); + output_packet(of, opkt, ost, 0); } int guess_input_channel_layout(InputStream *ist) @@ -2325,7 +2393,6 @@ static int decode_video(InputStream *ist, AVPacket *pkt, int *got_output, int64_ int i, ret = 0, err = 0; int64_t best_effort_timestamp; int64_t dts = AV_NOPTS_VALUE; - AVPacket avpkt; // With fate-indeo3-2, we're getting 0-sized packets before EOF for some // reason. This seems like a semi-critical bug. Don't trigger EOF, and @@ -2341,8 +2408,7 @@ static int decode_video(InputStream *ist, AVPacket *pkt, int *got_output, int64_ if (ist->dts != AV_NOPTS_VALUE) dts = av_rescale_q(ist->dts, AV_TIME_BASE_Q, ist->st->time_base); if (pkt) { - avpkt = *pkt; - avpkt.dts = dts; // ffmpeg.c probably shouldn't do this + pkt->dts = dts; // ffmpeg.c probably shouldn't do this } // The old code used to set dts on the drain packet, which does not work @@ -2356,7 +2422,7 @@ static int decode_video(InputStream *ist, AVPacket *pkt, int *got_output, int64_ } update_benchmark(NULL); - ret = decode(ist->dec_ctx, decoded_frame, got_output, pkt ? &avpkt : NULL); + ret = decode(ist->dec_ctx, decoded_frame, got_output, pkt); update_benchmark("decode_video %d.%d", ist->file_index, ist->st->index); if (ret < 0) *decode_failed = 1; @@ -2370,7 +2436,7 @@ static int decode_video(InputStream *ist, AVPacket *pkt, int *got_output, int64_ av_log(ist->dec_ctx, AV_LOG_WARNING, "video_delay is larger in decoder than demuxer %d > %d.\n" "If you want to help, upload a sample " - "of this file to ftp://upload.ffmpeg.org/incoming/ " + "of this file to https://streams.videolan.org/upload/ " "and contact the ffmpeg-devel mailing list. 
(ffmpeg-devel@ffmpeg.org)\n", ist->dec_ctx->has_b_frames, ist->st->codecpar->video_delay); @@ -2515,6 +2581,8 @@ static int transcode_subtitles(InputStream *ist, AVPacket *pkt, int *got_output, for (i = 0; i < nb_output_streams; i++) { OutputStream *ost = output_streams[i]; + if (!ost->pkt && !(ost->pkt = av_packet_alloc())) + exit_program(1); if (!check_output_constraints(ist, ost) || !ost->encoding_needed || ost->enc->type != AVMEDIA_TYPE_SUBTITLE) continue; @@ -2550,7 +2618,12 @@ static int process_input_packet(InputStream *ist, const AVPacket *pkt, int no_eo int repeating = 0; int eof_reached = 0; - AVPacket avpkt; + AVPacket *avpkt; + + if (!ist->pkt && !(ist->pkt = av_packet_alloc())) + return AVERROR(ENOMEM); + avpkt = ist->pkt; + if (!ist->saw_first_ts) { ist->dts = ist->st->avg_frame_rate.num ? - ist->dec_ctx->has_b_frames * AV_TIME_BASE / av_q2d(ist->st->avg_frame_rate) : 0; ist->pts = 0; @@ -2566,13 +2639,11 @@ static int process_input_packet(InputStream *ist, const AVPacket *pkt, int no_eo if (ist->next_pts == AV_NOPTS_VALUE) ist->next_pts = ist->pts; - if (!pkt) { - /* EOF handling */ - av_init_packet(&avpkt); - avpkt.data = NULL; - avpkt.size = 0; - } else { - avpkt = *pkt; + if (pkt) { + av_packet_unref(avpkt); + ret = av_packet_ref(avpkt, pkt); + if (ret < 0) + return ret; } if (pkt && pkt->dts != AV_NOPTS_VALUE) { @@ -2593,11 +2664,12 @@ static int process_input_packet(InputStream *ist, const AVPacket *pkt, int no_eo switch (ist->dec_ctx->codec_type) { case AVMEDIA_TYPE_AUDIO: - ret = decode_audio (ist, repeating ? NULL : &avpkt, &got_output, + ret = decode_audio (ist, repeating ? NULL : avpkt, &got_output, &decode_failed); + av_packet_unref(avpkt); break; case AVMEDIA_TYPE_VIDEO: - ret = decode_video (ist, repeating ? NULL : &avpkt, &got_output, &duration_pts, !pkt, + ret = decode_video (ist, repeating ? 
NULL : avpkt, &got_output, &duration_pts, !pkt, &decode_failed); if (!repeating || !pkt || got_output) { if (pkt && pkt->duration) { @@ -2622,13 +2694,15 @@ static int process_input_packet(InputStream *ist, const AVPacket *pkt, int no_eo ist->next_pts += duration_dts; } } + av_packet_unref(avpkt); break; case AVMEDIA_TYPE_SUBTITLE: if (repeating) break; - ret = transcode_subtitles(ist, &avpkt, &got_output, &decode_failed); + ret = transcode_subtitles(ist, avpkt, &got_output, &decode_failed); if (!pkt && ret >= 0) ret = AVERROR_EOF; + av_packet_unref(avpkt); break; default: return -1; @@ -2717,6 +2791,8 @@ static int process_input_packet(InputStream *ist, const AVPacket *pkt, int no_eo for (i = 0; i < nb_output_streams; i++) { OutputStream *ost = output_streams[i]; + if (!ost->pkt && !(ost->pkt = av_packet_alloc())) + exit_program(1); if (!check_output_constraints(ist, ost) || ost->encoding_needed) continue; @@ -2873,7 +2949,7 @@ static int init_input_stream(int ist_index, char *error, int error_len) InputStream *ist = input_streams[ist_index]; if (ist->decoding_needed) { - AVCodec *codec = ist->dec; + const AVCodec *codec = ist->dec; if (!codec) { snprintf(error, error_len, "Decoder (codec %s) not found for input stream #%d:%d", avcodec_get_name(ist->dec_ctx->codec_id), ist->file_index, ist->st->index); @@ -2883,9 +2959,10 @@ static int init_input_stream(int ist_index, char *error, int error_len) ist->dec_ctx->opaque = ist; ist->dec_ctx->get_format = get_format; ist->dec_ctx->get_buffer2 = get_buffer; +#if LIBAVCODEC_VERSION_MAJOR < 60 ist->dec_ctx->thread_safe_callbacks = 1; +#endif - av_opt_set_int(ist->dec_ctx, "refcounted_frames", 1, 0); if (ist->dec_ctx->codec_id == AV_CODEC_ID_DVB_SUBTITLE && (ist->decoding_needed & DECODING_FOR_OST)) { av_dict_set(&ist->decoder_opts, "compute_edt", "1", AV_DICT_DONT_OVERWRITE); @@ -2969,6 +3046,7 @@ static int check_init_output_file(OutputFile *of, int file_index) of->header_written = 1; av_dump_format(of->ctx, file_index, of->ctx->url, 1); + nb_output_dumped++; if (sdp_filename || want_sdp) print_sdp(); @@ -2982,9 +3060,11 @@ static int check_init_output_file(OutputFile *of, int file_index) ost->mux_timebase = ost->st->time_base; while (av_fifo_size(ost->muxing_queue)) { - AVPacket pkt; + AVPacket *pkt; av_fifo_generic_read(ost->muxing_queue, &pkt, sizeof(pkt), NULL); - write_packet(of, &pkt, ost, 1); + ost->muxing_queue_data_size -= pkt->size; + write_packet(of, pkt, ost, 1); + av_packet_free(&pkt); } } @@ -3064,15 +3144,23 @@ static int init_output_stream_streamcopy(OutputStream *ost) if (!ost->frame_rate.num) ost->frame_rate = ist->framerate; - ost->st->avg_frame_rate = ost->frame_rate; + + if (ost->frame_rate.num) + ost->st->avg_frame_rate = ost->frame_rate; + else + ost->st->avg_frame_rate = ist->st->avg_frame_rate; ret = avformat_transfer_internal_stream_timing_info(of->ctx->oformat, ost->st, ist->st, copy_tb); if (ret < 0) return ret; // copy timebase while removing common factors - if (ost->st->time_base.num <= 0 || ost->st->time_base.den <= 0) - ost->st->time_base = av_add_q(av_stream_get_codec_timebase(ost->st), (AVRational){0, 1}); + if (ost->st->time_base.num <= 0 || ost->st->time_base.den <= 0) { + if (ost->frame_rate.num) + ost->st->time_base = av_inv_q(ost->frame_rate); + else + ost->st->time_base = av_add_q(av_stream_get_codec_timebase(ost->st), (AVRational){0, 1}); + } // copy estimated duration as a hint to the muxer if (ost->st->duration <= 0 && ist->st->duration > 0) @@ -3262,7 +3350,7 @@ static void 
init_encoder_time_base(OutputStream *ost, AVRational default_time_ba enc_ctx->time_base = default_time_base; } -static int init_output_stream_encode(OutputStream *ost) +static int init_output_stream_encode(OutputStream *ost, AVFrame *frame) { InputStream *ist = get_input_stream(ost); AVCodecContext *enc_ctx = ost->enc_ctx; @@ -3302,7 +3390,7 @@ static int init_output_stream_encode(OutputStream *ost) ost->frame_rate = ist->framerate; if (ist && !ost->frame_rate.num) ost->frame_rate = ist->st->r_frame_rate; - if (ist && !ost->frame_rate.num) { + if (ist && !ost->frame_rate.num && !ost->max_frame_rate.num) { ost->frame_rate = (AVRational){25, 1}; av_log(NULL, AV_LOG_WARNING, "No information " @@ -3312,6 +3400,11 @@ static int init_output_stream_encode(OutputStream *ost) ost->file_index, ost->index); } + if (ost->max_frame_rate.num && + (av_q2d(ost->frame_rate) > av_q2d(ost->max_frame_rate) || + !ost->frame_rate.den)) + ost->frame_rate = ost->max_frame_rate; + if (ost->enc->supported_framerates && !ost->force_fps) { int idx = av_find_nearest_q_idx(ost->frame_rate, ost->enc->supported_framerates); ost->frame_rate = ost->enc->supported_framerates[idx]; @@ -3359,6 +3452,14 @@ static int init_output_stream_encode(OutputStream *ost) enc_ctx->bits_per_raw_sample = FFMIN(dec_ctx->bits_per_raw_sample, av_pix_fmt_desc_get(enc_ctx->pix_fmt)->comp[0].depth); + if (frame) { + enc_ctx->color_range = frame->color_range; + enc_ctx->color_primaries = frame->color_primaries; + enc_ctx->color_trc = frame->color_trc; + enc_ctx->colorspace = frame->colorspace; + enc_ctx->chroma_sample_location = frame->chroma_location; + } + enc_ctx->framerate = ost->frame_rate; ost->st->avg_frame_rate = ost->frame_rate; @@ -3376,6 +3477,20 @@ static int init_output_stream_encode(OutputStream *ost) enc_ctx->field_order = AV_FIELD_TT; } + if (frame) { + if (enc_ctx->flags & (AV_CODEC_FLAG_INTERLACED_DCT | AV_CODEC_FLAG_INTERLACED_ME) && + ost->top_field_first >= 0) + frame->top_field_first = !!ost->top_field_first; + + if (frame->interlaced_frame) { + if (enc_ctx->codec->id == AV_CODEC_ID_MJPEG) + enc_ctx->field_order = frame->top_field_first ? AV_FIELD_TT:AV_FIELD_BB; + else + enc_ctx->field_order = frame->top_field_first ? AV_FIELD_TB:AV_FIELD_BT; + } else + enc_ctx->field_order = AV_FIELD_PROGRESSIVE; + } + if (ost->forced_keyframes) { if (!strncmp(ost->forced_keyframes, "expr:", 5)) { ret = av_expr_parse(&ost->forced_keyframes_pexpr, ost->forced_keyframes+5, @@ -3416,16 +3531,17 @@ static int init_output_stream_encode(OutputStream *ost) return 0; } -static int init_output_stream(OutputStream *ost, char *error, int error_len) +static int init_output_stream(OutputStream *ost, AVFrame *frame, + char *error, int error_len) { int ret = 0; if (ost->encoding_needed) { - AVCodec *codec = ost->enc; + const AVCodec *codec = ost->enc; AVCodecContext *dec = NULL; InputStream *ist; - ret = init_output_stream_encode(ost); + ret = init_output_stream_encode(ost, frame); if (ret < 0) return ret; @@ -3498,12 +3614,6 @@ static int init_output_stream(OutputStream *ost, char *error, int error_len) "Error initializing the output stream codec context.\n"); exit_program(1); } - /* - * FIXME: ost->st->codec should't be needed here anymore. 
- */ - ret = avcodec_copy_context(ost->st->codec, ost->enc_ctx); - if (ret < 0) - return ret; if (ost->enc_ctx->nb_coded_side_data) { int i; @@ -3548,8 +3658,6 @@ static int init_output_stream(OutputStream *ost, char *error, int error_len) // copy estimated duration as a hint to the muxer if (ost->st->duration <= 0 && ist && ist->st->duration > 0) ost->st->duration = av_rescale_q(ist->st->duration, ist->st->time_base, ost->st->time_base); - - ost->st->codec->codec= ost->enc_ctx->codec; } else if (ost->stream_copy) { ret = init_output_stream_streamcopy(ost); if (ret < 0) @@ -3662,13 +3770,22 @@ static int transcode_init(void) goto dump_format; } - /* open each encoder */ + /* + * initialize stream copy and subtitle/data streams. + * Encoded AVFrame based streams will get initialized as follows: + * - when the first AVFrame is received in do_video_out + * - just before the first AVFrame is received in either transcode_step + * or reap_filters due to us requiring the filter chain buffer sink + * to be configured with the correct audio frame size, which is only + * known after the encoder is initialized. + */ for (i = 0; i < nb_output_streams; i++) { - // skip streams fed from filtergraphs until we have a frame for them - if (output_streams[i]->filter) + if (!output_streams[i]->stream_copy && + (output_streams[i]->enc_ctx->codec_type == AVMEDIA_TYPE_VIDEO || + output_streams[i]->enc_ctx->codec_type == AVMEDIA_TYPE_AUDIO)) continue; - ret = init_output_stream(output_streams[i], error, sizeof(error)); + ret = init_output_stream_wrapper(output_streams[i], NULL, 0); if (ret < 0) goto dump_format; } @@ -3932,13 +4049,9 @@ static int check_keyboard_interaction(int64_t cur_time) if (key == 'd' || key == 'D'){ int debug=0; if(key == 'D') { - debug = input_streams[0]->st->codec->debug<<1; + debug = input_streams[0]->dec_ctx->debug << 1; if(!debug) debug = 1; - while(debug & (FF_DEBUG_DCT_COEFF -#if FF_API_DEBUG_MV - |FF_DEBUG_VIS_QP|FF_DEBUG_VIS_MB_TYPE -#endif - )) //unsupported, would just crash + while (debug & FF_DEBUG_DCT_COEFF) //unsupported, would just crash debug += debug; }else{ char buf[32]; @@ -3955,7 +4068,7 @@ static int check_keyboard_interaction(int64_t cur_time) fprintf(stderr,"error parsing debug value\n"); } for(i=0;ist->codec->debug = debug; + input_streams[i]->dec_ctx->debug = debug; } for(i=0;ipkt, *queue_pkt; unsigned flags = f->non_blocking ? 
AV_THREAD_MESSAGE_NONBLOCK : 0; int ret = 0; while (1) { - AVPacket pkt; - ret = av_read_frame(f->ctx, &pkt); + ret = av_read_frame(f->ctx, pkt); if (ret == AVERROR(EAGAIN)) { av_usleep(10000); @@ -3999,10 +4112,17 @@ static void *input_thread(void *arg) av_thread_message_queue_set_err_recv(f->in_thread_queue, ret); break; } - ret = av_thread_message_queue_send(f->in_thread_queue, &pkt, flags); + queue_pkt = av_packet_alloc(); + if (!queue_pkt) { + av_packet_unref(pkt); + av_thread_message_queue_set_err_recv(f->in_thread_queue, AVERROR(ENOMEM)); + break; + } + av_packet_move_ref(queue_pkt, pkt); + ret = av_thread_message_queue_send(f->in_thread_queue, &queue_pkt, flags); if (flags && ret == AVERROR(EAGAIN)) { flags = 0; - ret = av_thread_message_queue_send(f->in_thread_queue, &pkt, flags); + ret = av_thread_message_queue_send(f->in_thread_queue, &queue_pkt, flags); av_log(f->ctx, AV_LOG_WARNING, "Thread message queue blocking; consider raising the " "thread_queue_size option (current value: %d)\n", @@ -4013,7 +4133,7 @@ static void *input_thread(void *arg) av_log(f->ctx, AV_LOG_ERROR, "Unable to send packet to main thread: %s\n", av_err2str(ret)); - av_packet_unref(&pkt); + av_packet_free(&queue_pkt); av_thread_message_queue_set_err_recv(f->in_thread_queue, ret); break; } @@ -4025,13 +4145,13 @@ static void *input_thread(void *arg) static void free_input_thread(int i) { InputFile *f = input_files[i]; - AVPacket pkt; + AVPacket *pkt; if (!f || !f->in_thread_queue) return; av_thread_message_queue_set_err_send(f->in_thread_queue, AVERROR_EOF); while (av_thread_message_queue_recv(f->in_thread_queue, &pkt, 0) >= 0) - av_packet_unref(&pkt); + av_packet_free(&pkt); pthread_join(f->thread, NULL); f->joined = 1; @@ -4051,14 +4171,16 @@ static int init_input_thread(int i) int ret; InputFile *f = input_files[i]; - if (nb_input_files == 1) + if (f->thread_queue_size < 0) + f->thread_queue_size = (nb_input_files > 1 ? 8 : 0); + if (!f->thread_queue_size) return 0; if (f->ctx->pb ? !f->ctx->pb->seekable : strcmp(f->ctx->iformat->name, "lavfi")) f->non_blocking = 1; ret = av_thread_message_queue_alloc(&f->in_thread_queue, - f->thread_queue_size, sizeof(AVPacket)); + f->thread_queue_size, sizeof(f->pkt)); if (ret < 0) return ret; @@ -4083,7 +4205,7 @@ static int init_input_threads(void) return 0; } -static int get_input_packet_mt(InputFile *f, AVPacket *pkt) +static int get_input_packet_mt(InputFile *f, AVPacket **pkt) { return av_thread_message_queue_recv(f->in_thread_queue, pkt, f->non_blocking ? 
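With the input_thread() and free_input_thread() hunks above, every message travelling through the thread queue is now a single AVPacket pointer: the producer allocates it and moves the demuxed data into it, and whichever side ends up owning it frees it. A reduced sketch of that handoff is given below, assuming a queue allocated elsewhere with av_thread_message_queue_alloc() and an element size of sizeof(AVPacket *); queue_send_packet() and queue_recv_packet() are illustrative names, not functions from ffmpeg.c.

#include <libavutil/threadmessage.h>
#include <libavcodec/avcodec.h>

/* Producer side: hand a demuxed packet over to the consumer thread.
 * 'src' keeps its buffers only until av_packet_move_ref() transfers them. */
static int queue_send_packet(AVThreadMessageQueue *q, AVPacket *src)
{
    AVPacket *msg = av_packet_alloc();
    int ret;

    if (!msg)
        return AVERROR(ENOMEM);
    av_packet_move_ref(msg, src);            /* msg now owns the data */

    ret = av_thread_message_queue_send(q, &msg, 0);
    if (ret < 0)
        av_packet_free(&msg);                /* not queued, so free it here */
    return ret;
}

/* Consumer side: receive one AVPacket* and free it after use. */
static int queue_recv_packet(AVThreadMessageQueue *q)
{
    AVPacket *msg;
    int ret = av_thread_message_queue_recv(q, &msg, 0);

    if (ret < 0)
        return ret;                          /* e.g. AVERROR_EOF on shutdown */
    /* ... consume msg ... */
    av_packet_free(&msg);
    return 0;
}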
@@ -4091,7 +4213,7 @@ static int get_input_packet_mt(InputFile *f, AVPacket *pkt) } #endif -static int get_input_packet(InputFile *f, AVPacket *pkt) +static int get_input_packet(InputFile *f, AVPacket **pkt) { if (f->rate_emu) { int i; @@ -4105,10 +4227,11 @@ static int get_input_packet(InputFile *f, AVPacket *pkt) } #if HAVE_THREADS - if (nb_input_files > 1) + if (f->thread_queue_size) return get_input_packet_mt(f, pkt); #endif - return av_read_frame(f->ctx, pkt); + *pkt = f->pkt; + return av_read_frame(f->ctx, *pkt); } static int got_eagain(void) @@ -4220,7 +4343,7 @@ static int process_input(int file_index) InputFile *ifile = input_files[file_index]; AVFormatContext *is; InputStream *ist; - AVPacket pkt; + AVPacket *pkt; int ret, thread_ret, i, j; int64_t duration; int64_t pkt_dts; @@ -4295,27 +4418,27 @@ static int process_input(int file_index) reset_eagain(); if (do_pkt_dump) { - av_pkt_dump_log2(NULL, AV_LOG_INFO, &pkt, do_hex_dump, - is->streams[pkt.stream_index]); + av_pkt_dump_log2(NULL, AV_LOG_INFO, pkt, do_hex_dump, + is->streams[pkt->stream_index]); } /* the following test is needed in case new streams appear dynamically in stream : we ignore them */ - if (pkt.stream_index >= ifile->nb_streams) { - report_new_stream(file_index, &pkt); + if (pkt->stream_index >= ifile->nb_streams) { + report_new_stream(file_index, pkt); goto discard_packet; } - ist = input_streams[ifile->ist_index + pkt.stream_index]; + ist = input_streams[ifile->ist_index + pkt->stream_index]; - ist->data_size += pkt.size; + ist->data_size += pkt->size; ist->nb_packets++; if (ist->discard) goto discard_packet; - if (pkt.flags & AV_PKT_FLAG_CORRUPT) { + if (pkt->flags & AV_PKT_FLAG_CORRUPT) { av_log(NULL, exit_on_error ? AV_LOG_FATAL : AV_LOG_WARNING, - "%s: corrupt input packet in stream %d\n", is->url, pkt.stream_index); + "%s: corrupt input packet in stream %d\n", is->url, pkt->stream_index); if (exit_on_error) exit_program(1); } @@ -4323,11 +4446,11 @@ static int process_input(int file_index) if (debug_ts) { av_log(NULL, AV_LOG_INFO, "demuxer -> ist_index:%d type:%s " "next_dts:%s next_dts_time:%s next_pts:%s next_pts_time:%s pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s off:%s off_time:%s\n", - ifile->ist_index + pkt.stream_index, av_get_media_type_string(ist->dec_ctx->codec_type), + ifile->ist_index + pkt->stream_index, av_get_media_type_string(ist->dec_ctx->codec_type), av_ts2str(ist->next_dts), av_ts2timestr(ist->next_dts, &AV_TIME_BASE_Q), av_ts2str(ist->next_pts), av_ts2timestr(ist->next_pts, &AV_TIME_BASE_Q), - av_ts2str(pkt.pts), av_ts2timestr(pkt.pts, &ist->st->time_base), - av_ts2str(pkt.dts), av_ts2timestr(pkt.dts, &ist->st->time_base), + av_ts2str(pkt->pts), av_ts2timestr(pkt->pts, &ist->st->time_base), + av_ts2str(pkt->dts), av_ts2timestr(pkt->dts, &ist->st->time_base), av_ts2str(input_files[ist->file_index]->ts_offset), av_ts2timestr(input_files[ist->file_index]->ts_offset, &AV_TIME_BASE_Q)); } @@ -4357,12 +4480,12 @@ static int process_input(int file_index) stime2= stime + (1ULL<st->pts_wrap_bits); ist->wrap_correction_done = 1; - if(stime2 > stime && pkt.dts != AV_NOPTS_VALUE && pkt.dts > stime + (1LL<<(ist->st->pts_wrap_bits-1))) { - pkt.dts -= 1ULL<st->pts_wrap_bits; + if(stime2 > stime && pkt->dts != AV_NOPTS_VALUE && pkt->dts > stime + (1LL<<(ist->st->pts_wrap_bits-1))) { + pkt->dts -= 1ULL<st->pts_wrap_bits; ist->wrap_correction_done = 0; } - if(stime2 > stime && pkt.pts != AV_NOPTS_VALUE && pkt.pts > stime + (1LL<<(ist->st->pts_wrap_bits-1))) { - pkt.pts -= 
1ULL<st->pts_wrap_bits; + if(stime2 > stime && pkt->pts != AV_NOPTS_VALUE && pkt->pts > stime + (1LL<<(ist->st->pts_wrap_bits-1))) { + pkt->pts -= 1ULL<st->pts_wrap_bits; ist->wrap_correction_done = 0; } } @@ -4376,10 +4499,10 @@ static int process_input(int file_index) if (src_sd->type == AV_PKT_DATA_DISPLAYMATRIX) continue; - if (av_packet_get_side_data(&pkt, src_sd->type, NULL)) + if (av_packet_get_side_data(pkt, src_sd->type, NULL)) continue; - dst_data = av_packet_new_side_data(&pkt, src_sd->type, src_sd->size); + dst_data = av_packet_new_side_data(pkt, src_sd->type, src_sd->size); if (!dst_data) exit_program(1); @@ -4387,17 +4510,17 @@ static int process_input(int file_index) } } - if (pkt.dts != AV_NOPTS_VALUE) - pkt.dts += av_rescale_q(ifile->ts_offset, AV_TIME_BASE_Q, ist->st->time_base); - if (pkt.pts != AV_NOPTS_VALUE) - pkt.pts += av_rescale_q(ifile->ts_offset, AV_TIME_BASE_Q, ist->st->time_base); + if (pkt->dts != AV_NOPTS_VALUE) + pkt->dts += av_rescale_q(ifile->ts_offset, AV_TIME_BASE_Q, ist->st->time_base); + if (pkt->pts != AV_NOPTS_VALUE) + pkt->pts += av_rescale_q(ifile->ts_offset, AV_TIME_BASE_Q, ist->st->time_base); - if (pkt.pts != AV_NOPTS_VALUE) - pkt.pts *= ist->ts_scale; - if (pkt.dts != AV_NOPTS_VALUE) - pkt.dts *= ist->ts_scale; + if (pkt->pts != AV_NOPTS_VALUE) + pkt->pts *= ist->ts_scale; + if (pkt->dts != AV_NOPTS_VALUE) + pkt->dts *= ist->ts_scale; - pkt_dts = av_rescale_q_rnd(pkt.dts, ist->st->time_base, AV_TIME_BASE_Q, AV_ROUND_NEAR_INF|AV_ROUND_PASS_MINMAX); + pkt_dts = av_rescale_q_rnd(pkt->dts, ist->st->time_base, AV_TIME_BASE_Q, AV_ROUND_NEAR_INF|AV_ROUND_PASS_MINMAX); if ((ist->dec_ctx->codec_type == AVMEDIA_TYPE_VIDEO || ist->dec_ctx->codec_type == AVMEDIA_TYPE_AUDIO) && pkt_dts != AV_NOPTS_VALUE && ist->next_dts == AV_NOPTS_VALUE && !copy_ts @@ -4409,27 +4532,27 @@ static int process_input(int file_index) av_log(NULL, AV_LOG_DEBUG, "Inter stream timestamp discontinuity %"PRId64", new offset= %"PRId64"\n", delta, ifile->ts_offset); - pkt.dts -= av_rescale_q(delta, AV_TIME_BASE_Q, ist->st->time_base); - if (pkt.pts != AV_NOPTS_VALUE) - pkt.pts -= av_rescale_q(delta, AV_TIME_BASE_Q, ist->st->time_base); + pkt->dts -= av_rescale_q(delta, AV_TIME_BASE_Q, ist->st->time_base); + if (pkt->pts != AV_NOPTS_VALUE) + pkt->pts -= av_rescale_q(delta, AV_TIME_BASE_Q, ist->st->time_base); } } duration = av_rescale_q(ifile->duration, ifile->time_base, ist->st->time_base); - if (pkt.pts != AV_NOPTS_VALUE) { - pkt.pts += duration; - ist->max_pts = FFMAX(pkt.pts, ist->max_pts); - ist->min_pts = FFMIN(pkt.pts, ist->min_pts); + if (pkt->pts != AV_NOPTS_VALUE) { + pkt->pts += duration; + ist->max_pts = FFMAX(pkt->pts, ist->max_pts); + ist->min_pts = FFMIN(pkt->pts, ist->min_pts); } - if (pkt.dts != AV_NOPTS_VALUE) - pkt.dts += duration; + if (pkt->dts != AV_NOPTS_VALUE) + pkt->dts += duration; - pkt_dts = av_rescale_q_rnd(pkt.dts, ist->st->time_base, AV_TIME_BASE_Q, AV_ROUND_NEAR_INF|AV_ROUND_PASS_MINMAX); + pkt_dts = av_rescale_q_rnd(pkt->dts, ist->st->time_base, AV_TIME_BASE_Q, AV_ROUND_NEAR_INF|AV_ROUND_PASS_MINMAX); if (copy_ts && pkt_dts != AV_NOPTS_VALUE && ist->next_dts != AV_NOPTS_VALUE && (is->iformat->flags & AVFMT_TS_DISCONT) && ist->st->pts_wrap_bits < 60) { - int64_t wrap_dts = av_rescale_q_rnd(pkt.dts + (1LL<st->pts_wrap_bits), + int64_t wrap_dts = av_rescale_q_rnd(pkt->dts + (1LL<st->pts_wrap_bits), ist->st->time_base, AV_TIME_BASE_Q, AV_ROUND_NEAR_INF|AV_ROUND_PASS_MINMAX); if (FFABS(wrap_dts - ist->next_dts) < FFABS(pkt_dts - ist->next_dts)/10) @@ 
-4452,46 +4575,51 @@ static int process_input(int file_index) ist->file_index, ist->st->index, ist->st->id, av_get_media_type_string(ist->dec_ctx->codec_type), delta, ifile->ts_offset); - pkt.dts -= av_rescale_q(delta, AV_TIME_BASE_Q, ist->st->time_base); - if (pkt.pts != AV_NOPTS_VALUE) - pkt.pts -= av_rescale_q(delta, AV_TIME_BASE_Q, ist->st->time_base); + pkt->dts -= av_rescale_q(delta, AV_TIME_BASE_Q, ist->st->time_base); + if (pkt->pts != AV_NOPTS_VALUE) + pkt->pts -= av_rescale_q(delta, AV_TIME_BASE_Q, ist->st->time_base); } } else { if ( delta < -1LL*dts_error_threshold*AV_TIME_BASE || delta > 1LL*dts_error_threshold*AV_TIME_BASE) { - av_log(NULL, AV_LOG_WARNING, "DTS %"PRId64", next:%"PRId64" st:%d invalid dropping\n", pkt.dts, ist->next_dts, pkt.stream_index); - pkt.dts = AV_NOPTS_VALUE; + av_log(NULL, AV_LOG_WARNING, "DTS %"PRId64", next:%"PRId64" st:%d invalid dropping\n", pkt->dts, ist->next_dts, pkt->stream_index); + pkt->dts = AV_NOPTS_VALUE; } - if (pkt.pts != AV_NOPTS_VALUE){ - int64_t pkt_pts = av_rescale_q(pkt.pts, ist->st->time_base, AV_TIME_BASE_Q); + if (pkt->pts != AV_NOPTS_VALUE){ + int64_t pkt_pts = av_rescale_q(pkt->pts, ist->st->time_base, AV_TIME_BASE_Q); delta = pkt_pts - ist->next_dts; if ( delta < -1LL*dts_error_threshold*AV_TIME_BASE || delta > 1LL*dts_error_threshold*AV_TIME_BASE) { - av_log(NULL, AV_LOG_WARNING, "PTS %"PRId64", next:%"PRId64" invalid dropping st:%d\n", pkt.pts, ist->next_dts, pkt.stream_index); - pkt.pts = AV_NOPTS_VALUE; + av_log(NULL, AV_LOG_WARNING, "PTS %"PRId64", next:%"PRId64" invalid dropping st:%d\n", pkt->pts, ist->next_dts, pkt->stream_index); + pkt->pts = AV_NOPTS_VALUE; } } } } - if (pkt.dts != AV_NOPTS_VALUE) - ifile->last_ts = av_rescale_q(pkt.dts, ist->st->time_base, AV_TIME_BASE_Q); + if (pkt->dts != AV_NOPTS_VALUE) + ifile->last_ts = av_rescale_q(pkt->dts, ist->st->time_base, AV_TIME_BASE_Q); if (debug_ts) { av_log(NULL, AV_LOG_INFO, "demuxer+ffmpeg -> ist_index:%d type:%s pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s off:%s off_time:%s\n", - ifile->ist_index + pkt.stream_index, av_get_media_type_string(ist->dec_ctx->codec_type), - av_ts2str(pkt.pts), av_ts2timestr(pkt.pts, &ist->st->time_base), - av_ts2str(pkt.dts), av_ts2timestr(pkt.dts, &ist->st->time_base), + ifile->ist_index + pkt->stream_index, av_get_media_type_string(ist->dec_ctx->codec_type), + av_ts2str(pkt->pts), av_ts2timestr(pkt->pts, &ist->st->time_base), + av_ts2str(pkt->dts), av_ts2timestr(pkt->dts, &ist->st->time_base), av_ts2str(input_files[ist->file_index]->ts_offset), av_ts2timestr(input_files[ist->file_index]->ts_offset, &AV_TIME_BASE_Q)); } - sub2video_heartbeat(ist, pkt.pts); + sub2video_heartbeat(ist, pkt->pts); - process_input_packet(ist, &pkt, 0); + process_input_packet(ist, pkt, 0); discard_packet: - av_packet_unref(&pkt); +#if HAVE_THREADS + if (ifile->thread_queue_size) + av_packet_free(&pkt); + else +#endif + av_packet_unref(pkt); return 0; } @@ -4577,15 +4705,30 @@ static int transcode_step(void) } if (ost->filter && ost->filter->graph->graph) { - if (!ost->initialized) { - char error[1024] = {0}; - ret = init_output_stream(ost, error, sizeof(error)); - if (ret < 0) { - av_log(NULL, AV_LOG_ERROR, "Error initializing output stream %d:%d -- %s\n", - ost->file_index, ost->index, error); - exit_program(1); - } - } + /* + * Similar case to the early audio initialization in reap_filters. 
+ * Audio is special in ffmpeg.c currently as we depend on lavfi's + * audio frame buffering/creation to get the output audio frame size + * in samples correct. The audio frame size for the filter chain is + * configured during the output stream initialization. + * + * Apparently avfilter_graph_request_oldest (called in + * transcode_from_filter just down the line) peeks. Peeking already + * puts one frame "ready to be given out", which means that any + * update in filter buffer sink configuration afterwards will not + * help us. And yes, even if it would be utilized, + * av_buffersink_get_samples is affected, as it internally utilizes + * the same early exit for peeked frames. + * + * In other words, if avfilter_graph_request_oldest would not make + * further filter chain configuration or usage of + * av_buffersink_get_samples useless (by just causing the return + * of the peeked AVFrame as-is), we could get rid of this additional + * early encoder initialization. + */ + if (av_buffersink_get_type(ost->filter->filter) == AVMEDIA_TYPE_AUDIO) + init_output_stream_wrapper(ost, NULL, 1); + if ((ret = transcode_from_filter(ost->filter->graph, &ist)) < 0) return ret; if (!ist) @@ -4713,6 +4856,10 @@ static int transcode(void) av_freep(&ost->enc_ctx->stats_in); } total_packets_written += ost->packets_written; + if (!ost->packets_written && (abort_on_flags & ABORT_ON_FLAG_EMPTY_OUTPUT_STREAM)) { + av_log(NULL, AV_LOG_FATAL, "Empty output on stream %d.\n", i); + exit_program(1); + } } if (!total_packets_written && (abort_on_flags & ABORT_ON_FLAG_EMPTY_OUTPUT)) {
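The block comment above (and its twin in reap_filters()) describes an ordering constraint: the audio encoder must be opened before frames are pulled from the filter graph, because the encoder's frame_size is what the buffersink uses to size audio frames. A hedged sketch of that configuration step is shown below; cfg_audio_sink() is an illustrative helper name, not something taken from ffmpeg.c.

#include <libavcodec/avcodec.h>
#include <libavfilter/buffersink.h>

/* After the audio encoder has been opened, tell the buffersink how many
 * samples each output frame must carry, so av_buffersink_get_frame()
 * returns encoder-sized frames. */
static void cfg_audio_sink(AVFilterContext *sink, const AVCodecContext *enc)
{
    /* Encoders with AV_CODEC_CAP_VARIABLE_FRAME_SIZE accept any number of
     * samples per frame, so only fixed-frame-size encoders need this. */
    if (enc->frame_size &&
        !(enc->codec->capabilities & AV_CODEC_CAP_VARIABLE_FRAME_SIZE))
        av_buffersink_set_frame_size(sink, enc->frame_size);
}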