X-Git-Url: https://git.sesse.net/?a=blobdiff_plain;f=fftools%2Fffmpeg.c;h=84306818a26ab4b94e07f1e7cd02a81742c1940a;hb=6918e240d706f7390272976d8b8d502afe426a18;hp=544f1a1cef334df9a2c0507dc40ac587b41787d6;hpb=5ab44ff20cdc0e05adecbd0cd352d25fcb930094;p=ffmpeg diff --git a/fftools/ffmpeg.c b/fftools/ffmpeg.c index 544f1a1cef3..84306818a26 100644 --- a/fftools/ffmpeg.c +++ b/fftools/ffmpeg.c @@ -182,7 +182,7 @@ static int sub2video_get_blank_frame(InputStream *ist) ist->sub2video.frame->width = ist->dec_ctx->width ? ist->dec_ctx->width : ist->sub2video.w; ist->sub2video.frame->height = ist->dec_ctx->height ? ist->dec_ctx->height : ist->sub2video.h; ist->sub2video.frame->format = AV_PIX_FMT_RGB32; - if ((ret = av_frame_get_buffer(frame, 32)) < 0) + if ((ret = av_frame_get_buffer(frame, 0)) < 0) return ret; memset(frame->data[0], 0, frame->height * frame->linesize[0]); return 0; @@ -237,7 +237,7 @@ static void sub2video_push_ref(InputStream *ist, int64_t pts) } } -void sub2video_update(InputStream *ist, AVSubtitle *sub) +void sub2video_update(InputStream *ist, int64_t heartbeat_pts, AVSubtitle *sub) { AVFrame *frame = ist->sub2video.frame; int8_t *dst; @@ -254,7 +254,12 @@ void sub2video_update(InputStream *ist, AVSubtitle *sub) AV_TIME_BASE_Q, ist->st->time_base); num_rects = sub->num_rects; } else { - pts = ist->sub2video.end_pts; + /* If we are initializing the system, utilize current heartbeat + PTS as the start time, and show until the following subpicture + is received. Otherwise, utilize the previous subpicture's end time + as the fall-back value. */ + pts = ist->sub2video.initialize ? + heartbeat_pts : ist->sub2video.end_pts; end_pts = INT64_MAX; num_rects = 0; } @@ -269,6 +274,7 @@ void sub2video_update(InputStream *ist, AVSubtitle *sub) sub2video_copy_rect(dst, dst_linesize, frame->width, frame->height, sub->rects[i]); sub2video_push_ref(ist, pts); ist->sub2video.end_pts = end_pts; + ist->sub2video.initialize = 0; } static void sub2video_heartbeat(InputStream *ist, int64_t pts) @@ -291,9 +297,11 @@ static void sub2video_heartbeat(InputStream *ist, int64_t pts) /* do not send the heartbeat frame if the subtitle is already ahead */ if (pts2 <= ist2->sub2video.last_pts) continue; - if (pts2 >= ist2->sub2video.end_pts || - (!ist2->sub2video.frame->data[0] && ist2->sub2video.end_pts < INT64_MAX)) - sub2video_update(ist2, NULL); + if (pts2 >= ist2->sub2video.end_pts || ist2->sub2video.initialize) + /* if we have hit the end of the current displayed subpicture, + or if we need to initialize the system, update the + overlayed subpicture and its start/end times */ + sub2video_update(ist2, pts2 + 1, NULL); for (j = 0, nb_reqs = 0; j < ist2->nb_filters; j++) nb_reqs += av_buffersrc_get_nb_failed_requests(ist2->filters[j]->filter); if (nb_reqs) @@ -307,7 +315,7 @@ static void sub2video_flush(InputStream *ist) int ret; if (ist->sub2video.end_pts < INT64_MAX) - sub2video_update(ist, NULL); + sub2video_update(ist, INT64_MAX, NULL); for (i = 0; i < ist->nb_filters; i++) { ret = av_buffersrc_add_frame(ist->filters[i]->filter, NULL); if (ret != AVERROR_EOF && ret < 0) @@ -493,32 +501,38 @@ static void ffmpeg_cleanup(int ret) FilterGraph *fg = filtergraphs[i]; avfilter_graph_free(&fg->graph); for (j = 0; j < fg->nb_inputs; j++) { - while (av_fifo_size(fg->inputs[j]->frame_queue)) { + InputFilter *ifilter = fg->inputs[j]; + struct InputStream *ist = ifilter->ist; + + while (av_fifo_size(ifilter->frame_queue)) { AVFrame *frame; - av_fifo_generic_read(fg->inputs[j]->frame_queue, &frame, + 
av_fifo_generic_read(ifilter->frame_queue, &frame, sizeof(frame), NULL); av_frame_free(&frame); } - av_fifo_freep(&fg->inputs[j]->frame_queue); - if (fg->inputs[j]->ist->sub2video.sub_queue) { - while (av_fifo_size(fg->inputs[j]->ist->sub2video.sub_queue)) { + av_fifo_freep(&ifilter->frame_queue); + if (ist->sub2video.sub_queue) { + while (av_fifo_size(ist->sub2video.sub_queue)) { AVSubtitle sub; - av_fifo_generic_read(fg->inputs[j]->ist->sub2video.sub_queue, + av_fifo_generic_read(ist->sub2video.sub_queue, &sub, sizeof(sub), NULL); avsubtitle_free(&sub); } - av_fifo_freep(&fg->inputs[j]->ist->sub2video.sub_queue); + av_fifo_freep(&ist->sub2video.sub_queue); } - av_buffer_unref(&fg->inputs[j]->hw_frames_ctx); - av_freep(&fg->inputs[j]->name); + av_buffer_unref(&ifilter->hw_frames_ctx); + av_freep(&ifilter->name); av_freep(&fg->inputs[j]); } av_freep(&fg->inputs); for (j = 0; j < fg->nb_outputs; j++) { - av_freep(&fg->outputs[j]->name); - av_freep(&fg->outputs[j]->formats); - av_freep(&fg->outputs[j]->channel_layouts); - av_freep(&fg->outputs[j]->sample_rates); + OutputFilter *ofilter = fg->outputs[j]; + + avfilter_inout_free(&ofilter->out_tmp); + av_freep(&ofilter->name); + av_freep(&ofilter->formats); + av_freep(&ofilter->channel_layouts); + av_freep(&ofilter->sample_rates); av_freep(&fg->outputs[j]); } av_freep(&fg->outputs); @@ -550,9 +564,7 @@ static void ffmpeg_cleanup(int ret) if (!ost) continue; - for (j = 0; j < ost->nb_bitstream_filters; j++) - av_bsf_free(&ost->bsf_ctx[j]); - av_freep(&ost->bsf_ctx); + av_bsf_free(&ost->bsf_ctx); av_frame_free(&ost->filtered_frame); av_frame_free(&ost->last_frame); @@ -567,6 +579,7 @@ static void ffmpeg_cleanup(int ret) ost->audio_channels_mapped = 0; av_dict_free(&ost->sws_dict); + av_dict_free(&ost->swr_opts); avcodec_free_context(&ost->enc_ctx); avcodec_parameters_free(&ost->ref_par); @@ -779,6 +792,8 @@ static void write_packet(OutputFile *of, AVPacket *pkt, OutputStream *ost, int u int64_t max = ost->last_mux_dts + !(s->oformat->flags & AVFMT_TS_NONSTRICT); if (pkt->dts < max) { int loglevel = max - pkt->dts > 2 || st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO ? AV_LOG_WARNING : AV_LOG_DEBUG; + if (exit_on_error) + loglevel = AV_LOG_ERROR; av_log(s, loglevel, "Non-monotonous DTS in output stream " "%d:%d; previous: %"PRId64", current: %"PRId64"; ", ost->file_index, ost->st->index, ost->last_mux_dts, pkt->dts); @@ -848,40 +863,15 @@ static void output_packet(OutputFile *of, AVPacket *pkt, { int ret = 0; - /* apply the output bitstream filters, if any */ - if (ost->nb_bitstream_filters) { - int idx; - - ret = av_bsf_send_packet(ost->bsf_ctx[0], eof ? NULL : pkt); + /* apply the output bitstream filters */ + if (ost->bsf_ctx) { + ret = av_bsf_send_packet(ost->bsf_ctx, eof ? NULL : pkt); if (ret < 0) goto finish; - - eof = 0; - idx = 1; - while (idx) { - /* get a packet from the previous filter up the chain */ - ret = av_bsf_receive_packet(ost->bsf_ctx[idx - 1], pkt); - if (ret == AVERROR(EAGAIN)) { - ret = 0; - idx--; - continue; - } else if (ret == AVERROR_EOF) { - eof = 1; - } else if (ret < 0) - goto finish; - - /* send it to the next filter down the chain or to the muxer */ - if (idx < ost->nb_bitstream_filters) { - ret = av_bsf_send_packet(ost->bsf_ctx[idx], eof ? 
NULL : pkt); - if (ret < 0) - goto finish; - idx++; - eof = 0; - } else if (eof) - goto finish; - else - write_packet(of, pkt, ost, 0); - } + while ((ret = av_bsf_receive_packet(ost->bsf_ctx, pkt)) >= 0) + write_packet(of, pkt, ost, 0); + if (ret == AVERROR(EAGAIN)) + ret = 0; } else if (!eof) write_packet(of, pkt, ost, 0); @@ -1079,6 +1069,7 @@ static void do_video_out(OutputFile *of, if (!ost->filters_script && !ost->filters && + (nb_filtergraphs == 0 || !filtergraphs[0]->graph_desc) && next_picture && ist && lrintf(next_picture->pkt_duration * av_q2d(ist->st->time_base) / av_q2d(enc->time_base)) > 0) { @@ -1135,7 +1126,7 @@ static void do_video_out(OutputFile *of, av_log(NULL, AV_LOG_DEBUG, "Not duplicating %d initial frames\n", (int)lrintf(delta0)); delta = duration; delta0 = 0; - ost->sync_opts = lrint(sync_ipts); + ost->sync_opts = llrint(sync_ipts); } case VSYNC_CFR: // FIXME set to 0.5 after we fix some dts/pts bugs like in avidec.c @@ -1146,18 +1137,18 @@ static void do_video_out(OutputFile *of, else if (delta > 1.1) { nb_frames = lrintf(delta); if (delta0 > 1.1) - nb0_frames = lrintf(delta0 - 0.6); + nb0_frames = llrintf(delta0 - 0.6); } break; case VSYNC_VFR: if (delta <= -0.6) nb_frames = 0; else if (delta > 0.6) - ost->sync_opts = lrint(sync_ipts); + ost->sync_opts = llrint(sync_ipts); break; case VSYNC_DROP: case VSYNC_PASSTHROUGH: - ost->sync_opts = lrint(sync_ipts); + ost->sync_opts = llrint(sync_ipts); break; default: av_assert0(0); @@ -1264,7 +1255,8 @@ static void do_video_out(OutputFile *of, ost->forced_keyframes_expr_const_values[FKF_N] += 1; } else if ( ost->forced_keyframes && !strncmp(ost->forced_keyframes, "source", 6) - && in_picture->key_frame==1) { + && in_picture->key_frame==1 + && !i) { forced_keyframe = 1; } @@ -1902,9 +1894,6 @@ static void flush_encoders(void) } } - if (enc->codec_type == AVMEDIA_TYPE_AUDIO && enc->frame_size <= 1) - continue; - if (enc->codec_type != AVMEDIA_TYPE_VIDEO && enc->codec_type != AVMEDIA_TYPE_AUDIO) continue; @@ -1994,12 +1983,13 @@ static void do_streamcopy(InputStream *ist, OutputStream *ost, const AVPacket *p InputFile *f = input_files [ist->file_index]; int64_t start_time = (of->start_time == AV_NOPTS_VALUE) ? 0 : of->start_time; int64_t ost_tb_start_time = av_rescale_q(start_time, AV_TIME_BASE_Q, ost->mux_timebase); - AVPacket opkt = { 0 }; - - av_init_packet(&opkt); + AVPacket opkt; // EOF: flush output bitstream filters. 
if (!pkt) { + av_init_packet(&opkt); + opkt.data = NULL; + opkt.size = 0; output_packet(of, &opkt, ost, 1); return; } @@ -2038,40 +2028,29 @@ static void do_streamcopy(InputStream *ist, OutputStream *ost, const AVPacket *p if (ost->enc_ctx->codec_type == AVMEDIA_TYPE_VIDEO) ost->sync_opts++; + if (av_packet_ref(&opkt, pkt) < 0) + exit_program(1); + if (pkt->pts != AV_NOPTS_VALUE) opkt.pts = av_rescale_q(pkt->pts, ist->st->time_base, ost->mux_timebase) - ost_tb_start_time; - else - opkt.pts = AV_NOPTS_VALUE; - if (pkt->dts == AV_NOPTS_VALUE) + if (pkt->dts == AV_NOPTS_VALUE) { opkt.dts = av_rescale_q(ist->dts, AV_TIME_BASE_Q, ost->mux_timebase); - else - opkt.dts = av_rescale_q(pkt->dts, ist->st->time_base, ost->mux_timebase); - opkt.dts -= ost_tb_start_time; - - if (ost->st->codecpar->codec_type == AVMEDIA_TYPE_AUDIO && pkt->dts != AV_NOPTS_VALUE) { + } else if (ost->st->codecpar->codec_type == AVMEDIA_TYPE_AUDIO) { int duration = av_get_audio_frame_duration(ist->dec_ctx, pkt->size); if(!duration) duration = ist->dec_ctx->frame_size; - opkt.dts = opkt.pts = av_rescale_delta(ist->st->time_base, pkt->dts, - (AVRational){1, ist->dec_ctx->sample_rate}, duration, &ist->filter_in_rescale_delta_last, - ost->mux_timebase) - ost_tb_start_time; - } + opkt.dts = av_rescale_delta(ist->st->time_base, pkt->dts, + (AVRational){1, ist->dec_ctx->sample_rate}, duration, + &ist->filter_in_rescale_delta_last, ost->mux_timebase); + /* dts will be set immediately afterwards to what pts is now */ + opkt.pts = opkt.dts - ost_tb_start_time; + } else + opkt.dts = av_rescale_q(pkt->dts, ist->st->time_base, ost->mux_timebase); + opkt.dts -= ost_tb_start_time; opkt.duration = av_rescale_q(pkt->duration, ist->st->time_base, ost->mux_timebase); - opkt.flags = pkt->flags; - - if (pkt->buf) { - opkt.buf = av_buffer_ref(pkt->buf); - if (!opkt.buf) - exit_program(1); - } - opkt.data = pkt->data; - opkt.size = pkt->size; - - av_copy_packet_side_data(&opkt, pkt); - output_packet(of, &opkt, ost, 0); } @@ -2392,7 +2371,7 @@ static int decode_video(InputStream *ist, AVPacket *pkt, int *got_output, int64_ av_log(ist->dec_ctx, AV_LOG_WARNING, "video_delay is larger in decoder than demuxer %d > %d.\n" "If you want to help, upload a sample " - "of this file to ftp://upload.ffmpeg.org/incoming/ " + "of this file to https://streams.videolan.org/upload/ " "and contact the ffmpeg-devel mailing list. 
(ffmpeg-devel@ffmpeg.org)\n", ist->dec_ctx->has_b_frames, ist->st->codecpar->video_delay); @@ -2514,7 +2493,7 @@ static int transcode_subtitles(InputStream *ist, AVPacket *pkt, int *got_output, return ret; if (ist->sub2video.frame) { - sub2video_update(ist, &subtitle); + sub2video_update(ist, INT64_MIN, &subtitle); } else if (ist->nb_filters) { if (!ist->sub2video.sub_queue) ist->sub2video.sub_queue = av_fifo_alloc(8 * sizeof(AVSubtitle)); @@ -2783,7 +2762,7 @@ static void print_sdp(void) if (avio_open2(&sdp_pb, sdp_filename, AVIO_FLAG_WRITE, &int_cb, NULL) < 0) { av_log(NULL, AV_LOG_ERROR, "Failed to open sdp file '%s'\n", sdp_filename); } else { - avio_printf(sdp_pb, "SDP:\n%s", sdp); + avio_print(sdp_pb, sdp); avio_closep(&sdp_pb); av_freep(&sdp_filename); } @@ -3015,35 +2994,28 @@ static int check_init_output_file(OutputFile *of, int file_index) static int init_output_bsfs(OutputStream *ost) { - AVBSFContext *ctx; - int i, ret; + AVBSFContext *ctx = ost->bsf_ctx; + int ret; - if (!ost->nb_bitstream_filters) + if (!ctx) return 0; - for (i = 0; i < ost->nb_bitstream_filters; i++) { - ctx = ost->bsf_ctx[i]; - - ret = avcodec_parameters_copy(ctx->par_in, - i ? ost->bsf_ctx[i - 1]->par_out : ost->st->codecpar); - if (ret < 0) - return ret; + ret = avcodec_parameters_copy(ctx->par_in, ost->st->codecpar); + if (ret < 0) + return ret; - ctx->time_base_in = i ? ost->bsf_ctx[i - 1]->time_base_out : ost->st->time_base; + ctx->time_base_in = ost->st->time_base; - ret = av_bsf_init(ctx); - if (ret < 0) { - av_log(NULL, AV_LOG_ERROR, "Error initializing bitstream filter: %s\n", - ost->bsf_ctx[i]->filter->name); - return ret; - } + ret = av_bsf_init(ctx); + if (ret < 0) { + av_log(NULL, AV_LOG_ERROR, "Error initializing bitstream filter: %s\n", + ctx->filter->name); + return ret; } - ctx = ost->bsf_ctx[ost->nb_bitstream_filters - 1]; ret = avcodec_parameters_copy(ost->st->codecpar, ctx->par_out); if (ret < 0) return ret; - ost->st->time_base = ctx->time_base_out; return 0; @@ -3375,10 +3347,6 @@ static int init_output_stream_encode(OutputStream *ost) av_log(oc, AV_LOG_WARNING, "Frame rate very high for a muxer not efficiently supporting it.\n" "Please consider specifying a lower framerate, a different muxer or -vsync 2\n"); } - for (j = 0; j < ost->forced_kf_count; j++) - ost->forced_kf_pts[j] = av_rescale_q(ost->forced_kf_pts[j], - AV_TIME_BASE_Q, - enc_ctx->time_base); enc_ctx->width = av_buffersink_get_w(ost->filter->filter); enc_ctx->height = av_buffersink_get_h(ost->filter->filter); @@ -3480,21 +3448,14 @@ static int init_output_stream(OutputStream *ost, char *error, int error_len) !av_dict_get(ost->encoder_opts, "ab", NULL, 0)) av_dict_set(&ost->encoder_opts, "b", "128000", 0); - if (ost->filter && av_buffersink_get_hw_frames_ctx(ost->filter->filter) && - ((AVHWFramesContext*)av_buffersink_get_hw_frames_ctx(ost->filter->filter)->data)->format == - av_buffersink_get_format(ost->filter->filter)) { - ost->enc_ctx->hw_frames_ctx = av_buffer_ref(av_buffersink_get_hw_frames_ctx(ost->filter->filter)); - if (!ost->enc_ctx->hw_frames_ctx) - return AVERROR(ENOMEM); - } else { - ret = hw_device_setup_for_encode(ost); - if (ret < 0) { - snprintf(error, error_len, "Device setup failed for " - "encoder on output stream #%d:%d : %s", + ret = hw_device_setup_for_encode(ost); + if (ret < 0) { + snprintf(error, error_len, "Device setup failed for " + "encoder on output stream #%d:%d : %s", ost->file_index, ost->index, av_err2str(ret)); - return ret; - } + return ret; } + if (ist && ist->dec->type == 
AVMEDIA_TYPE_SUBTITLE && ost->enc->type == AVMEDIA_TYPE_SUBTITLE) {
         int input_props = 0, output_props = 0;
         AVCodecDescriptor const *input_descriptor =
@@ -3570,12 +3531,14 @@ static int init_output_stream(OutputStream *ost, char *error, int error_len)
         int i;
 
         for (i = 0; i < ist->st->nb_side_data; i++) {
             AVPacketSideData *sd = &ist->st->side_data[i];
-            uint8_t *dst = av_stream_new_side_data(ost->st, sd->type, sd->size);
-            if (!dst)
-                return AVERROR(ENOMEM);
-            memcpy(dst, sd->data, sd->size);
-            if (ist->autorotate && sd->type == AV_PKT_DATA_DISPLAYMATRIX)
-                av_display_rotation_set((uint32_t *)dst, 0);
+            if (sd->type != AV_PKT_DATA_CPB_PROPERTIES) {
+                uint8_t *dst = av_stream_new_side_data(ost->st, sd->type, sd->size);
+                if (!dst)
+                    return AVERROR(ENOMEM);
+                memcpy(dst, sd->data, sd->size);
+                if (ist->autorotate && sd->type == AV_PKT_DATA_DISPLAYMATRIX)
+                    av_display_rotation_set((uint32_t *)dst, 0);
+            }
         }
     }
 
@@ -3874,7 +3837,9 @@ static OutputStream *choose_output(void)
                        av_rescale_q(ost->st->cur_dts, ost->st->time_base,
                                     AV_TIME_BASE_Q);
         if (ost->st->cur_dts == AV_NOPTS_VALUE)
-            av_log(NULL, AV_LOG_DEBUG, "cur_dts is invalid (this is harmless if it occurs once at the start per stream)\n");
+            av_log(NULL, AV_LOG_DEBUG,
+                   "cur_dts is invalid st:%d (%d) [init:%d i_done:%d finish:%d] (this is harmless if it occurs once at the start per stream)\n",
+                   ost->st->index, ost->st->id, ost->initialized, ost->inputs_done, ost->finished);
 
         if (!ost->initialized && !ost->inputs_done)
             return ost;
@@ -4087,7 +4052,9 @@ static int init_input_thread(int i)
     int ret;
     InputFile *f = input_files[i];
 
-    if (nb_input_files == 1)
+    if (f->thread_queue_size < 0)
+        f->thread_queue_size = (nb_input_files > 1 ? 8 : 0);
+    if (!f->thread_queue_size)
         return 0;
 
     if (f->ctx->pb ? !f->ctx->pb->seekable :
@@ -4141,7 +4108,7 @@ static int get_input_packet(InputFile *f, AVPacket *pkt)
     }
 
 #if HAVE_THREADS
-    if (nb_input_files > 1)
+    if (f->thread_queue_size)
         return get_input_packet_mt(f, pkt);
 #endif
     return av_read_frame(f->ctx, pkt);
@@ -4192,7 +4159,7 @@ static int seek_to_start(InputFile *ifile, AVFormatContext *is)
     int i, ret, has_audio = 0;
     int64_t duration = 0;
 
-    ret = av_seek_frame(is, -1, is->start_time, 0);
+    ret = avformat_seek_file(is, -1, INT64_MIN, is->start_time, is->start_time, 0);
     if (ret < 0)
         return ret;
 
@@ -4232,7 +4199,8 @@ static int seek_to_start(InputFile *ifile, AVFormatContext *is)
             ifile->time_base = ist->st->time_base;
         /* the total duration of the stream, max_pts - min_pts is
          * the duration of the stream without the last frame */
-        duration += ist->max_pts - ist->min_pts;
+        if (ist->max_pts > ist->min_pts && ist->max_pts - (uint64_t)ist->min_pts < INT64_MAX - duration)
+            duration += ist->max_pts - ist->min_pts;
         ifile->time_base = duration_max(duration, &ifile->duration, ist->st->time_base,
                                         ifile->time_base);
     }
@@ -4259,6 +4227,7 @@ static int process_input(int file_index)
     int ret, thread_ret, i, j;
    int64_t duration;
    int64_t pkt_dts;
+    int disable_discontinuity_correction = copy_ts;
 
     is  = ifile->ctx;
     ret = get_input_packet(ifile, &pkt);
@@ -4460,10 +4429,20 @@ static int process_input(int file_index)
             pkt.dts += duration;
 
         pkt_dts = av_rescale_q_rnd(pkt.dts, ist->st->time_base, AV_TIME_BASE_Q, AV_ROUND_NEAR_INF|AV_ROUND_PASS_MINMAX);
+
+        if (copy_ts && pkt_dts != AV_NOPTS_VALUE && ist->next_dts != AV_NOPTS_VALUE &&
+            (is->iformat->flags & AVFMT_TS_DISCONT) && ist->st->pts_wrap_bits < 60) {
+            int64_t wrap_dts = av_rescale_q_rnd(pkt.dts + (1LL<<ist->st->pts_wrap_bits),
+                                                ist->st->time_base, AV_TIME_BASE_Q,
+                                                AV_ROUND_NEAR_INF|AV_ROUND_PASS_MINMAX);
+            if (FFABS(wrap_dts - ist->next_dts) < FFABS(pkt_dts - ist->next_dts)/10)
+                disable_discontinuity_correction = 0;
+        }
+
         if ((ist->dec_ctx->codec_type == AVMEDIA_TYPE_VIDEO ||
                 ist->dec_ctx->codec_type == AVMEDIA_TYPE_AUDIO) &&
             pkt_dts != AV_NOPTS_VALUE && ist->next_dts != AV_NOPTS_VALUE &&
-            !copy_ts) {
+            !disable_discontinuity_correction) {
            int64_t delta   = pkt_dts - ist->next_dts;
            if (is->iformat->flags & AVFMT_TS_DISCONT) {
                if (delta < -1LL*dts_delta_threshold*AV_TIME_BASE ||
@@ -4737,6 +4716,10 @@ static int transcode(void)
                 av_freep(&ost->enc_ctx->stats_in);
             }
             total_packets_written += ost->packets_written;
+            if (!ost->packets_written && (abort_on_flags & ABORT_ON_FLAG_EMPTY_OUTPUT_STREAM)) {
+                av_log(NULL, AV_LOG_FATAL, "Empty output on stream %d.\n", i);
+                exit_program(1);
+            }
         }
 
         if (!total_packets_written && (abort_on_flags & ABORT_ON_FLAG_EMPTY_OUTPUT)) {
@@ -4754,7 +4737,6 @@ static int transcode(void)
         }
     }
 
-    av_buffer_unref(&hw_device_ctx);
     hw_device_free_all();
 
     /* finished ! */
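A note on the output_packet()/init_output_bsfs() hunks above: the patch replaces the per-stream bsf_ctx[] chain with a single AVBSFContext that is fed via av_bsf_send_packet() and drained via av_bsf_receive_packet() until the filter reports AVERROR(EAGAIN) (needs more input) or AVERROR_EOF (fully flushed). The following is a minimal standalone sketch of that send/drain pattern, assuming an FFmpeg recent enough to install libavcodec/bsf.h; the helper names bsf_open()/bsf_filter() and the consume() callback are illustrative only, not part of the patch.

/* Sketch only -- not part of the patch. Helper names (bsf_open, bsf_filter)
 * and the consume() callback are illustrative. */
#include <libavcodec/bsf.h>
#include <libavutil/error.h>

/* Open one bitstream filter from a stream's codec parameters, roughly what
 * the simplified init_output_bsfs() does for its single context. */
static int bsf_open(AVBSFContext **pbsf, const char *name,
                    const AVCodecParameters *par, AVRational time_base)
{
    const AVBitStreamFilter *f = av_bsf_get_by_name(name);
    int ret;

    if (!f)
        return AVERROR_BSF_NOT_FOUND;
    if ((ret = av_bsf_alloc(f, pbsf)) < 0)
        return ret;
    if ((ret = avcodec_parameters_copy((*pbsf)->par_in, par)) < 0) {
        av_bsf_free(pbsf);
        return ret;
    }
    (*pbsf)->time_base_in = time_base;
    if ((ret = av_bsf_init(*pbsf)) < 0)
        av_bsf_free(pbsf);
    return ret;
}

/* Send one packet (NULL = EOF/flush) and pass every filtered packet to
 * consume(); AVERROR(EAGAIN) means "feed more input", AVERROR_EOF means the
 * filter is fully drained -- mirroring the loop output_packet() now uses. */
static int bsf_filter(AVBSFContext *bsf, AVPacket *pkt,
                      int (*consume)(AVPacket *out, void *opaque), void *opaque)
{
    AVPacket *out;
    int ret = av_bsf_send_packet(bsf, pkt);
    if (ret < 0)
        return ret;

    out = av_packet_alloc();
    if (!out)
        return AVERROR(ENOMEM);

    while ((ret = av_bsf_receive_packet(bsf, out)) >= 0) {
        ret = consume(out, opaque);
        av_packet_unref(out);
        if (ret < 0)
            break;
    }
    if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF)
        ret = 0;

    av_packet_free(&out);
    return ret;
}

In ffmpeg.c itself, write_packet(of, pkt, ost, 0) plays the role of consume(), and init_output_bsfs() copies ost->st->codecpar and ost->st->time_base into the context before av_bsf_init(), as bsf_open() sketches here.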