X-Git-Url: https://git.sesse.net/?a=blobdiff_plain;f=fftools%2Fffmpeg.c;h=d896b14a140f5a6790376fbe1e67219ad89aeaab;hb=67e957b43a2fdc197018968b0adc2f6a9c04e961;hp=544f1a1cef334df9a2c0507dc40ac587b41787d6;hpb=fda424b300c1a0b991296aa585691609d01196bd;p=ffmpeg

diff --git a/fftools/ffmpeg.c b/fftools/ffmpeg.c
index 544f1a1cef3..d896b14a140 100644
--- a/fftools/ffmpeg.c
+++ b/fftools/ffmpeg.c
@@ -237,7 +237,7 @@ static void sub2video_push_ref(InputStream *ist, int64_t pts)
     }
 }
 
-void sub2video_update(InputStream *ist, AVSubtitle *sub)
+void sub2video_update(InputStream *ist, int64_t heartbeat_pts, AVSubtitle *sub)
 {
     AVFrame *frame = ist->sub2video.frame;
     int8_t *dst;
@@ -254,7 +254,12 @@ void sub2video_update(InputStream *ist, AVSubtitle *sub)
                                  AV_TIME_BASE_Q, ist->st->time_base);
         num_rects = sub->num_rects;
     } else {
-        pts       = ist->sub2video.end_pts;
+        /* If we are initializing the system, utilize current heartbeat
+           PTS as the start time, and show until the following subpicture
+           is received. Otherwise, utilize the previous subpicture's end time
+           as the fall-back value. */
+        pts       = ist->sub2video.initialize ?
+                    heartbeat_pts : ist->sub2video.end_pts;
         end_pts   = INT64_MAX;
         num_rects = 0;
     }
@@ -269,6 +274,7 @@ void sub2video_update(InputStream *ist, AVSubtitle *sub)
         sub2video_copy_rect(dst, dst_linesize, frame->width, frame->height,
                             sub->rects[i]);
     sub2video_push_ref(ist, pts);
     ist->sub2video.end_pts = end_pts;
+    ist->sub2video.initialize = 0;
 }
 
 static void sub2video_heartbeat(InputStream *ist, int64_t pts)
@@ -291,9 +297,11 @@ static void sub2video_heartbeat(InputStream *ist, int64_t pts)
         /* do not send the heartbeat frame if the subtitle is already ahead */
         if (pts2 <= ist2->sub2video.last_pts)
             continue;
-        if (pts2 >= ist2->sub2video.end_pts ||
-            (!ist2->sub2video.frame->data[0] && ist2->sub2video.end_pts < INT64_MAX))
-            sub2video_update(ist2, NULL);
+        if (pts2 >= ist2->sub2video.end_pts || ist2->sub2video.initialize)
+            /* if we have hit the end of the current displayed subpicture,
+               or if we need to initialize the system, update the
+               overlayed subpicture and its start/end times */
+            sub2video_update(ist2, pts2 + 1, NULL);
         for (j = 0, nb_reqs = 0; j < ist2->nb_filters; j++)
             nb_reqs += av_buffersrc_get_nb_failed_requests(ist2->filters[j]->filter);
         if (nb_reqs)
@@ -307,7 +315,7 @@ static void sub2video_flush(InputStream *ist)
     int ret;
 
     if (ist->sub2video.end_pts < INT64_MAX)
-        sub2video_update(ist, NULL);
+        sub2video_update(ist, INT64_MAX, NULL);
     for (i = 0; i < ist->nb_filters; i++) {
         ret = av_buffersrc_add_frame(ist->filters[i]->filter, NULL);
         if (ret != AVERROR_EOF && ret < 0)
@@ -567,6 +575,7 @@ static void ffmpeg_cleanup(int ret)
         ost->audio_channels_mapped = 0;
 
         av_dict_free(&ost->sws_dict);
+        av_dict_free(&ost->swr_opts);
 
         avcodec_free_context(&ost->enc_ctx);
         avcodec_parameters_free(&ost->ref_par);
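The sub2video hunks above change the sub2video_update() contract: callers now pass a heartbeat PTS, and a new sub2video.initialize flag selects whether that heartbeat time or the previous subpicture's end time seeds the blank subtitle frame. In the diff, the heartbeat path passes pts2 + 1, the flush path passes INT64_MAX and the subtitle-decode path passes INT64_MIN. The standalone sketch below only models that start-time selection; the Sub2VideoState struct, the pick_start_pts() helper and the sample values are invented for illustration and are not part of ffmpeg.c.

    /* Minimal model of the start-time fallback introduced above; not ffmpeg.c code. */
    #include <stdio.h>
    #include <stdint.h>

    typedef struct Sub2VideoState {
        int64_t end_pts;    /* end time of the last displayed subpicture */
        int     initialize; /* nonzero until the first update has happened */
    } Sub2VideoState;

    static int64_t pick_start_pts(Sub2VideoState *s, int64_t heartbeat_pts)
    {
        /* mirrors: pts = ist->sub2video.initialize ? heartbeat_pts
         *                                          : ist->sub2video.end_pts; */
        int64_t pts = s->initialize ? heartbeat_pts : s->end_pts;
        s->initialize = 0; /* as done at the end of sub2video_update() */
        return pts;
    }

    int main(void)
    {
        Sub2VideoState s = { .end_pts = 0, .initialize = 1 };
        /* first heartbeat: the heartbeat PTS seeds the blank frame */
        printf("first: %lld\n", (long long)pick_start_pts(&s, 42));
        s.end_pts = 90;
        /* later heartbeats: fall back to the previous subpicture's end time */
        printf("later: %lld\n", (long long)pick_start_pts(&s, 100));
        return 0;
    }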
@@ -779,6 +788,8 @@ static void write_packet(OutputFile *of, AVPacket *pkt, OutputStream *ost, int u
             int64_t max = ost->last_mux_dts + !(s->oformat->flags & AVFMT_TS_NONSTRICT);
             if (pkt->dts < max) {
                 int loglevel = max - pkt->dts > 2 || st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO ?
                                AV_LOG_WARNING : AV_LOG_DEBUG;
+                if (exit_on_error)
+                    loglevel = AV_LOG_ERROR;
                 av_log(s, loglevel, "Non-monotonous DTS in output stream "
                        "%d:%d; previous: %"PRId64", current: %"PRId64"; ",
                        ost->file_index, ost->st->index, ost->last_mux_dts, pkt->dts);
@@ -1079,6 +1090,7 @@ static void do_video_out(OutputFile *of,
     if (!ost->filters_script &&
         !ost->filters &&
+        (nb_filtergraphs == 0 || !filtergraphs[0]->graph_desc) &&
         next_picture &&
         ist &&
         lrintf(next_picture->pkt_duration * av_q2d(ist->st->time_base) /
                av_q2d(enc->time_base)) > 0) {
@@ -1135,7 +1147,7 @@ static void do_video_out(OutputFile *of,
             av_log(NULL, AV_LOG_DEBUG, "Not duplicating %d initial frames\n", (int)lrintf(delta0));
             delta = duration;
             delta0 = 0;
-            ost->sync_opts = lrint(sync_ipts);
+            ost->sync_opts = llrint(sync_ipts);
         }
     case VSYNC_CFR:
         // FIXME set to 0.5 after we fix some dts/pts bugs like in avidec.c
@@ -1146,18 +1158,18 @@ static void do_video_out(OutputFile *of,
         else if (delta > 1.1) {
             nb_frames = lrintf(delta);
             if (delta0 > 1.1)
-                nb0_frames = lrintf(delta0 - 0.6);
+                nb0_frames = llrintf(delta0 - 0.6);
         }
         break;
     case VSYNC_VFR:
         if (delta <= -0.6)
             nb_frames = 0;
         else if (delta > 0.6)
-            ost->sync_opts = lrint(sync_ipts);
+            ost->sync_opts = llrint(sync_ipts);
         break;
     case VSYNC_DROP:
     case VSYNC_PASSTHROUGH:
-        ost->sync_opts = lrint(sync_ipts);
+        ost->sync_opts = llrint(sync_ipts);
         break;
     default:
         av_assert0(0);
@@ -1264,7 +1276,8 @@ static void do_video_out(OutputFile *of,
             ost->forced_keyframes_expr_const_values[FKF_N] += 1;
         } else if (   ost->forced_keyframes
                    && !strncmp(ost->forced_keyframes, "source", 6)
-                   && in_picture->key_frame==1) {
+                   && in_picture->key_frame==1
+                   && !i) {
             forced_keyframe = 1;
         }
 
@@ -1902,9 +1915,6 @@ static void flush_encoders(void)
             }
         }
 
-        if (enc->codec_type == AVMEDIA_TYPE_AUDIO && enc->frame_size <= 1)
-            continue;
-
         if (enc->codec_type != AVMEDIA_TYPE_VIDEO && enc->codec_type != AVMEDIA_TYPE_AUDIO)
             continue;
 
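A note on the lrint()/lrintf() to llrint()/llrintf() switches in the do_video_out() hunks above: ost->sync_opts is a 64-bit counter, while lrint() returns a long, which is only 32 bits wide on ILP32 and Windows targets, so rounding a large sync_ipts value could overflow before it is stored. The snippet below is a standalone illustration of that width difference with an arbitrary sample value, not code taken from ffmpeg.c.

    /* Standalone illustration of why llrint() is preferred for a 64-bit target. */
    #include <stdio.h>
    #include <stdint.h>
    #include <math.h>

    int main(void)
    {
        double  sync_ipts = 9e12;                        /* arbitrary large timestamp */
        int64_t sync_opts = (int64_t)llrint(sync_ipts);  /* always a 64-bit result */

        /* lrint() returns long: 64-bit on LP64 Linux, but 32-bit on ILP32 and
         * Windows builds, where rounding this value would overflow. */
        printf("sizeof(long) = %zu, sizeof(long long) = %zu\n",
               sizeof(long), sizeof(long long));
        printf("llrint(%.0f) = %lld\n", sync_ipts, (long long)sync_opts);
        return 0;
    }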
@@ -1994,12 +2004,13 @@ static void do_streamcopy(InputStream *ist, OutputStream *ost, const AVPacket *p
     InputFile   *f = input_files [ist->file_index];
     int64_t start_time = (of->start_time == AV_NOPTS_VALUE) ? 0 : of->start_time;
     int64_t ost_tb_start_time = av_rescale_q(start_time, AV_TIME_BASE_Q, ost->mux_timebase);
-    AVPacket opkt = { 0 };
-
-    av_init_packet(&opkt);
+    AVPacket opkt;
 
     // EOF: flush output bitstream filters.
     if (!pkt) {
+        av_init_packet(&opkt);
+        opkt.data = NULL;
+        opkt.size = 0;
         output_packet(of, &opkt, ost, 1);
         return;
     }
@@ -2038,40 +2049,29 @@ static void do_streamcopy(InputStream *ist, OutputStream *ost, const AVPacket *p
     if (ost->enc_ctx->codec_type == AVMEDIA_TYPE_VIDEO)
         ost->sync_opts++;
 
+    if (av_packet_ref(&opkt, pkt) < 0)
+        exit_program(1);
+
     if (pkt->pts != AV_NOPTS_VALUE)
         opkt.pts = av_rescale_q(pkt->pts, ist->st->time_base, ost->mux_timebase) - ost_tb_start_time;
-    else
-        opkt.pts = AV_NOPTS_VALUE;
 
-    if (pkt->dts == AV_NOPTS_VALUE)
+    if (pkt->dts == AV_NOPTS_VALUE) {
         opkt.dts = av_rescale_q(ist->dts, AV_TIME_BASE_Q, ost->mux_timebase);
-    else
-        opkt.dts = av_rescale_q(pkt->dts, ist->st->time_base, ost->mux_timebase);
-    opkt.dts -= ost_tb_start_time;
-
-    if (ost->st->codecpar->codec_type == AVMEDIA_TYPE_AUDIO && pkt->dts != AV_NOPTS_VALUE) {
+    } else if (ost->st->codecpar->codec_type == AVMEDIA_TYPE_AUDIO) {
         int duration = av_get_audio_frame_duration(ist->dec_ctx, pkt->size);
         if(!duration)
             duration = ist->dec_ctx->frame_size;
-        opkt.dts = opkt.pts = av_rescale_delta(ist->st->time_base, pkt->dts,
-                                               (AVRational){1, ist->dec_ctx->sample_rate}, duration, &ist->filter_in_rescale_delta_last,
-                                               ost->mux_timebase) - ost_tb_start_time;
-    }
+        opkt.dts = av_rescale_delta(ist->st->time_base, pkt->dts,
+                                    (AVRational){1, ist->dec_ctx->sample_rate}, duration,
+                                    &ist->filter_in_rescale_delta_last, ost->mux_timebase);
+        /* dts will be set immediately afterwards to what pts is now */
+        opkt.pts = opkt.dts - ost_tb_start_time;
+    } else
+        opkt.dts = av_rescale_q(pkt->dts, ist->st->time_base, ost->mux_timebase);
+    opkt.dts -= ost_tb_start_time;
 
     opkt.duration = av_rescale_q(pkt->duration, ist->st->time_base, ost->mux_timebase);
-    opkt.flags    = pkt->flags;
-
-    if (pkt->buf) {
-        opkt.buf = av_buffer_ref(pkt->buf);
-        if (!opkt.buf)
-            exit_program(1);
-    }
-    opkt.data = pkt->data;
-    opkt.size = pkt->size;
-
-    av_copy_packet_side_data(&opkt, pkt);
-
     output_packet(of, &opkt, ost, 0);
 }
@@ -2514,7 +2514,7 @@ static int transcode_subtitles(InputStream *ist, AVPacket *pkt, int *got_output,
         return ret;
 
     if (ist->sub2video.frame) {
-        sub2video_update(ist, &subtitle);
+        sub2video_update(ist, INT64_MIN, &subtitle);
     } else if (ist->nb_filters) {
         if (!ist->sub2video.sub_queue)
             ist->sub2video.sub_queue = av_fifo_alloc(8 * sizeof(AVSubtitle));
@@ -2783,7 +2783,7 @@ static void print_sdp(void)
         if (avio_open2(&sdp_pb, sdp_filename, AVIO_FLAG_WRITE, &int_cb, NULL) < 0) {
             av_log(NULL, AV_LOG_ERROR, "Failed to open sdp file '%s'\n", sdp_filename);
         } else {
-            avio_printf(sdp_pb, "SDP:\n%s", sdp);
+            avio_print(sdp_pb, sdp);
             avio_closep(&sdp_pb);
             av_freep(&sdp_filename);
         }
@@ -3375,10 +3375,6 @@ static int init_output_stream_encode(OutputStream *ost)
             av_log(oc, AV_LOG_WARNING, "Frame rate very high for a muxer not efficiently supporting it.\n"
                                        "Please consider specifying a lower framerate, a different muxer or -vsync 2\n");
         }
-        for (j = 0; j < ost->forced_kf_count; j++)
-            ost->forced_kf_pts[j] = av_rescale_q(ost->forced_kf_pts[j],
-                                                 AV_TIME_BASE_Q,
-                                                 enc_ctx->time_base);
 
         enc_ctx->width  = av_buffersink_get_w(ost->filter->filter);
         enc_ctx->height = av_buffersink_get_h(ost->filter->filter);
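The do_streamcopy() hunks above drop the hand-rolled copies of flags, buf, data, size and side data in favour of a single av_packet_ref() call, after which only the timing fields are rewritten into the muxer time base. The helper below sketches that pattern with the public libavcodec API; it is not a function from ffmpeg.c, it assumes the FFmpeg development headers are available, and it uses av_packet_rescale_ts() as a simplified stand-in for the explicit start-time offset and audio av_rescale_delta() handling done in the diff.

    #include <libavcodec/avcodec.h>

    /* Illustrative helper (hypothetical name): reference the source packet so
     * the buffer, flags and side data come along, then rescale the timestamps
     * from the input stream time base into the output one. */
    static int copy_packet_rescaled(AVPacket *dst, const AVPacket *src,
                                    AVRational src_tb, AVRational dst_tb)
    {
        int ret = av_packet_ref(dst, src);
        if (ret < 0)
            return ret;
        av_packet_rescale_ts(dst, src_tb, dst_tb);
        return 0;
    }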
@@ -3570,12 +3566,14 @@ static int init_output_stream(OutputStream *ost, char *error, int error_len)
         int i;
 
         for (i = 0; i < ist->st->nb_side_data; i++) {
             AVPacketSideData *sd = &ist->st->side_data[i];
-            uint8_t *dst = av_stream_new_side_data(ost->st, sd->type, sd->size);
-            if (!dst)
-                return AVERROR(ENOMEM);
-            memcpy(dst, sd->data, sd->size);
-            if (ist->autorotate && sd->type == AV_PKT_DATA_DISPLAYMATRIX)
-                av_display_rotation_set((uint32_t *)dst, 0);
+            if (sd->type != AV_PKT_DATA_CPB_PROPERTIES) {
+                uint8_t *dst = av_stream_new_side_data(ost->st, sd->type, sd->size);
+                if (!dst)
+                    return AVERROR(ENOMEM);
+                memcpy(dst, sd->data, sd->size);
+                if (ist->autorotate && sd->type == AV_PKT_DATA_DISPLAYMATRIX)
+                    av_display_rotation_set((uint32_t *)dst, 0);
+            }
         }
     }
@@ -3874,7 +3872,9 @@ static OutputStream *choose_output(void)
                        av_rescale_q(ost->st->cur_dts, ost->st->time_base,
                                     AV_TIME_BASE_Q);
         if (ost->st->cur_dts == AV_NOPTS_VALUE)
-            av_log(NULL, AV_LOG_DEBUG, "cur_dts is invalid (this is harmless if it occurs once at the start per stream)\n");
+            av_log(NULL, AV_LOG_DEBUG,
+                "cur_dts is invalid st:%d (%d) [init:%d i_done:%d finish:%d] (this is harmless if it occurs once at the start per stream)\n",
+                ost->st->index, ost->st->id, ost->initialized, ost->inputs_done, ost->finished);
 
         if (!ost->initialized && !ost->inputs_done)
             return ost;
@@ -4192,7 +4192,7 @@ static int seek_to_start(InputFile *ifile, AVFormatContext *is)
     int i, ret, has_audio = 0;
     int64_t duration = 0;
 
-    ret = av_seek_frame(is, -1, is->start_time, 0);
+    ret = avformat_seek_file(is, -1, INT64_MIN, is->start_time, is->start_time, 0);
     if (ret < 0)
         return ret;
 
@@ -4232,7 +4232,8 @@ static int seek_to_start(InputFile *ifile, AVFormatContext *is)
             ifile->time_base = ist->st->time_base;
         /* the total duration of the stream, max_pts - min_pts is
          * the duration of the stream without the last frame */
-        duration += ist->max_pts - ist->min_pts;
+        if (ist->max_pts > ist->min_pts && ist->max_pts - (uint64_t)ist->min_pts < INT64_MAX - duration)
+            duration += ist->max_pts - ist->min_pts;
         ifile->time_base = duration_max(duration, &ifile->duration, ist->st->time_base,
                                         ifile->time_base);
     }
@@ -4259,6 +4260,7 @@ static int process_input(int file_index)
     int ret, thread_ret, i, j;
     int64_t duration;
     int64_t pkt_dts;
+    int disable_discontinuity_correction = copy_ts;
 
     is  = ifile->ctx;
     ret = get_input_packet(ifile, &pkt);
@@ -4460,10 +4462,20 @@ static int process_input(int file_index)
             pkt.dts += duration;
 
         pkt_dts = av_rescale_q_rnd(pkt.dts, ist->st->time_base, AV_TIME_BASE_Q, AV_ROUND_NEAR_INF|AV_ROUND_PASS_MINMAX);
+
+        if (copy_ts && pkt_dts != AV_NOPTS_VALUE && ist->next_dts != AV_NOPTS_VALUE &&
+            (is->iformat->flags & AVFMT_TS_DISCONT) && ist->st->pts_wrap_bits < 60) {
+            int64_t wrap_dts = av_rescale_q_rnd(pkt.dts + (1LL<<ist->st->pts_wrap_bits),
+                                                ist->st->time_base, AV_TIME_BASE_Q,
+                                                AV_ROUND_NEAR_INF|AV_ROUND_PASS_MINMAX);
+            if (FFABS(wrap_dts - ist->next_dts) < FFABS(pkt_dts - ist->next_dts)/10)
+                disable_discontinuity_correction = 0;
+        }
+
         if ((ist->dec_ctx->codec_type == AVMEDIA_TYPE_VIDEO ||
              ist->dec_ctx->codec_type == AVMEDIA_TYPE_AUDIO) &&
             pkt_dts != AV_NOPTS_VALUE && ist->next_dts != AV_NOPTS_VALUE &&
-            !copy_ts) {
+            !disable_discontinuity_correction) {
             int64_t delta = pkt_dts - ist->next_dts;
             if (is->iformat->flags & AVFMT_TS_DISCONT) {
                 if (delta < -1LL*dts_delta_threshold*AV_TIME_BASE ||
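The final hunk adds a wraparound check so that -copy_ts no longer unconditionally disables discontinuity correction: if shifting the incoming dts by one wrap period (1LL << pts_wrap_bits) lands at least ten times closer to the predicted next_dts than the unshifted value, the jump is treated as a timestamp wrap and correction is re-enabled. The program below walks through that comparison with made-up 33-bit (MPEG-TS style) numbers, using llabs() in place of FFABS(); unlike the real code it skips rescaling both values to AV_TIME_BASE_Q.

    /* Standalone walk-through of the wrap heuristic; all values are assumptions. */
    #include <stdio.h>
    #include <stdint.h>
    #include <inttypes.h>
    #include <stdlib.h>

    int main(void)
    {
        int     pts_wrap_bits = 33;              /* MPEG-TS PTS/DTS width */
        int64_t next_dts = 8589934000LL;         /* predicted continuation */
        int64_t pkt_dts  = 100LL;                /* dts seen just after a wrap */
        int64_t wrap_dts = pkt_dts + (1LL << pts_wrap_bits);
        int     disable_correction = 1;          /* copy_ts default */

        if (llabs(wrap_dts - next_dts) < llabs(pkt_dts - next_dts) / 10)
            disable_correction = 0;              /* looks like a real wraparound */

        printf("pkt_dts=%" PRId64 " wrap_dts=%" PRId64 " -> correction %s\n",
               pkt_dts, wrap_dts, disable_correction ? "stays off" : "re-enabled");
        return 0;
    }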