X-Git-Url: https://git.sesse.net/?a=blobdiff_plain;f=ffmpeg.c;h=aa16e05f7e087fec4e120c347855b3a04327765e;hb=e377208d43d95d024cf1af3110036366ed4379c9;hp=914bc6adb414293403f6e6e98f33720a6231eb6f;hpb=c8a11014b673ebc6946db6fcd20009d330c57c48;p=ffmpeg

diff --git a/ffmpeg.c b/ffmpeg.c
index 914bc6adb41..aa16e05f7e0 100644
--- a/ffmpeg.c
+++ b/ffmpeg.c
@@ -31,7 +31,9 @@
 #include
 #include
 #include
+#if HAVE_ISATTY
 #include
+#endif
 #include "libavformat/avformat.h"
 #include "libavdevice/avdevice.h"
 #include "libswscale/swscale.h"
@@ -51,6 +53,7 @@
 #include "libavutil/imgutils.h"
 #include "libavutil/timestamp.h"
 #include "libavutil/bprint.h"
+#include "libavutil/time.h"
 #include "libavformat/os_support.h"
 #include "libavformat/ffm.h" // not public API
@@ -63,7 +66,6 @@
 #if HAVE_SYS_RESOURCE_H
 #include
-#include
 #include
 #elif HAVE_GETPROCESSTIMES
 #include
 #endif
@@ -85,6 +87,11 @@
 #elif HAVE_KBHIT
 #include
 #endif
+
+#if HAVE_PTHREADS
+#include <pthread.h>
+#endif
+
 #include
 #include "cmdutils.h"
@@ -97,8 +104,6 @@
 #define VSYNC_VFR 2
 #define VSYNC_DROP 0xff
 
-#define SINKA
-
 const char program_name[] = "ffmpeg";
 const int program_birth_year = 2000;
@@ -158,6 +163,7 @@ static int run_as_daemon = 0;
 static volatile int received_nb_signals = 0;
 static int64_t video_size = 0;
 static int64_t audio_size = 0;
+static int64_t subtitle_size = 0;
 static int64_t extra_size = 0;
 static int nb_frames_dup = 0;
 static int nb_frames_drop = 0;
@@ -170,6 +176,11 @@ static int print_stats = 1;
 static int debug_ts = 0;
 static int current_time;
 
+#if HAVE_PTHREADS
+/* signal to input threads that they should exit; set by the main thread */
+static int transcoding_finished;
+#endif
+
 #define DEFAULT_PASS_LOGFILENAME_PREFIX "ffmpeg2pass"
 
 typedef struct InputFilter {
@@ -249,11 +260,19 @@ typedef struct InputFile {
     AVFormatContext *ctx;
     int eof_reached;      /* true if eof reached */
     int ist_index;        /* index of first stream in input_streams */
-    int buffer_size;      /* current total buffer size */
     int64_t ts_offset;
     int nb_streams;       /* number of stream that ffmpeg is aware of; may be different from ctx.nb_streams if new streams appear during av_read_frame() */
     int rate_emu;
+
+#if HAVE_PTHREADS
+    pthread_t thread;          /* thread reading from this file */
+    int finished;              /* the thread has exited */
+    int joined;                /* the thread has been joined */
+    pthread_mutex_t fifo_lock; /* lock for access to fifo */
+    pthread_cond_t fifo_cond;  /* the main thread will signal on this cond after reading from fifo */
+    AVFifoBuffer *fifo;        /* demuxed packets are stored here; freed by the main thread */
+#endif
 } InputFile;
@@ -287,6 +306,7 @@ typedef struct OutputStream {
     int64_t *forced_kf_pts;
     int forced_kf_count;
     int forced_kf_index;
+    char *forced_keyframes;
 
     /* audio only */
     int audio_channels_map[SWR_CH_MAX];  /* list of the channels id to pick from the source stream */
@@ -689,7 +709,7 @@ static FilterGraph *init_simple_filtergraph(InputStream *ist, OutputStream *ost)
 static void init_input_filter(FilterGraph *fg, AVFilterInOut *in)
 {
     InputStream *ist = NULL;
-    enum AVMediaType type = in->filter_ctx->input_pads[in->pad_idx].type;
+    enum AVMediaType type = avfilter_pad_get_type(in->filter_ctx->input_pads, in->pad_idx);
     int i;
 
     // TODO: support other filter types
@@ -740,6 +760,8 @@ static void init_input_filter(FilterGraph *fg, AVFilterInOut *in)
             exit_program(1);
         }
     }
+    av_assert0(ist);
+
     ist->discard         = 0;
     ist->decoding_needed = 1;
     ist->st->discard = AVDISCARD_NONE;
@@ -764,17 +786,13 @@ static int configure_output_video_filter(FilterGraph *fg, OutputFilter *ofilter,
     AVFilterContext *last_filter = out->filter_ctx;
     int pad_idx = out->pad_idx;
     int ret;
+    char name[255];
     AVBufferSinkParams *buffersink_params = av_buffersink_params_alloc();
 
-#if FF_API_OLD_VSINK_API
+    snprintf(name, sizeof(name), "output stream %d:%d", ost->file_index, ost->index);
     ret = avfilter_graph_create_filter(&ofilter->filter,
                                        avfilter_get_by_name("buffersink"),
-                                       "ffmpeg_buffersink", NULL, NULL, fg->graph);
-#else
-    ret = avfilter_graph_create_filter(&ofilter->filter,
-                                       avfilter_get_by_name("buffersink"),
-                                       "ffmpeg_buffersink", NULL, buffersink_params, fg->graph);
-#endif
+                                       name, NULL, NULL/*buffersink_params*/, fg->graph);
     av_freep(&buffersink_params);
 
     if (ret < 0)
@@ -788,8 +806,10 @@ static int configure_output_video_filter(FilterGraph *fg, OutputFilter *ofilter,
                  codec->width, codec->height,
                  (unsigned)ost->sws_flags);
+        snprintf(name, sizeof(name), "scaler for output stream %d:%d",
+                 ost->file_index, ost->index);
         if ((ret = avfilter_graph_create_filter(&filter, avfilter_get_by_name("scale"),
-                                                NULL, args, NULL, fg->graph)) < 0)
+                                                name, args, NULL, fg->graph)) < 0)
             return ret;
         if ((ret = avfilter_link(last_filter, pad_idx, filter, 0)) < 0)
             return ret;
@@ -800,6 +820,8 @@ static int configure_output_video_filter(FilterGraph *fg, OutputFilter *ofilter,
     if ((pix_fmts = choose_pix_fmts(ost))) {
         AVFilterContext *filter;
+        snprintf(name, sizeof(name), "pixel format for output stream %d:%d",
+                 ost->file_index, ost->index);
         if ((ret = avfilter_graph_create_filter(&filter,
                                                 avfilter_get_by_name("format"),
                                                 "format", pix_fmts, NULL,
@@ -819,8 +841,10 @@ static int configure_output_video_filter(FilterGraph *fg, OutputFilter *ofilter,
         snprintf(args, sizeof(args), "fps=%d/%d", ost->frame_rate.num,
                  ost->frame_rate.den);
+        snprintf(name, sizeof(name), "fps for output stream %d:%d",
+                 ost->file_index, ost->index);
         ret = avfilter_graph_create_filter(&fps, avfilter_get_by_name("fps"),
-                                           "fps", args, NULL, fg->graph);
+                                           name, args, NULL, fg->graph);
         if (ret < 0)
             return ret;
@@ -844,11 +868,14 @@ static int configure_output_audio_filter(FilterGraph *fg, OutputFilter *ofilter,
     AVFilterContext *last_filter = out->filter_ctx;
     int pad_idx = out->pad_idx;
     char *sample_fmts, *sample_rates, *channel_layouts;
+    char name[255];
     int ret;
+
+    snprintf(name, sizeof(name), "output stream %d:%d", ost->file_index, ost->index);
     ret = avfilter_graph_create_filter(&ofilter->filter,
                                        avfilter_get_by_name("abuffersink_old"),
-                                       "ffmpeg_abuffersink_old", NULL, NULL, fg->graph);
+                                       name, NULL, NULL, fg->graph);
     if (ret < 0)
         return ret;
@@ -911,9 +938,11 @@ static int configure_output_audio_filter(FilterGraph *fg, OutputFilter *ofilter,
         av_freep(&sample_rates);
         av_freep(&channel_layouts);
+        snprintf(name, sizeof(name), "audio format for output stream %d:%d",
+                 ost->file_index, ost->index);
         ret = avfilter_graph_create_filter(&format,
                                            avfilter_get_by_name("aformat"),
-                                           "aformat", args, NULL, fg->graph);
+                                           name, args, NULL, fg->graph);
         if (ret < 0)
             return ret;
@@ -925,15 +954,6 @@ static int configure_output_audio_filter(FilterGraph *fg, OutputFilter *ofilter,
         pad_idx = 0;
     }
 
-    if (audio_sync_method > 0 && 0) {
-        char args[256] = {0};
-
-        av_strlcatf(args, sizeof(args), "min_comp=0.001:min_hard_comp=%f", audio_drift_threshold);
-        if (audio_sync_method > 1)
-            av_strlcatf(args, sizeof(args), ":max_soft_comp=%d", -audio_sync_method);
-        AUTO_INSERT_FILTER("-async", "aresample", args);
-    }
-
     if (audio_volume != 256 && 0) {
         char args[256];
@@ -959,7 +979,7 @@ static int configure_output_audio_filter(FilterGraph *fg, OutputFilter *ofilter,
                                                        \
     avio_printf(pb, "%s", ctx->filter->name);          \
     if (nb_pads > 1)                                   \
-        avio_printf(pb, ":%s", pads[inout->pad_idx].name); \
+        avio_printf(pb, ":%s", avfilter_pad_get_name(pads, inout->pad_idx));\
     avio_w8(pb, 0);                                    \
     avio_close_dyn_buf(pb, &f->name);                  \
 }
@@ -969,7 +989,7 @@ static int configure_output_filter(FilterGraph *fg, OutputFilter *ofilter, AVFil
     av_freep(&ofilter->name);
     DESCRIBE_FILTER_LINK(ofilter, out, 0);
 
-    switch (out->filter_ctx->output_pads[out->pad_idx].type) {
+    switch (avfilter_pad_get_type(out->filter_ctx->output_pads, out->pad_idx)) {
     case AVMEDIA_TYPE_VIDEO: return configure_output_video_filter(fg, ofilter, out);
     case AVMEDIA_TYPE_AUDIO: return configure_output_audio_filter(fg, ofilter, out);
     default: av_assert0(0);
@@ -985,29 +1005,43 @@ static int configure_input_video_filter(FilterGraph *fg, InputFilter *ifilter,
     AVRational tb = ist->framerate.num ? (AVRational){ist->framerate.den, ist->framerate.num} : ist->st->time_base;
+    AVRational fr = ist->framerate.num ? ist->framerate :
+                                         ist->st->r_frame_rate;
     AVRational sar;
-    char args[255];
+    AVBPrint args;
+    char name[255];
     int pad_idx = in->pad_idx;
     int ret;
 
     sar = ist->st->sample_aspect_ratio.num ? ist->st->sample_aspect_ratio : ist->st->codec->sample_aspect_ratio;
-    snprintf(args, sizeof(args), "%d:%d:%d:%d:%d:%d:%d:flags=%d", ist->st->codec->width,
+    if(!sar.den)
+        sar = (AVRational){0,1};
+    av_bprint_init(&args, 0, 1);
+    av_bprintf(&args,
+             "video_size=%dx%d:pix_fmt=%d:time_base=%d/%d:"
+             "pixel_aspect=%d/%d:sws_param=flags=%d", ist->st->codec->width,
              ist->st->codec->height, ist->st->codec->pix_fmt,
             tb.num, tb.den, sar.num, sar.den,
            SWS_BILINEAR + ((ist->st->codec->flags&CODEC_FLAG_BITEXACT) ? SWS_BITEXACT:0));
+    if (fr.num && fr.den)
+        av_bprintf(&args, ":frame_rate=%d/%d", fr.num, fr.den);
+    snprintf(name, sizeof(name), "graph %d input from stream %d:%d", fg->index,
+             ist->file_index, ist->st->index);
 
-    if ((ret = avfilter_graph_create_filter(&ifilter->filter, filter, in->name,
-                                            args, NULL, fg->graph)) < 0)
+    if ((ret = avfilter_graph_create_filter(&ifilter->filter, filter, name,
+                                            args.str, NULL, fg->graph)) < 0)
         return ret;
 
     if (ist->framerate.num) {
         AVFilterContext *setpts;
 
+        snprintf(name, sizeof(name), "force CFR for input from stream %d:%d",
+                 ist->file_index, ist->st->index);
         if ((ret = avfilter_graph_create_filter(&setpts,
                                                 avfilter_get_by_name("setpts"),
-                                                "setpts", "N", NULL,
+                                                name, "N", NULL,
                                                 fg->graph)) < 0)
             return ret;
@@ -1030,7 +1064,7 @@ static int configure_input_audio_filter(FilterGraph *fg, InputFilter *ifilter,
     AVFilter *filter = avfilter_get_by_name("abuffer");
     InputStream *ist = ifilter->ist;
     int pad_idx = in->pad_idx;
-    char args[255];
+    char args[255], name[255];
    int ret;
 
     snprintf(args, sizeof(args), "time_base=%d/%d:sample_rate=%d:sample_fmt=%s"
@@ -1039,9 +1073,11 @@ static int configure_input_audio_filter(FilterGraph *fg, InputFilter *ifilter,
              ist->st->codec->sample_rate,
              av_get_sample_fmt_name(ist->st->codec->sample_fmt),
              ist->st->codec->channel_layout);
+    snprintf(name, sizeof(name), "graph %d input from stream %d:%d", fg->index,
+             ist->file_index, ist->st->index);
 
     if ((ret = avfilter_graph_create_filter(&ifilter->filter, filter,
-                                            in->name, args, NULL,
+                                            name, args, NULL,
                                             fg->graph)) < 0)
         return ret;
@@ -1051,9 +1087,11 @@ static int configure_input_audio_filter(FilterGraph *fg, InputFilter *ifilter,
     av_log(NULL, AV_LOG_INFO, opt_name " is forwarded to lavfi " \
            "similarly to -af " filter_name "=%s.\n", arg);       \
                                                                  \
+    snprintf(name, sizeof(name), "graph %d %s for input stream %d:%d", \
+             fg->index, filter_name, ist->file_index, ist->st->index); \
     ret = avfilter_graph_create_filter(&filt_ctx,                \
                                        avfilter_get_by_name(filter_name), \
-                                       filter_name, arg, NULL, fg->graph); \
+                                       name, arg, NULL, fg->graph); \
     if (ret < 0)                                                 \
         return ret;                                              \
                                                                  \
@@ -1104,7 +1142,7 @@ static int configure_input_filter(FilterGraph *fg, InputFilter *ifilter,
     av_freep(&ifilter->name);
     DESCRIBE_FILTER_LINK(ifilter, in, 1);
 
-    switch (in->filter_ctx->input_pads[in->pad_idx].type) {
+    switch (avfilter_pad_get_type(in->filter_ctx->input_pads, in->pad_idx)) {
     case AVMEDIA_TYPE_VIDEO: return configure_input_video_filter(fg, ifilter, in);
     case AVMEDIA_TYPE_AUDIO: return configure_input_audio_filter(fg, ifilter, in);
     default: av_assert0(0);
     }
@@ -1217,10 +1255,11 @@ static void term_init(void)
 #if HAVE_TERMIOS_H
     if(!run_as_daemon){
         struct termios tty;
+        int istty = 1;
 #if HAVE_ISATTY
-        if(isatty(0) && isatty(2))
+        istty = isatty(0) && isatty(2);
 #endif
-        if (tcgetattr (0, &tty) == 0) {
+        if (istty && tcgetattr (0, &tty) == 0) {
             oldtty = tty;
             restore_tty = 1;
             atexit(term_exit);
@@ -1347,6 +1386,7 @@ void av_noreturn exit_program(int ret)
         }
         output_streams[i]->bitstream_filters = NULL;
 
+        av_freep(&output_streams[i]->forced_keyframes);
         av_freep(&output_streams[i]->filtered_frame);
         av_freep(&output_streams[i]->avfilter);
         av_freep(&output_streams[i]);
@@ -1649,6 +1689,7 @@ static void do_subtitle_out(AVFormatContext *s,
         pkt.data = subtitle_out;
         pkt.size = subtitle_out_size;
         pkt.pts  = av_rescale_q(sub->pts, AV_TIME_BASE_Q, ost->st->time_base);
+        pkt.duration = av_rescale_q(sub->end_display_time, (AVRational){ 1, 1000 }, ost->st->time_base);
         if (enc->codec_id == CODEC_ID_DVB_SUBTITLE) {
             /* XXX: the pts correction is handled here. Maybe handling
                it in the codec would be better */
@@ -1658,6 +1699,7 @@ static void do_subtitle_out(AVFormatContext *s,
                 pkt.pts += 90 * sub->end_display_time;
         }
         write_frame(s, &pkt, ost);
+        subtitle_size += pkt.size;
     }
 }
@@ -1744,6 +1786,7 @@ duplicate_frame:
         pkt.flags |= AV_PKT_FLAG_KEY;
 
         write_frame(s, &pkt, ost);
+        video_size += pkt.size;
     } else {
         int got_packet;
         AVFrame big_picture;
@@ -1891,13 +1934,11 @@ static int poll_filters(void)
                 !(ost->enc->capabilities & CODEC_CAP_VARIABLE_FRAME_SIZE))
                 ret = av_buffersink_read_samples(ost->filter->filter, &picref,
                                                  ost->st->codec->frame_size);
-            else
-#ifdef SINKA
+            else if(ost->enc->type == AVMEDIA_TYPE_AUDIO)
                 ret = av_buffersink_read(ost->filter->filter, &picref);
-#else
+            else
                 ret = av_buffersink_get_buffer_ref(ost->filter->filter, &picref,
                                                    AV_BUFFERSINK_FLAG_NO_REQUEST);
-#endif
             if (ret < 0) {
                 if (ret != AVERROR(EAGAIN) && ret != AVERROR_EOF) {
                     char buf[256];
@@ -1927,7 +1968,7 @@ static int poll_filters(void)
 
             switch (ost->filter->filter->inputs[0]->type) {
             case AVMEDIA_TYPE_VIDEO:
-                avfilter_fill_frame_from_video_buffer_ref(filtered_frame, picref);
+                avfilter_copy_buf_props(filtered_frame, picref);
                 filtered_frame->pts = frame_pts;
                 if (!ost->frame_aspect_ratio)
                     ost->st->codec->sample_aspect_ratio = picref->video->sample_aspect_ratio;
@@ -2092,15 +2133,16 @@ static void print_report(int is_last_report, int64_t timer_start, int64_t cur_ti
     fflush(stderr);
 
     if (is_last_report) {
-        int64_t raw= audio_size + video_size + extra_size;
+        int64_t raw= audio_size + video_size + subtitle_size + extra_size;
         av_log(NULL, AV_LOG_INFO, "\n");
-        av_log(NULL, AV_LOG_INFO, "video:%1.0fkB audio:%1.0fkB global headers:%1.0fkB muxing overhead %f%%\n",
+        av_log(NULL, AV_LOG_INFO, "video:%1.0fkB audio:%1.0fkB subtitle:%1.0f global headers:%1.0fkB muxing overhead %f%%\n",
                video_size / 1024.0,
                audio_size / 1024.0,
+               subtitle_size / 1024.0,
                extra_size / 1024.0,
                100.0 * (total_size - raw) / raw
         );
-        if(video_size + audio_size + extra_size == 0){
+        if(video_size + audio_size + subtitle_size + extra_size == 0){
             av_log(NULL, AV_LOG_WARNING, "Output file is empty, nothing was encoded (check -ss / -t / -frames parameters if used)\n");
         }
     }
@@ -2222,6 +2264,8 @@ static void do_streamcopy(InputStream *ist, OutputStream *ost, const AVPacket *p
     else if (ost->st->codec->codec_type == AVMEDIA_TYPE_VIDEO) {
         video_size += pkt->size;
         ost->sync_opts++;
+    } else if (ost->st->codec->codec_type == AVMEDIA_TYPE_SUBTITLE) {
+        subtitle_size += pkt->size;
     }
 
     if (pkt->pts != AV_NOPTS_VALUE)
@@ -2269,7 +2313,7 @@ static void rate_emu_sleep(InputStream *ist)
         int64_t pts = av_rescale(ist->dts, 1000000, AV_TIME_BASE);
         int64_t now = av_gettime() - ist->start;
         if (pts > now)
-            usleep(pts - now);
+            av_usleep(pts - now);
     }
 }
@@ -2720,24 +2764,37 @@ static InputStream *get_input_stream(OutputStream *ost)
 {
     if (ost->source_index >= 0)
         return input_streams[ost->source_index];
+    return NULL;
+}
 
-    if (ost->filter) {
-        FilterGraph *fg = ost->filter->graph;
-        int i;
+static void parse_forced_key_frames(char *kf, OutputStream *ost,
+                                    AVCodecContext *avctx)
+{
+    char *p;
+    int n = 1, i;
+    int64_t t;
 
-        for (i = 0; i < fg->nb_inputs; i++)
-            if (fg->inputs[i]->ist->st->codec->codec_type == ost->st->codec->codec_type)
-                return fg->inputs[i]->ist;
+    for (p = kf; *p; p++)
+        if (*p == ',')
+            n++;
+    ost->forced_kf_count = n;
+    ost->forced_kf_pts = av_malloc(sizeof(*ost->forced_kf_pts) * n);
+    if (!ost->forced_kf_pts) {
+        av_log(NULL, AV_LOG_FATAL, "Could not allocate forced key frames array.\n");
+        exit_program(1);
+    }
+    for (i = 0; i < n; i++) {
+        p = i ? strchr(p, ',') + 1 : kf;
+        t = parse_time_or_die("force_key_frames", p, 1);
+        ost->forced_kf_pts[i] = av_rescale_q(t, AV_TIME_BASE_Q, avctx->time_base);
     }
-
-    return NULL;
 }
 
 static int transcode_init(void)
 {
     int ret = 0, i, j, k;
     AVFormatContext *oc;
-    AVCodecContext *codec, *icodec;
+    AVCodecContext *codec, *icodec = NULL;
     OutputStream *ost;
     InputStream *ist;
     char error[1024];
@@ -2845,6 +2902,10 @@ static int transcode_init(void)
                     codec->time_base.num *= icodec->ticks_per_frame;
                 }
             }
+
+            if(ost->frame_rate.num)
+                codec->time_base = (AVRational){ost->frame_rate.den, ost->frame_rate.num};
+
             av_reduce(&codec->time_base.num, &codec->time_base.den,
                         codec->time_base.num, codec->time_base.den, INT_MAX);
@@ -2905,6 +2966,8 @@ static int transcode_init(void)
             ost->encoding_needed = 1;
 
             if (codec->codec_type == AVMEDIA_TYPE_VIDEO) {
+                if (ost->filter && !ost->frame_rate.num)
+                    ost->frame_rate = av_buffersink_get_frame_rate(ost->filter->filter);
                 if (ist && !ost->frame_rate.num)
                     ost->frame_rate = ist->st->r_frame_rate.num ? ist->st->r_frame_rate : (AVRational){25, 1};
                 if (ost->enc && ost->enc->supported_framerates && !ost->force_fps) {
@@ -2961,6 +3024,9 @@ static int transcode_init(void)
                     codec->bits_per_raw_sample = frame_bits_per_raw_sample;
                 }
 
+                if (ost->forced_keyframes)
+                    parse_forced_key_frames(ost->forced_keyframes, ost,
+                                            ost->st->codec);
                 break;
             case AVMEDIA_TYPE_SUBTITLE:
                 codec->time_base = (AVRational){1, 1000};
@@ -3208,6 +3274,218 @@ static int select_input_file(uint8_t *no_packet)
     return file_index;
 }
 
+static int check_keyboard_interaction(int64_t cur_time)
+{
+    int i, ret, key;
+    static int64_t last_time;
+    if (received_nb_signals)
+        return AVERROR_EXIT;
+    /* read_key() returns 0 on EOF */
+    if(cur_time - last_time >= 100000 && !run_as_daemon){
+        key = read_key();
+        last_time = cur_time;
+    }else
+        key = -1;
+    if (key == 'q')
+        return AVERROR_EXIT;
+    if (key == '+') av_log_set_level(av_log_get_level()+10);
+    if (key == '-') av_log_set_level(av_log_get_level()-10);
+    if (key == 's') qp_hist ^= 1;
+    if (key == 'h'){
+        if (do_hex_dump){
+            do_hex_dump = do_pkt_dump = 0;
+        } else if(do_pkt_dump){
+            do_hex_dump = 1;
+        } else
+            do_pkt_dump = 1;
+        av_log_set_level(AV_LOG_DEBUG);
+    }
+    if (key == 'c' || key == 'C'){
+        char buf[4096], target[64], command[256], arg[256] = {0};
+        double time;
+        int k, n = 0;
+        fprintf(stderr, "\nEnter command:
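
Some notes on the changes above; the sketches that follow are illustrations, not code from the patch.

The HAVE_PTHREADS additions give every InputFile a demuxer thread plus an AVFifoBuffer protected by fifo_lock/fifo_cond, so a blocking av_read_frame() no longer stalls the main transcoding loop; the main thread signals fifo_cond after it drains packets. Below is a minimal sketch of the producer side under that locking scheme (the DemuxContext type, the input_thread name and the fixed-size FIFO are assumptions, not taken from the patch):

    #include <pthread.h>
    #include "libavformat/avformat.h"
    #include "libavutil/fifo.h"

    typedef struct DemuxContext {
        AVFormatContext *ctx;
        AVFifoBuffer    *fifo;       /* stores whole AVPacket structs */
        pthread_mutex_t  fifo_lock;
        pthread_cond_t   fifo_cond;  /* main thread signals after reading from the fifo */
        int              finished;
    } DemuxContext;

    /* Producer: demux packets and queue them until EOF or an error. */
    static void *input_thread(void *arg)
    {
        DemuxContext *d = arg;
        AVPacket pkt;

        while (av_read_frame(d->ctx, &pkt) >= 0) {
            pthread_mutex_lock(&d->fifo_lock);
            /* block until the consumer has made room for one more packet */
            while (av_fifo_space(d->fifo) < (int)sizeof(pkt))
                pthread_cond_wait(&d->fifo_cond, &d->fifo_lock);
            av_fifo_generic_write(d->fifo, &pkt, sizeof(pkt), NULL);
            pthread_mutex_unlock(&d->fifo_lock);
        }
        pthread_mutex_lock(&d->fifo_lock);
        d->finished = 1;             /* consumer checks this once the fifo is empty */
        pthread_mutex_unlock(&d->fifo_lock);
        return NULL;
    }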
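
configure_input_video_filter() now builds the buffer-source arguments with the AVBPrint API and named options (video_size=, pix_fmt=, time_base=, pixel_aspect=, sws_param=, plus frame_rate= when it is known) instead of the old fixed positional string, and passes args.str straight to avfilter_graph_create_filter(). A sketch of the same pattern, with a made-up helper name and placeholder values:

    #include "libavutil/bprint.h"

    /* Build a buffersrc argument string; the numeric values are placeholders. */
    static int build_buffersrc_args(char **out)
    {
        AVBPrint b;
        av_bprint_init(&b, 0, AV_BPRINT_SIZE_AUTOMATIC);   /* same mode as the literal 1 in the patch */
        av_bprintf(&b, "video_size=%dx%d:pix_fmt=%d:time_base=%d/%d:pixel_aspect=%d/%d",
                   1280, 720, 0, 1, 25, 1, 1);
        av_bprintf(&b, ":frame_rate=%d/%d", 25, 1);        /* appended only when the rate is known */
        return av_bprint_finalize(&b, out);                /* caller frees *out with av_free() */
    }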
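
Several hunks replace direct indexing into AVFilterPad arrays (e.g. in->filter_ctx->input_pads[in->pad_idx].type) with the avfilter_pad_get_type()/avfilter_pad_get_name() accessors, which keeps ffmpeg.c independent of the AVFilterPad layout. Roughly, for one open pad of a parsed graph:

    #include "libavfilter/avfilter.h"

    /* Media type of an unconnected input described by an AVFilterInOut
     * (as returned by avfilter_graph_parse()). */
    static enum AVMediaType open_input_type(AVFilterInOut *inout)
    {
        return avfilter_pad_get_type(inout->filter_ctx->input_pads, inout->pad_idx);
    }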
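
Both the new subtitle pkt.duration line and parse_forced_key_frames() rely on av_rescale_q() to move a value between time bases: end_display_time is in milliseconds, parsed -force_key_frames times are in AV_TIME_BASE (microsecond) units, and both are converted into the stream or encoder time base. A small worked example, assuming a 90 kHz output time base (the values are invented):

    #include <stdio.h>
    #include "libavutil/mathematics.h"

    int main(void)
    {
        /* 2500 ms of display time in 1/90000 ticks: 2.5 s * 90000 = 225000 */
        int64_t dur = av_rescale_q(2500, (AVRational){1, 1000}, (AVRational){1, 90000});
        printf("%lld\n", (long long)dur);   /* prints 225000 */
        return 0;
    }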