From: Michael Niedermayer
Date: Sat, 13 Oct 2012 13:00:13 +0000 (+0200)
Subject: Merge commit 'bc4620e5d61a4dd9a1f654fadd281a172aab04be'
X-Git-Url: https://git.sesse.net/?a=commitdiff_plain;h=b4ca1b159f4b7f0c3d1e4b2deab686bda934f3a2;p=ffmpeg

Merge commit 'bc4620e5d61a4dd9a1f654fadd281a172aab04be'

* commit 'bc4620e5d61a4dd9a1f654fadd281a172aab04be':
  Remove libmpeg2 #define remnants
  De-doxygenize some top-level files

Conflicts:
	ffmpeg.c
	ffmpeg.h
	ffmpeg_filter.c
	ffplay.c

Merged-by: Michael Niedermayer
---

b4ca1b159f4b7f0c3d1e4b2deab686bda934f3a2
diff --cc ffmpeg.c
index 7dbc4b74ede,4d9c7c0b7f0..97a1e4dff55
--- a/ffmpeg.c
+++ b/ffmpeg.c
@@@ -975,94 -660,126 +975,94 @@@ static void do_video_stats(AVFormatCont
      }
  }
  
- /**
+ /*
-  * Read one frame for lavfi output for ost and encode it.
+  * Get and encode new output from any of the filtergraphs, without causing
+  * activity.
+  *
+  * @return 0 for success, <0 for severe errors
   */
-static int poll_filter(OutputStream *ost)
+static int reap_filters(void)
  {
-     OutputFile *of = output_files[ost->file_index];
      AVFilterBufferRef *picref;
      AVFrame *filtered_frame = NULL;
-     int frame_size, ret;
- 
-     if (!ost->filtered_frame && !(ost->filtered_frame = avcodec_alloc_frame())) {
-         return AVERROR(ENOMEM);
-     } else
-         avcodec_get_frame_defaults(ost->filtered_frame);
-     filtered_frame = ost->filtered_frame;
- 
-     if (ost->enc->type == AVMEDIA_TYPE_AUDIO &&
-         !(ost->enc->capabilities & CODEC_CAP_VARIABLE_FRAME_SIZE))
-         ret = av_buffersink_read_samples(ost->filter->filter, &picref,
-                                          ost->st->codec->frame_size);
-     else
-         ret = av_buffersink_read(ost->filter->filter, &picref);
- 
-     if (ret < 0)
-         return ret;
- 
-     avfilter_copy_buf_props(filtered_frame, picref);
-     if (picref->pts != AV_NOPTS_VALUE) {
-         filtered_frame->pts = av_rescale_q(picref->pts,
-                                            ost->filter->filter->inputs[0]->time_base,
-                                            ost->st->codec->time_base) -
-                               av_rescale_q(of->start_time,
-                                            AV_TIME_BASE_Q,
-                                            ost->st->codec->time_base);
- 
-         if (of->start_time && filtered_frame->pts < 0) {
-             avfilter_unref_buffer(picref);
-             return 0;
-         }
-     }
- 
-     switch (ost->filter->filter->inputs[0]->type) {
-     case AVMEDIA_TYPE_VIDEO:
-         if (!ost->frame_aspect_ratio)
-             ost->st->codec->sample_aspect_ratio = picref->video->pixel_aspect;
- 
-         do_video_out(of->ctx, ost, filtered_frame, &frame_size);
-         if (vstats_filename && frame_size)
-             do_video_stats(of->ctx, ost, frame_size);
-         break;
-     case AVMEDIA_TYPE_AUDIO:
-         do_audio_out(of->ctx, ost, filtered_frame);
-         break;
-     default:
-         // TODO support subtitle filters
-         av_assert0(0);
-     }
- 
-     avfilter_unref_buffer(picref);
- 
-     return 0;
-}
- 
-/*
- * Read as many frames from possible from lavfi and encode them.
- *
- * Always read from the active stream with the lowest timestamp. If no frames
- * are available for it then return EAGAIN and wait for more input. This way we
- * can use lavfi sources that generate unlimited amount of frames without memory
- * usage exploding.
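
[The reap_filters() hunk above drains each buffersink without driving the graph: AV_BUFFERSINK_FLAG_NO_REQUEST only returns frames the filtergraph has already produced, and EAGAIN or EOF simply end the loop instead of being reported as errors. Below is a minimal sketch of that drain pattern against the 2012-era libavfilter API used in this patch; drain_sink() and the elided encode step are hypothetical placeholders, not code from the commit.]

#include "libavfilter/avfilter.h"
#include "libavfilter/buffersink.h"
#include "libavutil/error.h"

/* Hypothetical helper mirroring the while (1) loop in reap_filters(). */
static int drain_sink(AVFilterContext *sink)
{
    AVFilterBufferRef *picref;
    int ret;

    for (;;) {
        /* NO_REQUEST: take only frames already buffered in the sink,
         * without asking upstream filters to produce more. */
        ret = av_buffersink_get_buffer_ref(sink, &picref,
                                           AV_BUFFERSINK_FLAG_NO_REQUEST);
        if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF)
            return 0;                  /* sink is empty: not an error */
        if (ret < 0)
            return ret;                /* severe error: propagate */

        /* ... copy buffer properties into an AVFrame and encode it ... */

        avfilter_unref_buffer(picref); /* release the sink's reference */
    }
}
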
- */
-static int poll_filters(void)
-{
-     int i, j, ret = 0;
- 
-     while (ret >= 0 && !received_sigterm) {
-         OutputStream *ost = NULL;
-         int64_t min_pts = INT64_MAX;
+     int i;
+     int64_t frame_pts;
  
-         /* choose output stream with the lowest timestamp */
-         for (i = 0; i < nb_output_streams; i++) {
-             int64_t pts = output_streams[i]->sync_opts;
+     /* Reap all buffers present in the buffer sinks */
+     for (i = 0; i < nb_output_streams; i++) {
+         OutputStream *ost = output_streams[i];
+         OutputFile *of    = output_files[ost->file_index];
+         int ret = 0;
  
-             if (!output_streams[i]->filter || output_streams[i]->finished)
-                 continue;
+         if (!ost->filter)
+             continue;
  
-             pts = av_rescale_q(pts, output_streams[i]->st->codec->time_base,
-                                AV_TIME_BASE_Q);
-             if (pts < min_pts) {
-                 min_pts = pts;
-                 ost = output_streams[i];
+         if (!ost->filtered_frame && !(ost->filtered_frame = avcodec_alloc_frame())) {
+             return AVERROR(ENOMEM);
+         } else
+             avcodec_get_frame_defaults(ost->filtered_frame);
+         filtered_frame = ost->filtered_frame;
+ 
+         while (1) {
+             ret = av_buffersink_get_buffer_ref(ost->filter->filter, &picref,
+                                                AV_BUFFERSINK_FLAG_NO_REQUEST);
+             if (ret < 0) {
+                 if (ret != AVERROR(EAGAIN) && ret != AVERROR_EOF) {
+                     char buf[256];
+                     av_strerror(ret, buf, sizeof(buf));
+                     av_log(NULL, AV_LOG_WARNING,
+                            "Error in av_buffersink_get_buffer_ref(): %s\n", buf);
+                 }
+                 break;
              }
-         }
- 
-         if (!ost)
-             break;
- 
-         ret = poll_filter(ost);
+             frame_pts = AV_NOPTS_VALUE;
+             if (picref->pts != AV_NOPTS_VALUE) {
+                 filtered_frame->pts = frame_pts = av_rescale_q(picref->pts,
+                                                 ost->filter->filter->inputs[0]->time_base,
+                                                 ost->st->codec->time_base) -
+                                       av_rescale_q(of->start_time,
+                                                 AV_TIME_BASE_Q,
+                                                 ost->st->codec->time_base);
+ 
+                 if (of->start_time && filtered_frame->pts < 0) {
+                     avfilter_unref_buffer(picref);
+                     continue;
+                 }
+             }
+             //if (ost->source_index >= 0)
+             //    *filtered_frame= *input_streams[ost->source_index]->decoded_frame; //for me_threshold
  
-         if (ret == AVERROR_EOF) {
-             OutputFile *of = output_files[ost->file_index];
-             ost->finished = 1;
+             switch (ost->filter->filter->inputs[0]->type) {
+             case AVMEDIA_TYPE_VIDEO:
+                 avfilter_copy_buf_props(filtered_frame, picref);
+                 filtered_frame->pts = frame_pts;
+                 if (!ost->frame_aspect_ratio)
+                     ost->st->codec->sample_aspect_ratio = picref->video->sample_aspect_ratio;
  
-             if (of->shortest) {
-                 for (j = 0; j < of->ctx->nb_streams; j++)
-                     output_streams[of->ost_index + j]->finished = 1;
+                 do_video_out(of->ctx, ost, filtered_frame);
+                 break;
+             case AVMEDIA_TYPE_AUDIO:
+                 avfilter_copy_buf_props(filtered_frame, picref);
+                 filtered_frame->pts = frame_pts;
+                 do_audio_out(of->ctx, ost, filtered_frame);
+                 break;
+             default:
+                 // TODO support subtitle filters
+                 av_assert0(0);
              }
-             ret = 0;
-         } else if (ret == AVERROR(EAGAIN))
-             return 0;
+             avfilter_unref_buffer(picref);
+         }
      }
  
-     return ret;
+     return 0;
  }
  
-static void print_report(int is_last_report, int64_t timer_start)
+static void print_report(int is_last_report, int64_t timer_start, int64_t cur_time)
  {
      char buf[1024];
+     AVBPrint buf_script;
      OutputStream *ost;
      AVFormatContext *oc;
      int64_t total_size;
@@@ -2681,12 -2123,15 +2678,12 @@@ static void reset_eagain(void
      int i;
      for (i = 0; i < nb_input_files; i++)
          input_files[i]->eagain = 0;
+     for (i = 0; i < nb_output_streams; i++)
+         output_streams[i]->unavailable = 0;
  }
  
- /**
-  * @return
+ /*
-  * Read one packet from an input file and send it for
-  * - decoding -> lavfi (audio/video)
-  * - decoding -> encoding -> muxing (subtitles)
-  * - muxing (streamcopy)
-  *
+  * Return
   * - 0 -- one packet was read and processed
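
[The AudioChannelMap type used by opt_map_channel() is declared in the ffmpeg.h hunk that follows; its semantics are illustrated after the ffmpeg.c commentary above and the struct definition below.]
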
  * - AVERROR(EAGAIN) -- no packets were available for selected file,
  *   this function should be called again
diff --cc ffmpeg.h
index 047029d4440,11e846261fd..e1b223fc02d
--- a/ffmpeg.h
+++ b/ffmpeg.h
@@@ -62,13 -56,15 +62,13 @@@ typedef struct StreamMap
      int stream_index;
      int sync_file_index;
      int sync_stream_index;
-     char *linklabel;       /** name of an output link, for mapping lavfi outputs */
+     char *linklabel;       /* name of an output link, for mapping lavfi outputs */
  } StreamMap;
  
-/* select an input file for an output file */
-typedef struct MetadataMap {
-     int  file;      // file index
-     char type;      // type of metadata to copy -- (g)lobal, (s)tream, (c)hapter or (p)rogram
-     int  index;     // stream/chapter/program number
-} MetadataMap;
+typedef struct {
+     int file_idx, stream_idx, channel_idx; // input
+     int ofile_idx, ostream_idx;            // output
+} AudioChannelMap;
  
  typedef struct OptionsContext {
      /* input/output options */
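
[The new AudioChannelMap pairs an input (file, stream, channel) triple with an output (file, stream) pair; per the opt_map_channel() hunk further below, -1 in the input fields marks a muted channel. An illustrative initialization follows; the index values are invented for the example.]

#include "ffmpeg.h"

/* Map channel 2 of stream 1 in input file 0 onto stream 3 of output
 * file 0 (illustrative indices only). */
static const AudioChannelMap map_example = {
    .file_idx  = 0, .stream_idx  = 1, .channel_idx = 2, /* input  */
    .ofile_idx = 0, .ostream_idx = 3,                   /* output */
};

/* A muted channel, as "-map_channel -1" produces: every field is -1. */
static const AudioChannelMap muted_example = {
    .file_idx  = -1, .stream_idx  = -1, .channel_idx = -1,
    .ofile_idx = -1, .ostream_idx = -1,
};
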
diff --cc ffmpeg_filter.c
index eb09b778b60,8f3f9125825..436cdf27d2e
--- a/ffmpeg_filter.c
+++ b/ffmpeg_filter.c
@@@ -26,113 -25,12 +26,111 @@@
  #include "libavutil/audioconvert.h"
  #include "libavutil/avassert.h"
 +#include "libavutil/avstring.h"
 +#include "libavutil/bprint.h"
  #include "libavutil/pixdesc.h"
  #include "libavutil/pixfmt.h"
 +#include "libavutil/imgutils.h"
  #include "libavutil/samplefmt.h"
  
+enum AVPixelFormat choose_pixel_fmt(AVStream *st, AVCodec *codec, enum AVPixelFormat target)
+{
+     if (codec && codec->pix_fmts) {
+         const enum AVPixelFormat *p = codec->pix_fmts;
+         int has_alpha= av_pix_fmt_descriptors[target].nb_components % 2 == 0;
+         enum AVPixelFormat best= AV_PIX_FMT_NONE;
+         if (st->codec->strict_std_compliance <= FF_COMPLIANCE_UNOFFICIAL) {
+             if (st->codec->codec_id == AV_CODEC_ID_MJPEG) {
+                 p = (const enum AVPixelFormat[]) { AV_PIX_FMT_YUVJ420P, AV_PIX_FMT_YUVJ422P, AV_PIX_FMT_YUV420P, AV_PIX_FMT_YUV422P, AV_PIX_FMT_NONE };
+             } else if (st->codec->codec_id == AV_CODEC_ID_LJPEG) {
+                 p = (const enum AVPixelFormat[]) { AV_PIX_FMT_YUVJ420P, AV_PIX_FMT_YUVJ422P, AV_PIX_FMT_YUVJ444P, AV_PIX_FMT_YUV420P,
+                                                    AV_PIX_FMT_YUV422P, AV_PIX_FMT_YUV444P, AV_PIX_FMT_BGRA, AV_PIX_FMT_NONE };
+             }
+         }
+         for (; *p != AV_PIX_FMT_NONE; p++) {
+             best= avcodec_find_best_pix_fmt_of_2(best, *p, target, has_alpha, NULL);
+             if (*p == target)
+                 break;
+         }
+         if (*p == AV_PIX_FMT_NONE) {
+             if (target != AV_PIX_FMT_NONE)
+                 av_log(NULL, AV_LOG_WARNING,
+                        "Incompatible pixel format '%s' for codec '%s', auto-selecting format '%s'\n",
+                        av_pix_fmt_descriptors[target].name,
+                        codec->name,
+                        av_pix_fmt_descriptors[best].name);
+             return best;
+         }
+     }
+     return target;
+}
+ 
+void choose_sample_fmt(AVStream *st, AVCodec *codec)
+{
+     if (codec && codec->sample_fmts) {
+         const enum AVSampleFormat *p = codec->sample_fmts;
+         for (; *p != -1; p++) {
+             if (*p == st->codec->sample_fmt)
+                 break;
+         }
+         if (*p == -1) {
+             if((codec->capabilities & CODEC_CAP_LOSSLESS) && av_get_sample_fmt_name(st->codec->sample_fmt) > av_get_sample_fmt_name(codec->sample_fmts[0]))
+                 av_log(NULL, AV_LOG_ERROR, "Conversion will not be lossless.\n");
+             if(av_get_sample_fmt_name(st->codec->sample_fmt))
+                 av_log(NULL, AV_LOG_WARNING,
+                        "Incompatible sample format '%s' for codec '%s', auto-selecting format '%s'\n",
+                        av_get_sample_fmt_name(st->codec->sample_fmt),
+                        codec->name,
+                        av_get_sample_fmt_name(codec->sample_fmts[0]));
+             st->codec->sample_fmt = codec->sample_fmts[0];
+         }
+     }
+}
+ 
+static char *choose_pix_fmts(OutputStream *ost)
+{
+     if (ost->keep_pix_fmt) {
+         if (ost->filter)
+             avfilter_graph_set_auto_convert(ost->filter->graph->graph,
+                                             AVFILTER_AUTO_CONVERT_NONE);
+         if (ost->st->codec->pix_fmt == AV_PIX_FMT_NONE)
+             return NULL;
+         return av_strdup(av_get_pix_fmt_name(ost->st->codec->pix_fmt));
+     }
+     if (ost->st->codec->pix_fmt != AV_PIX_FMT_NONE) {
+         return av_strdup(av_get_pix_fmt_name(choose_pixel_fmt(ost->st, ost->enc, ost->st->codec->pix_fmt)));
+     } else if (ost->enc && ost->enc->pix_fmts) {
+         const enum AVPixelFormat *p;
+         AVIOContext *s = NULL;
+         uint8_t *ret;
+         int len;
+ 
+         if (avio_open_dyn_buf(&s) < 0)
+             exit(1);
+ 
+         p = ost->enc->pix_fmts;
+         if (ost->st->codec->strict_std_compliance <= FF_COMPLIANCE_UNOFFICIAL) {
+             if (ost->st->codec->codec_id == AV_CODEC_ID_MJPEG) {
+                 p = (const enum AVPixelFormat[]) { AV_PIX_FMT_YUVJ420P, AV_PIX_FMT_YUVJ422P, AV_PIX_FMT_YUV420P, AV_PIX_FMT_YUV422P, AV_PIX_FMT_NONE };
+             } else if (ost->st->codec->codec_id == AV_CODEC_ID_LJPEG) {
+                 p = (const enum AVPixelFormat[]) { AV_PIX_FMT_YUVJ420P, AV_PIX_FMT_YUVJ422P, AV_PIX_FMT_YUVJ444P, AV_PIX_FMT_YUV420P,
+                                                    AV_PIX_FMT_YUV422P, AV_PIX_FMT_YUV444P, AV_PIX_FMT_BGRA, AV_PIX_FMT_NONE };
+             }
+         }
+ 
+         for (; *p != AV_PIX_FMT_NONE; p++) {
+             const char *name = av_get_pix_fmt_name(*p);
+             avio_printf(s, "%s:", name);
+         }
+         len = avio_close_dyn_buf(s, &ret);
+         ret[len - 1] = 0;
+         return ret;
+     } else
+         return NULL;
+}
+ 
- /**
-  * Define a function for building a string containing a list of
-  * allowed formats,
-  */
+ /* Define a function for building a string containing a list of
+  * allowed formats. */
  #define DEF_CHOOSE_FORMAT(type, var, supported_list, none, get_name, separator)\
  static char *choose_ ## var ## s(OutputStream *ost)                            \
  {                                                                              \
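
[choose_pix_fmts() above builds its return value with a dynamic I/O buffer: each pixel format name is appended as "name:", and the trailing separator is then overwritten with a terminating NUL (ret[len - 1] = 0), yielding a colon-separated list such as "yuvj420p:yuvj422p:yuv420p". A standalone sketch of the same idiom follows; join_names() is a hypothetical helper, not part of the patch.]

#include <stdint.h>
#include "libavformat/avio.h"

/* Join a NULL-terminated list of names with ':' using an AVIOContext
 * dynamic buffer, as choose_pix_fmts() does above. */
static uint8_t *join_names(const char *const *names)
{
    AVIOContext *s = NULL;
    uint8_t *buf;
    int len;

    if (avio_open_dyn_buf(&s) < 0)
        return NULL;
    for (; *names; names++)
        avio_printf(s, "%s:", *names);  /* append "name:" */
    len = avio_close_dyn_buf(s, &buf);
    if (len > 0)
        buf[len - 1] = 0;               /* overwrite last ':' with NUL */
    return buf;                         /* caller frees with av_free() */
}
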
diff --cc ffmpeg_opt.c
index fba7cb4f0b8,058d5a37795..fefb785945f
--- a/ffmpeg_opt.c
+++ b/ffmpeg_opt.c
@@@ -305,69 -274,8 +305,69 @@@ static int opt_attach(void *optctx, con
      return 0;
  }
  
+static int opt_map_channel(void *optctx, const char *opt, const char *arg)
+{
+     OptionsContext *o = optctx;
+     int n;
+     AVStream *st;
+     AudioChannelMap *m;
+ 
+     o->audio_channel_maps =
+         grow_array(o->audio_channel_maps, sizeof(*o->audio_channel_maps),
+                    &o->nb_audio_channel_maps, o->nb_audio_channel_maps + 1);
+     m = &o->audio_channel_maps[o->nb_audio_channel_maps - 1];
+ 
+     /* muted channel syntax */
+     n = sscanf(arg, "%d:%d.%d", &m->channel_idx, &m->ofile_idx, &m->ostream_idx);
+     if ((n == 1 || n == 3) && m->channel_idx == -1) {
+         m->file_idx = m->stream_idx = -1;
+         if (n == 1)
+             m->ofile_idx = m->ostream_idx = -1;
+         return 0;
+     }
+ 
+     /* normal syntax */
+     n = sscanf(arg, "%d.%d.%d:%d.%d",
+                &m->file_idx, &m->stream_idx, &m->channel_idx,
+                &m->ofile_idx, &m->ostream_idx);
+ 
+     if (n != 3 && n != 5) {
+         av_log(NULL, AV_LOG_FATAL, "Syntax error, mapchan usage: "
+                "[file.stream.channel|-1][:syncfile:syncstream]\n");
+         exit(1);
+     }
+ 
+     if (n != 5) // only file.stream.channel specified
+         m->ofile_idx = m->ostream_idx = -1;
+ 
+     /* check input */
+     if (m->file_idx < 0 || m->file_idx >= nb_input_files) {
+         av_log(NULL, AV_LOG_FATAL, "mapchan: invalid input file index: %d\n",
+                m->file_idx);
+         exit(1);
+     }
+     if (m->stream_idx < 0 ||
+         m->stream_idx >= input_files[m->file_idx]->nb_streams) {
+         av_log(NULL, AV_LOG_FATAL, "mapchan: invalid input file stream index #%d.%d\n",
+                m->file_idx, m->stream_idx);
+         exit(1);
+     }
+     st = input_files[m->file_idx]->ctx->streams[m->stream_idx];
+     if (st->codec->codec_type != AVMEDIA_TYPE_AUDIO) {
+         av_log(NULL, AV_LOG_FATAL, "mapchan: stream #%d.%d is not an audio stream.\n",
+                m->file_idx, m->stream_idx);
+         exit(1);
+     }
+     if (m->channel_idx < 0 || m->channel_idx >= st->codec->channels) {
+         av_log(NULL, AV_LOG_FATAL, "mapchan: invalid audio channel #%d.%d.%d\n",
+                m->file_idx, m->stream_idx, m->channel_idx);
+         exit(1);
+     }
+     return 0;
+}
+ 
  /**
-  * Parse a metadata specifier in arg.
+  * Parse a metadata specifier passed as 'arg' parameter.
   * @param type metadata type is written here -- g(lobal)/s(tream)/c(hapter)/p(rogram)
   * @param index for type c/p, chapter/program index is written here
   * @param stream_spec for type s, the stream specifier is written here
diff --cc ffplay.c
index 1ce92c622c0,597b23c6514..438c11d17b3
--- a/ffplay.c
+++ b/ffplay.c
@@@ -100,14 -99,14 +100,14 @@@ typedef struct PacketQueue
  #define SUBPICTURE_QUEUE_SIZE 4
  
  typedef struct VideoPicture {
-     double pts;             ///< presentation time stamp for this picture
-     int64_t pos;            ///< byte position in file
+     double pts;             // presentation timestamp for this picture
-     double target_clock;    // av_gettime() time at which this should be displayed ideally
+     int64_t pos;            // byte position in file
+     int skip;
      SDL_Overlay *bmp;
      int width, height; /* source height & width */
+     AVRational sample_aspect_ratio;
      int allocated;
      int reallocate;
-     enum AVPixelFormat pix_fmt;
  
  #if CONFIG_AVFILTER
      AVFilterBufferRef *picref;
@@@ -205,12 -190,8 +205,12 @@@ typedef struct VideoState
      double frame_timer;
      double frame_last_pts;
-     double frame_last_delay;
+     double frame_last_duration;
+     double frame_last_dropped_pts;
+     double frame_last_returned_time;
+     double frame_last_filter_delay;
+     int64_t frame_last_dropped_pos;
-     double video_clock;     ///< pts of last decoded frame / predicted pts of next decoded frame
+     double video_clock;     // pts of last decoded frame / predicted pts of next decoded frame
      int video_stream;
      AVStream *video_st;
      PacketQueue videoq;
@@@ -225,13 -206,15 +225,13 @@@
      struct SwsContext *img_convert_ctx;
  #endif
  
-     // QETimer *video_timer;
      char filename[1024];
      int width, height, xleft, ytop;
- 
-     PtsCorrectionContext pts_ctx;
+     int step;
  
  #if CONFIG_AVFILTER
-     AVFilterContext *in_video_filter;   ///< the first filter in the video chain
-     AVFilterContext *out_video_filter;  ///< the last filter in the video chain
+     AVFilterContext *in_video_filter;   // the first filter in the video chain
+     AVFilterContext *out_video_filter;  // the last filter in the video chain
      int use_dr1;
      FrameBuffer *buffer_pool;
  #endif
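
[opt_map_channel() above recognizes two spellings: a muted channel ("-1" or "-1:file.stream") parsed with "%d:%d.%d", and the normal "file.stream.channel[:ofile.ostream]" form parsed with "%d.%d.%d:%d.%d". A self-contained demonstration of that two-pass parse follows; the sample argument string is invented.]

#include <stdio.h>

int main(void)
{
    const char *arg = "0.1.2:0.3";    /* invented example argument */
    int file_idx, stream_idx, channel_idx, ofile_idx, ostream_idx;
    int n;

    /* muted channel syntax: a channel index of -1 mutes the channel */
    n = sscanf(arg, "%d:%d.%d", &channel_idx, &ofile_idx, &ostream_idx);
    if ((n == 1 || n == 3) && channel_idx == -1) {
        printf("muted channel\n");
        return 0;
    }

    /* normal syntax: file.stream.channel, optionally :ofile.ostream */
    n = sscanf(arg, "%d.%d.%d:%d.%d",
               &file_idx, &stream_idx, &channel_idx,
               &ofile_idx, &ostream_idx);
    if (n != 3 && n != 5)
        return 1;                     /* syntax error */
    if (n != 5)                       /* output part omitted */
        ofile_idx = ostream_idx = -1;

    printf("in %d.%d.%d -> out %d.%d\n",
           file_idx, stream_idx, channel_idx, ofile_idx, ostream_idx);
    return 0;
}
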