#include <inttypes.h>
#include <math.h>
#include <limits.h>
+#include <stdint.h>
+
#include "libavutil/avstring.h"
#include "libavutil/colorspace.h"
+#include "libavutil/display.h"
#include "libavutil/mathematics.h"
#include "libavutil/pixdesc.h"
#include "libavutil/imgutils.h"
typedef struct VideoPicture {
double pts; // presentation timestamp for this picture
- double target_clock; // av_gettime() time at which this should be displayed ideally
+ double target_clock; // av_gettime_relative() time at which this should be displayed ideally
int64_t pos; // byte position in file
SDL_Overlay *bmp;
int width, height; /* source height & width */
AVStream *video_st;
PacketQueue videoq;
double video_current_pts; // current displayed pts (different from video_clock if frame fifos are used)
- double video_current_pts_drift; // video_current_pts - time (av_gettime) at which we updated video_current_pts - used to have running video pts
+ double video_current_pts_drift; // video_current_pts - time (av_gettime_relative) at which we updated video_current_pts - used to have running video pts
int64_t video_current_pos; // current displayed file pos
VideoPicture pictq[VIDEO_PICTURE_QUEUE_SIZE];
int pictq_size, pictq_rindex, pictq_windex;
static int av_sync_type = AV_SYNC_AUDIO_MASTER;
static int64_t start_time = AV_NOPTS_VALUE;
static int64_t duration = AV_NOPTS_VALUE;
-static int debug_mv = 0;
static int step = 0;
static int workaround_bugs = 1;
static int fast = 0;
static enum AVDiscard skip_loop_filter = AVDISCARD_DEFAULT;
static int error_concealment = 3;
static int decoder_reorder_pts = -1;
-static int autoexit;
+static int noautoexit;
static int exit_on_keydown;
static int exit_on_mousedown;
static int loop = 1;
#if CONFIG_AVFILTER
static char *vfilters = NULL;
#endif
+static int autorotate = 1;
/* current context */
static int is_full_screen;
/* to be more precise, we take into account the time spent since
the last buffer computation */
if (audio_callback_time) {
- time_diff = av_gettime() - audio_callback_time;
+ time_diff = av_gettime_relative() - audio_callback_time;
delay -= (time_diff * s->sdl_sample_rate) / 1000000;
}
}
av_rdft_calc(s->rdft, data[ch]);
}
- // least efficient way to do this, we should of course directly access it but its more than fast enough
+ /* Least efficient way to do this, we should of course
+ * directly access it but it is more than fast enough. */
for (y = 0; y < s->height; y++) {
double w = 1 / sqrt(nb_freq);
int a = sqrt(w * sqrt(data[0][2 * y + 0] * data[0][2 * y + 0] + data[0][2 * y + 1] * data[0][2 * y + 1]));
if (is->paused) {
return is->video_current_pts;
} else {
- return is->video_current_pts_drift + av_gettime() / 1000000.0;
+ return is->video_current_pts_drift + av_gettime_relative() / 1000000.0;
}
}
static double get_external_clock(VideoState *is)
{
int64_t ti;
- ti = av_gettime();
+ ti = av_gettime_relative();
return is->external_clock + ((ti - is->external_clock_time) * 1e-6);
}
static void stream_pause(VideoState *is)
{
if (is->paused) {
- is->frame_timer += av_gettime() / 1000000.0 + is->video_current_pts_drift - is->video_current_pts;
+ is->frame_timer += av_gettime_relative() / 1000000.0 + is->video_current_pts_drift - is->video_current_pts;
if (is->read_pause_return != AVERROR(ENOSYS)) {
- is->video_current_pts = is->video_current_pts_drift + av_gettime() / 1000000.0;
+ is->video_current_pts = is->video_current_pts_drift + av_gettime_relative() / 1000000.0;
}
- is->video_current_pts_drift = is->video_current_pts - av_gettime() / 1000000.0;
+ is->video_current_pts_drift = is->video_current_pts - av_gettime_relative() / 1000000.0;
}
is->paused = !is->paused;
}
static double compute_target_time(double frame_current_pts, VideoState *is)
{
- double delay, sync_threshold, diff;
+ double delay, sync_threshold, diff = 0;
/* compute nominal delay */
delay = frame_current_pts - is->frame_last_pts;
}
is->frame_timer += delay;
- av_dlog(NULL, "video: delay=%0.3f pts=%0.3f A-V=%f\n",
+ av_log(NULL, AV_LOG_TRACE, "video: delay=%0.3f pts=%0.3f A-V=%f\n",
delay, frame_current_pts, -diff);
return is->frame_timer;
if (is->pictq_size == 0) {
// nothing to do, no picture to display in the que
} else {
- double time = av_gettime() / 1000000.0;
+ double time = av_gettime_relative() / 1000000.0;
double next_target;
/* dequeue the picture */
vp = &is->pictq[is->pictq_rindex];
int aqsize, vqsize, sqsize;
double av_diff;
- cur_time = av_gettime();
+ cur_time = av_gettime_relative();
if (!last_time || (cur_time - last_time) >= 30000) {
aqsize = 0;
vqsize = 0;
vp = &is->pictq[is->pictq_windex];
+ vp->sar = src_frame->sample_aspect_ratio;
+
/* alloc or resize hardware picture buffer */
if (!vp->bmp || vp->reallocate ||
#if CONFIG_AVFILTER
is->img_convert_ctx = sws_getCachedContext(is->img_convert_ctx,
vp->width, vp->height, vp->pix_fmt, vp->width, vp->height,
dst_pix_fmt, sws_flags, NULL, NULL, NULL);
- if (is->img_convert_ctx == NULL) {
+ if (!is->img_convert_ctx) {
fprintf(stderr, "Cannot initialize the conversion context\n");
exit(1);
}
init_pts_correction(&is->pts_ctx);
is->frame_last_pts = AV_NOPTS_VALUE;
is->frame_last_delay = 0;
- is->frame_timer = (double)av_gettime() / 1000000.0;
+ is->frame_timer = (double)av_gettime_relative() / 1000000.0;
is->skip_frames = 1;
is->skip_frames_index = 0;
return 0;
char sws_flags_str[128];
char buffersrc_args[256];
int ret;
- AVFilterContext *filt_src = NULL, *filt_out = NULL, *filt_format;
+ AVFilterContext *filt_src = NULL, *filt_out = NULL, *last_filter;
AVCodecContext *codec = is->video_st->codec;
snprintf(sws_flags_str, sizeof(sws_flags_str), "flags=%"PRId64, sws_flags);
"out", NULL, NULL, graph)) < 0)
return ret;
- if ((ret = avfilter_graph_create_filter(&filt_format,
- avfilter_get_by_name("format"),
- "format", "yuv420p", NULL, graph)) < 0)
- return ret;
- if ((ret = avfilter_link(filt_format, 0, filt_out, 0)) < 0)
- return ret;
-
+ last_filter = filt_out;
+
+/* Note: this macro adds a filter before the last added filter, so the
+ * processing order of the filters is in reverse */
+#define INSERT_FILT(name, arg) do { \
+ AVFilterContext *filt_ctx; \
+ \
+ ret = avfilter_graph_create_filter(&filt_ctx, \
+ avfilter_get_by_name(name), \
+ "avplay_" name, arg, NULL, graph); \
+ if (ret < 0) \
+ return ret; \
+ \
+ ret = avfilter_link(filt_ctx, 0, last_filter, 0); \
+ if (ret < 0) \
+ return ret; \
+ \
+ last_filter = filt_ctx; \
+} while (0)
+
+ INSERT_FILT("format", "yuv420p");
+
+ if (autorotate) {
+ uint8_t* displaymatrix = av_stream_get_side_data(is->video_st,
+ AV_PKT_DATA_DISPLAYMATRIX, NULL);
+ if (displaymatrix) {
+ double rot = av_display_rotation_get((int32_t*) displaymatrix);
+ if (rot < -135 || rot > 135) {
+ INSERT_FILT("vflip", NULL);
+ INSERT_FILT("hflip", NULL);
+ } else if (rot < -45) {
+ INSERT_FILT("transpose", "dir=clock");
+ } else if (rot > 45) {
+ INSERT_FILT("transpose", "dir=cclock");
+ }
+ }
+ }
if (vfilters) {
AVFilterInOut *outputs = avfilter_inout_alloc();
outputs->next = NULL;
inputs->name = av_strdup("out");
- inputs->filter_ctx = filt_format;
+ inputs->filter_ctx = last_filter;
inputs->pad_idx = 0;
inputs->next = NULL;
if ((ret = avfilter_graph_parse(graph, vfilters, inputs, outputs, NULL)) < 0)
return ret;
} else {
- if ((ret = avfilter_link(filt_src, 0, filt_format, 0)) < 0)
+ if ((ret = avfilter_link(filt_src, 0, last_filter, 0)) < 0)
return ret;
}
AVFilterContext *filt_out = NULL, *filt_in = NULL;
int last_w = is->video_st->codec->width;
int last_h = is->video_st->codec->height;
+ if (!graph) {
+ av_frame_free(&frame);
+ return AVERROR(ENOMEM);
+ }
if ((ret = configure_video_filters(graph, is, vfilters)) < 0)
goto the_end;
filt_out = is->out_video_filter;
#endif
+ if (!frame) {
+#if CONFIG_AVFILTER
+ avfilter_graph_free(&graph);
+#endif
+ return AVERROR(ENOMEM);
+ }
+
for (;;) {
#if CONFIG_AVFILTER
AVRational tb;
#if CONFIG_AVFILTER
if ( last_w != is->video_st->codec->width
|| last_h != is->video_st->codec->height) {
- av_dlog(NULL, "Changing size %dx%d -> %dx%d\n", last_w, last_h,
+ av_log(NULL, AV_LOG_TRACE, "Changing size %dx%d -> %dx%d\n", last_w, last_h,
is->video_st->codec->width, is->video_st->codec->height);
avfilter_graph_free(&graph);
graph = avfilter_graph_alloc();
if (av_cmp_q(tb, is->video_st->time_base)) {
av_unused int64_t pts1 = pts_int;
pts_int = av_rescale_q(pts_int, tb, is->video_st->time_base);
- av_dlog(NULL, "video_thread(): "
+ av_log(NULL, AV_LOG_TRACE, "video_thread(): "
"tb:%d/%d pts:%"PRId64" -> tb:%d/%d pts:%"PRId64"\n",
tb.num, tb.den, pts1,
is->video_st->time_base.num, is->video_st->time_base.den, pts_int);
samples_size = wanted_size;
}
}
- av_dlog(NULL, "diff=%f adiff=%f sample_diff=%d apts=%0.3f vpts=%0.3f %f\n",
+ av_log(NULL, AV_LOG_TRACE, "diff=%f adiff=%f sample_diff=%d apts=%0.3f vpts=%0.3f %f\n",
diff, avg_diff, samples_size - samples_size1,
is->audio_clock, is->video_clock, is->audio_diff_threshold);
}
int resample_changed, audio_resample;
if (!is->frame) {
- if (!(is->frame = avcodec_alloc_frame()))
+ if (!(is->frame = av_frame_alloc()))
return AVERROR(ENOMEM);
- } else
- avcodec_get_frame_defaults(is->frame);
+ }
if (flush_complete)
break;
if (!got_frame) {
/* stop sending empty packets if the decoder is finished */
- if (!pkt_temp->data && dec->codec->capabilities & CODEC_CAP_DELAY)
+ if (!pkt_temp->data && (dec->codec->capabilities & AV_CODEC_CAP_DELAY))
flush_complete = 1;
continue;
}
int audio_size, len1;
double pts;
- audio_callback_time = av_gettime();
+ audio_callback_time = av_gettime_relative();
while (len > 0) {
if (is->audio_buf_index >= is->audio_buf_size) {
SDL_AudioSpec wanted_spec, spec;
AVDictionary *opts;
AVDictionaryEntry *t = NULL;
+ int ret = 0;
if (stream_index < 0 || stream_index >= ic->nb_streams)
return -1;
opts = filter_codec_opts(codec_opts, avctx->codec_id, ic, ic->streams[stream_index], NULL);
codec = avcodec_find_decoder(avctx->codec_id);
- avctx->debug_mv = debug_mv;
avctx->workaround_bugs = workaround_bugs;
avctx->idct_algo = idct;
avctx->skip_frame = skip_frame;
avctx->skip_loop_filter = skip_loop_filter;
avctx->error_concealment = error_concealment;
- if (fast) avctx->flags2 |= CODEC_FLAG2_FAST;
+ if (fast)
+ avctx->flags2 |= AV_CODEC_FLAG2_FAST;
if (!av_dict_get(opts, "threads", NULL, 0))
av_dict_set(&opts, "threads", "auto", 0);
if (avctx->codec_type == AVMEDIA_TYPE_VIDEO)
av_dict_set(&opts, "refcounted_frames", "1", 0);
if (!codec ||
- avcodec_open2(avctx, codec, &opts) < 0)
- return -1;
+ (ret = avcodec_open2(avctx, codec, &opts)) < 0) {
+ goto fail;
+ }
if ((t = av_dict_get(opts, "", NULL, AV_DICT_IGNORE_SUFFIX))) {
av_log(NULL, AV_LOG_ERROR, "Option %s not found.\n", t->key);
- return AVERROR_OPTION_NOT_FOUND;
+ ret = AVERROR_OPTION_NOT_FOUND;
+ goto fail;
}
/* prepare audio output */
avctx->channel_layout = av_get_default_channel_layout(avctx->channels);
if (!avctx->channel_layout) {
fprintf(stderr, "unable to guess channel layout\n");
- return -1;
+ ret = AVERROR_INVALIDDATA;
+ goto fail;
}
if (avctx->channels == 1)
is->sdl_channel_layout = AV_CH_LAYOUT_MONO;
wanted_spec.userdata = is;
if (SDL_OpenAudio(&wanted_spec, &spec) < 0) {
fprintf(stderr, "SDL_OpenAudio: %s\n", SDL_GetError());
- return -1;
+ ret = AVERROR_UNKNOWN;
+ goto fail;
}
is->audio_hw_buf_size = spec.size;
is->sdl_sample_fmt = AV_SAMPLE_FMT_S16;
default:
break;
}
- return 0;
+
+fail:
+ av_dict_free(&opts);
+
+ return ret;
}
static void stream_component_close(VideoState *is, int stream_index)
avresample_free(&is->avr);
av_freep(&is->audio_buf1);
is->audio_buf = NULL;
- avcodec_free_frame(&is->frame);
+ av_frame_free(&is->frame);
if (is->rdft) {
av_rdft_end(is->rdft);
global_video_state = is;
ic = avformat_alloc_context();
+ if (!ic) {
+ av_log(NULL, AV_LOG_FATAL, "Could not allocate context.\n");
+ ret = AVERROR(ENOMEM);
+ goto fail;
+ }
ic->interrupt_callback.callback = decode_interrupt_cb;
err = avformat_open_input(&ic, is->filename, is->iformat, &format_opts);
if (err < 0) {
orig_nb_streams = ic->nb_streams;
err = avformat_find_stream_info(ic, opts);
+
+ for (i = 0; i < orig_nb_streams; i++)
+ av_dict_free(&opts[i]);
+ av_freep(&opts);
+
if (err < 0) {
fprintf(stderr, "%s: could not find codec parameters\n", is->filename);
ret = -1;
goto fail;
}
- for (i = 0; i < orig_nb_streams; i++)
- av_dict_free(&opts[i]);
- av_freep(&opts);
if (ic->pb)
ic->pb->eof_reached = 0; // FIXME hack, avplay maybe should not use url_feof() to test for the end
packet_queue_put(&is->videoq, pkt);
}
if (is->audio_stream >= 0 &&
- is->audio_st->codec->codec->capabilities & CODEC_CAP_DELAY) {
+ (is->audio_st->codec->codec->capabilities & AV_CODEC_CAP_DELAY)) {
av_init_packet(pkt);
pkt->data = NULL;
pkt->size = 0;
if (is->audioq.size + is->videoq.size + is->subtitleq.size == 0) {
if (loop != 1 && (!loop || --loop)) {
stream_seek(cur_stream, start_time != AV_NOPTS_VALUE ? start_time : 0, 0, 0);
- } else if (autoexit) {
+ } else if (!noautoexit) {
ret = AVERROR_EOF;
goto fail;
}
}
}
+/* Seek relative to the current chapter: incr = +1 jumps to the next
+ * chapter, incr = -1 to the previous one.  No-op if the file has no
+ * chapters or the target index is past the last chapter. */
+static void seek_chapter(VideoState *is, int incr)
+{
+    /* current playback position converted to AV_TIME_BASE units */
+    int64_t pos = get_master_clock(is) * AV_TIME_BASE;
+    int i;
+
+    if (!is->ic->nb_chapters)
+        return;
+
+    /* find the current chapter: index of the first chapter starting
+     * after pos, minus one.  i can end up as -1 (pos before the first
+     * chapter) or nb_chapters (pos inside the last chapter); both
+     * cases are handled by the clamp/check below. */
+    for (i = 0; i < is->ic->nb_chapters; i++) {
+        AVChapter *ch = is->ic->chapters[i];
+        if (av_compare_ts(pos, AV_TIME_BASE_Q, ch->start, ch->time_base) < 0) {
+            i--;
+            break;
+        }
+    }
+
+    i += incr;
+    i = FFMAX(i, 0);   /* clamp: seeking backwards from before the first chapter */
+    if (i >= is->ic->nb_chapters)
+        return;
+
+    av_log(NULL, AV_LOG_VERBOSE, "Seeking to chapter %d.\n", i);
+    stream_seek(is, av_rescale_q(is->ic->chapters[i]->start, is->ic->chapters[i]->time_base,
+                                 AV_TIME_BASE_Q), 0, 0);
+}
+
/* handle an event sent by the GUI */
static void event_loop(void)
{
case SDLK_w:
toggle_audio_display();
break;
+ case SDLK_PAGEUP:
+ seek_chapter(cur_stream, 1);
+ break;
+ case SDLK_PAGEDOWN:
+ seek_chapter(cur_stream, -1);
+ break;
case SDLK_LEFT:
incr = -10.0;
goto do_seek;
return 0;
}
-static int opt_vismv(void *optctx, const char *opt, const char *arg)
-{
- debug_mv = parse_number_or_die(opt, arg, OPT_INT64, INT_MIN, INT_MAX);
- return 0;
-}
-
static const OptionDef options[] = {
#include "cmdutils_common_opts.h"
{ "x", HAS_ARG, { .func_arg = opt_width }, "force displayed width", "width" },
{ "pix_fmt", HAS_ARG | OPT_EXPERT | OPT_VIDEO, { .func_arg = opt_frame_pix_fmt }, "set pixel format", "format" },
{ "stats", OPT_BOOL | OPT_EXPERT, { &show_status }, "show status", "" },
{ "bug", OPT_INT | HAS_ARG | OPT_EXPERT, { &workaround_bugs }, "workaround bugs", "" },
- { "vismv", HAS_ARG | OPT_EXPERT, { .func_arg = opt_vismv }, "visualize motion vectors", "" },
{ "fast", OPT_BOOL | OPT_EXPERT, { &fast }, "non spec compliant optimizations", "" },
{ "genpts", OPT_BOOL | OPT_EXPERT, { &genpts }, "generate pts", "" },
{ "drp", OPT_INT | HAS_ARG | OPT_EXPERT, { &decoder_reorder_pts }, "let decoder reorder pts 0=off 1=on -1=auto", ""},
{ "idct", OPT_INT | HAS_ARG | OPT_EXPERT, { &idct }, "set idct algo", "algo" },
{ "ec", OPT_INT | HAS_ARG | OPT_EXPERT, { &error_concealment }, "set error concealment options", "bit_mask" },
{ "sync", HAS_ARG | OPT_EXPERT, { .func_arg = opt_sync }, "set audio-video sync. type (type=audio/video/ext)", "type" },
- { "autoexit", OPT_BOOL | OPT_EXPERT, { &autoexit }, "exit at the end", "" },
+ { "noautoexit", OPT_BOOL | OPT_EXPERT, { &noautoexit }, "Do not exit at the end of playback", "" },
{ "exitonkeydown", OPT_BOOL | OPT_EXPERT, { &exit_on_keydown }, "exit on key down", "" },
{ "exitonmousedown", OPT_BOOL | OPT_EXPERT, { &exit_on_mousedown }, "exit on mouse down", "" },
{ "loop", OPT_INT | HAS_ARG | OPT_EXPERT, { &loop }, "set number of times the playback shall be looped", "loop count" },
{ "rdftspeed", OPT_INT | HAS_ARG| OPT_AUDIO | OPT_EXPERT, { &rdftspeed }, "rdft speed", "msecs" },
{ "default", HAS_ARG | OPT_AUDIO | OPT_VIDEO | OPT_EXPERT, { opt_default }, "generic catch all option", "" },
{ "i", 0, { NULL }, "avconv compatibility dummy option", ""},
+ { "autorotate", OPT_BOOL, { &autorotate }, "automatically rotate video", "" },
{ NULL, },
};