#include "libavutil/parseutils.h"
#include "libavutil/samplefmt.h"
#include "libavutil/avassert.h"
+#include "libavutil/time.h"
#include "libavformat/avformat.h"
#include "libavdevice/avdevice.h"
#include "libswscale/swscale.h"
#include "cmdutils.h"
-#include <unistd.h>
#include <assert.h>
const char program_name[] = "ffplay";
SDL_PushEvent(&event);
}
//FIXME ideally we should wait for the correct time, but SDL's event passing is so slow it would be silly
- usleep(is->audio_st && is->show_mode != SHOW_MODE_VIDEO ? rdftspeed*1000 : 5000);
+ av_usleep(is->audio_st && is->show_mode != SHOW_MODE_VIDEO ? rdftspeed*1000 : 5000);
}
return 0;
}
return 0;
}
- avcodec_decode_video2(is->video_st->codec, frame, &got_picture, pkt);
+ if(avcodec_decode_video2(is->video_st->codec, frame, &got_picture, pkt) < 0)
+ return 0;
if (got_picture) {
int ret = 1;
}
#if CONFIG_AVFILTER
+/* Wire source_ctx to sink_ctx through the given filter graph.
+ * If filtergraph is a non-NULL filter-description string, it is parsed and
+ * inserted between the two endpoints (the source feeds the chain's "in"
+ * label, the chain's "out" label feeds the sink); otherwise the endpoints
+ * are linked directly.  Returns the result of avfilter_graph_config()
+ * (0 on success) or a negative AVERROR code on failure. */
+static int configure_filtergraph(AVFilterGraph *graph, const char *filtergraph,
+ AVFilterContext *source_ctx, AVFilterContext *sink_ctx)
+{
+ int ret;
+ AVFilterInOut *outputs = NULL, *inputs = NULL;
+
+ if (filtergraph) {
+ outputs = avfilter_inout_alloc();
+ inputs = avfilter_inout_alloc();
+ if (!outputs || !inputs) {
+ ret = AVERROR(ENOMEM);
+ goto fail;
+ }
+
+ /* "in": the pad of our source that the user's chain reads from. */
+ outputs->name = av_strdup("in");
+ outputs->filter_ctx = source_ctx;
+ outputs->pad_idx = 0;
+ outputs->next = NULL;
+
+ /* "out": the pad of our sink that the user's chain writes to. */
+ inputs->name = av_strdup("out");
+ inputs->filter_ctx = sink_ctx;
+ inputs->pad_idx = 0;
+ inputs->next = NULL;
+
+ /* NOTE(review): avfilter_graph_parse() takes the inout lists by
+ * reference and is expected to consume them on success — confirm
+ * against the libavfilter version in use; the fail path below frees
+ * whatever remains. */
+ if ((ret = avfilter_graph_parse(graph, filtergraph, &inputs, &outputs, NULL)) < 0)
+ goto fail;
+ } else {
+ /* No user filter chain: connect the source directly to the sink. */
+ if ((ret = avfilter_link(source_ctx, 0, sink_ctx, 0)) < 0)
+ goto fail;
+ }
+
+ return avfilter_graph_config(graph, NULL);
+fail:
+ avfilter_inout_free(&outputs);
+ avfilter_inout_free(&inputs);
+ return ret;
+}
+
static int configure_video_filters(AVFilterGraph *graph, VideoState *is, const char *vfilters)
{
static const enum PixelFormat pix_fmts[] = { PIX_FMT_YUV420P, PIX_FMT_NONE };
snprintf(sws_flags_str, sizeof(sws_flags_str), "flags=%d", sws_flags);
graph->scale_sws_opts = av_strdup(sws_flags_str);
- snprintf(buffersrc_args, sizeof(buffersrc_args), "%d:%d:%d:%d:%d:%d:%d",
+ snprintf(buffersrc_args, sizeof(buffersrc_args),
+ "video_size=%dx%d:pix_fmt=%d:time_base=%d/%d:pixel_aspect=%d/%d",
codec->width, codec->height, codec->pix_fmt,
is->video_st->time_base.num, is->video_st->time_base.den,
codec->sample_aspect_ratio.num, codec->sample_aspect_ratio.den);
-
if ((ret = avfilter_graph_create_filter(&filt_src,
avfilter_get_by_name("buffer"),
- "src", buffersrc_args, NULL,
+ "ffplay_buffer", buffersrc_args, NULL,
graph)) < 0)
return ret;
-#if FF_API_OLD_VSINK_API
- ret = avfilter_graph_create_filter(&filt_out,
- avfilter_get_by_name("buffersink"),
- "out", NULL, pix_fmts, graph);
-#else
buffersink_params->pixel_fmts = pix_fmts;
ret = avfilter_graph_create_filter(&filt_out,
avfilter_get_by_name("buffersink"),
- "out", NULL, buffersink_params, graph);
-#endif
+ "ffplay_buffersink", NULL, buffersink_params, graph);
av_freep(&buffersink_params);
if (ret < 0)
return ret;
if ((ret = avfilter_link(filt_format, 0, filt_out, 0)) < 0)
return ret;
-
- if (vfilters) {
- AVFilterInOut *outputs = avfilter_inout_alloc();
- AVFilterInOut *inputs = avfilter_inout_alloc();
-
- outputs->name = av_strdup("in");
- outputs->filter_ctx = filt_src;
- outputs->pad_idx = 0;
- outputs->next = NULL;
-
- inputs->name = av_strdup("out");
- inputs->filter_ctx = filt_format;
- inputs->pad_idx = 0;
- inputs->next = NULL;
-
- if ((ret = avfilter_graph_parse(graph, vfilters, &inputs, &outputs, NULL)) < 0)
- return ret;
- } else {
- if ((ret = avfilter_link(filt_src, 0, filt_format, 0)) < 0)
- return ret;
- }
-
- if ((ret = avfilter_graph_config(graph, NULL)) < 0)
+ if ((ret = configure_filtergraph(graph, vfilters, filt_src, filt_format)) < 0)
return ret;
is->in_video_filter = filt_src;
while (is->paused && !is->videoq.abort_request)
SDL_Delay(10);
+ avcodec_get_frame_defaults(frame);
ret = get_video_frame(is, frame, &pts_int, &pkt);
if (ret < 0)
goto the_end;
frame->pts = pts_int;
frame->sample_aspect_ratio = av_guess_sample_aspect_ratio(is->ic, is->video_st, frame);
- if (is->use_dr1) {
+ if (is->use_dr1 && frame->opaque) {
FrameBuffer *buf = frame->opaque;
AVFilterBufferRef *fb = avfilter_get_video_buffer_ref_from_arrays(
frame->data, frame->linesize,
if (fabs(is->frame_last_filter_delay) > AV_NOSYNC_THRESHOLD / 10.0)
is->frame_last_filter_delay = 0;
- avfilter_fill_frame_from_video_buffer_ref(frame, picref);
+ avfilter_copy_buf_props(frame, picref);
pts_int = picref->pts;
tb = filt_out->inputs[0]->time_base;
ic->streams[stream_index]->discard = AVDISCARD_ALL;
avcodec_close(avctx);
+#if CONFIG_AVFILTER
free_buffer_pool(&is->buffer_pool);
+#endif
switch (avctx->codec_type) {
case AVMEDIA_TYPE_AUDIO:
is->audio_st = NULL;
+/* Flip the global fullscreen flag and reopen the video window in the
+ * new mode. */
static void toggle_full_screen(VideoState *is)
{
- av_unused int i;
- is_full_screen = !is_full_screen;
#if defined(__APPLE__) && SDL_VERSION_ATLEAST(1, 2, 14)
/* OS X needs to reallocate the SDL overlays */
- for (i = 0; i < VIDEO_PICTURE_QUEUE_SIZE; i++) {
+ int i;
+ for (i = 0; i < VIDEO_PICTURE_QUEUE_SIZE; i++)
is->pictq[i].reallocate = 1;
- }
#endif
+ /* NOTE(review): the flag flip was moved to after the overlay-marking
+ * loop — presumably the ordering relative to the reallocate marks
+ * matters on OS X; confirm before reordering. */
+ is_full_screen = !is_full_screen;
video_open(is, 1);
}