#define SUBPICTURE_QUEUE_SIZE 4
typedef struct VideoPicture {
- double pts; ///< presentation time stamp for this picture
- int64_t pos; ///< byte position in file
+ double pts; // presentation timestamp for this picture
+ int64_t pos; // byte position in file
int skip;
SDL_Overlay *bmp;
int width, height; /* source height & width */
double frame_last_returned_time;
double frame_last_filter_delay;
int64_t frame_last_dropped_pos;
- double video_clock; ///< pts of last decoded frame / predicted pts of next decoded frame
+ double video_clock; // pts of last decoded frame / predicted pts of next decoded frame
int video_stream;
AVStream *video_st;
PacketQueue videoq;
- double video_current_pts; ///< current displayed pts (different from video_clock if frame fifos are used)
- double video_current_pts_drift; ///< video_current_pts - time (av_gettime) at which we updated video_current_pts - used to have running video pts
- int64_t video_current_pos; ///< current displayed file pos
+ double video_current_pts; // current displayed pts (different from video_clock if frame fifos are used)
+ double video_current_pts_drift; // video_current_pts - time (av_gettime) at which we updated video_current_pts - used to have running video pts
+ int64_t video_current_pos; // current displayed file pos
VideoPicture pictq[VIDEO_PICTURE_QUEUE_SIZE];
int pictq_size, pictq_rindex, pictq_windex;
SDL_mutex *pictq_mutex;
int step;
#if CONFIG_AVFILTER
- AVFilterContext *in_video_filter; ///< the first filter in the video chain
- AVFilterContext *out_video_filter; ///< the last filter in the video chain
+ AVFilterContext *in_video_filter; // the first filter in the video chain
+ AVFilterContext *out_video_filter; // the last filter in the video chain
int use_dr1;
FrameBuffer *buffer_pool;
#endif
static int exit_on_mousedown;
static int loop = 1;
static int framedrop = -1;
-static int infinite_buffer = 0;
+static int infinite_buffer = -1;
static enum ShowMode show_mode = SHOW_MODE_NONE;
static const char *audio_codec_name;
static const char *subtitle_codec_name;
static SDL_Surface *screen;
-void av_noreturn exit_program(int ret)
-{
- exit(ret);
-}
+/* Forward declaration of packet_queue_put (prototype only; the definition
+ * is not visible in this hunk) so earlier queue helpers can call it. */
+static int packet_queue_put(PacketQueue *q, AVPacket *pkt);
static int packet_queue_put_private(PacketQueue *q, AVPacket *pkt)
{
sws_flags = av_get_int(sws_opts, "sws_flags", NULL);
is->img_convert_ctx = sws_getCachedContext(is->img_convert_ctx,
vp->width, vp->height, src_frame->format, vp->width, vp->height,
- PIX_FMT_YUV420P, sws_flags, NULL, NULL, NULL);
+ AV_PIX_FMT_YUV420P, sws_flags, NULL, NULL, NULL);
if (is->img_convert_ctx == NULL) {
fprintf(stderr, "Cannot initialize the conversion context\n");
exit(1);
static int configure_video_filters(AVFilterGraph *graph, VideoState *is, const char *vfilters)
{
- static const enum PixelFormat pix_fmts[] = { PIX_FMT_YUV420P, PIX_FMT_NONE };
+ static const enum AVPixelFormat pix_fmts[] = { AV_PIX_FMT_YUV420P, AV_PIX_FMT_NONE };
char sws_flags_str[128];
char buffersrc_args[256];
int ret;
AVFilterContext *filt_out = NULL, *filt_in = NULL;
int last_w = 0;
int last_h = 0;
- enum PixelFormat last_format = -2;
+ enum AVPixelFormat last_format = -2;
if (codec->codec->capabilities & CODEC_CAP_DR1) {
is->use_dr1 = 1;
avfilter_graph_free(&graph);
#endif
av_free_packet(&pkt);
- av_free(frame);
+ avcodec_free_frame(&frame);
return 0;
}
flush_complete = 1;
continue;
}
- data_size = av_samples_get_buffer_size(NULL, dec->channels,
+ data_size = av_samples_get_buffer_size(NULL, is->frame->channels,
is->frame->nb_samples,
- dec->sample_fmt, 1);
+ is->frame->format, 1);
dec_channel_layout =
- (dec->channel_layout && dec->channels == av_get_channel_layout_nb_channels(dec->channel_layout)) ?
- dec->channel_layout : av_get_default_channel_layout(dec->channels);
+ (is->frame->channel_layout && is->frame->channels == av_get_channel_layout_nb_channels(is->frame->channel_layout)) ?
+ is->frame->channel_layout : av_get_default_channel_layout(is->frame->channels);
wanted_nb_samples = synchronize_audio(is, is->frame->nb_samples);
- if (dec->sample_fmt != is->audio_src.fmt ||
- dec_channel_layout != is->audio_src.channel_layout ||
- dec->sample_rate != is->audio_src.freq ||
- (wanted_nb_samples != is->frame->nb_samples && !is->swr_ctx)) {
+ if (is->frame->format != is->audio_src.fmt ||
+ dec_channel_layout != is->audio_src.channel_layout ||
+ is->frame->sample_rate != is->audio_src.freq ||
+ (wanted_nb_samples != is->frame->nb_samples && !is->swr_ctx)) {
swr_free(&is->swr_ctx);
is->swr_ctx = swr_alloc_set_opts(NULL,
is->audio_tgt.channel_layout, is->audio_tgt.fmt, is->audio_tgt.freq,
- dec_channel_layout, dec->sample_fmt, dec->sample_rate,
+ dec_channel_layout, is->frame->format, is->frame->sample_rate,
0, NULL);
if (!is->swr_ctx || swr_init(is->swr_ctx) < 0) {
fprintf(stderr, "Cannot create sample rate converter for conversion of %d Hz %s %d channels to %d Hz %s %d channels!\n",
- dec->sample_rate, av_get_sample_fmt_name(dec->sample_fmt), dec->channels,
+ is->frame->sample_rate, av_get_sample_fmt_name(is->frame->format), (int)is->frame->channels,
is->audio_tgt.freq, av_get_sample_fmt_name(is->audio_tgt.fmt), is->audio_tgt.channels);
break;
}
is->audio_src.channel_layout = dec_channel_layout;
- is->audio_src.channels = dec->channels;
- is->audio_src.freq = dec->sample_rate;
- is->audio_src.fmt = dec->sample_fmt;
+ is->audio_src.channels = is->frame->channels;
+ is->audio_src.freq = is->frame->sample_rate;
+ is->audio_src.fmt = is->frame->format;
}
if (is->swr_ctx) {
uint8_t *out[] = {is->audio_buf2};
int out_count = sizeof(is->audio_buf2) / is->audio_tgt.channels / av_get_bytes_per_sample(is->audio_tgt.fmt);
if (wanted_nb_samples != is->frame->nb_samples) {
- if (swr_set_compensation(is->swr_ctx, (wanted_nb_samples - is->frame->nb_samples) * is->audio_tgt.freq / dec->sample_rate,
- wanted_nb_samples * is->audio_tgt.freq / dec->sample_rate) < 0) {
+ if (swr_set_compensation(is->swr_ctx, (wanted_nb_samples - is->frame->nb_samples) * is->audio_tgt.freq / is->frame->sample_rate,
+ wanted_nb_samples * is->audio_tgt.freq / is->frame->sample_rate) < 0) {
fprintf(stderr, "swr_set_compensation() failed\n");
break;
}
pts = is->audio_clock;
*pts_ptr = pts;
is->audio_clock += (double)data_size /
- (dec->channels * dec->sample_rate * av_get_bytes_per_sample(dec->sample_fmt));
+ (is->frame->channels * is->frame->sample_rate * av_get_bytes_per_sample(is->frame->format));
#ifdef DEBUG
{
static double last_clock;
swr_free(&is->swr_ctx);
av_freep(&is->audio_buf1);
is->audio_buf = NULL;
- av_freep(&is->frame);
+ avcodec_free_frame(&is->frame);
if (is->rdft) {
av_rdft_end(is->rdft);
return is->abort_request;
}
+/* Heuristic check: return 1 if the opened input looks like a real-time
+ * (live) source, 0 otherwise.  Its only visible use is in read_thread(),
+ * where a real-time source upgrades infinite_buffer from -1 (auto) to 1
+ * so packet queues are not size-limited. */
+static int is_realtime(AVFormatContext *s)
+{
+ /* Match by demuxer name: RTP/RTSP/SDP inputs are live by definition. */
+ if( !strcmp(s->iformat->name, "rtp")
+ || !strcmp(s->iformat->name, "rtsp")
+ || !strcmp(s->iformat->name, "sdp")
+ )
+ return 1;
+
+ /* Otherwise match by URL scheme prefix (rtp:/udp:); only checked when an
+ * I/O context (s->pb) exists, i.e. the input is a byte-stream URL. */
+ if(s->pb && ( !strncmp(s->filename, "rtp:", 4)
+ || !strncmp(s->filename, "udp:", 4)
+ )
+ )
+ return 1;
+ return 0;
+}
+
/* this thread gets the stream from the disk or the network */
static int read_thread(void *arg)
{
goto fail;
}
+ if (infinite_buffer < 0 && is_realtime(ic))
+ infinite_buffer = 1;
+
for (;;) {
if (is->abort_request)
break;
}
/* if the queue are full, no need to read more */
- if (!infinite_buffer &&
+ if (infinite_buffer<1 &&
(is->audioq.size + is->videoq.size + is->subtitleq.size > MAX_QUEUE_SIZE
|| ( (is->audioq .nb_packets > MIN_FRAMES || is->audio_stream < 0 || is->audioq.abort_request)
&& (is->videoq .nb_packets > MIN_FRAMES || is->video_stream < 0 || is->videoq.abort_request)
eof = 1;
if (ic->pb && ic->pb->error)
break;
- SDL_Delay(100); /* wait for user event */
+ SDL_LockMutex(wait_mutex);
+ SDL_CondWaitTimeout(is->continue_read_thread, wait_mutex, 10);
+ SDL_UnlockMutex(wait_mutex);
continue;
}
/* check if packet is in play range specified by user, then queue, otherwise discard */
if (input_filename) {
fprintf(stderr, "Argument '%s' provided as input filename, but '%s' was already specified.\n",
filename, input_filename);
- exit_program(1);
+ exit(1);
}
if (!strcmp(filename, "-"))
filename = "pipe:";