 * Copyright (c) 2003 Fabrice Bellard
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 *
 * simple media player based on the FFmpeg libraries
33 #include "libavutil/avstring.h"
34 #include "libavutil/colorspace.h"
35 #include "libavutil/mathematics.h"
36 #include "libavutil/pixdesc.h"
37 #include "libavutil/imgutils.h"
38 #include "libavutil/dict.h"
39 #include "libavutil/parseutils.h"
40 #include "libavutil/samplefmt.h"
41 #include "libavutil/avassert.h"
42 #include "libavutil/time.h"
43 #include "libavformat/avformat.h"
44 #include "libavdevice/avdevice.h"
45 #include "libswscale/swscale.h"
46 #include "libavutil/opt.h"
47 #include "libavcodec/avfft.h"
48 #include "libswresample/swresample.h"
51 # include "libavfilter/avcodec.h"
52 # include "libavfilter/avfilter.h"
53 # include "libavfilter/buffersink.h"
54 # include "libavfilter/buffersrc.h"
58 #include <SDL_thread.h>
64 const char program_name[] = "ffplay";
65 const int program_birth_year = 2003;
67 #define MAX_QUEUE_SIZE (15 * 1024 * 1024)
70 /* SDL audio buffer size, in samples. Should be small to have precise
71 A/V sync as SDL does not have hardware buffer fullness info. */
72 #define SDL_AUDIO_BUFFER_SIZE 1024
74 /* no AV sync correction is done if below the minimum AV sync threshold */
75 #define AV_SYNC_THRESHOLD_MIN 0.01
76 /* AV sync correction is done if above the maximum AV sync threshold */
77 #define AV_SYNC_THRESHOLD_MAX 0.1
78 /* If a frame duration is longer than this, it will not be duplicated to compensate AV sync */
79 #define AV_SYNC_FRAMEDUP_THRESHOLD 0.1
80 /* no AV correction is done if too big error */
81 #define AV_NOSYNC_THRESHOLD 10.0
83 /* maximum audio speed change to get correct sync */
84 #define SAMPLE_CORRECTION_PERCENT_MAX 10
86 /* external clock speed adjustment constants for realtime sources based on buffer fullness */
87 #define EXTERNAL_CLOCK_SPEED_MIN 0.900
88 #define EXTERNAL_CLOCK_SPEED_MAX 1.010
89 #define EXTERNAL_CLOCK_SPEED_STEP 0.001
91 /* we use about AUDIO_DIFF_AVG_NB A-V differences to make the average */
92 #define AUDIO_DIFF_AVG_NB 20
94 /* polls for possible required screen refresh at least this often, should be less than 1/fps */
95 #define REFRESH_RATE 0.01
97 /* NOTE: the size must be big enough to compensate the hardware audio buffersize size */
98 /* TODO: We assume that a decoded and resampled frame fits into this buffer */
99 #define SAMPLE_ARRAY_SIZE (8 * 65536)
101 #define CURSOR_HIDE_DELAY 1000000
103 static int64_t sws_flags = SWS_BICUBIC;
105 typedef struct MyAVPacketList {
107 struct MyAVPacketList *next;
111 typedef struct PacketQueue {
112 MyAVPacketList *first_pkt, *last_pkt;
121 #define VIDEO_PICTURE_QUEUE_SIZE 3
122 #define SUBPICTURE_QUEUE_SIZE 4
124 typedef struct VideoPicture {
125 double pts; // presentation timestamp for this picture
126 double duration; // estimated duration based on frame rate
127 int64_t pos; // byte position in file
129 int width, height; /* source height & width */
137 typedef struct SubPicture {
138 double pts; /* presentation time stamp for this picture */
143 typedef struct AudioParams {
146 int64_t channel_layout;
147 enum AVSampleFormat fmt;
152 typedef struct Clock {
153 double pts; /* clock base */
154 double pts_drift; /* clock base minus time at which we updated the clock */
157 int serial; /* clock is based on a packet with this serial */
159 int *queue_serial; /* pointer to the current packet queue serial, used for obsolete clock detection */
163 AV_SYNC_AUDIO_MASTER, /* default choice */
164 AV_SYNC_VIDEO_MASTER,
165 AV_SYNC_EXTERNAL_CLOCK, /* synchronize to an external clock */
168 typedef struct VideoState {
169 SDL_Thread *read_tid;
170 SDL_Thread *video_tid;
171 AVInputFormat *iformat;
177 int queue_attachments_req;
182 int read_pause_return;
197 int audio_clock_serial;
198 double audio_diff_cum; /* used for AV difference average computation */
199 double audio_diff_avg_coef;
200 double audio_diff_threshold;
201 int audio_diff_avg_count;
204 int audio_hw_buf_size;
205 uint8_t silence_buf[SDL_AUDIO_BUFFER_SIZE];
208 unsigned int audio_buf_size; /* in bytes */
209 unsigned int audio_buf1_size;
210 int audio_buf_index; /* in bytes */
211 int audio_write_buf_size;
212 int audio_buf_frames_pending;
213 AVPacket audio_pkt_temp;
215 int audio_pkt_temp_serial;
216 int audio_last_serial;
217 struct AudioParams audio_src;
219 struct AudioParams audio_filter_src;
221 struct AudioParams audio_tgt;
222 struct SwrContext *swr_ctx;
223 int frame_drops_early;
224 int frame_drops_late;
226 int64_t audio_frame_next_pts;
229 SHOW_MODE_NONE = -1, SHOW_MODE_VIDEO = 0, SHOW_MODE_WAVES, SHOW_MODE_RDFT, SHOW_MODE_NB
231 int16_t sample_array[SAMPLE_ARRAY_SIZE];
232 int sample_array_index;
236 FFTSample *rdft_data;
238 double last_vis_time;
240 SDL_Thread *subtitle_tid;
242 AVStream *subtitle_st;
243 PacketQueue subtitleq;
244 SubPicture subpq[SUBPICTURE_QUEUE_SIZE];
245 int subpq_size, subpq_rindex, subpq_windex;
246 SDL_mutex *subpq_mutex;
247 SDL_cond *subpq_cond;
250 double frame_last_returned_time;
251 double frame_last_filter_delay;
255 int64_t video_current_pos; // current displayed file pos
256 double max_frame_duration; // maximum duration of a frame - above this, we consider the jump a timestamp discontinuity
257 VideoPicture pictq[VIDEO_PICTURE_QUEUE_SIZE];
258 int pictq_size, pictq_rindex, pictq_windex;
259 SDL_mutex *pictq_mutex;
260 SDL_cond *pictq_cond;
262 struct SwsContext *img_convert_ctx;
264 SDL_Rect last_display_rect;
267 int width, height, xleft, ytop;
271 AVFilterContext *in_video_filter; // the first filter in the video chain
272 AVFilterContext *out_video_filter; // the last filter in the video chain
273 AVFilterContext *in_audio_filter; // the first filter in the audio chain
274 AVFilterContext *out_audio_filter; // the last filter in the audio chain
275 AVFilterGraph *agraph; // audio filter graph
278 int last_video_stream, last_audio_stream, last_subtitle_stream;
280 SDL_cond *continue_read_thread;
283 /* options specified by the user */
284 static AVInputFormat *file_iformat;
285 static const char *input_filename;
286 static const char *window_title;
287 static int fs_screen_width;
288 static int fs_screen_height;
289 static int default_width = 640;
290 static int default_height = 480;
291 static int screen_width = 0;
292 static int screen_height = 0;
293 static int audio_disable;
294 static int video_disable;
295 static int subtitle_disable;
296 static int wanted_stream[AVMEDIA_TYPE_NB] = {
297 [AVMEDIA_TYPE_AUDIO] = -1,
298 [AVMEDIA_TYPE_VIDEO] = -1,
299 [AVMEDIA_TYPE_SUBTITLE] = -1,
301 static int seek_by_bytes = -1;
302 static int display_disable;
303 static int show_status = 1;
304 static int av_sync_type = AV_SYNC_AUDIO_MASTER;
305 static int64_t start_time = AV_NOPTS_VALUE;
306 static int64_t duration = AV_NOPTS_VALUE;
307 static int workaround_bugs = 1;
309 static int genpts = 0;
310 static int lowres = 0;
311 static int error_concealment = 3;
312 static int decoder_reorder_pts = -1;
314 static int exit_on_keydown;
315 static int exit_on_mousedown;
317 static int framedrop = -1;
318 static int infinite_buffer = -1;
319 static enum ShowMode show_mode = SHOW_MODE_NONE;
320 static const char *audio_codec_name;
321 static const char *subtitle_codec_name;
322 static const char *video_codec_name;
323 double rdftspeed = 0.02;
324 static int64_t cursor_last_shown;
325 static int cursor_hidden = 0;
327 static char *vfilters = NULL;
328 static char *afilters = NULL;
331 /* current context */
332 static int is_full_screen;
333 static int64_t audio_callback_time;
335 static AVPacket flush_pkt;
337 #define FF_ALLOC_EVENT (SDL_USEREVENT)
338 #define FF_QUIT_EVENT (SDL_USEREVENT + 2)
340 static SDL_Surface *screen;
343 int cmp_audio_fmts(enum AVSampleFormat fmt1, int64_t channel_count1,
344 enum AVSampleFormat fmt2, int64_t channel_count2)
346 /* If channel count == 1, planar and non-planar formats are the same */
347 if (channel_count1 == 1 && channel_count2 == 1)
348 return av_get_packed_sample_fmt(fmt1) != av_get_packed_sample_fmt(fmt2);
350 return channel_count1 != channel_count2 || fmt1 != fmt2;
354 int64_t get_valid_channel_layout(int64_t channel_layout, int channels)
356 if (channel_layout && av_get_channel_layout_nb_channels(channel_layout) == channels)
357 return channel_layout;
362 static int packet_queue_put(PacketQueue *q, AVPacket *pkt);
364 static int packet_queue_put_private(PacketQueue *q, AVPacket *pkt)
366 MyAVPacketList *pkt1;
368 if (q->abort_request)
371 pkt1 = av_malloc(sizeof(MyAVPacketList));
376 if (pkt == &flush_pkt)
378 pkt1->serial = q->serial;
383 q->last_pkt->next = pkt1;
386 q->size += pkt1->pkt.size + sizeof(*pkt1);
387 /* XXX: should duplicate packet data in DV case */
388 SDL_CondSignal(q->cond);
392 static int packet_queue_put(PacketQueue *q, AVPacket *pkt)
396 /* duplicate the packet */
397 if (pkt != &flush_pkt && av_dup_packet(pkt) < 0)
400 SDL_LockMutex(q->mutex);
401 ret = packet_queue_put_private(q, pkt);
402 SDL_UnlockMutex(q->mutex);
404 if (pkt != &flush_pkt && ret < 0)
410 static int packet_queue_put_nullpacket(PacketQueue *q, int stream_index)
412 AVPacket pkt1, *pkt = &pkt1;
416 pkt->stream_index = stream_index;
417 return packet_queue_put(q, pkt);
420 /* packet queue handling */
421 static void packet_queue_init(PacketQueue *q)
423 memset(q, 0, sizeof(PacketQueue));
424 q->mutex = SDL_CreateMutex();
425 q->cond = SDL_CreateCond();
426 q->abort_request = 1;
429 static void packet_queue_flush(PacketQueue *q)
431 MyAVPacketList *pkt, *pkt1;
433 SDL_LockMutex(q->mutex);
434 for (pkt = q->first_pkt; pkt != NULL; pkt = pkt1) {
436 av_free_packet(&pkt->pkt);
443 SDL_UnlockMutex(q->mutex);
446 static void packet_queue_destroy(PacketQueue *q)
448 packet_queue_flush(q);
449 SDL_DestroyMutex(q->mutex);
450 SDL_DestroyCond(q->cond);
453 static void packet_queue_abort(PacketQueue *q)
455 SDL_LockMutex(q->mutex);
457 q->abort_request = 1;
459 SDL_CondSignal(q->cond);
461 SDL_UnlockMutex(q->mutex);
464 static void packet_queue_start(PacketQueue *q)
466 SDL_LockMutex(q->mutex);
467 q->abort_request = 0;
468 packet_queue_put_private(q, &flush_pkt);
469 SDL_UnlockMutex(q->mutex);
472 /* return < 0 if aborted, 0 if no packet and > 0 if packet. */
473 static int packet_queue_get(PacketQueue *q, AVPacket *pkt, int block, int *serial)
475 MyAVPacketList *pkt1;
478 SDL_LockMutex(q->mutex);
481 if (q->abort_request) {
488 q->first_pkt = pkt1->next;
492 q->size -= pkt1->pkt.size + sizeof(*pkt1);
495 *serial = pkt1->serial;
503 SDL_CondWait(q->cond, q->mutex);
506 SDL_UnlockMutex(q->mutex);
510 static inline void fill_rectangle(SDL_Surface *screen,
511 int x, int y, int w, int h, int color, int update)
518 SDL_FillRect(screen, &rect, color);
519 if (update && w > 0 && h > 0)
520 SDL_UpdateRect(screen, x, y, w, h);
523 /* draw only the border of a rectangle */
524 static void fill_border(int xleft, int ytop, int width, int height, int x, int y, int w, int h, int color, int update)
528 /* fill the background */
532 w2 = width - (x + w);
538 h2 = height - (y + h);
541 fill_rectangle(screen,
545 fill_rectangle(screen,
546 xleft + width - w2, ytop,
549 fill_rectangle(screen,
553 fill_rectangle(screen,
554 xleft + w1, ytop + height - h2,
559 #define ALPHA_BLEND(a, oldp, newp, s)\
560 ((((oldp << s) * (255 - (a))) + (newp * (a))) / (255 << s))
562 #define RGBA_IN(r, g, b, a, s)\
564 unsigned int v = ((const uint32_t *)(s))[0];\
565 a = (v >> 24) & 0xff;\
566 r = (v >> 16) & 0xff;\
567 g = (v >> 8) & 0xff;\
571 #define YUVA_IN(y, u, v, a, s, pal)\
573 unsigned int val = ((const uint32_t *)(pal))[*(const uint8_t*)(s)];\
574 a = (val >> 24) & 0xff;\
575 y = (val >> 16) & 0xff;\
576 u = (val >> 8) & 0xff;\
580 #define YUVA_OUT(d, y, u, v, a)\
582 ((uint32_t *)(d))[0] = (a << 24) | (y << 16) | (u << 8) | v;\
/* NOTE(review): this extraction is missing many original lines (the pasted-in
 * numbering skips); the fragment below is not compilable as-is. Based on the
 * visible YUVA_IN/ALPHA_BLEND usage it alpha-blends a palettized subtitle
 * rectangle onto a YUV 4:2:0 picture, handling odd start column/row and odd
 * width/height so chroma (subsampled 2x2) is averaged correctly — TODO confirm
 * against a pristine copy of ffplay.c. */
588 static void blend_subrect(AVPicture *dst, const AVSubtitleRect *rect, int imgw, int imgh)
590 int wrap, wrap3, width2, skip2;
591 int y, u, v, a, u1, v1, a1, w, h;
592 uint8_t *lum, *cb, *cr;
595 int dstx, dsty, dstw, dsth;
/* clip the subtitle rectangle so it lies fully inside the image */
597 dstw = av_clip(rect->w, 0, imgw);
598 dsth = av_clip(rect->h, 0, imgh);
599 dstx = av_clip(rect->x, 0, imgw - dstw);
600 dsty = av_clip(rect->y, 0, imgh - dsth);
/* luma plane pointer; chroma planes are vertically subsampled (>> 1) */
601 lum = dst->data[0] + dsty * dst->linesize[0];
602 cb = dst->data[1] + (dsty >> 1) * dst->linesize[1];
603 cr = dst->data[2] + (dsty >> 1) * dst->linesize[2];
605 width2 = ((dstw + 1) >> 1) + (dstx & ~dstw & 1);
607 wrap = dst->linesize[0];
608 wrap3 = rect->pict.linesize[0];
609 p = rect->pict.data[0];
610 pal = (const uint32_t *)rect->pict.data[1]; /* Now in YCrCb! */
/* odd start column: blend a single pixel (chroma weight a >> 2) */
618 YUVA_IN(y, u, v, a, p, pal);
619 lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
620 cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
621 cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
/* main loop of the first (odd) row: pairs of pixels share one chroma sample */
627 for (w = dstw - (dstx & 1); w >= 2; w -= 2) {
628 YUVA_IN(y, u, v, a, p, pal);
632 lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
634 YUVA_IN(y, u, v, a, p + BPP, pal);
638 lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
639 cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
640 cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
/* trailing odd pixel of the row, if any */
647 YUVA_IN(y, u, v, a, p, pal);
648 lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
649 cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
650 cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
/* advance source/destination pointers to the next row */
654 p += wrap3 - dstw * BPP;
655 lum += wrap - dstw - dstx;
656 cb += dst->linesize[1] - width2 - skip2;
657 cr += dst->linesize[2] - width2 - skip2;
/* main loop over row pairs: each iteration blends two luma rows and one
 * chroma row (accumulators u1/v1/a1 average the 2x2 block) */
659 for (h = dsth - (dsty & 1); h >= 2; h -= 2) {
665 YUVA_IN(y, u, v, a, p, pal);
669 lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
672 YUVA_IN(y, u, v, a, p, pal);
676 lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
677 cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
678 cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
684 for (w = dstw - (dstx & 1); w >= 2; w -= 2) {
685 YUVA_IN(y, u, v, a, p, pal);
689 lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
691 YUVA_IN(y, u, v, a, p + BPP, pal);
695 lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
699 YUVA_IN(y, u, v, a, p, pal);
703 lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
705 YUVA_IN(y, u, v, a, p + BPP, pal);
709 lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
/* full 2x2 block: chroma blended with 1/4 weight (shift 2) */
711 cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 2);
712 cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 2);
716 p += -wrap3 + 2 * BPP;
720 YUVA_IN(y, u, v, a, p, pal);
724 lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
727 YUVA_IN(y, u, v, a, p, pal);
731 lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
732 cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
733 cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
/* skip the already-processed second luma row */
739 p += wrap3 + (wrap3 - dstw * BPP);
740 lum += wrap + (wrap - dstw - dstx);
741 cb += dst->linesize[1] - width2 - skip2;
742 cr += dst->linesize[2] - width2 - skip2;
744 /* handle odd height */
751 YUVA_IN(y, u, v, a, p, pal);
752 lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
753 cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
754 cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
760 for (w = dstw - (dstx & 1); w >= 2; w -= 2) {
761 YUVA_IN(y, u, v, a, p, pal);
765 lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
767 YUVA_IN(y, u, v, a, p + BPP, pal);
771 lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
772 cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u, 1);
773 cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v, 1);
/* final odd pixel of the last row */
780 YUVA_IN(y, u, v, a, p, pal);
781 lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
782 cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
783 cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
788 static void free_picture(VideoPicture *vp)
791 SDL_FreeYUVOverlay(vp->bmp);
796 static void free_subpicture(SubPicture *sp)
798 avsubtitle_free(&sp->sub);
801 static void calculate_display_rect(SDL_Rect *rect, int scr_xleft, int scr_ytop, int scr_width, int scr_height, VideoPicture *vp)
804 int width, height, x, y;
806 if (vp->sar.num == 0)
809 aspect_ratio = av_q2d(vp->sar);
811 if (aspect_ratio <= 0.0)
813 aspect_ratio *= (float)vp->width / (float)vp->height;
815 /* XXX: we suppose the screen has a 1.0 pixel ratio */
817 width = ((int)rint(height * aspect_ratio)) & ~1;
818 if (width > scr_width) {
820 height = ((int)rint(width / aspect_ratio)) & ~1;
822 x = (scr_width - width) / 2;
823 y = (scr_height - height) / 2;
824 rect->x = scr_xleft + x;
825 rect->y = scr_ytop + y;
826 rect->w = FFMAX(width, 1);
827 rect->h = FFMAX(height, 1);
/* NOTE(review): extraction is missing lines; not compilable as-is.
 * Displays the picture at the queue read index; when a subtitle whose start
 * time has been reached is pending, it is blended into the overlay first. */
830 static void video_image_display(VideoState *is)
838 vp = &is->pictq[is->pictq_rindex];
840 if (is->subtitle_st) {
841 if (is->subpq_size > 0) {
842 sp = &is->subpq[is->subpq_rindex];
/* only draw the subtitle once its start_display_time has passed */
844 if (vp->pts >= sp->pts + ((float) sp->sub.start_display_time / 1000)) {
845 SDL_LockYUVOverlay (vp->bmp);
/* note the U/V plane swap: SDL overlay stores planes in Y/V/U order */
847 pict.data[0] = vp->bmp->pixels[0];
848 pict.data[1] = vp->bmp->pixels[2];
849 pict.data[2] = vp->bmp->pixels[1];
851 pict.linesize[0] = vp->bmp->pitches[0];
852 pict.linesize[1] = vp->bmp->pitches[2];
853 pict.linesize[2] = vp->bmp->pitches[1];
855 for (i = 0; i < sp->sub.num_rects; i++)
856 blend_subrect(&pict, sp->sub.rects[i],
857 vp->bmp->w, vp->bmp->h);
859 SDL_UnlockYUVOverlay (vp->bmp);
864 calculate_display_rect(&rect, is->xleft, is->ytop, is->width, is->height, vp);
866 SDL_DisplayYUVOverlay(vp->bmp, &rect);
/* repaint the border bands only when the rect changed or a refresh is forced */
868 if (rect.x != is->last_display_rect.x || rect.y != is->last_display_rect.y || rect.w != is->last_display_rect.w || rect.h != is->last_display_rect.h || is->force_refresh) {
869 int bgcolor = SDL_MapRGB(screen->format, 0x00, 0x00, 0x00);
870 fill_border(is->xleft, is->ytop, is->width, is->height, rect.x, rect.y, rect.w, rect.h, bgcolor, 1);
871 is->last_display_rect = rect;
876 static inline int compute_mod(int a, int b)
878 return a < 0 ? a%b + b : a%b;
/* NOTE(review): extraction is missing lines; not compilable as-is.
 * Draws the audio visualization: either a per-channel waveform
 * (SHOW_MODE_WAVES) or a scrolling RDFT spectrogram, centered on the
 * samples currently being played. */
881 static void video_audio_display(VideoState *s)
883 int i, i_start, x, y1, y, ys, delay, n, nb_display_channels;
884 int ch, channels, h, h2, bgcolor, fgcolor;
886 int rdft_bits, nb_freq;
/* choose the smallest FFT size covering twice the display height */
888 for (rdft_bits = 1; (1 << rdft_bits) < 2 * s->height; rdft_bits++)
890 nb_freq = 1 << (rdft_bits - 1);
892 /* compute display index : center on currently output samples */
893 channels = s->audio_tgt.channels;
894 nb_display_channels = channels;
896 int data_used= s->show_mode == SHOW_MODE_WAVES ? s->width : (2*nb_freq);
/* start from the bytes not yet consumed by the audio device */
898 delay = s->audio_write_buf_size;
901 /* to be more precise, we take into account the time spent since
902 the last buffer computation */
903 if (audio_callback_time) {
904 time_diff = av_gettime() - audio_callback_time;
905 delay -= (time_diff * s->audio_tgt.freq) / 1000000;
908 delay += 2 * data_used;
909 if (delay < data_used)
912 i_start= x = compute_mod(s->sample_array_index - delay * channels, SAMPLE_ARRAY_SIZE);
913 if (s->show_mode == SHOW_MODE_WAVES) {
/* search a stable zero-crossing near the nominal start to reduce jitter */
915 for (i = 0; i < 1000; i += channels) {
916 int idx = (SAMPLE_ARRAY_SIZE + x - i) % SAMPLE_ARRAY_SIZE;
917 int a = s->sample_array[idx];
918 int b = s->sample_array[(idx + 4 * channels) % SAMPLE_ARRAY_SIZE];
919 int c = s->sample_array[(idx + 5 * channels) % SAMPLE_ARRAY_SIZE];
920 int d = s->sample_array[(idx + 9 * channels) % SAMPLE_ARRAY_SIZE];
922 if (h < score && (b ^ c) < 0) {
929 s->last_i_start = i_start;
931 i_start = s->last_i_start;
934 bgcolor = SDL_MapRGB(screen->format, 0x00, 0x00, 0x00);
935 if (s->show_mode == SHOW_MODE_WAVES) {
/* clear the drawing area, then draw each channel's waveform in white */
936 fill_rectangle(screen,
937 s->xleft, s->ytop, s->width, s->height,
940 fgcolor = SDL_MapRGB(screen->format, 0xff, 0xff, 0xff);
942 /* total height for one channel */
943 h = s->height / nb_display_channels;
944 /* graph height / 2 */
946 for (ch = 0; ch < nb_display_channels; ch++) {
948 y1 = s->ytop + ch * h + (h / 2); /* position of center line */
949 for (x = 0; x < s->width; x++) {
950 y = (s->sample_array[i] * h2) >> 15;
957 fill_rectangle(screen,
958 s->xleft + x, ys, 1, y,
961 if (i >= SAMPLE_ARRAY_SIZE)
962 i -= SAMPLE_ARRAY_SIZE;
/* separator lines between channels, in blue */
966 fgcolor = SDL_MapRGB(screen->format, 0x00, 0x00, 0xff);
968 for (ch = 1; ch < nb_display_channels; ch++) {
969 y = s->ytop + ch * h;
970 fill_rectangle(screen,
971 s->xleft, y, s->width, 1,
974 SDL_UpdateRect(screen, s->xleft, s->ytop, s->width, s->height);
/* RDFT mode: at most 2 channels; (re)allocate the transform on resize */
976 nb_display_channels= FFMIN(nb_display_channels, 2);
977 if (rdft_bits != s->rdft_bits) {
978 av_rdft_end(s->rdft);
979 av_free(s->rdft_data);
980 s->rdft = av_rdft_init(rdft_bits, DFT_R2C);
981 s->rdft_bits = rdft_bits;
982 s->rdft_data = av_malloc(4 * nb_freq * sizeof(*s->rdft_data));
986 for (ch = 0; ch < nb_display_channels; ch++) {
987 data[ch] = s->rdft_data + 2 * nb_freq * ch;
/* apply a parabolic (Welch) window before the transform */
989 for (x = 0; x < 2 * nb_freq; x++) {
990 double w = (x-nb_freq) * (1.0 / nb_freq);
991 data[ch][x] = s->sample_array[i] * (1.0 - w * w);
993 if (i >= SAMPLE_ARRAY_SIZE)
994 i -= SAMPLE_ARRAY_SIZE;
996 av_rdft_calc(s->rdft, data[ch]);
998 /* Least efficient way to do this, we should of course
999 * directly access it but it is more than fast enough. */
1000 for (y = 0; y < s->height; y++) {
1001 double w = 1 / sqrt(nb_freq);
1002 int a = sqrt(w * sqrt(data[0][2 * y + 0] * data[0][2 * y + 0] + data[0][2 * y + 1] * data[0][2 * y + 1]));
1003 int b = (nb_display_channels == 2 ) ? sqrt(w * sqrt(data[1][2 * y + 0] * data[1][2 * y + 0]
1004 + data[1][2 * y + 1] * data[1][2 * y + 1])) : a;
1007 fgcolor = SDL_MapRGB(screen->format, a, b, (a + b) / 2);
/* draw one pixel column per refresh; xpos scrolls across the window */
1009 fill_rectangle(screen,
1010 s->xpos, s->height-y, 1, 1,
1014 SDL_UpdateRect(screen, s->xpos, s->ytop, 1, s->height);
1017 if (s->xpos >= s->width)
/* NOTE(review): extraction is missing lines; not compilable as-is.
 * Tears down a VideoState: stops the read thread, destroys the packet
 * queues, frees the picture/subtitle queues and all sync primitives. */
1022 static void stream_close(VideoState *is)
1025 /* XXX: use a special url_shutdown call to abort parse cleanly */
1026 is->abort_request = 1;
/* wait for the reader to notice abort_request and exit */
1027 SDL_WaitThread(is->read_tid, NULL);
1028 packet_queue_destroy(&is->videoq);
1029 packet_queue_destroy(&is->audioq);
1030 packet_queue_destroy(&is->subtitleq);
1032 /* free all pictures */
1033 for (i = 0; i < VIDEO_PICTURE_QUEUE_SIZE; i++)
1034 free_picture(&is->pictq[i]);
1035 for (i = 0; i < SUBPICTURE_QUEUE_SIZE; i++)
1036 free_subpicture(&is->subpq[i]);
1037 SDL_DestroyMutex(is->pictq_mutex);
1038 SDL_DestroyCond(is->pictq_cond);
1039 SDL_DestroyMutex(is->subpq_mutex);
1040 SDL_DestroyCond(is->subpq_cond);
1041 SDL_DestroyCond(is->continue_read_thread);
/* the scaler context only exists when avfilter is not compiled in */
1042 #if !CONFIG_AVFILTER
1043 sws_freeContext(is->img_convert_ctx);
/* NOTE(review): extraction is missing lines; not compilable as-is.
 * Global shutdown path: releases the lock manager, option strings and
 * network state, silences logging, then presumably exits the process
 * (exit call not visible in this fragment — confirm). */
1048 static void do_exit(VideoState *is)
1053 av_lockmgr_register(NULL);
1056 av_freep(&vfilters);
1058 avformat_network_deinit();
1062 av_log(NULL, AV_LOG_QUIET, "%s", "");
1066 static void sigterm_handler(int sig)
1071 static void set_default_window_size(VideoPicture *vp)
1074 calculate_display_rect(&rect, 0, 0, INT_MAX, vp->height, vp);
1075 default_width = rect.w;
1076 default_height = rect.h;
/* NOTE(review): extraction is missing lines; not compilable as-is.
 * Opens (or reuses) the SDL output surface, choosing fullscreen or a
 * resizable window, and records the resulting size in the VideoState. */
1079 static int video_open(VideoState *is, int force_set_video_mode, VideoPicture *vp)
1081 int flags = SDL_HWSURFACE | SDL_ASYNCBLIT | SDL_HWACCEL;
1084 if (is_full_screen) flags |= SDL_FULLSCREEN;
1085 else flags |= SDL_RESIZABLE;
1087 if (vp && vp->width)
1088 set_default_window_size(vp);
/* size priority: fullscreen desktop size, then user-given size, then default */
1090 if (is_full_screen && fs_screen_width) {
1091 w = fs_screen_width;
1092 h = fs_screen_height;
1093 } else if (!is_full_screen && screen_width) {
/* clamp to SDL's maximum surface dimension */
1100 w = FFMIN(16383, w);
/* reuse the existing surface when nothing changed and no force flag */
1101 if (screen && is->width == screen->w && screen->w == w
1102 && is->height== screen->h && screen->h == h && !force_set_video_mode)
1104 screen = SDL_SetVideoMode(w, h, 0, flags);
1106 av_log(NULL, AV_LOG_FATAL, "SDL: could not set video mode - exiting\n");
/* fall back to the input filename as window caption */
1110 window_title = input_filename;
1111 SDL_WM_SetCaption(window_title, window_title);
1113 is->width = screen->w;
1114 is->height = screen->h;
1119 /* display the current picture, if any */
1120 static void video_display(VideoState *is)
1123 video_open(is, 0, NULL);
1124 if (is->audio_st && is->show_mode != SHOW_MODE_VIDEO)
1125 video_audio_display(is);
1126 else if (is->video_st)
1127 video_image_display(is);
1130 static double get_clock(Clock *c)
1132 if (*c->queue_serial != c->serial)
1137 double time = av_gettime() / 1000000.0;
1138 return c->pts_drift + time - (time - c->last_updated) * (1.0 - c->speed);
1142 static void set_clock_at(Clock *c, double pts, int serial, double time)
1145 c->last_updated = time;
1146 c->pts_drift = c->pts - time;
1150 static void set_clock(Clock *c, double pts, int serial)
1152 double time = av_gettime() / 1000000.0;
1153 set_clock_at(c, pts, serial, time);
1156 static void set_clock_speed(Clock *c, double speed)
1158 set_clock(c, get_clock(c), c->serial);
1162 static void init_clock(Clock *c, int *queue_serial)
1166 c->queue_serial = queue_serial;
1167 set_clock(c, NAN, -1);
1170 static void sync_clock_to_slave(Clock *c, Clock *slave)
1172 double clock = get_clock(c);
1173 double slave_clock = get_clock(slave);
1174 if (!isnan(slave_clock) && (isnan(clock) || fabs(clock - slave_clock) > AV_NOSYNC_THRESHOLD))
1175 set_clock(c, slave_clock, slave->serial);
1178 static int get_master_sync_type(VideoState *is) {
1179 if (is->av_sync_type == AV_SYNC_VIDEO_MASTER) {
1181 return AV_SYNC_VIDEO_MASTER;
1183 return AV_SYNC_AUDIO_MASTER;
1184 } else if (is->av_sync_type == AV_SYNC_AUDIO_MASTER) {
1186 return AV_SYNC_AUDIO_MASTER;
1188 return AV_SYNC_EXTERNAL_CLOCK;
1190 return AV_SYNC_EXTERNAL_CLOCK;
1194 /* get the current master clock value */
1195 static double get_master_clock(VideoState *is)
1199 switch (get_master_sync_type(is)) {
1200 case AV_SYNC_VIDEO_MASTER:
1201 val = get_clock(&is->vidclk);
1203 case AV_SYNC_AUDIO_MASTER:
1204 val = get_clock(&is->audclk);
1207 val = get_clock(&is->extclk);
1213 static void check_external_clock_speed(VideoState *is) {
1214 if (is->video_stream >= 0 && is->videoq.nb_packets <= MIN_FRAMES / 2 ||
1215 is->audio_stream >= 0 && is->audioq.nb_packets <= MIN_FRAMES / 2) {
1216 set_clock_speed(&is->extclk, FFMAX(EXTERNAL_CLOCK_SPEED_MIN, is->extclk.speed - EXTERNAL_CLOCK_SPEED_STEP));
1217 } else if ((is->video_stream < 0 || is->videoq.nb_packets > MIN_FRAMES * 2) &&
1218 (is->audio_stream < 0 || is->audioq.nb_packets > MIN_FRAMES * 2)) {
1219 set_clock_speed(&is->extclk, FFMIN(EXTERNAL_CLOCK_SPEED_MAX, is->extclk.speed + EXTERNAL_CLOCK_SPEED_STEP));
1221 double speed = is->extclk.speed;
1223 set_clock_speed(&is->extclk, speed + EXTERNAL_CLOCK_SPEED_STEP * (1.0 - speed) / fabs(1.0 - speed));
1227 /* seek in the stream */
1228 static void stream_seek(VideoState *is, int64_t pos, int64_t rel, int seek_by_bytes)
1230 if (!is->seek_req) {
1233 is->seek_flags &= ~AVSEEK_FLAG_BYTE;
1235 is->seek_flags |= AVSEEK_FLAG_BYTE;
1237 SDL_CondSignal(is->continue_read_thread);
1241 /* pause or resume the video */
1242 static void stream_toggle_pause(VideoState *is)
1245 is->frame_timer += av_gettime() / 1000000.0 + is->vidclk.pts_drift - is->vidclk.pts;
1246 if (is->read_pause_return != AVERROR(ENOSYS)) {
1247 is->vidclk.paused = 0;
1249 set_clock(&is->vidclk, get_clock(&is->vidclk), is->vidclk.serial);
1251 set_clock(&is->extclk, get_clock(&is->extclk), is->extclk.serial);
1252 is->paused = is->audclk.paused = is->vidclk.paused = is->extclk.paused = !is->paused;
1255 static void toggle_pause(VideoState *is)
1257 stream_toggle_pause(is);
1261 static void step_to_next_frame(VideoState *is)
1263 /* if the stream is paused unpause it, then step */
1265 stream_toggle_pause(is);
1269 static double compute_target_delay(double delay, VideoState *is)
1271 double sync_threshold, diff;
1273 /* update delay to follow master synchronisation source */
1274 if (get_master_sync_type(is) != AV_SYNC_VIDEO_MASTER) {
1275 /* if video is slave, we try to correct big delays by
1276 duplicating or deleting a frame */
1277 diff = get_clock(&is->vidclk) - get_master_clock(is);
1279 /* skip or repeat frame. We take into account the
1280 delay to compute the threshold. I still don't know
1281 if it is the best guess */
1282 sync_threshold = FFMAX(AV_SYNC_THRESHOLD_MIN, FFMIN(AV_SYNC_THRESHOLD_MAX, delay));
1283 if (!isnan(diff) && fabs(diff) < is->max_frame_duration) {
1284 if (diff <= -sync_threshold)
1285 delay = FFMAX(0, delay + diff);
1286 else if (diff >= sync_threshold && delay > AV_SYNC_FRAMEDUP_THRESHOLD)
1287 delay = delay + diff;
1288 else if (diff >= sync_threshold)
1293 av_dlog(NULL, "video: delay=%0.3f A-V=%f\n",
1299 static double vp_duration(VideoState *is, VideoPicture *vp, VideoPicture *nextvp) {
1300 if (vp->serial == nextvp->serial) {
1301 double duration = nextvp->pts - vp->pts;
1302 if (isnan(duration) || duration <= 0 || duration > is->max_frame_duration)
1303 return vp->duration;
1311 static void pictq_next_picture(VideoState *is) {
1312 /* update queue size and signal for next picture */
1313 if (++is->pictq_rindex == VIDEO_PICTURE_QUEUE_SIZE)
1314 is->pictq_rindex = 0;
1316 SDL_LockMutex(is->pictq_mutex);
1318 SDL_CondSignal(is->pictq_cond);
1319 SDL_UnlockMutex(is->pictq_mutex);
1322 static int pictq_prev_picture(VideoState *is) {
1323 VideoPicture *prevvp;
1325 /* update queue size and signal for the previous picture */
1326 prevvp = &is->pictq[(is->pictq_rindex + VIDEO_PICTURE_QUEUE_SIZE - 1) % VIDEO_PICTURE_QUEUE_SIZE];
1327 if (prevvp->allocated && prevvp->serial == is->videoq.serial) {
1328 SDL_LockMutex(is->pictq_mutex);
1329 if (is->pictq_size < VIDEO_PICTURE_QUEUE_SIZE) {
1330 if (--is->pictq_rindex == -1)
1331 is->pictq_rindex = VIDEO_PICTURE_QUEUE_SIZE - 1;
1335 SDL_CondSignal(is->pictq_cond);
1336 SDL_UnlockMutex(is->pictq_mutex);
1341 static void update_video_pts(VideoState *is, double pts, int64_t pos, int serial) {
1342 /* update current video pts */
1343 set_clock(&is->vidclk, pts, serial);
1344 sync_clock_to_slave(&is->extclk, &is->vidclk);
1345 is->video_current_pos = pos;
/* NOTE(review): extraction is missing lines; not compilable as-is.
 * Main display callback: decides whether to show the next video picture
 * now (based on the target delay), drops late frames, retires expired
 * subtitles, refreshes the audio visualization, and periodically prints
 * the status line. remaining_time is lowered to the next wakeup deadline. */
1349 static void video_refresh(void *opaque, double *remaining_time)
1351 VideoState *is = opaque;
1354 SubPicture *sp, *sp2;
1356 if (!is->paused && get_master_sync_type(is) == AV_SYNC_EXTERNAL_CLOCK && is->realtime)
1357 check_external_clock_speed(is);
/* audio visualization refresh, throttled by rdftspeed */
1359 if (!display_disable && is->show_mode != SHOW_MODE_VIDEO && is->audio_st) {
1360 time = av_gettime() / 1000000.0;
1361 if (is->force_refresh || is->last_vis_time + rdftspeed < time) {
1363 is->last_vis_time = time;
1365 *remaining_time = FFMIN(*remaining_time, is->last_vis_time + rdftspeed - time);
/* a forced refresh re-displays the previous picture when possible */
1370 if (is->force_refresh)
1371 redisplay = pictq_prev_picture(is);
1373 if (is->pictq_size == 0) {
1374 // nothing to do, no picture to display in the queue
1376 double last_duration, duration, delay;
1377 VideoPicture *vp, *lastvp;
1379 /* dequeue the picture */
1380 vp = &is->pictq[is->pictq_rindex];
1381 lastvp = &is->pictq[(is->pictq_rindex + VIDEO_PICTURE_QUEUE_SIZE - 1) % VIDEO_PICTURE_QUEUE_SIZE];
/* skip pictures from an obsolete serial (pre-seek leftovers) */
1383 if (vp->serial != is->videoq.serial) {
1384 pictq_next_picture(is);
1385 is->video_current_pos = -1;
1390 if (lastvp->serial != vp->serial && !redisplay)
1391 is->frame_timer = av_gettime() / 1000000.0;
1396 /* compute nominal last_duration */
1397 last_duration = vp_duration(is, lastvp, vp);
1401 delay = compute_target_delay(last_duration, is);
1403 time= av_gettime()/1000000.0;
/* too early: report how long to sleep and keep the current picture */
1404 if (time < is->frame_timer + delay && !redisplay) {
1405 *remaining_time = FFMIN(is->frame_timer + delay - time, *remaining_time);
1409 is->frame_timer += delay;
/* resync the timer if we fell badly behind */
1410 if (delay > 0 && time - is->frame_timer > AV_SYNC_THRESHOLD_MAX)
1411 is->frame_timer = time;
1413 SDL_LockMutex(is->pictq_mutex);
1414 if (!redisplay && !isnan(vp->pts))
1415 update_video_pts(is, vp->pts, vp->pos, vp->serial);
1416 SDL_UnlockMutex(is->pictq_mutex);
/* frame dropping: skip this picture if the next one is already due */
1418 if (is->pictq_size > 1) {
1419 VideoPicture *nextvp = &is->pictq[(is->pictq_rindex + 1) % VIDEO_PICTURE_QUEUE_SIZE];
1420 duration = vp_duration(is, vp, nextvp);
1421 if(!is->step && (redisplay || framedrop>0 || (framedrop && get_master_sync_type(is) != AV_SYNC_VIDEO_MASTER)) && time > is->frame_timer + duration){
1423 is->frame_drops_late++;
1424 pictq_next_picture(is);
/* retire subtitles that have expired or been superseded */
1430 if (is->subtitle_st) {
1431 while (is->subpq_size > 0) {
1432 sp = &is->subpq[is->subpq_rindex];
1434 if (is->subpq_size > 1)
1435 sp2 = &is->subpq[(is->subpq_rindex + 1) % SUBPICTURE_QUEUE_SIZE];
1439 if (sp->serial != is->subtitleq.serial
1440 || (is->vidclk.pts > (sp->pts + ((float) sp->sub.end_display_time / 1000)))
1441 || (sp2 && is->vidclk.pts > (sp2->pts + ((float) sp2->sub.start_display_time / 1000))))
1443 free_subpicture(sp);
1445 /* update queue size and signal for next picture */
1446 if (++is->subpq_rindex == SUBPICTURE_QUEUE_SIZE)
1447 is->subpq_rindex = 0;
1449 SDL_LockMutex(is->subpq_mutex);
1451 SDL_CondSignal(is->subpq_cond);
1452 SDL_UnlockMutex(is->subpq_mutex);
1460 /* display picture */
1461 if (!display_disable && is->show_mode == SHOW_MODE_VIDEO)
1464 pictq_next_picture(is);
/* in single-step mode, pause again after showing one frame */
1466 if (is->step && !is->paused)
1467 stream_toggle_pause(is);
1470 is->force_refresh = 0;
/* status line, printed at most every 30ms */
1472 static int64_t last_time;
1474 int aqsize, vqsize, sqsize;
1477 cur_time = av_gettime();
1478 if (!last_time || (cur_time - last_time) >= 30000) {
1483 aqsize = is->audioq.size;
1485 vqsize = is->videoq.size;
1486 if (is->subtitle_st)
1487 sqsize = is->subtitleq.size;
1489 if (is->audio_st && is->video_st)
1490 av_diff = get_clock(&is->audclk) - get_clock(&is->vidclk);
1491 else if (is->video_st)
1492 av_diff = get_master_clock(is) - get_clock(&is->vidclk);
1493 else if (is->audio_st)
1494 av_diff = get_master_clock(is) - get_clock(&is->audclk);
1495 av_log(NULL, AV_LOG_INFO,
1496 "%7.2f %s:%7.3f fd=%4d aq=%5dKB vq=%5dKB sq=%5dB f=%"PRId64"/%"PRId64" \r",
1497 get_master_clock(is),
1498 (is->audio_st && is->video_st) ? "A-V" : (is->video_st ? "M-V" : (is->audio_st ? "M-A" : " ")),
1500 is->frame_drops_early + is->frame_drops_late,
1504 is->video_st ? is->video_st->codec->pts_correction_num_faulty_dts : 0,
1505 is->video_st ? is->video_st->codec->pts_correction_num_faulty_pts : 0);
1507 last_time = cur_time;
1512 /* allocate a picture (needs to do that in main thread to avoid
1513 potential locking problems */
/* Creates the SDL YUV overlay for the picture-queue slot that queue_picture()
 * is about to fill.  Runs in the main (event-loop) thread in response to an
 * FF_ALLOC_EVENT, because SDL overlay creation is only safe there.
 * NOTE(review): original line numbers are non-consecutive in this excerpt;
 * the declarations of vp/bufferdiff, the exit-on-failure path, and the
 * `vp->allocated = 1;` store before the condition signal are not visible
 * here — confirm against the full file. */
1514 static void alloc_picture(VideoState *is)
1519 vp = &is->pictq[is->pictq_windex];
/* (Re)open the video window sized for this picture before creating the overlay. */
1523 video_open(is, 0, vp);
1525 vp->bmp = SDL_CreateYUVOverlay(vp->width, vp->height,
/* Span between the first two plane pointers — used below to detect an overlay
 * whose backing buffer is smaller than the full requested frame. */
1528 bufferdiff = vp->bmp ? FFMAX(vp->bmp->pixels[0], vp->bmp->pixels[1]) - FFMIN(vp->bmp->pixels[0], vp->bmp->pixels[1]) : 0;
1529 if (!vp->bmp || vp->bmp->pitches[0] < vp->width || bufferdiff < (int64_t)vp->height * vp->bmp->pitches[0]) {
1530 /* SDL allocates a buffer smaller than requested if the video
1531 * overlay hardware is unable to support the requested size. */
1532 av_log(NULL, AV_LOG_FATAL,
1533 "Error: the video system does not support an image\n"
1534 "size of %dx%d pixels. Try using -lowres or -vf \"scale=w:h\"\n"
1535 "to reduce the image size.\n", vp->width, vp->height );
/* Wake queue_picture(), which blocks on pictq_cond until vp->allocated is set. */
1539 SDL_LockMutex(is->pictq_mutex);
1541 SDL_CondSignal(is->pictq_cond);
1542 SDL_UnlockMutex(is->pictq_mutex);
/* Workaround for SDL overlays whose pitch is wider than the image: copy the
 * last real pixel of each row into the padding column so scalers/blitters do
 * not read uninitialized bytes at the right border.
 * NOTE(review): the per-plane width/height computation (chroma planes are
 * presumably subsampled), the pointer declarations, and the actual
 * pixel-duplicating store inside the inner loop are elided in this excerpt. */
1545 static void duplicate_right_border_pixels(SDL_Overlay *bmp) {
1546 int i, width, height;
/* One pass per YUV plane. */
1548 for (i = 0; i < 3; i++) {
/* Only planes with trailing padding need the fix-up. */
1555 if (bmp->pitches[i] > width) {
/* maxp: last byte of the plane; walk down one row (pitch) at a time. */
1556 maxp = bmp->pixels[i] + bmp->pitches[i] * height - 1;
1557 for (p = bmp->pixels[i] + width - 1; p < maxp; p += bmp->pitches[i])
/* Converts a decoded video frame to YUV420P inside an SDL overlay and appends
 * it to the picture queue, blocking while the queue is full.  Overlay
 * (re)allocation is delegated to the main thread via FF_ALLOC_EVENT.
 * Returns 0 on success, negative on abort (per the elided return statements).
 * NOTE(review): several lines are elided in this excerpt (local declarations,
 * `vp->allocated = 0` / `vp->reallocate = 0` around the alloc request, the
 * pts/pos stores next to vp->duration, the pictq_size++ before the final
 * unlock, and closing braces) — confirm against the full file. */
1563 static int queue_picture(VideoState *is, AVFrame *src_frame, double pts, double duration, int64_t pos, int serial)
1567 #if defined(DEBUG_SYNC) && 0
1568 printf("frame_type=%c pts=%0.3f\n",
1569 av_get_picture_type_char(src_frame->pict_type), pts);
1572 /* wait until we have space to put a new picture */
1573 SDL_LockMutex(is->pictq_mutex);
1575 /* keep the last already displayed picture in the queue */
1576 while (is->pictq_size >= VIDEO_PICTURE_QUEUE_SIZE - 1 &&
1577 !is->videoq.abort_request) {
1578 SDL_CondWait(is->pictq_cond, is->pictq_mutex);
1580 SDL_UnlockMutex(is->pictq_mutex);
1582 if (is->videoq.abort_request)
1585 vp = &is->pictq[is->pictq_windex];
/* Remember the sample aspect ratio for display-time scaling. */
1587 vp->sar = src_frame->sample_aspect_ratio;
1589 /* alloc or resize hardware picture buffer */
1590 if (!vp->bmp || vp->reallocate || !vp->allocated ||
1591 vp->width != src_frame->width ||
1592 vp->height != src_frame->height) {
1597 vp->width = src_frame->width;
1598 vp->height = src_frame->height;
1600 /* the allocation must be done in the main thread to avoid
1601 locking problems. */
1602 event.type = FF_ALLOC_EVENT;
1603 event.user.data1 = is;
1604 SDL_PushEvent(&event);
1606 /* wait until the picture is allocated */
1607 SDL_LockMutex(is->pictq_mutex);
1608 while (!vp->allocated && !is->videoq.abort_request) {
1609 SDL_CondWait(is->pictq_cond, is->pictq_mutex);
1611 /* if the queue is aborted, we have to pop the pending ALLOC event or wait for the allocation to complete */
1612 if (is->videoq.abort_request && SDL_PeepEvents(&event, 1, SDL_GETEVENT, SDL_EVENTMASK(FF_ALLOC_EVENT)) != 1) {
1613 while (!vp->allocated && !is->abort_request) {
1614 SDL_CondWait(is->pictq_cond, is->pictq_mutex);
1617 SDL_UnlockMutex(is->pictq_mutex);
1619 if (is->videoq.abort_request)
1623 /* if the frame is not skipped, then display it */
1625 AVPicture pict = { { 0 } };
1627 /* get a pointer on the bitmap */
1628 SDL_LockYUVOverlay (vp->bmp);
/* SDL YV12 stores planes as Y,V,U, so U and V are swapped relative to
 * FFmpeg's YUV420P plane order. */
1630 pict.data[0] = vp->bmp->pixels[0];
1631 pict.data[1] = vp->bmp->pixels[2];
1632 pict.data[2] = vp->bmp->pixels[1];
1634 pict.linesize[0] = vp->bmp->pitches[0];
1635 pict.linesize[1] = vp->bmp->pitches[2];
1636 pict.linesize[2] = vp->bmp->pitches[1];
/* Without avfilter the source may be any pix_fmt: convert via swscale.
 * With avfilter (the av_picture_copy branch) the graph already emits
 * YUV420P, so a plain plane copy suffices. */
1639 // FIXME use direct rendering
1640 av_picture_copy(&pict, (AVPicture *)src_frame,
1641 src_frame->format, vp->width, vp->height);
1643 av_opt_get_int(sws_opts, "sws_flags", 0, &sws_flags);
1644 is->img_convert_ctx = sws_getCachedContext(is->img_convert_ctx,
1645 vp->width, vp->height, src_frame->format, vp->width, vp->height,
1646 AV_PIX_FMT_YUV420P, sws_flags, NULL, NULL, NULL);
1647 if (is->img_convert_ctx == NULL) {
1648 av_log(NULL, AV_LOG_FATAL, "Cannot initialize the conversion context\n");
1651 sws_scale(is->img_convert_ctx, src_frame->data, src_frame->linesize,
1652 0, vp->height, pict.data, pict.linesize);
1654 /* workaround SDL PITCH_WORKAROUND */
1655 duplicate_right_border_pixels(vp->bmp);
1656 /* update the bitmap content */
1657 SDL_UnlockYUVOverlay(vp->bmp);
1660 vp->duration = duration;
1662 vp->serial = serial;
1664 /* now we can update the picture count */
1665 if (++is->pictq_windex == VIDEO_PICTURE_QUEUE_SIZE)
1666 is->pictq_windex = 0;
1667 SDL_LockMutex(is->pictq_mutex);
1669 SDL_UnlockMutex(is->pictq_mutex);
/* Pops one packet from the video queue and decodes it into *frame.
 * Handles the special flush packet, applies the -drp (decoder_reorder_pts)
 * pts selection policy, and drops late frames early when -framedrop is
 * active and video is not the master clock.
 * Return value convention (elided returns): negative on abort, 0 when the
 * frame was dropped or not produced, positive when *frame is valid —
 * NOTE(review): confirm against the full file; the return statements are
 * not visible in this excerpt. */
1674 static int get_video_frame(VideoState *is, AVFrame *frame, AVPacket *pkt, int *serial)
1678 if (packet_queue_get(&is->videoq, pkt, 1, serial) < 0)
/* Flush packet: reset decoder state after a seek. */
1681 if (pkt->data == flush_pkt.data) {
1682 avcodec_flush_buffers(is->video_st->codec);
1686 if(avcodec_decode_video2(is->video_st->codec, frame, &got_picture, pkt) < 0)
/* NULL packet with no output means the decoder is fully drained (EOF). */
1689 if (!got_picture && !pkt->data)
1690 is->video_finished = *serial;
/* -drp policy: -1 = best-effort timestamp, 1 = pkt_pts, 0 = pkt_dts. */
1696 if (decoder_reorder_pts == -1) {
1697 frame->pts = av_frame_get_best_effort_timestamp(frame);
1698 } else if (decoder_reorder_pts) {
1699 frame->pts = frame->pkt_pts;
1701 frame->pts = frame->pkt_dts;
1704 if (frame->pts != AV_NOPTS_VALUE)
1705 dpts = av_q2d(is->video_st->time_base) * frame->pts;
1707 frame->sample_aspect_ratio = av_guess_sample_aspect_ratio(is->ic, is->video_st, frame);
/* Early framedrop: discard frames already behind the master clock (within
 * the no-sync threshold, same clock serial, and with packets still queued
 * so playback cannot starve). */
1709 if (framedrop>0 || (framedrop && get_master_sync_type(is) != AV_SYNC_VIDEO_MASTER)) {
1710 if (frame->pts != AV_NOPTS_VALUE) {
1711 double diff = dpts - get_master_clock(is);
1712 if (!isnan(diff) && fabs(diff) < AV_NOSYNC_THRESHOLD &&
1713 diff - is->frame_last_filter_delay < 0 &&
1714 *serial == is->vidclk.serial &&
1715 is->videoq.nb_packets) {
1716 is->frame_drops_early++;
1717 av_frame_unref(frame);
/* Wires source_ctx -> [user filtergraph] -> sink_ctx and configures the
 * graph.  When `filtergraph` is non-NULL it is parsed between the two
 * endpoints; otherwise the endpoints are linked directly (the if/else
 * around the two branches is elided in this excerpt).
 * Returns 0 on success or a negative AVERROR; the inout lists are always
 * freed on exit.
 * NOTE(review): the `int ret;` declaration, the goto-fail jumps, and the
 * `fail:` label are not visible here — confirm against the full file. */
1729 static int configure_filtergraph(AVFilterGraph *graph, const char *filtergraph,
1730 AVFilterContext *source_ctx, AVFilterContext *sink_ctx)
1733 AVFilterInOut *outputs = NULL, *inputs = NULL;
1736 outputs = avfilter_inout_alloc();
1737 inputs = avfilter_inout_alloc();
1738 if (!outputs || !inputs) {
1739 ret = AVERROR(ENOMEM);
/* "in" is what the parsed graph sees as its input: our buffer source. */
1743 outputs->name = av_strdup("in");
1744 outputs->filter_ctx = source_ctx;
1745 outputs->pad_idx = 0;
1746 outputs->next = NULL;
/* "out" is the parsed graph's output: our buffer sink. */
1748 inputs->name = av_strdup("out");
1749 inputs->filter_ctx = sink_ctx;
1750 inputs->pad_idx = 0;
1751 inputs->next = NULL;
1753 if ((ret = avfilter_graph_parse_ptr(graph, filtergraph, &inputs, &outputs, NULL)) < 0)
/* No user filtergraph: connect source directly to sink. */
1756 if ((ret = avfilter_link(source_ctx, 0, sink_ctx, 0)) < 0)
1760 ret = avfilter_graph_config(graph, NULL);
1762 avfilter_inout_free(&outputs);
1763 avfilter_inout_free(&inputs);
/* Builds the video filter chain:
 *   buffer (frame props) -> [user vfilters] -> crop (even dims) -> buffersink
 * forcing YUV420P output for SDL, and stores the endpoints in
 * is->in_video_filter / is->out_video_filter.
 * Returns 0 on success or a negative AVERROR.
 * NOTE(review): `int ret;`, the goto-fail jumps after each creation call,
 * the `fail:` label and final return are elided in this excerpt. */
1767 static int configure_video_filters(AVFilterGraph *graph, VideoState *is, const char *vfilters, AVFrame *frame)
1769 static const enum AVPixelFormat pix_fmts[] = { AV_PIX_FMT_YUV420P, AV_PIX_FMT_NONE };
1770 char sws_flags_str[128];
1771 char buffersrc_args[256];
1773 AVFilterContext *filt_src = NULL, *filt_out = NULL, *filt_crop;
1774 AVCodecContext *codec = is->video_st->codec;
1775 AVRational fr = av_guess_frame_rate(is->ic, is->video_st, NULL);
/* Propagate the global -sws_flags into any auto-inserted scale filters. */
1777 av_opt_get_int(sws_opts, "sws_flags", 0, &sws_flags);
1778 snprintf(sws_flags_str, sizeof(sws_flags_str), "flags=%"PRId64, sws_flags);
1779 graph->scale_sws_opts = av_strdup(sws_flags_str);
/* Describe the incoming frames to the buffer source; FFMAX guards against a
 * zero SAR denominator. */
1781 snprintf(buffersrc_args, sizeof(buffersrc_args),
1782 "video_size=%dx%d:pix_fmt=%d:time_base=%d/%d:pixel_aspect=%d/%d",
1783 frame->width, frame->height, frame->format,
1784 is->video_st->time_base.num, is->video_st->time_base.den,
1785 codec->sample_aspect_ratio.num, FFMAX(codec->sample_aspect_ratio.den, 1));
1786 if (fr.num && fr.den)
1787 av_strlcatf(buffersrc_args, sizeof(buffersrc_args), ":frame_rate=%d/%d", fr.num, fr.den);
1789 if ((ret = avfilter_graph_create_filter(&filt_src,
1790 avfilter_get_by_name("buffer"),
1791 "ffplay_buffer", buffersrc_args, NULL,
1795 ret = avfilter_graph_create_filter(&filt_out,
1796 avfilter_get_by_name("buffersink"),
1797 "ffplay_buffersink", NULL, NULL, graph);
/* Restrict sink output to YUV420P, which the SDL overlay path expects. */
1801 if ((ret = av_opt_set_int_list(filt_out, "pix_fmts", pix_fmts, AV_PIX_FMT_NONE, AV_OPT_SEARCH_CHILDREN)) < 0)
1804 /* SDL YUV code is not handling odd width/height for some driver
1805 * combinations, therefore we crop the picture to an even width/height. */
1806 if ((ret = avfilter_graph_create_filter(&filt_crop,
1807 avfilter_get_by_name("crop"),
1808 "ffplay_crop", "floor(in_w/2)*2:floor(in_h/2)*2", NULL, graph)) < 0)
1810 if ((ret = avfilter_link(filt_crop, 0, filt_out, 0)) < 0)
/* User filters go between the buffer source and the crop filter. */
1813 if ((ret = configure_filtergraph(graph, vfilters, filt_src, filt_crop)) < 0)
1816 is->in_video_filter = filt_src;
1817 is->out_video_filter = filt_out;
/* (Re)builds the audio filter graph is->agraph:
 *   abuffer (decoder format) -> [user afilters] -> abuffersink (S16)
 * When force_output_format is set, the sink is additionally constrained to
 * the SDL output format in is->audio_tgt so the graph performs the final
 * resample/remix.  Returns 0 on success or a negative AVERROR; on failure
 * the graph is freed.
 * NOTE(review): `int ret;`, the goto-fail/`end:` jumps after each failing
 * call, and the final return are elided in this excerpt. */
1823 static int configure_audio_filters(VideoState *is, const char *afilters, int force_output_format)
1825 static const enum AVSampleFormat sample_fmts[] = { AV_SAMPLE_FMT_S16, AV_SAMPLE_FMT_NONE };
/* {value, -1}: -1 terminates the lists passed to av_opt_set_int_list(). */
1826 int sample_rates[2] = { 0, -1 };
1827 int64_t channel_layouts[2] = { 0, -1 };
1828 int channels[2] = { 0, -1 };
1829 AVFilterContext *filt_asrc = NULL, *filt_asink = NULL;
1830 char aresample_swr_opts[512] = "";
1831 AVDictionaryEntry *e = NULL;
1832 char asrc_args[256];
/* Rebuild from scratch: drop any previous graph first. */
1835 avfilter_graph_free(&is->agraph);
1836 if (!(is->agraph = avfilter_graph_alloc()))
1837 return AVERROR(ENOMEM);
/* Forward user -swr options to auto-inserted aresample filters as a
 * colon-separated key=value string (trailing ':' stripped below). */
1839 while ((e = av_dict_get(swr_opts, "", e, AV_DICT_IGNORE_SUFFIX)))
1840 av_strlcatf(aresample_swr_opts, sizeof(aresample_swr_opts), "%s=%s:", e->key, e->value);
1841 if (strlen(aresample_swr_opts))
1842 aresample_swr_opts[strlen(aresample_swr_opts)-1] = '\0';
1843 av_opt_set(is->agraph, "aresample_swr_opts", aresample_swr_opts, 0);
/* Describe the decoder's output to the abuffer source; time_base is
 * 1/sample_rate so pts are in samples. */
1845 ret = snprintf(asrc_args, sizeof(asrc_args),
1846 "sample_rate=%d:sample_fmt=%s:channels=%d:time_base=%d/%d",
1847 is->audio_filter_src.freq, av_get_sample_fmt_name(is->audio_filter_src.fmt),
1848 is->audio_filter_src.channels,
1849 1, is->audio_filter_src.freq);
1850 if (is->audio_filter_src.channel_layout)
1851 snprintf(asrc_args + ret, sizeof(asrc_args) - ret,
1852 ":channel_layout=0x%"PRIx64, is->audio_filter_src.channel_layout);
1854 ret = avfilter_graph_create_filter(&filt_asrc,
1855 avfilter_get_by_name("abuffer"), "ffplay_abuffer",
1856 asrc_args, NULL, is->agraph);
1861 ret = avfilter_graph_create_filter(&filt_asink,
1862 avfilter_get_by_name("abuffersink"), "ffplay_abuffersink",
1863 NULL, NULL, is->agraph);
/* Sink always emits packed S16; any channel count is accepted by default. */
1867 if ((ret = av_opt_set_int_list(filt_asink, "sample_fmts", sample_fmts, AV_SAMPLE_FMT_NONE, AV_OPT_SEARCH_CHILDREN)) < 0)
1869 if ((ret = av_opt_set_int(filt_asink, "all_channel_counts", 1, AV_OPT_SEARCH_CHILDREN)) < 0)
/* Pin the sink to the SDL device format so the graph does the conversion. */
1872 if (force_output_format) {
1873 channel_layouts[0] = is->audio_tgt.channel_layout;
1874 channels [0] = is->audio_tgt.channels;
1875 sample_rates [0] = is->audio_tgt.freq;
1876 if ((ret = av_opt_set_int(filt_asink, "all_channel_counts", 0, AV_OPT_SEARCH_CHILDREN)) < 0)
1878 if ((ret = av_opt_set_int_list(filt_asink, "channel_layouts", channel_layouts, -1, AV_OPT_SEARCH_CHILDREN)) < 0)
1880 if ((ret = av_opt_set_int_list(filt_asink, "channel_counts" , channels , -1, AV_OPT_SEARCH_CHILDREN)) < 0)
1882 if ((ret = av_opt_set_int_list(filt_asink, "sample_rates" , sample_rates , -1, AV_OPT_SEARCH_CHILDREN)) < 0)
1887 if ((ret = configure_filtergraph(is->agraph, afilters, filt_asrc, filt_asink)) < 0)
1890 is->in_audio_filter = filt_asrc;
1891 is->out_audio_filter = filt_asink;
/* Error path: tear the half-built graph down. */
1895 avfilter_graph_free(&is->agraph);
1898 #endif /* CONFIG_AVFILTER */
/* Video decoding thread: repeatedly pulls packets, decodes frames
 * (get_video_frame), pushes them through the avfilter graph, and hands the
 * filtered frames to queue_picture().  The filter graph is rebuilt whenever
 * the frame size, pixel format, or packet serial changes (e.g. after a seek
 * or a mid-stream parameter change).
 * NOTE(review): loop braces, `goto the_end` jumps, the declarations of
 * ret/serial/last_w/last_h/pts/duration/event, and the `the_end:` label are
 * elided in this excerpt — structure inferred from the visible lines. */
1900 static int video_thread(void *arg)
1902 AVPacket pkt = { 0 };
1903 VideoState *is = arg;
1904 AVFrame *frame = av_frame_alloc();
1909 AVRational tb = is->video_st->time_base;
1910 AVRational frame_rate = av_guess_frame_rate(is->ic, is->video_st, NULL);
1913 AVFilterGraph *graph = avfilter_graph_alloc();
1914 AVFilterContext *filt_out = NULL, *filt_in = NULL;
/* -2 is not a valid AVPixelFormat, so the first frame always triggers a
 * (re)configuration of the filter graph. */
1917 enum AVPixelFormat last_format = -2;
1918 int last_serial = -1;
/* Busy-wait (with delay, elided) while playback is paused. */
1922 while (is->paused && !is->videoq.abort_request)
1925 av_free_packet(&pkt);
1927 ret = get_video_frame(is, frame, &pkt, &serial);
/* Rebuild the filter graph when the input parameters changed. */
1934 if ( last_w != frame->width
1935 || last_h != frame->height
1936 || last_format != frame->format
1937 || last_serial != serial) {
1938 av_log(NULL, AV_LOG_DEBUG,
1939 "Video frame changed from size:%dx%d format:%s serial:%d to size:%dx%d format:%s serial:%d\n",
1941 (const char *)av_x_if_null(av_get_pix_fmt_name(last_format), "none"), last_serial,
1942 frame->width, frame->height,
1943 (const char *)av_x_if_null(av_get_pix_fmt_name(frame->format), "none"), serial);
1944 avfilter_graph_free(&graph);
1945 graph = avfilter_graph_alloc();
/* Filter setup failure is fatal for playback: tell the main loop to quit. */
1946 if ((ret = configure_video_filters(graph, is, vfilters, frame)) < 0) {
1948 event.type = FF_QUIT_EVENT;
1949 event.user.data1 = is;
1950 SDL_PushEvent(&event);
1953 filt_in = is->in_video_filter;
1954 filt_out = is->out_video_filter;
1955 last_w = frame->width;
1956 last_h = frame->height;
1957 last_format = frame->format;
1958 last_serial = serial;
1959 frame_rate = filt_out->inputs[0]->frame_rate;
1962 ret = av_buffersrc_add_frame(filt_in, frame);
/* Measure how long the graph holds a frame, to compensate framedrop timing. */
1967 is->frame_last_returned_time = av_gettime() / 1000000.0;
1969 ret = av_buffersink_get_frame_flags(filt_out, frame, 0);
1971 if (ret == AVERROR_EOF)
1972 is->video_finished = serial;
1977 is->frame_last_filter_delay = av_gettime() / 1000000.0 - is->frame_last_returned_time;
/* A huge "delay" means the clock jumped (e.g. after seek); ignore it. */
1978 if (fabs(is->frame_last_filter_delay) > AV_NOSYNC_THRESHOLD / 10.0)
1979 is->frame_last_filter_delay = 0;
1980 tb = filt_out->inputs[0]->time_base;
/* Nominal frame duration = 1/frame_rate when known, else 0. */
1982 duration = (frame_rate.num && frame_rate.den ? av_q2d((AVRational){frame_rate.den, frame_rate.num}) : 0);
1983 pts = (frame->pts == AV_NOPTS_VALUE) ? NAN : frame->pts * av_q2d(tb);
1984 ret = queue_picture(is, frame, pts, duration, av_frame_get_pkt_pos(frame), serial);
1985 av_frame_unref(frame);
/* Cleanup path (label elided): free graph, pending packet and frame. */
1995 avfilter_graph_free(&graph);
1997 av_free_packet(&pkt);
1998 av_frame_free(&frame);
/* Subtitle decoding thread: pulls subtitle packets, decodes them into the
 * subpicture queue, and pre-converts bitmap-subtitle palettes from RGBA to
 * CCIR YUVA so blending onto the video overlay is cheap at display time.
 * NOTE(review): loop/brace structure, the declarations of sp/pts/serial/
 * got_subtitle/i/j, the pts store and subpq_size++ near the windex update,
 * and several break/continue lines are elided in this excerpt. */
2002 static int subtitle_thread(void *arg)
2004 VideoState *is = arg;
2006 AVPacket pkt1, *pkt = &pkt1;
2011 int r, g, b, y, u, v, a;
/* Wait (with delay, elided) while paused. */
2014 while (is->paused && !is->subtitleq.abort_request) {
2017 if (packet_queue_get(&is->subtitleq, pkt, 1, &serial) < 0)
/* Flush packet after a seek: reset the decoder. */
2020 if (pkt->data == flush_pkt.data) {
2021 avcodec_flush_buffers(is->subtitle_st->codec);
/* Block until a subpicture slot is free. */
2024 SDL_LockMutex(is->subpq_mutex);
2025 while (is->subpq_size >= SUBPICTURE_QUEUE_SIZE &&
2026 !is->subtitleq.abort_request) {
2027 SDL_CondWait(is->subpq_cond, is->subpq_mutex);
2029 SDL_UnlockMutex(is->subpq_mutex);
2031 if (is->subtitleq.abort_request)
2034 sp = &is->subpq[is->subpq_windex];
2036 /* NOTE: ipts is the PTS of the _first_ picture beginning in
2037 this packet, if any */
2039 if (pkt->pts != AV_NOPTS_VALUE)
2040 pts = av_q2d(is->subtitle_st->time_base) * pkt->pts;
2042 avcodec_decode_subtitle2(is->subtitle_st->codec, &sp->sub,
2043 &got_subtitle, pkt);
/* format == 0 means bitmap (graphical) subtitles. */
2044 if (got_subtitle && sp->sub.format == 0) {
2045 if (sp->sub.pts != AV_NOPTS_VALUE)
2046 pts = sp->sub.pts / (double)AV_TIME_BASE;
2048 sp->serial = serial;
/* Convert each rect's RGBA palette entries to CCIR-range YUVA in place. */
2050 for (i = 0; i < sp->sub.num_rects; i++)
2052 for (j = 0; j < sp->sub.rects[i]->nb_colors; j++)
2054 RGBA_IN(r, g, b, a, (uint32_t*)sp->sub.rects[i]->pict.data[1] + j);
2055 y = RGB_TO_Y_CCIR(r, g, b);
2056 u = RGB_TO_U_CCIR(r, g, b, 0);
2057 v = RGB_TO_V_CCIR(r, g, b, 0);
2058 YUVA_OUT((uint32_t*)sp->sub.rects[i]->pict.data[1] + j, y, u, v, a);
2062 /* now we can update the picture count */
2063 if (++is->subpq_windex == SUBPICTURE_QUEUE_SIZE)
2064 is->subpq_windex = 0;
2065 SDL_LockMutex(is->subpq_mutex);
2067 SDL_UnlockMutex(is->subpq_mutex);
/* Non-bitmap subtitles (e.g. text) are not displayed by this path. */
2068 } else if (got_subtitle) {
2069 avsubtitle_free(&sp->sub);
2071 av_free_packet(pkt);
2076 /* copy samples for viewing in editor window */
/* Appends decoded audio samples into the circular is->sample_array buffer
 * used by the waveform/RDFT visualization.
 * NOTE(review): the declarations of size/len, the FFMIN clamp of len, and
 * the loop that wraps around the array are elided in this excerpt. */
2077 static void update_sample_display(VideoState *is, short *samples, int samples_size)
2081 size = samples_size / sizeof(short);
/* Room left before the write index wraps. */
2083 len = SAMPLE_ARRAY_SIZE - is->sample_array_index;
2086 memcpy(is->sample_array + is->sample_array_index, samples, len * sizeof(short));
2088 is->sample_array_index += len;
2089 if (is->sample_array_index >= SAMPLE_ARRAY_SIZE)
2090 is->sample_array_index = 0;
2095 /* return the wanted number of samples to get better sync if sync_type is video
2096 * or external master clock */
/* When audio is NOT the master clock, nudges the number of samples to output
 * so the audio clock slowly converges to the master clock.  Uses an
 * exponentially weighted average of the clock difference and clamps the
 * correction to SAMPLE_CORRECTION_PERCENT_MAX of the nominal count.
 * Returns nb_samples unchanged when audio is master or the diff is too big.
 * NOTE(review): some closing braces and the `} else {` of the big-difference
 * branch are elided in this excerpt. */
2097 static int synchronize_audio(VideoState *is, int nb_samples)
2099 int wanted_nb_samples = nb_samples;
2101 /* if not master, then we try to remove or add samples to correct the clock */
2102 if (get_master_sync_type(is) != AV_SYNC_AUDIO_MASTER) {
2103 double diff, avg_diff;
2104 int min_nb_samples, max_nb_samples;
/* Positive diff: audio is ahead of the master clock. */
2106 diff = get_clock(&is->audclk) - get_master_clock(is);
2108 if (!isnan(diff) && fabs(diff) < AV_NOSYNC_THRESHOLD) {
/* EWMA accumulator: old value decays by audio_diff_avg_coef each update. */
2109 is->audio_diff_cum = diff + is->audio_diff_avg_coef * is->audio_diff_cum;
2110 if (is->audio_diff_avg_count < AUDIO_DIFF_AVG_NB) {
2111 /* not enough measures to have a correct estimate */
2112 is->audio_diff_avg_count++;
2114 /* estimate the A-V difference */
2115 avg_diff = is->audio_diff_cum * (1.0 - is->audio_diff_avg_coef);
2117 if (fabs(avg_diff) >= is->audio_diff_threshold) {
2118 wanted_nb_samples = nb_samples + (int)(diff * is->audio_src.freq);
/* Clamp the correction to ±SAMPLE_CORRECTION_PERCENT_MAX percent. */
2119 min_nb_samples = ((nb_samples * (100 - SAMPLE_CORRECTION_PERCENT_MAX) / 100));
2120 max_nb_samples = ((nb_samples * (100 + SAMPLE_CORRECTION_PERCENT_MAX) / 100));
2121 wanted_nb_samples = FFMIN(FFMAX(wanted_nb_samples, min_nb_samples), max_nb_samples);
2123 av_dlog(NULL, "diff=%f adiff=%f sample_diff=%d apts=%0.3f %f\n",
2124 diff, avg_diff, wanted_nb_samples - nb_samples,
2125 is->audio_clock, is->audio_diff_threshold);
2128 /* too big difference : may be initial PTS errors, so
2130 is->audio_diff_avg_count = 0;
2131 is->audio_diff_cum = 0;
2135 return wanted_nb_samples;
2139 * Decode one audio frame and return its uncompressed size.
2141 * The processed audio frame is decoded, converted if required, and
2142 * stored in is->audio_buf, with size in bytes given by the return
/* Main audio pipeline step, driven by sdl_audio_callback():
 *  1. decode packets into is->frame (several frames per packet possible);
 *  2. derive a pts for the frame (stream pts, pkt_pts, or extrapolated);
 *  3. (CONFIG_AVFILTER) push through the audio filter graph, rebuilding it
 *     when the decoder's output format changes;
 *  4. resample via swresample to the SDL device format when needed, with
 *     sample-count compensation from synchronize_audio();
 *  5. update is->audio_clock from the frame pts.
 * Returns the byte size of the data left in is->audio_buf, or negative on
 * abort/error.
 * NOTE(review): many structural lines are elided in this excerpt — loop
 * braces, `break`/`goto` targets, some declarations (got_frame, reconfigure,
 * len2, tb, ret) and several return statements.  Comments below describe
 * only what the visible lines show. */
2145 static int audio_decode_frame(VideoState *is)
2147 AVPacket *pkt_temp = &is->audio_pkt_temp;
2148 AVPacket *pkt = &is->audio_pkt;
2149 AVCodecContext *dec = is->audio_st->codec;
2150 int len1, data_size, resampled_data_size;
2151 int64_t dec_channel_layout;
2153 av_unused double audio_clock0;
2154 int wanted_nb_samples;
2160 /* NOTE: the audio packet can contain several frames */
/* stream_index == -1 marks pkt_temp as consumed; keep looping while data
 * remains or the filter graph still has buffered frames. */
2161 while (pkt_temp->stream_index != -1 || is->audio_buf_frames_pending) {
2163 if (!(is->frame = av_frame_alloc()))
2164 return AVERROR(ENOMEM);
2166 av_frame_unref(is->frame);
/* Packet serial mismatch: data predates the last seek — discard it. */
2169 if (is->audioq.serial != is->audio_pkt_temp_serial)
2175 if (!is->audio_buf_frames_pending) {
2176 len1 = avcodec_decode_audio4(dec, is->frame, &got_frame, pkt_temp);
2178 /* if error, we skip the frame */
/* Advance pkt_temp past the consumed bytes; only the first decode of a
 * packet may use its pts. */
2184 pkt_temp->pts = AV_NOPTS_VALUE;
2185 pkt_temp->data += len1;
2186 pkt_temp->size -= len1;
2187 if (pkt_temp->data && pkt_temp->size <= 0 || !pkt_temp->data && !got_frame)
2188 pkt_temp->stream_index = -1;
/* NULL packet with no frame out: decoder fully drained (EOF). */
2189 if (!pkt_temp->data && !got_frame)
2190 is->audio_finished = is->audio_pkt_temp_serial;
/* pts selection: decoder pts, else packet pts, else extrapolate from the
 * end of the previous frame (audio_frame_next_pts). */
2195 tb = (AVRational){1, is->frame->sample_rate};
2196 if (is->frame->pts != AV_NOPTS_VALUE)
2197 is->frame->pts = av_rescale_q(is->frame->pts, dec->time_base, tb);
2198 else if (is->frame->pkt_pts != AV_NOPTS_VALUE)
2199 is->frame->pts = av_rescale_q(is->frame->pkt_pts, is->audio_st->time_base, tb);
2200 else if (is->audio_frame_next_pts != AV_NOPTS_VALUE)
2202 is->frame->pts = av_rescale_q(is->audio_frame_next_pts, (AVRational){1, is->audio_filter_src.freq}, tb);
2204 is->frame->pts = av_rescale_q(is->audio_frame_next_pts, (AVRational){1, is->audio_src.freq}, tb);
2207 if (is->frame->pts != AV_NOPTS_VALUE)
2208 is->audio_frame_next_pts = is->frame->pts + is->frame->nb_samples;
/* CONFIG_AVFILTER path: rebuild the graph when the decoded format differs
 * from what the abuffer source was configured with. */
2211 dec_channel_layout = get_valid_channel_layout(is->frame->channel_layout, av_frame_get_channels(is->frame));
2214 cmp_audio_fmts(is->audio_filter_src.fmt, is->audio_filter_src.channels,
2215 is->frame->format, av_frame_get_channels(is->frame)) ||
2216 is->audio_filter_src.channel_layout != dec_channel_layout ||
2217 is->audio_filter_src.freq != is->frame->sample_rate ||
2218 is->audio_pkt_temp_serial != is->audio_last_serial;
2221 char buf1[1024], buf2[1024];
2222 av_get_channel_layout_string(buf1, sizeof(buf1), -1, is->audio_filter_src.channel_layout);
2223 av_get_channel_layout_string(buf2, sizeof(buf2), -1, dec_channel_layout);
2224 av_log(NULL, AV_LOG_DEBUG,
2225 "Audio frame changed from rate:%d ch:%d fmt:%s layout:%s serial:%d to rate:%d ch:%d fmt:%s layout:%s serial:%d\n",
2226 is->audio_filter_src.freq, is->audio_filter_src.channels, av_get_sample_fmt_name(is->audio_filter_src.fmt), buf1, is->audio_last_serial,
2227 is->frame->sample_rate, av_frame_get_channels(is->frame), av_get_sample_fmt_name(is->frame->format), buf2, is->audio_pkt_temp_serial);
2229 is->audio_filter_src.fmt = is->frame->format;
2230 is->audio_filter_src.channels = av_frame_get_channels(is->frame);
2231 is->audio_filter_src.channel_layout = dec_channel_layout;
2232 is->audio_filter_src.freq = is->frame->sample_rate;
2233 is->audio_last_serial = is->audio_pkt_temp_serial;
2235 if ((ret = configure_audio_filters(is, afilters, 1)) < 0)
2239 if ((ret = av_buffersrc_add_frame(is->in_audio_filter, is->frame)) < 0)
/* Drain one filtered frame; EAGAIN means the graph needs more input. */
2244 if ((ret = av_buffersink_get_frame_flags(is->out_audio_filter, is->frame, 0)) < 0) {
2245 if (ret == AVERROR(EAGAIN)) {
2246 is->audio_buf_frames_pending = 0;
2249 if (ret == AVERROR_EOF)
2250 is->audio_finished = is->audio_pkt_temp_serial;
/* More frames may still be buffered in the graph for this input. */
2253 is->audio_buf_frames_pending = 1;
2254 tb = is->out_audio_filter->inputs[0]->time_base;
2257 data_size = av_samples_get_buffer_size(NULL, av_frame_get_channels(is->frame),
2258 is->frame->nb_samples,
2259 is->frame->format, 1);
/* Trust the frame's layout only if it matches its channel count. */
2261 dec_channel_layout =
2262 (is->frame->channel_layout && av_frame_get_channels(is->frame) == av_get_channel_layout_nb_channels(is->frame->channel_layout)) ?
2263 is->frame->channel_layout : av_get_default_channel_layout(av_frame_get_channels(is->frame));
2264 wanted_nb_samples = synchronize_audio(is, is->frame->nb_samples);
/* (Re)create the resampler when the source format changed, or when sample
 * compensation is requested and no resampler exists yet. */
2266 if (is->frame->format != is->audio_src.fmt ||
2267 dec_channel_layout != is->audio_src.channel_layout ||
2268 is->frame->sample_rate != is->audio_src.freq ||
2269 (wanted_nb_samples != is->frame->nb_samples && !is->swr_ctx)) {
2270 swr_free(&is->swr_ctx);
2271 is->swr_ctx = swr_alloc_set_opts(NULL,
2272 is->audio_tgt.channel_layout, is->audio_tgt.fmt, is->audio_tgt.freq,
2273 dec_channel_layout, is->frame->format, is->frame->sample_rate,
2275 if (!is->swr_ctx || swr_init(is->swr_ctx) < 0) {
2276 av_log(NULL, AV_LOG_ERROR,
2277 "Cannot create sample rate converter for conversion of %d Hz %s %d channels to %d Hz %s %d channels!\n",
2278 is->frame->sample_rate, av_get_sample_fmt_name(is->frame->format), av_frame_get_channels(is->frame),
2279 is->audio_tgt.freq, av_get_sample_fmt_name(is->audio_tgt.fmt), is->audio_tgt.channels);
2282 is->audio_src.channel_layout = dec_channel_layout;
2283 is->audio_src.channels = av_frame_get_channels(is->frame);
2284 is->audio_src.freq = is->frame->sample_rate;
2285 is->audio_src.fmt = is->frame->format;
2289 const uint8_t **in = (const uint8_t **)is->frame->extended_data;
2290 uint8_t **out = &is->audio_buf1;
/* +256: headroom for resampler delay so the output buffer cannot overflow. */
2291 int out_count = (int64_t)wanted_nb_samples * is->audio_tgt.freq / is->frame->sample_rate + 256;
2292 int out_size = av_samples_get_buffer_size(NULL, is->audio_tgt.channels, out_count, is->audio_tgt.fmt, 0);
2295 av_log(NULL, AV_LOG_ERROR, "av_samples_get_buffer_size() failed\n");
/* Apply the sample-count correction computed by synchronize_audio(). */
2298 if (wanted_nb_samples != is->frame->nb_samples) {
2299 if (swr_set_compensation(is->swr_ctx, (wanted_nb_samples - is->frame->nb_samples) * is->audio_tgt.freq / is->frame->sample_rate,
2300 wanted_nb_samples * is->audio_tgt.freq / is->frame->sample_rate) < 0) {
2301 av_log(NULL, AV_LOG_ERROR, "swr_set_compensation() failed\n");
2305 av_fast_malloc(&is->audio_buf1, &is->audio_buf1_size, out_size);
2306 if (!is->audio_buf1)
2307 return AVERROR(ENOMEM);
2308 len2 = swr_convert(is->swr_ctx, out, out_count, in, is->frame->nb_samples);
2310 av_log(NULL, AV_LOG_ERROR, "swr_convert() failed\n");
/* Filled the whole buffer: the headroom guess was too small; reset swr. */
2313 if (len2 == out_count) {
2314 av_log(NULL, AV_LOG_WARNING, "audio buffer is probably too small\n");
2315 swr_init(is->swr_ctx);
2317 is->audio_buf = is->audio_buf1;
2318 resampled_data_size = len2 * is->audio_tgt.channels * av_get_bytes_per_sample(is->audio_tgt.fmt);
/* No conversion needed: expose the decoder's buffer directly. */
2320 is->audio_buf = is->frame->data[0];
2321 resampled_data_size = data_size;
2324 audio_clock0 = is->audio_clock;
2325 /* update the audio clock with the pts */
/* Clock points at the END of this frame (pts + frame length). */
2326 if (is->frame->pts != AV_NOPTS_VALUE)
2327 is->audio_clock = is->frame->pts * av_q2d(tb) + (double) is->frame->nb_samples / is->frame->sample_rate;
2329 is->audio_clock = NAN;
2330 is->audio_clock_serial = is->audio_pkt_temp_serial;
2333 static double last_clock;
2334 printf("audio: delay=%0.3f clock=%0.3f clock0=%0.3f\n",
2335 is->audio_clock - last_clock,
2336 is->audio_clock, audio_clock0);
2337 last_clock = is->audio_clock;
2340 return resampled_data_size;
2343 /* free the current packet */
2345 av_free_packet(pkt);
2346 memset(pkt_temp, 0, sizeof(*pkt_temp));
2347 pkt_temp->stream_index = -1;
2349 if (is->audioq.abort_request) {
/* Queue empty: wake the read thread so it refills buffers. */
2353 if (is->audioq.nb_packets == 0)
2354 SDL_CondSignal(is->continue_read_thread);
2356 /* read next packet */
2357 if ((packet_queue_get(&is->audioq, pkt, 1, &is->audio_pkt_temp_serial)) < 0)
/* Flush packet after a seek: reset the decoder and pts extrapolation. */
2360 if (pkt->data == flush_pkt.data) {
2361 avcodec_flush_buffers(dec);
2362 is->audio_buf_frames_pending = 0;
2363 is->audio_frame_next_pts = AV_NOPTS_VALUE;
2364 if ((is->ic->iformat->flags & (AVFMT_NOBINSEARCH | AVFMT_NOGENSEARCH | AVFMT_NO_BYTE_SEEK)) && !is->ic->iformat->read_seek)
2365 is->audio_frame_next_pts = is->audio_st->start_time;
2372 /* prepare a new audio buffer */
/* SDL audio callback: fills `stream` with `len` bytes, pulling data from
 * audio_decode_frame() as is->audio_buf drains and substituting silence on
 * decode errors.  Also feeds the waveform display and updates the audio
 * clock, compensating for the data still sitting in SDL's hardware buffer.
 * NOTE(review): the `while (len > 0)` loop header, the `if (len1 > len)`
 * clamp, and the stream/len advance lines are elided in this excerpt. */
2373 static void sdl_audio_callback(void *opaque, Uint8 *stream, int len)
2375 VideoState *is = opaque;
2376 int audio_size, len1;
/* Timestamp of this callback, used as the reference time for the clock. */
2378 audio_callback_time = av_gettime();
2381 if (is->audio_buf_index >= is->audio_buf_size) {
2382 audio_size = audio_decode_frame(is);
2383 if (audio_size < 0) {
2384 /* if error, just output silence */
/* Round the silence down to a whole number of sample frames. */
2385 is->audio_buf = is->silence_buf;
2386 is->audio_buf_size = sizeof(is->silence_buf) / is->audio_tgt.frame_size * is->audio_tgt.frame_size;
2388 if (is->show_mode != SHOW_MODE_VIDEO)
2389 update_sample_display(is, (int16_t *)is->audio_buf, audio_size);
2390 is->audio_buf_size = audio_size;
2392 is->audio_buf_index = 0;
2394 len1 = is->audio_buf_size - is->audio_buf_index;
2397 memcpy(stream, (uint8_t *)is->audio_buf + is->audio_buf_index, len1);
2400 is->audio_buf_index += len1;
2402 is->audio_write_buf_size = is->audio_buf_size - is->audio_buf_index;
2403 /* Let's assume the audio driver that is used by SDL has two periods. */
/* Back-date the clock by the bytes not yet played (2 HW periods + what is
 * still unread in audio_buf). */
2404 if (!isnan(is->audio_clock)) {
2405 set_clock_at(&is->audclk, is->audio_clock - (double)(2 * is->audio_hw_buf_size + is->audio_write_buf_size) / is->audio_tgt.bytes_per_sec, is->audio_clock_serial, audio_callback_time / 1000000.0);
2406 sync_clock_to_slave(&is->extclk, &is->audclk);
/* Opens the SDL audio device at (or near) the requested layout/rate,
 * retrying with progressively fewer channels on failure, and fills
 * *audio_hw_params with the format actually granted (always S16).
 * Returns the hardware buffer size in bytes on success (elided return),
 * negative on failure.
 * NOTE(review): the `const char *env;` declaration, some closing braces and
 * the `return -1;`/final return lines are elided in this excerpt. */
2410 static int audio_open(void *opaque, int64_t wanted_channel_layout, int wanted_nb_channels, int wanted_sample_rate, struct AudioParams *audio_hw_params)
2412 SDL_AudioSpec wanted_spec, spec;
/* Fallback ladder indexed by current channel count: e.g. 5.1 -> stereo. */
2414 static const int next_nb_channels[] = {0, 0, 1, 6, 2, 6, 4, 6};
/* Allow the user to force a channel count via the environment. */
2416 env = SDL_getenv("SDL_AUDIO_CHANNELS");
2418 wanted_nb_channels = atoi(env);
2419 wanted_channel_layout = av_get_default_channel_layout(wanted_nb_channels);
/* Normalize an inconsistent layout/count pair to a default layout. */
2421 if (!wanted_channel_layout || wanted_nb_channels != av_get_channel_layout_nb_channels(wanted_channel_layout)) {
2422 wanted_channel_layout = av_get_default_channel_layout(wanted_nb_channels);
2423 wanted_channel_layout &= ~AV_CH_LAYOUT_STEREO_DOWNMIX;
2425 wanted_spec.channels = av_get_channel_layout_nb_channels(wanted_channel_layout);
2426 wanted_spec.freq = wanted_sample_rate;
2427 if (wanted_spec.freq <= 0 || wanted_spec.channels <= 0) {
2428 av_log(NULL, AV_LOG_ERROR, "Invalid sample rate or channel count!\n");
2431 wanted_spec.format = AUDIO_S16SYS;
2432 wanted_spec.silence = 0;
2433 wanted_spec.samples = SDL_AUDIO_BUFFER_SIZE;
2434 wanted_spec.callback = sdl_audio_callback;
2435 wanted_spec.userdata = opaque;
/* Retry with the next-smaller channel configuration until SDL accepts. */
2436 while (SDL_OpenAudio(&wanted_spec, &spec) < 0) {
2437 av_log(NULL, AV_LOG_WARNING, "SDL_OpenAudio (%d channels): %s\n", wanted_spec.channels, SDL_GetError());
2438 wanted_spec.channels = next_nb_channels[FFMIN(7, wanted_spec.channels)];
2439 if (!wanted_spec.channels) {
2440 av_log(NULL, AV_LOG_ERROR,
2441 "No more channel combinations to try, audio open failed\n");
2444 wanted_channel_layout = av_get_default_channel_layout(wanted_spec.channels);
/* This code only supports S16 output; bail if SDL negotiated otherwise. */
2446 if (spec.format != AUDIO_S16SYS) {
2447 av_log(NULL, AV_LOG_ERROR,
2448 "SDL advised audio format %d is not supported!\n", spec.format);
2451 if (spec.channels != wanted_spec.channels) {
2452 wanted_channel_layout = av_get_default_channel_layout(spec.channels);
2453 if (!wanted_channel_layout) {
2454 av_log(NULL, AV_LOG_ERROR,
2455 "SDL advised channel count %d is not supported!\n", spec.channels);
/* Record the format actually granted by SDL. */
2460 audio_hw_params->fmt = AV_SAMPLE_FMT_S16;
2461 audio_hw_params->freq = spec.freq;
2462 audio_hw_params->channel_layout = wanted_channel_layout;
2463 audio_hw_params->channels = spec.channels;
2464 audio_hw_params->frame_size = av_samples_get_buffer_size(NULL, audio_hw_params->channels, 1, audio_hw_params->fmt, 1);
2465 audio_hw_params->bytes_per_sec = av_samples_get_buffer_size(NULL, audio_hw_params->channels, audio_hw_params->freq, audio_hw_params->fmt, 1);
2466 if (audio_hw_params->bytes_per_sec <= 0 || audio_hw_params->frame_size <= 0) {
2467 av_log(NULL, AV_LOG_ERROR, "av_samples_get_buffer_size failed\n");
2473 /* open a given stream. Return 0 if OK */
2474 static int stream_component_open(VideoState *is, int stream_index)
/* Finds and opens a decoder for ic->streams[stream_index], applies per-stream
 * codec options, then initializes the audio/video/subtitle pipeline for that
 * stream type (packet queue + decode thread, or SDL audio output).
 * NOTE(review): this extract elides lines (braces, breaks, error/fail paths);
 * comments describe only the code visible here. */
2476 AVFormatContext *ic = is->ic;
2477 AVCodecContext *avctx;
2479 const char *forced_codec_name = NULL;
2481 AVDictionaryEntry *t = NULL;
2482 int sample_rate, nb_channels;
2483 int64_t channel_layout;
2485 int stream_lowres = lowres;
/* reject out-of-range stream indices (elided early return presumed) */
2487 if (stream_index < 0 || stream_index >= ic->nb_streams)
2489 avctx = ic->streams[stream_index]->codec;
/* decoder chosen from the codec id; may be replaced by a user-forced name */
2491 codec = avcodec_find_decoder(avctx->codec_id);
/* remember the last stream of each type (used by stream cycling) and pick up
 * any -acodec/-scodec/-vcodec override for this media type */
2493 switch(avctx->codec_type){
2494 case AVMEDIA_TYPE_AUDIO : is->last_audio_stream = stream_index; forced_codec_name = audio_codec_name; break;
2495 case AVMEDIA_TYPE_SUBTITLE: is->last_subtitle_stream = stream_index; forced_codec_name = subtitle_codec_name; break;
2496 case AVMEDIA_TYPE_VIDEO : is->last_video_stream = stream_index; forced_codec_name = video_codec_name; break;
2498 if (forced_codec_name)
2499 codec = avcodec_find_decoder_by_name(forced_codec_name);
/* (elided guard: the warnings below are reached when no decoder was found) */
2501 if (forced_codec_name) av_log(NULL, AV_LOG_WARNING,
2502 "No codec could be found with name '%s'\n", forced_codec_name);
2503 else av_log(NULL, AV_LOG_WARNING,
2504 "No codec could be found with id %d\n", avctx->codec_id);
2508 avctx->codec_id = codec->id;
2509 avctx->workaround_bugs = workaround_bugs;
/* clamp the requested lowres factor to what this decoder supports */
2510 if(stream_lowres > av_codec_get_max_lowres(codec)){
2511 av_log(avctx, AV_LOG_WARNING, "The maximum value for lowres supported by the decoder is %d\n",
2512 av_codec_get_max_lowres(codec));
2513 stream_lowres = av_codec_get_max_lowres(codec);
2515 av_codec_set_lowres(avctx, stream_lowres);
2516 avctx->error_concealment = error_concealment;
2518 if(stream_lowres) avctx->flags |= CODEC_FLAG_EMU_EDGE;
2519 if (fast) avctx->flags2 |= CODEC_FLAG2_FAST;
2520 if(codec->capabilities & CODEC_CAP_DR1)
2521 avctx->flags |= CODEC_FLAG_EMU_EDGE;
/* per-stream options from the command line; default to auto threading,
 * propagate lowres, and request refcounted frames for audio/video */
2523 opts = filter_codec_opts(codec_opts, avctx->codec_id, ic, ic->streams[stream_index], codec);
2524 if (!av_dict_get(opts, "threads", NULL, 0))
2525 av_dict_set(&opts, "threads", "auto", 0);
2527 av_dict_set(&opts, "lowres", av_asprintf("%d", stream_lowres), AV_DICT_DONT_STRDUP_VAL);
2528 if (avctx->codec_type == AVMEDIA_TYPE_VIDEO || avctx->codec_type == AVMEDIA_TYPE_AUDIO)
2529 av_dict_set(&opts, "refcounted_frames", "1", 0);
2530 if (avcodec_open2(avctx, codec, &opts) < 0)
/* any option left in the dict was not consumed => unknown key from the user */
2532 if ((t = av_dict_get(opts, "", NULL, AV_DICT_IGNORE_SUFFIX))) {
2533 av_log(NULL, AV_LOG_ERROR, "Option %s not found.\n", t->key);
2534 return AVERROR_OPTION_NOT_FOUND;
2537 ic->streams[stream_index]->discard = AVDISCARD_DEFAULT;
2538 switch (avctx->codec_type) {
2539 case AVMEDIA_TYPE_AUDIO:
/* audio: record the decoder's output format as the filter-chain source,
 * then (filter build visible below) take the negotiated output format from
 * the sink link; an elided non-filter branch uses avctx values directly */
2544 is->audio_filter_src.freq = avctx->sample_rate;
2545 is->audio_filter_src.channels = avctx->channels;
2546 is->audio_filter_src.channel_layout = get_valid_channel_layout(avctx->channel_layout, avctx->channels);
2547 is->audio_filter_src.fmt = avctx->sample_fmt;
2548 if ((ret = configure_audio_filters(is, afilters, 0)) < 0)
2550 link = is->out_audio_filter->inputs[0];
2551 sample_rate = link->sample_rate;
2552 nb_channels = link->channels;
2553 channel_layout = link->channel_layout;
2556 sample_rate = avctx->sample_rate;
2557 nb_channels = avctx->channels;
2558 channel_layout = avctx->channel_layout;
2561 /* prepare audio output */
/* audio_open returns the SDL hardware buffer size on success */
2562 if ((ret = audio_open(is, channel_layout, nb_channels, sample_rate, &is->audio_tgt)) < 0)
2564 is->audio_hw_buf_size = ret;
2565 is->audio_src = is->audio_tgt;
2566 is->audio_buf_size = 0;
2567 is->audio_buf_index = 0;
2569 /* init averaging filter */
2570 is->audio_diff_avg_coef = exp(log(0.01) / AUDIO_DIFF_AVG_NB);
2571 is->audio_diff_avg_count = 0;
2572 /* since we do not have a precise anough audio fifo fullness,
2573 we correct audio sync only if larger than this threshold */
2574 is->audio_diff_threshold = 2.0 * is->audio_hw_buf_size / is->audio_tgt.bytes_per_sec;
2576 memset(&is->audio_pkt, 0, sizeof(is->audio_pkt));
2577 memset(&is->audio_pkt_temp, 0, sizeof(is->audio_pkt_temp));
2578 is->audio_pkt_temp.stream_index = -1;
2580 is->audio_stream = stream_index;
2581 is->audio_st = ic->streams[stream_index];
2583 packet_queue_start(&is->audioq);
2586 case AVMEDIA_TYPE_VIDEO:
/* video: start the queue and spawn the video decoding thread */
2587 is->video_stream = stream_index;
2588 is->video_st = ic->streams[stream_index];
2590 packet_queue_start(&is->videoq);
2591 is->video_tid = SDL_CreateThread(video_thread, is);
2592 is->queue_attachments_req = 1;
2594 case AVMEDIA_TYPE_SUBTITLE:
/* subtitles: start the queue and spawn the subtitle decoding thread */
2595 is->subtitle_stream = stream_index;
2596 is->subtitle_st = ic->streams[stream_index];
2597 packet_queue_start(&is->subtitleq);
2599 is->subtitle_tid = SDL_CreateThread(subtitle_thread, is);
2607 static void stream_component_close(VideoState *is, int stream_index)
/* Tears down everything stream_component_open() set up for one stream:
 * aborts and flushes the packet queue, wakes and joins the decode thread,
 * frees per-stream buffers, closes the codec and clears the is-> fields.
 * NOTE(review): extract elides lines (breaks, thread joins for audio, etc.);
 * comments describe only the visible code. */
2609 AVFormatContext *ic = is->ic;
2610 AVCodecContext *avctx;
/* ignore invalid indices (elided early return presumed) */
2612 if (stream_index < 0 || stream_index >= ic->nb_streams)
2614 avctx = ic->streams[stream_index]->codec;
2616 switch (avctx->codec_type) {
2617 case AVMEDIA_TYPE_AUDIO:
/* stop the audio pipeline and release decode/resample/visualization state */
2618 packet_queue_abort(&is->audioq);
2622 packet_queue_flush(&is->audioq);
2623 av_free_packet(&is->audio_pkt);
2624 swr_free(&is->swr_ctx);
2625 av_freep(&is->audio_buf1);
2626 is->audio_buf1_size = 0;
2627 is->audio_buf = NULL;
2628 av_frame_free(&is->frame);
/* RDFT state used by the spectrum display */
2631 av_rdft_end(is->rdft);
2632 av_freep(&is->rdft_data);
2637 avfilter_graph_free(&is->agraph);
2640 case AVMEDIA_TYPE_VIDEO:
2641 packet_queue_abort(&is->videoq);
2643 /* note: we also signal this mutex to make sure we deblock the
2644 video thread in all cases */
2645 SDL_LockMutex(is->pictq_mutex);
2646 SDL_CondSignal(is->pictq_cond);
2647 SDL_UnlockMutex(is->pictq_mutex);
2649 SDL_WaitThread(is->video_tid, NULL);
2651 packet_queue_flush(&is->videoq);
2653 case AVMEDIA_TYPE_SUBTITLE:
2654 packet_queue_abort(&is->subtitleq);
2656 /* note: we also signal this mutex to make sure we deblock the
2657 video thread in all cases */
2658 SDL_LockMutex(is->subpq_mutex);
2659 SDL_CondSignal(is->subpq_cond);
2660 SDL_UnlockMutex(is->subpq_mutex);
2662 SDL_WaitThread(is->subtitle_tid, NULL);
2664 packet_queue_flush(&is->subtitleq);
/* common teardown: drop future packets of this stream and close the codec */
2670 ic->streams[stream_index]->discard = AVDISCARD_ALL;
2671 avcodec_close(avctx);
2672 switch (avctx->codec_type) {
2673 case AVMEDIA_TYPE_AUDIO:
2674 is->audio_st = NULL;
2675 is->audio_stream = -1;
2677 case AVMEDIA_TYPE_VIDEO:
2678 is->video_st = NULL;
2679 is->video_stream = -1;
2681 case AVMEDIA_TYPE_SUBTITLE:
2682 is->subtitle_st = NULL;
2683 is->subtitle_stream = -1;
2690 static int decode_interrupt_cb(void *ctx)
/* AVFormatContext interrupt callback: returning non-zero makes blocking
 * libavformat I/O abort. ctx is the VideoState installed in read_thread(). */
2692 VideoState *is = ctx;
2693 return is->abort_request;
2696 static int is_realtime(AVFormatContext *s)
/* Heuristic: treat the input as a realtime source if its demuxer is
 * rtp/rtsp/sdp, or if the filename uses an rtp:/udp: protocol prefix.
 * (Return statements are elided in this extract.) */
2698 if( !strcmp(s->iformat->name, "rtp")
2699 || !strcmp(s->iformat->name, "rtsp")
2700 || !strcmp(s->iformat->name, "sdp")
2704 if(s->pb && ( !strncmp(s->filename, "rtp:", 4)
2705 || !strncmp(s->filename, "udp:", 4)
2712 /* this thread gets the stream from the disk or the network */
2713 static int read_thread(void *arg)
/* Demuxer thread: opens the input, selects the best audio/video/subtitle
 * streams, opens their components, then loops reading packets and feeding the
 * three packet queues, honoring pause, seek and queue-fullness backpressure.
 * On exit it closes all components and posts FF_QUIT_EVENT to the GUI.
 * NOTE(review): extract elides lines (labels, gotos, some conditions); the
 * comments below describe only visible code. */
2715 VideoState *is = arg;
2716 AVFormatContext *ic = NULL;
2718 int st_index[AVMEDIA_TYPE_NB];
2719 AVPacket pkt1, *pkt = &pkt1;
2721 int64_t stream_start_time;
2722 int pkt_in_play_range = 0;
2723 AVDictionaryEntry *t;
2724 AVDictionary **opts;
2725 int orig_nb_streams;
/* mutex used only to pair with continue_read_thread for timed waits */
2726 SDL_mutex *wait_mutex = SDL_CreateMutex();
2728 memset(st_index, -1, sizeof(st_index));
2729 is->last_video_stream = is->video_stream = -1;
2730 is->last_audio_stream = is->audio_stream = -1;
2731 is->last_subtitle_stream = is->subtitle_stream = -1;
/* open the input; the interrupt callback lets abort_request cancel blocking I/O */
2733 ic = avformat_alloc_context();
2734 ic->interrupt_callback.callback = decode_interrupt_cb;
2735 ic->interrupt_callback.opaque = is;
2736 err = avformat_open_input(&ic, is->filename, is->iformat, &format_opts);
2738 print_error(is->filename, err);
/* leftover entry in format_opts => unknown -f/-... option from the user */
2742 if ((t = av_dict_get(format_opts, "", NULL, AV_DICT_IGNORE_SUFFIX))) {
2743 av_log(NULL, AV_LOG_ERROR, "Option %s not found.\n", t->key);
2744 ret = AVERROR_OPTION_NOT_FOUND;
2750 ic->flags |= AVFMT_FLAG_GENPTS;
/* probe stream parameters, passing per-stream codec options */
2752 opts = setup_find_stream_info_opts(ic, codec_opts);
2753 orig_nb_streams = ic->nb_streams;
2755 err = avformat_find_stream_info(ic, opts);
2757 av_log(NULL, AV_LOG_WARNING,
2758 "%s: could not find codec parameters\n", is->filename);
2762 for (i = 0; i < orig_nb_streams; i++)
2763 av_dict_free(&opts[i]);
2767 ic->pb->eof_reached = 0; // FIXME hack, ffplay maybe should not use url_feof() to test for the end
/* -bytes auto: seek by bytes for discontinuous-timestamp formats except ogg */
2769 if (seek_by_bytes < 0)
2770 seek_by_bytes = !!(ic->iformat->flags & AVFMT_TS_DISCONT) && strcmp("ogg", ic->iformat->name);
2772 is->max_frame_duration = (ic->iformat->flags & AVFMT_TS_DISCONT) ? 10.0 : 3600.0;
2774 if (!window_title && (t = av_dict_get(ic->metadata, "title", NULL, 0)))
2775 window_title = av_asprintf("%s - %s", t->value, input_filename);
2777 /* if seeking requested, we execute it */
2778 if (start_time != AV_NOPTS_VALUE) {
2781 timestamp = start_time;
2782 /* add the stream start time */
2783 if (ic->start_time != AV_NOPTS_VALUE)
2784 timestamp += ic->start_time;
2785 ret = avformat_seek_file(ic, -1, INT64_MIN, timestamp, INT64_MAX, 0);
2787 av_log(NULL, AV_LOG_WARNING, "%s: could not seek to position %0.3f\n",
2788 is->filename, (double)timestamp / AV_TIME_BASE);
2792 is->realtime = is_realtime(ic);
/* discard everything by default; components re-enable their own stream */
2794 for (i = 0; i < ic->nb_streams; i++)
2795 ic->streams[i]->discard = AVDISCARD_ALL;
/* pick best streams, related to each other (audio relative to video, etc.) */
2797 st_index[AVMEDIA_TYPE_VIDEO] =
2798 av_find_best_stream(ic, AVMEDIA_TYPE_VIDEO,
2799 wanted_stream[AVMEDIA_TYPE_VIDEO], -1, NULL, 0);
2801 st_index[AVMEDIA_TYPE_AUDIO] =
2802 av_find_best_stream(ic, AVMEDIA_TYPE_AUDIO,
2803 wanted_stream[AVMEDIA_TYPE_AUDIO],
2804 st_index[AVMEDIA_TYPE_VIDEO],
2806 if (!video_disable && !subtitle_disable)
2807 st_index[AVMEDIA_TYPE_SUBTITLE] =
2808 av_find_best_stream(ic, AVMEDIA_TYPE_SUBTITLE,
2809 wanted_stream[AVMEDIA_TYPE_SUBTITLE],
2810 (st_index[AVMEDIA_TYPE_AUDIO] >= 0 ?
2811 st_index[AVMEDIA_TYPE_AUDIO] :
2812 st_index[AVMEDIA_TYPE_VIDEO]),
2815 av_dump_format(ic, 0, is->filename, 0);
2818 is->show_mode = show_mode;
/* size the window from the chosen video stream before decoding starts */
2819 if (st_index[AVMEDIA_TYPE_VIDEO] >= 0) {
2820 AVStream *st = ic->streams[st_index[AVMEDIA_TYPE_VIDEO]];
2821 AVCodecContext *avctx = st->codec;
2822 VideoPicture vp = {.width = avctx->width, .height = avctx->height, .sar = av_guess_sample_aspect_ratio(ic, st, NULL)};
2824 set_default_window_size(&vp);
2827 /* open the streams */
2828 if (st_index[AVMEDIA_TYPE_AUDIO] >= 0) {
2829 stream_component_open(is, st_index[AVMEDIA_TYPE_AUDIO]);
2833 if (st_index[AVMEDIA_TYPE_VIDEO] >= 0) {
2834 ret = stream_component_open(is, st_index[AVMEDIA_TYPE_VIDEO]);
/* no explicit -showmode: show video if it opened, else RDFT spectrum */
2836 if (is->show_mode == SHOW_MODE_NONE)
2837 is->show_mode = ret >= 0 ? SHOW_MODE_VIDEO : SHOW_MODE_RDFT;
2839 if (st_index[AVMEDIA_TYPE_SUBTITLE] >= 0) {
2840 stream_component_open(is, st_index[AVMEDIA_TYPE_SUBTITLE]);
2843 if (is->video_stream < 0 && is->audio_stream < 0) {
2844 av_log(NULL, AV_LOG_FATAL, "Failed to open file '%s' or configure filtergraph\n",
/* realtime sources default to unbounded buffering */
2850 if (infinite_buffer < 0 && is->realtime)
2851 infinite_buffer = 1;
/* ---- main demux loop (loop construct elided in this extract) ---- */
2854 if (is->abort_request)
/* propagate pause/resume to network protocols that support it */
2856 if (is->paused != is->last_paused) {
2857 is->last_paused = is->paused;
2859 is->read_pause_return = av_read_pause(ic);
2863 #if CONFIG_RTSP_DEMUXER || CONFIG_MMSH_PROTOCOL
2865 (!strcmp(ic->iformat->name, "rtsp") ||
2866 (ic->pb && !strncmp(input_filename, "mmsh:", 5)))) {
2867 /* wait 10 ms to avoid trying to get another packet */
/* seek request: compute a [min,max] window around the target and flush all
 * queues, pushing flush_pkt so decoders reset their state */
2874 int64_t seek_target = is->seek_pos;
2875 int64_t seek_min = is->seek_rel > 0 ? seek_target - is->seek_rel + 2: INT64_MIN;
2876 int64_t seek_max = is->seek_rel < 0 ? seek_target - is->seek_rel - 2: INT64_MAX;
2877 // FIXME the +-2 is due to rounding being not done in the correct direction in generation
2878 // of the seek_pos/seek_rel variables
2880 ret = avformat_seek_file(is->ic, -1, seek_min, seek_target, seek_max, is->seek_flags);
2882 av_log(NULL, AV_LOG_ERROR,
2883 "%s: error while seeking\n", is->ic->filename);
2885 if (is->audio_stream >= 0) {
2886 packet_queue_flush(&is->audioq);
2887 packet_queue_put(&is->audioq, &flush_pkt);
2889 if (is->subtitle_stream >= 0) {
2890 packet_queue_flush(&is->subtitleq);
2891 packet_queue_put(&is->subtitleq, &flush_pkt);
2893 if (is->video_stream >= 0) {
2894 packet_queue_flush(&is->videoq);
2895 packet_queue_put(&is->videoq, &flush_pkt);
/* byte seeks have no usable timestamp for the external clock */
2897 if (is->seek_flags & AVSEEK_FLAG_BYTE) {
2898 set_clock(&is->extclk, NAN, 0);
2900 set_clock(&is->extclk, seek_target / (double)AV_TIME_BASE, 0);
2904 is->queue_attachments_req = 1;
2907 step_to_next_frame(is);
/* re-queue an attached picture (e.g. cover art) after open/seek.
 * NOTE(review): "©" below is source-extraction mojibake for "&copy"
 * (the address of a local AVPacket named copy) — left byte-identical. */
2909 if (is->queue_attachments_req) {
2910 if (is->video_st && is->video_st->disposition & AV_DISPOSITION_ATTACHED_PIC) {
2912 if ((ret = av_copy_packet(©, &is->video_st->attached_pic)) < 0)
2914 packet_queue_put(&is->videoq, ©);
2915 packet_queue_put_nullpacket(&is->videoq, is->video_stream);
2917 is->queue_attachments_req = 0;
2920 /* if the queue are full, no need to read more */
2921 if (infinite_buffer<1 &&
2922 (is->audioq.size + is->videoq.size + is->subtitleq.size > MAX_QUEUE_SIZE
2923 || ( (is->audioq .nb_packets > MIN_FRAMES || is->audio_stream < 0 || is->audioq.abort_request)
2924 && (is->videoq .nb_packets > MIN_FRAMES || is->video_stream < 0 || is->videoq.abort_request
2925 || (is->video_st->disposition & AV_DISPOSITION_ATTACHED_PIC))
2926 && (is->subtitleq.nb_packets > MIN_FRAMES || is->subtitle_stream < 0 || is->subtitleq.abort_request)))) {
/* back off until a consumer signals continue_read_thread (10 ms timeout) */
2928 SDL_LockMutex(wait_mutex);
2929 SDL_CondWaitTimeout(is->continue_read_thread, wait_mutex, 10);
2930 SDL_UnlockMutex(wait_mutex);
/* end of stream fully consumed: loop again per -loop, or exit per -autoexit */
2934 (!is->audio_st || is->audio_finished == is->audioq.serial) &&
2935 (!is->video_st || (is->video_finished == is->videoq.serial && is->pictq_size == 0))) {
2936 if (loop != 1 && (!loop || --loop)) {
2937 stream_seek(is, start_time != AV_NOPTS_VALUE ? start_time : 0, 0, 0);
2938 } else if (autoexit) {
/* on demuxer EOF, push null packets so decoders flush their last frames */
2944 if (is->video_stream >= 0)
2945 packet_queue_put_nullpacket(&is->videoq, is->video_stream);
2946 if (is->audio_stream >= 0)
2947 packet_queue_put_nullpacket(&is->audioq, is->audio_stream);
2952 ret = av_read_frame(ic, pkt);
2954 if (ret == AVERROR_EOF || url_feof(ic->pb))
2956 if (ic->pb && ic->pb->error)
/* transient read failure: wait briefly before retrying */
2958 SDL_LockMutex(wait_mutex);
2959 SDL_CondWaitTimeout(is->continue_read_thread, wait_mutex, 10);
2960 SDL_UnlockMutex(wait_mutex);
2963 /* check if packet is in play range specified by user, then queue, otherwise discard */
2964 stream_start_time = ic->streams[pkt->stream_index]->start_time;
2965 pkt_in_play_range = duration == AV_NOPTS_VALUE ||
2966 (pkt->pts - (stream_start_time != AV_NOPTS_VALUE ? stream_start_time : 0)) *
2967 av_q2d(ic->streams[pkt->stream_index]->time_base) -
2968 (double)(start_time != AV_NOPTS_VALUE ? start_time : 0) / 1000000
2969 <= ((double)duration / 1000000);
2970 if (pkt->stream_index == is->audio_stream && pkt_in_play_range) {
2971 packet_queue_put(&is->audioq, pkt);
2972 } else if (pkt->stream_index == is->video_stream && pkt_in_play_range
2973 && !(is->video_st->disposition & AV_DISPOSITION_ATTACHED_PIC)) {
2974 packet_queue_put(&is->videoq, pkt);
2975 } else if (pkt->stream_index == is->subtitle_stream && pkt_in_play_range) {
2976 packet_queue_put(&is->subtitleq, pkt);
2978 av_free_packet(pkt);
2981 /* wait until the end */
2982 while (!is->abort_request) {
2988 /* close each stream */
2989 if (is->audio_stream >= 0)
2990 stream_component_close(is, is->audio_stream);
2991 if (is->video_stream >= 0)
2992 stream_component_close(is, is->video_stream);
2993 if (is->subtitle_stream >= 0)
2994 stream_component_close(is, is->subtitle_stream);
2996 avformat_close_input(&is->ic);
/* tell the main/event thread to quit */
3002 event.type = FF_QUIT_EVENT;
3003 event.user.data1 = is;
3004 SDL_PushEvent(&event);
3006 SDL_DestroyMutex(wait_mutex);
3010 static VideoState *stream_open(const char *filename, AVInputFormat *iformat)
/* Allocates and initializes a VideoState (queues, mutexes, clocks) and spawns
 * the demuxer read_thread. Returns the new state; elided lines handle the
 * allocation-failure and thread-failure paths. */
3014 is = av_mallocz(sizeof(VideoState));
3017 av_strlcpy(is->filename, filename, sizeof(is->filename));
3018 is->iformat = iformat;
3022 /* start video display */
3023 is->pictq_mutex = SDL_CreateMutex();
3024 is->pictq_cond = SDL_CreateCond();
3026 is->subpq_mutex = SDL_CreateMutex();
3027 is->subpq_cond = SDL_CreateCond();
3029 packet_queue_init(&is->videoq);
3030 packet_queue_init(&is->audioq);
3031 packet_queue_init(&is->subtitleq);
3033 is->continue_read_thread = SDL_CreateCond();
/* each clock is tied to its queue's serial so seeks invalidate old timestamps;
 * the external clock is its own serial source */
3035 init_clock(&is->vidclk, &is->videoq.serial);
3036 init_clock(&is->audclk, &is->audioq.serial);
3037 init_clock(&is->extclk, &is->extclk.serial);
3038 is->audio_clock_serial = -1;
3039 is->audio_last_serial = -1;
3040 is->av_sync_type = av_sync_type;
3041 is->read_tid = SDL_CreateThread(read_thread, is);
3042 if (!is->read_tid) {
3049 static void stream_cycle_channel(VideoState *is, int codec_type)
/* Switches to the next stream of `codec_type` (a/v/t hotkeys): scans forward
 * from the last-used index, restricted to the current program when one exists,
 * until a usable stream of that type is found, then closes the old component
 * and opens the new one. NOTE(review): loop header and several breaks/gotos
 * are elided in this extract. */
3051 AVFormatContext *ic = is->ic;
3052 int start_index, stream_index;
3055 AVProgram *p = NULL;
3056 int nb_streams = is->ic->nb_streams;
/* pick the starting point per media type */
3058 if (codec_type == AVMEDIA_TYPE_VIDEO) {
3059 start_index = is->last_video_stream;
3060 old_index = is->video_stream;
3061 } else if (codec_type == AVMEDIA_TYPE_AUDIO) {
3062 start_index = is->last_audio_stream;
3063 old_index = is->audio_stream;
3065 start_index = is->last_subtitle_stream;
3066 old_index = is->subtitle_stream;
3068 stream_index = start_index;
/* when a video stream defines a program, cycle only within that program;
 * start_index/stream_index become indices into p->stream_index[] */
3070 if (codec_type != AVMEDIA_TYPE_VIDEO && is->video_stream != -1) {
3071 p = av_find_program_from_stream(ic, NULL, is->video_stream);
3073 nb_streams = p->nb_stream_indexes;
3074 for (start_index = 0; start_index < nb_streams; start_index++)
3075 if (p->stream_index[start_index] == stream_index)
3077 if (start_index == nb_streams)
3079 stream_index = start_index;
/* advance with wrap-around; subtitles may be turned off entirely */
3084 if (++stream_index >= nb_streams)
3086 if (codec_type == AVMEDIA_TYPE_SUBTITLE)
3089 is->last_subtitle_stream = -1;
3092 if (start_index == -1)
3096 if (stream_index == start_index)
3098 st = is->ic->streams[p ? p->stream_index[stream_index] : stream_index];
3099 if (st->codec->codec_type == codec_type) {
3100 /* check that parameters are OK */
3101 switch (codec_type) {
3102 case AVMEDIA_TYPE_AUDIO:
/* audio is usable only with a known sample rate and channel count */
3103 if (st->codec->sample_rate != 0 &&
3104 st->codec->channels != 0)
3107 case AVMEDIA_TYPE_VIDEO:
3108 case AVMEDIA_TYPE_SUBTITLE:
/* map back from program-relative index to the real stream index */
3116 if (p && stream_index != -1)
3117 stream_index = p->stream_index[stream_index];
3118 stream_component_close(is, old_index);
3119 stream_component_open(is, stream_index);
3123 static void toggle_full_screen(VideoState *is)
/* Flips the global fullscreen flag and reopens the SDL video surface. */
3125 #if defined(__APPLE__) && SDL_VERSION_ATLEAST(1, 2, 14)
3126 /* OS X needs to reallocate the SDL overlays */
3128 for (i = 0; i < VIDEO_PICTURE_QUEUE_SIZE; i++)
3129 is->pictq[i].reallocate = 1;
3131 is_full_screen = !is_full_screen;
3132 video_open(is, 1, NULL);
3135 static void toggle_audio_display(VideoState *is)
/* Cycles show_mode (video -> waves -> RDFT -> ...), skipping modes whose
 * required stream is absent, then blanks the window and forces a refresh. */
3137 int bgcolor = SDL_MapRGB(screen->format, 0x00, 0x00, 0x00);
3138 int next = is->show_mode;
3140 next = (next + 1) % SHOW_MODE_NB;
/* stop once we find a mode we can display (or come back to the current one) */
3141 } while (next != is->show_mode && (next == SHOW_MODE_VIDEO && !is->video_st || next != SHOW_MODE_VIDEO && !is->audio_st));
3142 if (is->show_mode != next) {
3143 fill_rectangle(screen,
3144 is->xleft, is->ytop, is->width, is->height,
3146 is->force_refresh = 1;
3147 is->show_mode = next;
3151 static void refresh_loop_wait_event(VideoState *is, SDL_Event *event) {
/* Polls for an SDL event while driving video_refresh(): sleeps up to
 * remaining_time between refreshes, hides the mouse cursor after
 * CURSOR_HIDE_DELAY, and returns once an event is available in *event. */
3152 double remaining_time = 0.0;
3154 while (!SDL_PeepEvents(event, 1, SDL_GETEVENT, SDL_ALLEVENTS)) {
3155 if (!cursor_hidden && av_gettime() - cursor_last_shown > CURSOR_HIDE_DELAY) {
3159 if (remaining_time > 0.0)
3160 av_usleep((int64_t)(remaining_time * 1000000.0));
/* video_refresh() lowers remaining_time when the next frame is due sooner */
3161 remaining_time = REFRESH_RATE;
3162 if (is->show_mode != SHOW_MODE_NONE && (!is->paused || is->force_refresh))
3163 video_refresh(is, &remaining_time);
3168 /* handle an event sent by the GUI */
3169 static void event_loop(VideoState *cur_stream)
/* Main-thread UI loop: dispatches SDL keyboard/mouse/window events to
 * playback actions (pause, seek, stream cycling, fullscreen, quit).
 * NOTE(review): many case labels, breaks and the enclosing for(;;) are
 * elided in this extract; comments describe only visible code. */
3172 double incr, pos, frac;
3176 refresh_loop_wait_event(cur_stream, &event);
3177 switch (event.type) {
/* --- keyboard --- */
3179 if (exit_on_keydown) {
3180 do_exit(cur_stream);
3183 switch (event.key.keysym.sym) {
3186 do_exit(cur_stream);
3189 toggle_full_screen(cur_stream);
3190 cur_stream->force_refresh = 1;
3194 toggle_pause(cur_stream);
3196 case SDLK_s: // S: Step to next frame
3197 step_to_next_frame(cur_stream);
3200 stream_cycle_channel(cur_stream, AVMEDIA_TYPE_AUDIO);
3203 stream_cycle_channel(cur_stream, AVMEDIA_TYPE_VIDEO);
/* (presumably the 'c' key: cycle all three channel types) */
3206 stream_cycle_channel(cur_stream, AVMEDIA_TYPE_VIDEO);
3207 stream_cycle_channel(cur_stream, AVMEDIA_TYPE_AUDIO);
3208 stream_cycle_channel(cur_stream, AVMEDIA_TYPE_SUBTITLE);
3211 stream_cycle_channel(cur_stream, AVMEDIA_TYPE_SUBTITLE);
3214 toggle_audio_display(cur_stream);
/* --- arrow-key seeking: `incr` (set in elided lines) is seconds, or is
 * converted to bytes below when seeking by bytes --- */
3234 if (seek_by_bytes) {
3235 if (cur_stream->video_stream >= 0 && cur_stream->video_current_pos >= 0) {
3236 pos = cur_stream->video_current_pos;
3237 } else if (cur_stream->audio_stream >= 0 && cur_stream->audio_pkt.pos >= 0) {
3238 pos = cur_stream->audio_pkt.pos;
3240 pos = avio_tell(cur_stream->ic->pb);
3241 if (cur_stream->ic->bit_rate)
3242 incr *= cur_stream->ic->bit_rate / 8.0;
3246 stream_seek(cur_stream, pos, incr, 1);
/* time-based seek relative to the master clock */
3248 pos = get_master_clock(cur_stream);
3250 pos = (double)cur_stream->seek_pos / AV_TIME_BASE;
3252 if (cur_stream->ic->start_time != AV_NOPTS_VALUE && pos < cur_stream->ic->start_time / (double)AV_TIME_BASE)
3253 pos = cur_stream->ic->start_time / (double)AV_TIME_BASE;
3254 stream_seek(cur_stream, (int64_t)(pos * AV_TIME_BASE), (int64_t)(incr * AV_TIME_BASE), 0);
/* --- window/mouse events --- */
3261 case SDL_VIDEOEXPOSE:
3262 cur_stream->force_refresh = 1;
3264 case SDL_MOUSEBUTTONDOWN:
3265 if (exit_on_mousedown) {
3266 do_exit(cur_stream);
3269 case SDL_MOUSEMOTION:
3270 if (cursor_hidden) {
3274 cursor_last_shown = av_gettime();
3275 if (event.type == SDL_MOUSEBUTTONDOWN) {
3278 if (event.motion.state != SDL_PRESSED)
/* click/drag seeks to the fraction of the window width (`x` from elided lines) */
3282 if (seek_by_bytes || cur_stream->ic->duration <= 0) {
3283 uint64_t size = avio_size(cur_stream->ic->pb);
3284 stream_seek(cur_stream, size*x/cur_stream->width, 0, 1);
3288 int tns, thh, tmm, tss;
3289 tns = cur_stream->ic->duration / 1000000LL;
3291 tmm = (tns % 3600) / 60;
3293 frac = x / cur_stream->width;
3296 mm = (ns % 3600) / 60;
3298 av_log(NULL, AV_LOG_INFO,
3299 "Seek to %2.0f%% (%2d:%02d:%02d) of total duration (%2d:%02d:%02d) \n", frac*100,
3300 hh, mm, ss, thh, tmm, tss);
3301 ts = frac * cur_stream->ic->duration;
3302 if (cur_stream->ic->start_time != AV_NOPTS_VALUE)
3303 ts += cur_stream->ic->start_time;
3304 stream_seek(cur_stream, ts, 0, 0);
/* window resize: width clamped to 16383 (SDL/overlay limit) */
3307 case SDL_VIDEORESIZE:
3308 screen = SDL_SetVideoMode(FFMIN(16383, event.resize.w), event.resize.h, 0,
3309 SDL_HWSURFACE|SDL_RESIZABLE|SDL_ASYNCBLIT|SDL_HWACCEL);
3311 av_log(NULL, AV_LOG_FATAL, "Failed to set video mode\n");
3312 do_exit(cur_stream);
3314 screen_width = cur_stream->width = screen->w;
3315 screen_height = cur_stream->height = screen->h;
3316 cur_stream->force_refresh = 1;
/* (presumably SDL_QUIT / FF_QUIT_EVENT) */
3320 do_exit(cur_stream);
/* video thread asked the main thread to (re)allocate an overlay */
3322 case FF_ALLOC_EVENT:
3323 alloc_picture(event.user.data1);
3331 static int opt_frame_size(void *optctx, const char *opt, const char *arg)
/* Deprecated -s option: forwards to the generic -video_size option. */
3333 av_log(NULL, AV_LOG_WARNING, "Option -s is deprecated, use -video_size.\n");
3334 return opt_default(NULL, "video_size", arg);
3337 static int opt_width(void *optctx, const char *opt, const char *arg)
/* -x: force displayed width; dies on non-numeric or out-of-range values. */
3339 screen_width = parse_number_or_die(opt, arg, OPT_INT64, 1, INT_MAX);
3343 static int opt_height(void *optctx, const char *opt, const char *arg)
/* -y: force displayed height; dies on non-numeric or out-of-range values. */
3345 screen_height = parse_number_or_die(opt, arg, OPT_INT64, 1, INT_MAX);
3349 static int opt_format(void *optctx, const char *opt, const char *arg)
/* -f: force the input demuxer by name; error if the name is unknown. */
3351 file_iformat = av_find_input_format(arg);
3352 if (!file_iformat) {
3353 av_log(NULL, AV_LOG_FATAL, "Unknown input format: %s\n", arg);
3354 return AVERROR(EINVAL);
3359 static int opt_frame_pix_fmt(void *optctx, const char *opt, const char *arg)
/* Deprecated -pix_fmt option: forwards to the generic -pixel_format option. */
3361 av_log(NULL, AV_LOG_WARNING, "Option -pix_fmt is deprecated, use -pixel_format.\n");
3362 return opt_default(NULL, "pixel_format", arg);
3365 static int opt_sync(void *optctx, const char *opt, const char *arg)
/* -sync: choose the A/V sync master clock: "audio", "video" or "ext". */
3367 if (!strcmp(arg, "audio"))
3368 av_sync_type = AV_SYNC_AUDIO_MASTER;
3369 else if (!strcmp(arg, "video"))
3370 av_sync_type = AV_SYNC_VIDEO_MASTER;
3371 else if (!strcmp(arg, "ext"))
3372 av_sync_type = AV_SYNC_EXTERNAL_CLOCK;
/* (elided else branch) unknown value: log and presumably exit */
3374 av_log(NULL, AV_LOG_ERROR, "Unknown value for %s: %s\n", opt, arg);
3380 static int opt_seek(void *optctx, const char *opt, const char *arg)
/* -ss: parse a time string into the global start_time (microseconds). */
3382 start_time = parse_time_or_die(opt, arg, 1);
3386 static int opt_duration(void *optctx, const char *opt, const char *arg)
/* -t: parse a time string into the global playback duration (microseconds). */
3388 duration = parse_time_or_die(opt, arg, 1);
3392 static int opt_show_mode(void *optctx, const char *opt, const char *arg)
/* -showmode: accepts "video"/"waves"/"rdft" or a numeric mode index. */
3394 show_mode = !strcmp(arg, "video") ? SHOW_MODE_VIDEO :
3395 !strcmp(arg, "waves") ? SHOW_MODE_WAVES :
3396 !strcmp(arg, "rdft" ) ? SHOW_MODE_RDFT :
3397 parse_number_or_die(opt, arg, OPT_INT, 0, SHOW_MODE_NB-1);
3401 static void opt_input_file(void *optctx, const char *filename)
/* Positional-argument handler: records the (single) input filename.
 * A second filename is a fatal error; "-" is handled specially (elided,
 * presumably mapped to stdin via "pipe:"). */
3403 if (input_filename) {
3404 av_log(NULL, AV_LOG_FATAL,
3405 "Argument '%s' provided as input filename, but '%s' was already specified.\n",
3406 filename, input_filename);
3409 if (!strcmp(filename, "-"))
3411 input_filename = filename;
3414 static int opt_codec(void *optctx, const char *opt, const char *arg)
/* -codec:<spec>: force a decoder per media type; <spec> after the colon must
 * be a ('audio), s (subtitle) or v (video). */
3416 const char *spec = strchr(opt, ':');
3418 av_log(NULL, AV_LOG_ERROR,
3419 "No media specifier was specified in '%s' in option '%s'\n",
3421 return AVERROR(EINVAL);
/* (elided switch header over spec[1]) */
3425 case 'a' : audio_codec_name = arg; break;
3426 case 's' : subtitle_codec_name = arg; break;
3427 case 'v' : video_codec_name = arg; break;
3429 av_log(NULL, AV_LOG_ERROR,
3430 "Invalid media specifier '%s' in option '%s'\n", spec, opt);
3431 return AVERROR(EINVAL);
/* Command-line option table consumed by cmdutils' parse_options(); each entry
 * maps a flag to either a global variable or a handler callback above. */
3438 static const OptionDef options[] = {
3439 #include "cmdutils_common_opts.h"
3440 { "x", HAS_ARG, { .func_arg = opt_width }, "force displayed width", "width" },
3441 { "y", HAS_ARG, { .func_arg = opt_height }, "force displayed height", "height" },
3442 { "s", HAS_ARG | OPT_VIDEO, { .func_arg = opt_frame_size }, "set frame size (WxH or abbreviation)", "size" },
3443 { "fs", OPT_BOOL, { &is_full_screen }, "force full screen" },
3444 { "an", OPT_BOOL, { &audio_disable }, "disable audio" },
3445 { "vn", OPT_BOOL, { &video_disable }, "disable video" },
3446 { "sn", OPT_BOOL, { &subtitle_disable }, "disable subtitling" },
3447 { "ast", OPT_INT | HAS_ARG | OPT_EXPERT, { &wanted_stream[AVMEDIA_TYPE_AUDIO] }, "select desired audio stream", "stream_number" },
3448 { "vst", OPT_INT | HAS_ARG | OPT_EXPERT, { &wanted_stream[AVMEDIA_TYPE_VIDEO] }, "select desired video stream", "stream_number" },
3449 { "sst", OPT_INT | HAS_ARG | OPT_EXPERT, { &wanted_stream[AVMEDIA_TYPE_SUBTITLE] }, "select desired subtitle stream", "stream_number" },
3450 { "ss", HAS_ARG, { .func_arg = opt_seek }, "seek to a given position in seconds", "pos" },
3451 { "t", HAS_ARG, { .func_arg = opt_duration }, "play \"duration\" seconds of audio/video", "duration" },
3452 { "bytes", OPT_INT | HAS_ARG, { &seek_by_bytes }, "seek by bytes 0=off 1=on -1=auto", "val" },
3453 { "nodisp", OPT_BOOL, { &display_disable }, "disable graphical display" },
3454 { "f", HAS_ARG, { .func_arg = opt_format }, "force format", "fmt" },
3455 { "pix_fmt", HAS_ARG | OPT_EXPERT | OPT_VIDEO, { .func_arg = opt_frame_pix_fmt }, "set pixel format", "format" },
3456 { "stats", OPT_BOOL | OPT_EXPERT, { &show_status }, "show status", "" },
3457 { "bug", OPT_INT | HAS_ARG | OPT_EXPERT, { &workaround_bugs }, "workaround bugs", "" },
3458 { "fast", OPT_BOOL | OPT_EXPERT, { &fast }, "non spec compliant optimizations", "" },
3459 { "genpts", OPT_BOOL | OPT_EXPERT, { &genpts }, "generate pts", "" },
3460 { "drp", OPT_INT | HAS_ARG | OPT_EXPERT, { &decoder_reorder_pts }, "let decoder reorder pts 0=off 1=on -1=auto", ""},
3461 { "lowres", OPT_INT | HAS_ARG | OPT_EXPERT, { &lowres }, "", "" },
3462 { "ec", OPT_INT | HAS_ARG | OPT_EXPERT, { &error_concealment }, "set error concealment options", "bit_mask" },
3463 { "sync", HAS_ARG | OPT_EXPERT, { .func_arg = opt_sync }, "set audio-video sync. type (type=audio/video/ext)", "type" },
3464 { "autoexit", OPT_BOOL | OPT_EXPERT, { &autoexit }, "exit at the end", "" },
3465 { "exitonkeydown", OPT_BOOL | OPT_EXPERT, { &exit_on_keydown }, "exit on key down", "" },
3466 { "exitonmousedown", OPT_BOOL | OPT_EXPERT, { &exit_on_mousedown }, "exit on mouse down", "" },
3467 { "loop", OPT_INT | HAS_ARG | OPT_EXPERT, { &loop }, "set number of times the playback shall be looped", "loop count" },
3468 { "framedrop", OPT_BOOL | OPT_EXPERT, { &framedrop }, "drop frames when cpu is too slow", "" },
3469 { "infbuf", OPT_BOOL | OPT_EXPERT, { &infinite_buffer }, "don't limit the input buffer size (useful with realtime streams)", "" },
3470 { "window_title", OPT_STRING | HAS_ARG, { &window_title }, "set window title", "window title" },
3472 { "vf", OPT_STRING | HAS_ARG, { &vfilters }, "set video filters", "filter_graph" },
3473 { "af", OPT_STRING | HAS_ARG, { &afilters }, "set audio filters", "filter_graph" },
3475 { "rdftspeed", OPT_INT | HAS_ARG| OPT_AUDIO | OPT_EXPERT, { &rdftspeed }, "rdft speed", "msecs" },
3476 { "showmode", HAS_ARG, { .func_arg = opt_show_mode}, "select show mode (0 = video, 1 = waves, 2 = RDFT)", "mode" },
3477 { "default", HAS_ARG | OPT_AUDIO | OPT_VIDEO | OPT_EXPERT, { .func_arg = opt_default }, "generic catch all option", "" },
3478 { "i", OPT_BOOL, { &dummy}, "read specified file", "input_file"},
3479 { "codec", HAS_ARG, { .func_arg = opt_codec}, "force decoder", "decoder_name" },
3480 { "acodec", HAS_ARG | OPT_STRING | OPT_EXPERT, { &audio_codec_name }, "force audio decoder", "decoder_name" },
3481 { "scodec", HAS_ARG | OPT_STRING | OPT_EXPERT, { &subtitle_codec_name }, "force subtitle decoder", "decoder_name" },
3482 { "vcodec", HAS_ARG | OPT_STRING | OPT_EXPERT, { &video_codec_name }, "force video decoder", "decoder_name" },
3486 static void show_usage(void)
/* Prints the one-line usage banner. */
3488 av_log(NULL, AV_LOG_INFO, "Simple media player\n");
3489 av_log(NULL, AV_LOG_INFO, "usage: %s [options] input_file\n", program_name);
3490 av_log(NULL, AV_LOG_INFO, "\n");
3493 void show_help_default(const char *opt, const char *arg)
/* cmdutils hook for -h: prints option groups, library option help, and the
 * interactive key bindings. */
3495 av_log_set_callback(log_callback_help);
3497 show_help_options(options, "Main options:", 0, OPT_EXPERT, 0);
3498 show_help_options(options, "Advanced options:", OPT_EXPERT, 0, 0);
3500 show_help_children(avcodec_get_class(), AV_OPT_FLAG_DECODING_PARAM);
3501 show_help_children(avformat_get_class(), AV_OPT_FLAG_DECODING_PARAM);
/* without libavfilter the scaler options are shown instead */
3502 #if !CONFIG_AVFILTER
3503 show_help_children(sws_get_class(), AV_OPT_FLAG_ENCODING_PARAM);
3505 show_help_children(avfilter_get_class(), AV_OPT_FLAG_FILTERING_PARAM);
3507 printf("\nWhile playing:\n"
3509 "f toggle full screen\n"
3511 "a cycle audio channel in the current program\n"
3512 "v cycle video channel\n"
3513 "t cycle subtitle channel in the current program\n"
3515 "w show audio waves\n"
3516 "s activate frame-step mode\n"
3517 "left/right seek backward/forward 10 seconds\n"
3518 "down/up seek backward/forward 1 minute\n"
3519 "page down/page up seek backward/forward 10 minutes\n"
3520 "mouse click seek to percentage in file corresponding to fraction of width\n"
3524 static int lockmgr(void **mtx, enum AVLockOp op)
/* av_lockmgr_register() callback backing libavcodec's locking with SDL
 * mutexes. Returns 0 on success, non-zero on failure (per the lockmgr API). */
3527 case AV_LOCK_CREATE:
3528 *mtx = SDL_CreateMutex();
3532 case AV_LOCK_OBTAIN:
3533 return !!SDL_LockMutex(*mtx);
3534 case AV_LOCK_RELEASE:
3535 return !!SDL_UnlockMutex(*mtx);
3536 case AV_LOCK_DESTROY:
3537 SDL_DestroyMutex(*mtx);
3543 /* Called from the main */
3544 int main(int argc, char **argv)
3548 char dummy_videodriver[] = "SDL_VIDEODRIVER=dummy";
3550 av_log_set_flags(AV_LOG_SKIP_REPEATED);
3551 parse_loglevel(argc, argv, options);
3553 /* register all codecs, demux and protocols */
3555 avdevice_register_all();
3558 avfilter_register_all();
3561 avformat_network_init();
3565 signal(SIGINT , sigterm_handler); /* Interrupt (ANSI). */
3566 signal(SIGTERM, sigterm_handler); /* Termination (ANSI). */
3568 show_banner(argc, argv, options);
3570 parse_options(NULL, argc, argv, options, opt_input_file);
3572 if (!input_filename) {
3574 av_log(NULL, AV_LOG_FATAL, "An input file must be specified\n");
3575 av_log(NULL, AV_LOG_FATAL,
3576 "Use -h to get full help or, even better, run 'man %s'\n", program_name);
3580 if (display_disable) {
3583 flags = SDL_INIT_VIDEO | SDL_INIT_AUDIO | SDL_INIT_TIMER;
3585 flags &= ~SDL_INIT_AUDIO;
3586 if (display_disable)
3587 SDL_putenv(dummy_videodriver); /* For the event queue, we always need a video driver. */
3588 #if !defined(__MINGW32__) && !defined(__APPLE__)
3589 flags |= SDL_INIT_EVENTTHREAD; /* Not supported on Windows or Mac OS X */
3591 if (SDL_Init (flags)) {
3592 av_log(NULL, AV_LOG_FATAL, "Could not initialize SDL - %s\n", SDL_GetError());
3593 av_log(NULL, AV_LOG_FATAL, "(Did you set the DISPLAY variable?)\n");
3597 if (!display_disable) {
3598 const SDL_VideoInfo *vi = SDL_GetVideoInfo();
3599 fs_screen_width = vi->current_w;
3600 fs_screen_height = vi->current_h;
3603 SDL_EventState(SDL_ACTIVEEVENT, SDL_IGNORE);
3604 SDL_EventState(SDL_SYSWMEVENT, SDL_IGNORE);
3605 SDL_EventState(SDL_USEREVENT, SDL_IGNORE);
3607 if (av_lockmgr_register(lockmgr)) {
3608 av_log(NULL, AV_LOG_FATAL, "Could not initialize lock manager!\n");
3612 av_init_packet(&flush_pkt);
3613 flush_pkt.data = (uint8_t *)&flush_pkt;
3615 is = stream_open(input_filename, file_iformat);
3617 av_log(NULL, AV_LOG_FATAL, "Failed to initialize VideoState!\n");