2 * Copyright (c) 2003 Fabrice Bellard
4 * This file is part of FFmpeg.
6 * FFmpeg is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2.1 of the License, or (at your option) any later version.
11 * FFmpeg is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with FFmpeg; if not, write to the Free Software
18 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
23 * simple media player based on the FFmpeg libraries
33 #include "libavutil/avstring.h"
34 #include "libavutil/colorspace.h"
35 #include "libavutil/mathematics.h"
36 #include "libavutil/pixdesc.h"
37 #include "libavutil/imgutils.h"
38 #include "libavutil/dict.h"
39 #include "libavutil/parseutils.h"
40 #include "libavutil/samplefmt.h"
41 #include "libavutil/avassert.h"
42 #include "libavutil/time.h"
43 #include "libavformat/avformat.h"
44 #include "libavdevice/avdevice.h"
45 #include "libswscale/swscale.h"
46 #include "libavutil/opt.h"
47 #include "libavcodec/avfft.h"
48 #include "libswresample/swresample.h"
51 # include "libavfilter/avcodec.h"
52 # include "libavfilter/avfilter.h"
53 # include "libavfilter/buffersink.h"
54 # include "libavfilter/buffersrc.h"
58 #include <SDL_thread.h>
64 const char program_name[] = "ffplay";
65 const int program_birth_year = 2003;
67 #define MAX_QUEUE_SIZE (15 * 1024 * 1024)
70 /* SDL audio buffer size, in samples. Should be small to have precise
71 A/V sync as SDL does not have hardware buffer fullness info. */
72 #define SDL_AUDIO_BUFFER_SIZE 1024
74 /* no AV sync correction is done if below the minimum AV sync threshold */
75 #define AV_SYNC_THRESHOLD_MIN 0.04
76 /* AV sync correction is done if above the maximum AV sync threshold */
77 #define AV_SYNC_THRESHOLD_MAX 0.1
78 /* If a frame duration is longer than this, it will not be duplicated to compensate AV sync */
79 #define AV_SYNC_FRAMEDUP_THRESHOLD 0.1
80 /* no AV correction is done if too big error */
81 #define AV_NOSYNC_THRESHOLD 10.0
83 /* maximum audio speed change to get correct sync */
84 #define SAMPLE_CORRECTION_PERCENT_MAX 10
86 /* external clock speed adjustment constants for realtime sources based on buffer fullness */
87 #define EXTERNAL_CLOCK_SPEED_MIN 0.900
88 #define EXTERNAL_CLOCK_SPEED_MAX 1.010
89 #define EXTERNAL_CLOCK_SPEED_STEP 0.001
91 /* we use about AUDIO_DIFF_AVG_NB A-V differences to make the average */
92 #define AUDIO_DIFF_AVG_NB 20
94 /* polls for possible required screen refresh at least this often, should be less than 1/fps */
95 #define REFRESH_RATE 0.01
97 /* NOTE: the size must be big enough to compensate the hardware audio buffersize size */
98 /* TODO: We assume that a decoded and resampled frame fits into this buffer */
99 #define SAMPLE_ARRAY_SIZE (8 * 65536)
101 #define CURSOR_HIDE_DELAY 1000000
103 static int64_t sws_flags = SWS_BICUBIC;
105 typedef struct MyAVPacketList {
107 struct MyAVPacketList *next;
111 typedef struct PacketQueue {
112 MyAVPacketList *first_pkt, *last_pkt;
121 #define VIDEO_PICTURE_QUEUE_SIZE 3
122 #define SUBPICTURE_QUEUE_SIZE 4
124 typedef struct VideoPicture {
125 double pts; // presentation timestamp for this picture
126 double duration; // estimated duration based on frame rate
127 int64_t pos; // byte position in file
129 int width, height; /* source height & width */
137 typedef struct SubPicture {
138 double pts; /* presentation time stamp for this picture */
143 typedef struct AudioParams {
146 int64_t channel_layout;
147 enum AVSampleFormat fmt;
152 typedef struct Clock {
153 double pts; /* clock base */
154 double pts_drift; /* clock base minus time at which we updated the clock */
157 int serial; /* clock is based on a packet with this serial */
159 int *queue_serial; /* pointer to the current packet queue serial, used for obsolete clock detection */
163 AV_SYNC_AUDIO_MASTER, /* default choice */
164 AV_SYNC_VIDEO_MASTER,
165 AV_SYNC_EXTERNAL_CLOCK, /* synchronize to an external clock */
168 typedef struct VideoState {
169 SDL_Thread *read_tid;
170 SDL_Thread *video_tid;
171 AVInputFormat *iformat;
177 int queue_attachments_req;
182 int read_pause_return;
197 int audio_clock_serial;
198 double audio_diff_cum; /* used for AV difference average computation */
199 double audio_diff_avg_coef;
200 double audio_diff_threshold;
201 int audio_diff_avg_count;
204 int audio_hw_buf_size;
205 uint8_t silence_buf[SDL_AUDIO_BUFFER_SIZE];
208 unsigned int audio_buf_size; /* in bytes */
209 unsigned int audio_buf1_size;
210 int audio_buf_index; /* in bytes */
211 int audio_write_buf_size;
212 int audio_buf_frames_pending;
213 AVPacket audio_pkt_temp;
215 int audio_pkt_temp_serial;
216 int audio_last_serial;
217 struct AudioParams audio_src;
219 struct AudioParams audio_filter_src;
221 struct AudioParams audio_tgt;
222 struct SwrContext *swr_ctx;
223 int frame_drops_early;
224 int frame_drops_late;
226 int64_t audio_frame_next_pts;
229 SHOW_MODE_NONE = -1, SHOW_MODE_VIDEO = 0, SHOW_MODE_WAVES, SHOW_MODE_RDFT, SHOW_MODE_NB
231 int16_t sample_array[SAMPLE_ARRAY_SIZE];
232 int sample_array_index;
236 FFTSample *rdft_data;
238 double last_vis_time;
240 SDL_Thread *subtitle_tid;
242 AVStream *subtitle_st;
243 PacketQueue subtitleq;
244 SubPicture subpq[SUBPICTURE_QUEUE_SIZE];
245 int subpq_size, subpq_rindex, subpq_windex;
246 SDL_mutex *subpq_mutex;
247 SDL_cond *subpq_cond;
250 double frame_last_returned_time;
251 double frame_last_filter_delay;
255 int64_t video_current_pos; // current displayed file pos
256 double max_frame_duration; // maximum duration of a frame - above this, we consider the jump a timestamp discontinuity
257 VideoPicture pictq[VIDEO_PICTURE_QUEUE_SIZE];
258 int pictq_size, pictq_rindex, pictq_windex;
259 SDL_mutex *pictq_mutex;
260 SDL_cond *pictq_cond;
262 struct SwsContext *img_convert_ctx;
264 SDL_Rect last_display_rect;
267 int width, height, xleft, ytop;
272 AVFilterContext *in_video_filter; // the first filter in the video chain
273 AVFilterContext *out_video_filter; // the last filter in the video chain
274 AVFilterContext *in_audio_filter; // the first filter in the audio chain
275 AVFilterContext *out_audio_filter; // the last filter in the audio chain
276 AVFilterGraph *agraph; // audio filter graph
279 int last_video_stream, last_audio_stream, last_subtitle_stream;
281 SDL_cond *continue_read_thread;
284 /* options specified by the user */
285 static AVInputFormat *file_iformat;
286 static const char *input_filename;
287 static const char *window_title;
288 static int fs_screen_width;
289 static int fs_screen_height;
290 static int default_width = 640;
291 static int default_height = 480;
292 static int screen_width = 0;
293 static int screen_height = 0;
294 static int audio_disable;
295 static int video_disable;
296 static int subtitle_disable;
297 static int wanted_stream[AVMEDIA_TYPE_NB] = {
298 [AVMEDIA_TYPE_AUDIO] = -1,
299 [AVMEDIA_TYPE_VIDEO] = -1,
300 [AVMEDIA_TYPE_SUBTITLE] = -1,
302 static int seek_by_bytes = -1;
303 static int display_disable;
304 static int show_status = 1;
305 static int av_sync_type = AV_SYNC_AUDIO_MASTER;
306 static int64_t start_time = AV_NOPTS_VALUE;
307 static int64_t duration = AV_NOPTS_VALUE;
308 static int workaround_bugs = 1;
310 static int genpts = 0;
311 static int lowres = 0;
312 static int error_concealment = 3;
313 static int decoder_reorder_pts = -1;
315 static int exit_on_keydown;
316 static int exit_on_mousedown;
318 static int framedrop = -1;
319 static int infinite_buffer = -1;
320 static enum ShowMode show_mode = SHOW_MODE_NONE;
321 static const char *audio_codec_name;
322 static const char *subtitle_codec_name;
323 static const char *video_codec_name;
324 double rdftspeed = 0.02;
325 static int64_t cursor_last_shown;
326 static int cursor_hidden = 0;
328 static const char **vfilters_list = NULL;
329 static int nb_vfilters = 0;
330 static char *afilters = NULL;
332 static int autorotate = 1;
334 /* current context */
335 static int is_full_screen;
336 static int64_t audio_callback_time;
338 static AVPacket flush_pkt;
340 #define FF_ALLOC_EVENT (SDL_USEREVENT)
341 #define FF_QUIT_EVENT (SDL_USEREVENT + 2)
343 static SDL_Surface *screen;
346 static int opt_add_vfilter(void *optctx, const char *opt, const char *arg)
348 GROW_ARRAY(vfilters_list, nb_vfilters);
349 vfilters_list[nb_vfilters - 1] = arg;
355 int cmp_audio_fmts(enum AVSampleFormat fmt1, int64_t channel_count1,
356 enum AVSampleFormat fmt2, int64_t channel_count2)
358 /* If channel count == 1, planar and non-planar formats are the same */
359 if (channel_count1 == 1 && channel_count2 == 1)
360 return av_get_packed_sample_fmt(fmt1) != av_get_packed_sample_fmt(fmt2);
362 return channel_count1 != channel_count2 || fmt1 != fmt2;
366 int64_t get_valid_channel_layout(int64_t channel_layout, int channels)
368 if (channel_layout && av_get_channel_layout_nb_channels(channel_layout) == channels)
369 return channel_layout;
374 static int packet_queue_put_private(PacketQueue *q, AVPacket *pkt)
376 MyAVPacketList *pkt1;
378 if (q->abort_request)
381 pkt1 = av_malloc(sizeof(MyAVPacketList));
386 if (pkt == &flush_pkt)
388 pkt1->serial = q->serial;
393 q->last_pkt->next = pkt1;
396 q->size += pkt1->pkt.size + sizeof(*pkt1);
397 /* XXX: should duplicate packet data in DV case */
398 SDL_CondSignal(q->cond);
/* Thread-safe enqueue: make the packet's data safe to keep (duplicate it),
 * then append it under q->mutex via packet_queue_put_private().
 * The global flush_pkt sentinel is special-cased: it is never duplicated
 * (and, per the condition below, never treated as freeable on failure). */
402 static int packet_queue_put(PacketQueue *q, AVPacket *pkt)
406 /* duplicate the packet */
407 if (pkt != &flush_pkt && av_dup_packet(pkt) < 0)
410 SDL_LockMutex(q->mutex);
411 ret = packet_queue_put_private(q, pkt);
412 SDL_UnlockMutex(q->mutex);
    /* NOTE(review): the lines following this condition are not in view;
       presumably the packet is freed when the private put failed — confirm. */
414 if (pkt != &flush_pkt && ret < 0)
/* Enqueue an empty packet for the given stream.  Decoder threads use such
 * a packet as an end-of-stream marker to flush remaining frames.
 * NOTE(review): the initialization of pkt1's data/size fields happens on
 * lines not in view (presumably av_init_packet + data=NULL) — confirm. */
420 static int packet_queue_put_nullpacket(PacketQueue *q, int stream_index)
422 AVPacket pkt1, *pkt = &pkt1;
426 pkt->stream_index = stream_index;
427 return packet_queue_put(q, pkt);
430 /* packet queue handling */
/* Initialize a PacketQueue: zero every field, create the SDL mutex and
 * condition variable that guard it, and start in the aborted state —
 * packet_queue_start() must be called before the queue accepts packets. */
431 static void packet_queue_init(PacketQueue *q)
433 memset(q, 0, sizeof(PacketQueue));
    /* SDL primitives serialize access between the read and decoder threads */
434 q->mutex = SDL_CreateMutex();
435 q->cond = SDL_CreateCond();
    /* disabled until packet_queue_start() clears this flag */
436 q->abort_request = 1;
439 static void packet_queue_flush(PacketQueue *q)
441 MyAVPacketList *pkt, *pkt1;
443 SDL_LockMutex(q->mutex);
444 for (pkt = q->first_pkt; pkt != NULL; pkt = pkt1) {
446 av_free_packet(&pkt->pkt);
453 SDL_UnlockMutex(q->mutex);
456 static void packet_queue_destroy(PacketQueue *q)
458 packet_queue_flush(q);
459 SDL_DestroyMutex(q->mutex);
460 SDL_DestroyCond(q->cond);
/* Request abort on the queue: set the flag under the lock and wake any
 * thread blocked in packet_queue_get() so it can observe the request. */
463 static void packet_queue_abort(PacketQueue *q)
465 SDL_LockMutex(q->mutex);
467 q->abort_request = 1;
    /* wake a consumer waiting on q->cond (see packet_queue_get) */
469 SDL_CondSignal(q->cond);
471 SDL_UnlockMutex(q->mutex);
/* (Re)enable the queue after init or abort and push the flush_pkt
 * sentinel, which bumps the queue serial so consumers can detect the
 * discontinuity (see the serial fields used by the clocks). */
474 static void packet_queue_start(PacketQueue *q)
476 SDL_LockMutex(q->mutex);
477 q->abort_request = 0;
478 packet_queue_put_private(q, &flush_pkt);
479 SDL_UnlockMutex(q->mutex);
482 /* return < 0 if aborted, 0 if no packet and > 0 if packet. */
483 static int packet_queue_get(PacketQueue *q, AVPacket *pkt, int block, int *serial)
485 MyAVPacketList *pkt1;
488 SDL_LockMutex(q->mutex);
491 if (q->abort_request) {
498 q->first_pkt = pkt1->next;
502 q->size -= pkt1->pkt.size + sizeof(*pkt1);
505 *serial = pkt1->serial;
513 SDL_CondWait(q->cond, q->mutex);
516 SDL_UnlockMutex(q->mutex);
520 static inline void fill_rectangle(SDL_Surface *screen,
521 int x, int y, int w, int h, int color, int update)
528 SDL_FillRect(screen, &rect, color);
529 if (update && w > 0 && h > 0)
530 SDL_UpdateRect(screen, x, y, w, h);
533 /* draw only the border of a rectangle */
534 static void fill_border(int xleft, int ytop, int width, int height, int x, int y, int w, int h, int color, int update)
538 /* fill the background */
542 w2 = width - (x + w);
548 h2 = height - (y + h);
551 fill_rectangle(screen,
555 fill_rectangle(screen,
556 xleft + width - w2, ytop,
559 fill_rectangle(screen,
563 fill_rectangle(screen,
564 xleft + w1, ytop + height - h2,
569 #define ALPHA_BLEND(a, oldp, newp, s)\
570 ((((oldp << s) * (255 - (a))) + (newp * (a))) / (255 << s))
572 #define RGBA_IN(r, g, b, a, s)\
574 unsigned int v = ((const uint32_t *)(s))[0];\
575 a = (v >> 24) & 0xff;\
576 r = (v >> 16) & 0xff;\
577 g = (v >> 8) & 0xff;\
581 #define YUVA_IN(y, u, v, a, s, pal)\
583 unsigned int val = ((const uint32_t *)(pal))[*(const uint8_t*)(s)];\
584 a = (val >> 24) & 0xff;\
585 y = (val >> 16) & 0xff;\
586 u = (val >> 8) & 0xff;\
590 #define YUVA_OUT(d, y, u, v, a)\
592 ((uint32_t *)(d))[0] = (a << 24) | (y << 16) | (u << 8) | v;\
598 static void blend_subrect(AVPicture *dst, const AVSubtitleRect *rect, int imgw, int imgh)
600 int wrap, wrap3, width2, skip2;
601 int y, u, v, a, u1, v1, a1, w, h;
602 uint8_t *lum, *cb, *cr;
605 int dstx, dsty, dstw, dsth;
607 dstw = av_clip(rect->w, 0, imgw);
608 dsth = av_clip(rect->h, 0, imgh);
609 dstx = av_clip(rect->x, 0, imgw - dstw);
610 dsty = av_clip(rect->y, 0, imgh - dsth);
611 lum = dst->data[0] + dsty * dst->linesize[0];
612 cb = dst->data[1] + (dsty >> 1) * dst->linesize[1];
613 cr = dst->data[2] + (dsty >> 1) * dst->linesize[2];
615 width2 = ((dstw + 1) >> 1) + (dstx & ~dstw & 1);
617 wrap = dst->linesize[0];
618 wrap3 = rect->pict.linesize[0];
619 p = rect->pict.data[0];
620 pal = (const uint32_t *)rect->pict.data[1]; /* Now in YCrCb! */
628 YUVA_IN(y, u, v, a, p, pal);
629 lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
630 cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
631 cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
637 for (w = dstw - (dstx & 1); w >= 2; w -= 2) {
638 YUVA_IN(y, u, v, a, p, pal);
642 lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
644 YUVA_IN(y, u, v, a, p + BPP, pal);
648 lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
649 cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
650 cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
657 YUVA_IN(y, u, v, a, p, pal);
658 lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
659 cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
660 cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
664 p += wrap3 - dstw * BPP;
665 lum += wrap - dstw - dstx;
666 cb += dst->linesize[1] - width2 - skip2;
667 cr += dst->linesize[2] - width2 - skip2;
669 for (h = dsth - (dsty & 1); h >= 2; h -= 2) {
675 YUVA_IN(y, u, v, a, p, pal);
679 lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
682 YUVA_IN(y, u, v, a, p, pal);
686 lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
687 cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
688 cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
694 for (w = dstw - (dstx & 1); w >= 2; w -= 2) {
695 YUVA_IN(y, u, v, a, p, pal);
699 lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
701 YUVA_IN(y, u, v, a, p + BPP, pal);
705 lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
709 YUVA_IN(y, u, v, a, p, pal);
713 lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
715 YUVA_IN(y, u, v, a, p + BPP, pal);
719 lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
721 cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 2);
722 cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 2);
726 p += -wrap3 + 2 * BPP;
730 YUVA_IN(y, u, v, a, p, pal);
734 lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
737 YUVA_IN(y, u, v, a, p, pal);
741 lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
742 cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
743 cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
749 p += wrap3 + (wrap3 - dstw * BPP);
750 lum += wrap + (wrap - dstw - dstx);
751 cb += dst->linesize[1] - width2 - skip2;
752 cr += dst->linesize[2] - width2 - skip2;
754 /* handle odd height */
761 YUVA_IN(y, u, v, a, p, pal);
762 lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
763 cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
764 cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
770 for (w = dstw - (dstx & 1); w >= 2; w -= 2) {
771 YUVA_IN(y, u, v, a, p, pal);
775 lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
777 YUVA_IN(y, u, v, a, p + BPP, pal);
781 lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
782 cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u, 1);
783 cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v, 1);
790 YUVA_IN(y, u, v, a, p, pal);
791 lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
792 cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
793 cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
798 static void free_picture(VideoPicture *vp)
801 SDL_FreeYUVOverlay(vp->bmp);
806 static void free_subpicture(SubPicture *sp)
808 avsubtitle_free(&sp->sub);
811 static void calculate_display_rect(SDL_Rect *rect, int scr_xleft, int scr_ytop, int scr_width, int scr_height, VideoPicture *vp)
814 int width, height, x, y;
816 if (vp->sar.num == 0)
819 aspect_ratio = av_q2d(vp->sar);
821 if (aspect_ratio <= 0.0)
823 aspect_ratio *= (float)vp->width / (float)vp->height;
825 /* XXX: we suppose the screen has a 1.0 pixel ratio */
827 width = ((int)rint(height * aspect_ratio)) & ~1;
828 if (width > scr_width) {
830 height = ((int)rint(width / aspect_ratio)) & ~1;
832 x = (scr_width - width) / 2;
833 y = (scr_height - height) / 2;
834 rect->x = scr_xleft + x;
835 rect->y = scr_ytop + y;
836 rect->w = FFMAX(width, 1);
837 rect->h = FFMAX(height, 1);
840 static void video_image_display(VideoState *is)
848 vp = &is->pictq[is->pictq_rindex];
850 if (is->subtitle_st) {
851 if (is->subpq_size > 0) {
852 sp = &is->subpq[is->subpq_rindex];
854 if (vp->pts >= sp->pts + ((float) sp->sub.start_display_time / 1000)) {
855 SDL_LockYUVOverlay (vp->bmp);
857 pict.data[0] = vp->bmp->pixels[0];
858 pict.data[1] = vp->bmp->pixels[2];
859 pict.data[2] = vp->bmp->pixels[1];
861 pict.linesize[0] = vp->bmp->pitches[0];
862 pict.linesize[1] = vp->bmp->pitches[2];
863 pict.linesize[2] = vp->bmp->pitches[1];
865 for (i = 0; i < sp->sub.num_rects; i++)
866 blend_subrect(&pict, sp->sub.rects[i],
867 vp->bmp->w, vp->bmp->h);
869 SDL_UnlockYUVOverlay (vp->bmp);
874 calculate_display_rect(&rect, is->xleft, is->ytop, is->width, is->height, vp);
876 SDL_DisplayYUVOverlay(vp->bmp, &rect);
878 if (rect.x != is->last_display_rect.x || rect.y != is->last_display_rect.y || rect.w != is->last_display_rect.w || rect.h != is->last_display_rect.h || is->force_refresh) {
879 int bgcolor = SDL_MapRGB(screen->format, 0x00, 0x00, 0x00);
880 fill_border(is->xleft, is->ytop, is->width, is->height, rect.x, rect.y, rect.w, rect.h, bgcolor, 1);
881 is->last_display_rect = rect;
/*
 * Modulo that maps negative inputs into a positive range so it can be
 * used for ring-buffer index arithmetic on sample_array.
 *
 * Note the quirk preserved from the original expression: for a < 0 with
 * a % b == 0 the result is b, not 0, because C's '%' yields 0 for exact
 * negative multiples and b is then added unconditionally.
 */
static inline int compute_mod(int a, int b)
{
    return a < 0 ? a%b + b : a%b;
}
891 static void video_audio_display(VideoState *s)
893 int i, i_start, x, y1, y, ys, delay, n, nb_display_channels;
894 int ch, channels, h, h2, bgcolor, fgcolor;
896 int rdft_bits, nb_freq;
898 for (rdft_bits = 1; (1 << rdft_bits) < 2 * s->height; rdft_bits++)
900 nb_freq = 1 << (rdft_bits - 1);
902 /* compute display index : center on currently output samples */
903 channels = s->audio_tgt.channels;
904 nb_display_channels = channels;
906 int data_used= s->show_mode == SHOW_MODE_WAVES ? s->width : (2*nb_freq);
908 delay = s->audio_write_buf_size;
911 /* to be more precise, we take into account the time spent since
912 the last buffer computation */
913 if (audio_callback_time) {
914 time_diff = av_gettime_relative() - audio_callback_time;
915 delay -= (time_diff * s->audio_tgt.freq) / 1000000;
918 delay += 2 * data_used;
919 if (delay < data_used)
922 i_start= x = compute_mod(s->sample_array_index - delay * channels, SAMPLE_ARRAY_SIZE);
923 if (s->show_mode == SHOW_MODE_WAVES) {
925 for (i = 0; i < 1000; i += channels) {
926 int idx = (SAMPLE_ARRAY_SIZE + x - i) % SAMPLE_ARRAY_SIZE;
927 int a = s->sample_array[idx];
928 int b = s->sample_array[(idx + 4 * channels) % SAMPLE_ARRAY_SIZE];
929 int c = s->sample_array[(idx + 5 * channels) % SAMPLE_ARRAY_SIZE];
930 int d = s->sample_array[(idx + 9 * channels) % SAMPLE_ARRAY_SIZE];
932 if (h < score && (b ^ c) < 0) {
939 s->last_i_start = i_start;
941 i_start = s->last_i_start;
944 bgcolor = SDL_MapRGB(screen->format, 0x00, 0x00, 0x00);
945 if (s->show_mode == SHOW_MODE_WAVES) {
946 fill_rectangle(screen,
947 s->xleft, s->ytop, s->width, s->height,
950 fgcolor = SDL_MapRGB(screen->format, 0xff, 0xff, 0xff);
952 /* total height for one channel */
953 h = s->height / nb_display_channels;
954 /* graph height / 2 */
956 for (ch = 0; ch < nb_display_channels; ch++) {
958 y1 = s->ytop + ch * h + (h / 2); /* position of center line */
959 for (x = 0; x < s->width; x++) {
960 y = (s->sample_array[i] * h2) >> 15;
967 fill_rectangle(screen,
968 s->xleft + x, ys, 1, y,
971 if (i >= SAMPLE_ARRAY_SIZE)
972 i -= SAMPLE_ARRAY_SIZE;
976 fgcolor = SDL_MapRGB(screen->format, 0x00, 0x00, 0xff);
978 for (ch = 1; ch < nb_display_channels; ch++) {
979 y = s->ytop + ch * h;
980 fill_rectangle(screen,
981 s->xleft, y, s->width, 1,
984 SDL_UpdateRect(screen, s->xleft, s->ytop, s->width, s->height);
986 nb_display_channels= FFMIN(nb_display_channels, 2);
987 if (rdft_bits != s->rdft_bits) {
988 av_rdft_end(s->rdft);
989 av_free(s->rdft_data);
990 s->rdft = av_rdft_init(rdft_bits, DFT_R2C);
991 s->rdft_bits = rdft_bits;
992 s->rdft_data = av_malloc(4 * nb_freq * sizeof(*s->rdft_data));
996 for (ch = 0; ch < nb_display_channels; ch++) {
997 data[ch] = s->rdft_data + 2 * nb_freq * ch;
999 for (x = 0; x < 2 * nb_freq; x++) {
1000 double w = (x-nb_freq) * (1.0 / nb_freq);
1001 data[ch][x] = s->sample_array[i] * (1.0 - w * w);
1003 if (i >= SAMPLE_ARRAY_SIZE)
1004 i -= SAMPLE_ARRAY_SIZE;
1006 av_rdft_calc(s->rdft, data[ch]);
1008 /* Least efficient way to do this, we should of course
1009 * directly access it but it is more than fast enough. */
1010 for (y = 0; y < s->height; y++) {
1011 double w = 1 / sqrt(nb_freq);
1012 int a = sqrt(w * sqrt(data[0][2 * y + 0] * data[0][2 * y + 0] + data[0][2 * y + 1] * data[0][2 * y + 1]));
1013 int b = (nb_display_channels == 2 ) ? sqrt(w * sqrt(data[1][2 * y + 0] * data[1][2 * y + 0]
1014 + data[1][2 * y + 1] * data[1][2 * y + 1])) : a;
1017 fgcolor = SDL_MapRGB(screen->format, a, b, (a + b) / 2);
1019 fill_rectangle(screen,
1020 s->xpos, s->height-y, 1, 1,
1024 SDL_UpdateRect(screen, s->xpos, s->ytop, 1, s->height);
1027 if (s->xpos >= s->width)
1032 static void stream_close(VideoState *is)
1035 /* XXX: use a special url_shutdown call to abort parse cleanly */
1036 is->abort_request = 1;
1037 SDL_WaitThread(is->read_tid, NULL);
1038 packet_queue_destroy(&is->videoq);
1039 packet_queue_destroy(&is->audioq);
1040 packet_queue_destroy(&is->subtitleq);
1042 /* free all pictures */
1043 for (i = 0; i < VIDEO_PICTURE_QUEUE_SIZE; i++)
1044 free_picture(&is->pictq[i]);
1045 for (i = 0; i < SUBPICTURE_QUEUE_SIZE; i++)
1046 free_subpicture(&is->subpq[i]);
1047 SDL_DestroyMutex(is->pictq_mutex);
1048 SDL_DestroyCond(is->pictq_cond);
1049 SDL_DestroyMutex(is->subpq_mutex);
1050 SDL_DestroyCond(is->subpq_cond);
1051 SDL_DestroyCond(is->continue_read_thread);
1052 #if !CONFIG_AVFILTER
1053 sws_freeContext(is->img_convert_ctx);
1058 static void do_exit(VideoState *is)
1063 av_lockmgr_register(NULL);
1066 av_freep(&vfilters_list);
1068 avformat_network_deinit();
1072 av_log(NULL, AV_LOG_QUIET, "%s", "");
1076 static void sigterm_handler(int sig)
1081 static void set_default_window_size(VideoPicture *vp)
1084 calculate_display_rect(&rect, 0, 0, INT_MAX, vp->height, vp);
1085 default_width = rect.w;
1086 default_height = rect.h;
1089 static int video_open(VideoState *is, int force_set_video_mode, VideoPicture *vp)
1091 int flags = SDL_HWSURFACE | SDL_ASYNCBLIT | SDL_HWACCEL;
1094 if (is_full_screen) flags |= SDL_FULLSCREEN;
1095 else flags |= SDL_RESIZABLE;
1097 if (vp && vp->width)
1098 set_default_window_size(vp);
1100 if (is_full_screen && fs_screen_width) {
1101 w = fs_screen_width;
1102 h = fs_screen_height;
1103 } else if (!is_full_screen && screen_width) {
1110 w = FFMIN(16383, w);
1111 if (screen && is->width == screen->w && screen->w == w
1112 && is->height== screen->h && screen->h == h && !force_set_video_mode)
1114 screen = SDL_SetVideoMode(w, h, 0, flags);
1116 av_log(NULL, AV_LOG_FATAL, "SDL: could not set video mode - exiting\n");
1120 window_title = input_filename;
1121 SDL_WM_SetCaption(window_title, window_title);
1123 is->width = screen->w;
1124 is->height = screen->h;
1129 /* display the current picture, if any */
1130 static void video_display(VideoState *is)
1133 video_open(is, 0, NULL);
1134 if (is->audio_st && is->show_mode != SHOW_MODE_VIDEO)
1135 video_audio_display(is);
1136 else if (is->video_st)
1137 video_image_display(is);
1140 static double get_clock(Clock *c)
1142 if (*c->queue_serial != c->serial)
1147 double time = av_gettime_relative() / 1000000.0;
1148 return c->pts_drift + time - (time - c->last_updated) * (1.0 - c->speed);
1152 static void set_clock_at(Clock *c, double pts, int serial, double time)
1155 c->last_updated = time;
1156 c->pts_drift = c->pts - time;
1160 static void set_clock(Clock *c, double pts, int serial)
1162 double time = av_gettime_relative() / 1000000.0;
1163 set_clock_at(c, pts, serial, time);
1166 static void set_clock_speed(Clock *c, double speed)
1168 set_clock(c, get_clock(c), c->serial);
1172 static void init_clock(Clock *c, int *queue_serial)
1176 c->queue_serial = queue_serial;
1177 set_clock(c, NAN, -1);
1180 static void sync_clock_to_slave(Clock *c, Clock *slave)
1182 double clock = get_clock(c);
1183 double slave_clock = get_clock(slave);
1184 if (!isnan(slave_clock) && (isnan(clock) || fabs(clock - slave_clock) > AV_NOSYNC_THRESHOLD))
1185 set_clock(c, slave_clock, slave->serial);
/* Resolve the effective sync master.  The configured av_sync_type is
 * honored only when the corresponding stream exists; otherwise a fallback
 * master is chosen.
 * NOTE(review): the conditional lines between the visible returns are not
 * in view — presumably they test is->video_st / is->audio_st presence;
 * confirm against the full source. */
1188 static int get_master_sync_type(VideoState *is) {
1189 if (is->av_sync_type == AV_SYNC_VIDEO_MASTER) {
1191 return AV_SYNC_VIDEO_MASTER;
1193 return AV_SYNC_AUDIO_MASTER;
1194 } else if (is->av_sync_type == AV_SYNC_AUDIO_MASTER) {
1196 return AV_SYNC_AUDIO_MASTER;
1198 return AV_SYNC_EXTERNAL_CLOCK;
1200 return AV_SYNC_EXTERNAL_CLOCK;
1204 /* get the current master clock value */
1205 static double get_master_clock(VideoState *is)
1209 switch (get_master_sync_type(is)) {
1210 case AV_SYNC_VIDEO_MASTER:
1211 val = get_clock(&is->vidclk);
1213 case AV_SYNC_AUDIO_MASTER:
1214 val = get_clock(&is->audclk);
1217 val = get_clock(&is->extclk);
/* Adjust the external clock speed based on packet-queue fullness for
 * realtime sources: slow it down when either queue is draining (below
 * MIN_FRAMES / 2), speed it up when both queues are comfortably full
 * (above MIN_FRAMES * 2), and otherwise step the speed back toward 1.0. */
1223 static void check_external_clock_speed(VideoState *is) {
1224 if (is->video_stream >= 0 && is->videoq.nb_packets <= MIN_FRAMES / 2 ||
1225 is->audio_stream >= 0 && is->audioq.nb_packets <= MIN_FRAMES / 2) {
     /* queue nearly empty: decelerate, clamped at EXTERNAL_CLOCK_SPEED_MIN */
1226 set_clock_speed(&is->extclk, FFMAX(EXTERNAL_CLOCK_SPEED_MIN, is->extclk.speed - EXTERNAL_CLOCK_SPEED_STEP));
1227 } else if ((is->video_stream < 0 || is->videoq.nb_packets > MIN_FRAMES * 2) &&
1228 (is->audio_stream < 0 || is->audioq.nb_packets > MIN_FRAMES * 2)) {
     /* queues full: accelerate, clamped at EXTERNAL_CLOCK_SPEED_MAX */
1229 set_clock_speed(&is->extclk, FFMIN(EXTERNAL_CLOCK_SPEED_MAX, is->extclk.speed + EXTERNAL_CLOCK_SPEED_STEP));
1231 double speed = is->extclk.speed;
     /* move one step toward 1.0; the fabs() term gives the step its sign */
1233 set_clock_speed(&is->extclk, speed + EXTERNAL_CLOCK_SPEED_STEP * (1.0 - speed) / fabs(1.0 - speed));
1237 /* seek in the stream */
1238 static void stream_seek(VideoState *is, int64_t pos, int64_t rel, int seek_by_bytes)
1240 if (!is->seek_req) {
1243 is->seek_flags &= ~AVSEEK_FLAG_BYTE;
1245 is->seek_flags |= AVSEEK_FLAG_BYTE;
1247 SDL_CondSignal(is->continue_read_thread);
1251 /* pause or resume the video */
1252 static void stream_toggle_pause(VideoState *is)
1255 is->frame_timer += av_gettime_relative() / 1000000.0 + is->vidclk.pts_drift - is->vidclk.pts;
1256 if (is->read_pause_return != AVERROR(ENOSYS)) {
1257 is->vidclk.paused = 0;
1259 set_clock(&is->vidclk, get_clock(&is->vidclk), is->vidclk.serial);
1261 set_clock(&is->extclk, get_clock(&is->extclk), is->extclk.serial);
1262 is->paused = is->audclk.paused = is->vidclk.paused = is->extclk.paused = !is->paused;
1265 static void toggle_pause(VideoState *is)
1267 stream_toggle_pause(is);
1271 static void step_to_next_frame(VideoState *is)
1273 /* if the stream is paused unpause it, then step */
1275 stream_toggle_pause(is);
1279 static double compute_target_delay(double delay, VideoState *is)
1281 double sync_threshold, diff;
1283 /* update delay to follow master synchronisation source */
1284 if (get_master_sync_type(is) != AV_SYNC_VIDEO_MASTER) {
1285 /* if video is slave, we try to correct big delays by
1286 duplicating or deleting a frame */
1287 diff = get_clock(&is->vidclk) - get_master_clock(is);
1289 /* skip or repeat frame. We take into account the
1290 delay to compute the threshold. I still don't know
1291 if it is the best guess */
1292 sync_threshold = FFMAX(AV_SYNC_THRESHOLD_MIN, FFMIN(AV_SYNC_THRESHOLD_MAX, delay));
1293 if (!isnan(diff) && fabs(diff) < is->max_frame_duration) {
1294 if (diff <= -sync_threshold)
1295 delay = FFMAX(0, delay + diff);
1296 else if (diff >= sync_threshold && delay > AV_SYNC_FRAMEDUP_THRESHOLD)
1297 delay = delay + diff;
1298 else if (diff >= sync_threshold)
1303 av_dlog(NULL, "video: delay=%0.3f A-V=%f\n",
1309 static double vp_duration(VideoState *is, VideoPicture *vp, VideoPicture *nextvp) {
1310 if (vp->serial == nextvp->serial) {
1311 double duration = nextvp->pts - vp->pts;
1312 if (isnan(duration) || duration <= 0 || duration > is->max_frame_duration)
1313 return vp->duration;
1321 static void pictq_next_picture(VideoState *is) {
1322 /* update queue size and signal for next picture */
1323 if (++is->pictq_rindex == VIDEO_PICTURE_QUEUE_SIZE)
1324 is->pictq_rindex = 0;
1326 SDL_LockMutex(is->pictq_mutex);
1328 SDL_CondSignal(is->pictq_cond);
1329 SDL_UnlockMutex(is->pictq_mutex);
1332 static int pictq_prev_picture(VideoState *is) {
1333 VideoPicture *prevvp;
1335 /* update queue size and signal for the previous picture */
1336 prevvp = &is->pictq[(is->pictq_rindex + VIDEO_PICTURE_QUEUE_SIZE - 1) % VIDEO_PICTURE_QUEUE_SIZE];
1337 if (prevvp->allocated && prevvp->serial == is->videoq.serial) {
1338 SDL_LockMutex(is->pictq_mutex);
1339 if (is->pictq_size < VIDEO_PICTURE_QUEUE_SIZE) {
1340 if (--is->pictq_rindex == -1)
1341 is->pictq_rindex = VIDEO_PICTURE_QUEUE_SIZE - 1;
1345 SDL_CondSignal(is->pictq_cond);
1346 SDL_UnlockMutex(is->pictq_mutex);
1351 static void update_video_pts(VideoState *is, double pts, int64_t pos, int serial) {
1352 /* update current video pts */
1353 set_clock(&is->vidclk, pts, serial);
1354 sync_clock_to_slave(&is->extclk, &is->vidclk);
1355 is->video_current_pos = pos;
1358 /* called to display each frame */
/* opaque is the VideoState; *remaining_time is an in/out upper bound (in
 * seconds) on how long the caller may sleep before invoking us again.
 * NOTE(review): interior lines of this function are elided in this view;
 * comments below describe only the visible statements. */
1359 static void video_refresh(void *opaque, double *remaining_time)
1361 VideoState *is = opaque;
1364 SubPicture *sp, *sp2;
/* Only adjust external-clock speed when it is the sync master and the
 * stream is realtime. */
1366 if (!is->paused && get_master_sync_type(is) == AV_SYNC_EXTERNAL_CLOCK && is->realtime)
1367 check_external_clock_speed(is);
/* Audio visualization (non-video show mode): refresh at the rdftspeed
 * period and tell the caller when the next refresh is due. */
1369 if (!display_disable && is->show_mode != SHOW_MODE_VIDEO && is->audio_st) {
1370 time = av_gettime_relative() / 1000000.0;
1371 if (is->force_refresh || is->last_vis_time + rdftspeed < time) {
1373 is->last_vis_time = time;
1375 *remaining_time = FFMIN(*remaining_time, is->last_vis_time + rdftspeed - time);
/* On a forced refresh, step back so the last shown picture is redisplayed. */
1380 if (is->force_refresh)
1381 redisplay = pictq_prev_picture(is);
1383 if (is->pictq_size == 0) {
1384 // nothing to do, no picture to display in the queue
1386 double last_duration, duration, delay;
1387 VideoPicture *vp, *lastvp;
1389 /* dequeue the picture */
1390 vp = &is->pictq[is->pictq_rindex];
1391 lastvp = &is->pictq[(is->pictq_rindex + VIDEO_PICTURE_QUEUE_SIZE - 1) % VIDEO_PICTURE_QUEUE_SIZE];
/* A serial mismatch means the picture predates the last seek/flush of the
 * video packet queue: discard it and invalidate the current position. */
1393 if (vp->serial != is->videoq.serial) {
1394 pictq_next_picture(is);
1395 is->video_current_pos = -1;
/* New serial (post-seek) and not a redisplay: restart the frame timer. */
1400 if (lastvp->serial != vp->serial && !redisplay)
1401 is->frame_timer = av_gettime_relative() / 1000000.0;
1406 /* compute nominal last_duration */
1407 last_duration = vp_duration(is, lastvp, vp);
1411 delay = compute_target_delay(last_duration, is);
1413 time= av_gettime_relative()/1000000.0;
/* Too early for this frame: report how long the caller may sleep and keep
 * the picture queued. */
1414 if (time < is->frame_timer + delay && !redisplay) {
1415 *remaining_time = FFMIN(is->frame_timer + delay - time, *remaining_time);
1419 is->frame_timer += delay;
/* If we fell far behind wall-clock time, resynchronize the frame timer. */
1420 if (delay > 0 && time - is->frame_timer > AV_SYNC_THRESHOLD_MAX)
1421 is->frame_timer = time;
1423 SDL_LockMutex(is->pictq_mutex);
1424 if (!redisplay && !isnan(vp->pts))
1425 update_video_pts(is, vp->pts, vp->pos, vp->serial);
1426 SDL_UnlockMutex(is->pictq_mutex);
/* Late-frame dropping: if the *next* picture is already due, skip this one
 * (never while single-stepping; framedrop policy gates the rest). */
1428 if (is->pictq_size > 1) {
1429 VideoPicture *nextvp = &is->pictq[(is->pictq_rindex + 1) % VIDEO_PICTURE_QUEUE_SIZE];
1430 duration = vp_duration(is, vp, nextvp);
1431 if(!is->step && (redisplay || framedrop>0 || (framedrop && get_master_sync_type(is) != AV_SYNC_VIDEO_MASTER)) && time > is->frame_timer + duration){
1433 is->frame_drops_late++;
1434 pictq_next_picture(is);
/* Drop subtitles that belong to a stale serial or whose display window has
 * already passed relative to the video clock (end time of sp, or start
 * time of the following subtitle sp2). */
1440 if (is->subtitle_st) {
1441 while (is->subpq_size > 0) {
1442 sp = &is->subpq[is->subpq_rindex];
1444 if (is->subpq_size > 1)
1445 sp2 = &is->subpq[(is->subpq_rindex + 1) % SUBPICTURE_QUEUE_SIZE];
1449 if (sp->serial != is->subtitleq.serial
1450 || (is->vidclk.pts > (sp->pts + ((float) sp->sub.end_display_time / 1000)))
1451 || (sp2 && is->vidclk.pts > (sp2->pts + ((float) sp2->sub.start_display_time / 1000))))
1453 free_subpicture(sp);
1455 /* update queue size and signal for next picture */
1456 if (++is->subpq_rindex == SUBPICTURE_QUEUE_SIZE)
1457 is->subpq_rindex = 0;
/* Signal subtitle_thread(), which waits on subpq_cond for queue space. */
1459 SDL_LockMutex(is->subpq_mutex);
1461 SDL_CondSignal(is->subpq_cond);
1462 SDL_UnlockMutex(is->subpq_mutex);
1470 /* display picture */
1471 if (!display_disable && is->show_mode == SHOW_MODE_VIDEO)
1474 pictq_next_picture(is);
/* In single-step mode, pause again after showing exactly one frame. */
1476 if (is->step && !is->paused)
1477 stream_toggle_pause(is);
1480 is->force_refresh = 0;
/* Status line: print clock diff, drop count and queue sizes to the console
 * at most every 30 ms (carriage return keeps it on one line). */
1482 static int64_t last_time;
1484 int aqsize, vqsize, sqsize;
1487 cur_time = av_gettime_relative();
1488 if (!last_time || (cur_time - last_time) >= 30000) {
1493 aqsize = is->audioq.size;
1495 vqsize = is->videoq.size;
1496 if (is->subtitle_st)
1497 sqsize = is->subtitleq.size;
/* av_diff: audio-vs-video if both present, otherwise master-vs-single. */
1499 if (is->audio_st && is->video_st)
1500 av_diff = get_clock(&is->audclk) - get_clock(&is->vidclk);
1501 else if (is->video_st)
1502 av_diff = get_master_clock(is) - get_clock(&is->vidclk);
1503 else if (is->audio_st)
1504 av_diff = get_master_clock(is) - get_clock(&is->audclk);
1505 av_log(NULL, AV_LOG_INFO,
1506 "%7.2f %s:%7.3f fd=%4d aq=%5dKB vq=%5dKB sq=%5dB f=%"PRId64"/%"PRId64" \r",
1507 get_master_clock(is),
1508 (is->audio_st && is->video_st) ? "A-V" : (is->video_st ? "M-V" : (is->audio_st ? "M-A" : " ")),
1510 is->frame_drops_early + is->frame_drops_late,
1514 is->video_st ? is->video_st->codec->pts_correction_num_faulty_dts : 0,
1515 is->video_st ? is->video_st->codec->pts_correction_num_faulty_pts : 0);
1517 last_time = cur_time;
1522 /* allocate a picture (needs to do that in main thread to avoid
1523 potential locking problems */
/* Runs in the main (event) thread in response to FF_ALLOC_EVENT pushed by
 * queue_picture(); allocates the SDL YUV overlay for the current write
 * slot and wakes the waiting decoder thread. */
1524 static void alloc_picture(VideoState *is)
1529 vp = &is->pictq[is->pictq_windex];
1533 video_open(is, 0, vp);
1535 vp->bmp = SDL_CreateYUVOverlay(vp->width, vp->height,
/* Detect an undersized overlay: the distance between the first two plane
 * pointers must cover at least one full plane (height * pitch); some
 * drivers silently allocate less than requested. */
1538 bufferdiff = vp->bmp ? FFMAX(vp->bmp->pixels[0], vp->bmp->pixels[1]) - FFMIN(vp->bmp->pixels[0], vp->bmp->pixels[1]) : 0;
1539 if (!vp->bmp || vp->bmp->pitches[0] < vp->width || bufferdiff < (int64_t)vp->height * vp->bmp->pitches[0]) {
1540 /* SDL allocates a buffer smaller than requested if the video
1541 * overlay hardware is unable to support the requested size. */
1542 av_log(NULL, AV_LOG_FATAL,
1543 "Error: the video system does not support an image\n"
1544 "size of %dx%d pixels. Try using -lowres or -vf \"scale=w:h\"\n"
1545 "to reduce the image size.\n", vp->width, vp->height );
/* Wake queue_picture(), which waits on pictq_cond for vp->allocated. */
1549 SDL_LockMutex(is->pictq_mutex);
1551 SDL_CondSignal(is->pictq_cond);
1552 SDL_UnlockMutex(is->pictq_mutex);
/* Workaround for an SDL overlay pitch issue: when a plane's pitch exceeds
 * its visible width, replicate the rightmost pixel of each row into the
 * padding column so the border does not show garbage. Iterates the three
 * Y/U/V planes of the overlay. */
1555 static void duplicate_right_border_pixels(SDL_Overlay *bmp) {
1556 int i, width, height;
1558 for (i = 0; i < 3; i++) {
1565 if (bmp->pitches[i] > width) {
/* maxp: last byte of the plane; walk one row at a time via the pitch. */
1566 maxp = bmp->pixels[i] + bmp->pitches[i] * height - 1;
1567 for (p = bmp->pixels[i] + width - 1; p < maxp; p += bmp->pitches[i])
/* Copy a decoded frame into the next free slot of the picture queue,
 * converting to the SDL overlay format. Blocks until a slot is free (or
 * the queue is aborted). pts/duration are in seconds, pos is the byte
 * position in the stream, serial tags the post-seek generation. */
1573 static int queue_picture(VideoState *is, AVFrame *src_frame, double pts, double duration, int64_t pos, int serial)
1577 #if defined(DEBUG_SYNC) && 0
1578 printf("frame_type=%c pts=%0.3f\n",
1579 av_get_picture_type_char(src_frame->pict_type), pts);
1582 /* wait until we have space to put a new picture */
1583 SDL_LockMutex(is->pictq_mutex);
1585 /* keep the last already displayed picture in the queue */
1586 while (is->pictq_size >= VIDEO_PICTURE_QUEUE_SIZE - 1 &&
1587 !is->videoq.abort_request) {
1588 SDL_CondWait(is->pictq_cond, is->pictq_mutex);
1590 SDL_UnlockMutex(is->pictq_mutex);
1592 if (is->videoq.abort_request)
1595 vp = &is->pictq[is->pictq_windex];
1597 vp->sar = src_frame->sample_aspect_ratio;
1599 /* alloc or resize hardware picture buffer */
1600 if (!vp->bmp || vp->reallocate || !vp->allocated ||
1601 vp->width != src_frame->width ||
1602 vp->height != src_frame->height) {
1607 vp->width = src_frame->width;
1608 vp->height = src_frame->height;
1610 /* the allocation must be done in the main thread to avoid
1611 locking problems. */
/* Hand the allocation to the event loop (see alloc_picture()). */
1612 event.type = FF_ALLOC_EVENT;
1613 event.user.data1 = is;
1614 SDL_PushEvent(&event);
1616 /* wait until the picture is allocated */
1617 SDL_LockMutex(is->pictq_mutex);
1618 while (!vp->allocated && !is->videoq.abort_request) {
1619 SDL_CondWait(is->pictq_cond, is->pictq_mutex);
1621 /* if the queue is aborted, we have to pop the pending ALLOC event or wait for the allocation to complete */
1622 if (is->videoq.abort_request && SDL_PeepEvents(&event, 1, SDL_GETEVENT, SDL_EVENTMASK(FF_ALLOC_EVENT)) != 1) {
1623 while (!vp->allocated && !is->abort_request) {
1624 SDL_CondWait(is->pictq_cond, is->pictq_mutex);
1627 SDL_UnlockMutex(is->pictq_mutex);
1629 if (is->videoq.abort_request)
1633 /* if the frame is not skipped, then display it */
1635 AVPicture pict = { { 0 } };
1637 /* get a pointer on the bitmap */
1638 SDL_LockYUVOverlay (vp->bmp);
/* Note the deliberate 1/2 swap: the SDL overlay stores its chroma planes
 * in the opposite (V,U) order from AVPicture's YUV420P (U,V). */
1640 pict.data[0] = vp->bmp->pixels[0];
1641 pict.data[1] = vp->bmp->pixels[2];
1642 pict.data[2] = vp->bmp->pixels[1];
1644 pict.linesize[0] = vp->bmp->pitches[0];
1645 pict.linesize[1] = vp->bmp->pitches[2];
1646 pict.linesize[2] = vp->bmp->pitches[1];
1649 // FIXME use direct rendering
1650 av_picture_copy(&pict, (AVPicture *)src_frame,
1651 src_frame->format, vp->width, vp->height);
/* Non-YUV420P input path: convert with a cached swscale context using the
 * user-selected sws flags. */
1653 av_opt_get_int(sws_opts, "sws_flags", 0, &sws_flags);
1654 is->img_convert_ctx = sws_getCachedContext(is->img_convert_ctx,
1655 vp->width, vp->height, src_frame->format, vp->width, vp->height,
1656 AV_PIX_FMT_YUV420P, sws_flags, NULL, NULL, NULL);
1657 if (is->img_convert_ctx == NULL) {
1658 av_log(NULL, AV_LOG_FATAL, "Cannot initialize the conversion context\n");
1661 sws_scale(is->img_convert_ctx, src_frame->data, src_frame->linesize,
1662 0, vp->height, pict.data, pict.linesize);
1664 /* workaround SDL PITCH_WORKAROUND */
1665 duplicate_right_border_pixels(vp->bmp);
1666 /* update the bitmap content */
1667 SDL_UnlockYUVOverlay(vp->bmp);
1670 vp->duration = duration;
1672 vp->serial = serial;
1674 /* now we can update the picture count */
1675 if (++is->pictq_windex == VIDEO_PICTURE_QUEUE_SIZE)
1676 is->pictq_windex = 0;
1677 SDL_LockMutex(is->pictq_mutex);
1679 SDL_UnlockMutex(is->pictq_mutex);
/* Fetch one packet from the video queue and decode it into *frame.
 * *serial receives the packet-queue serial of the returned packet. A
 * flush packet resets the decoder; early frame dropping is applied here
 * based on the master clock. Return value paths are partly elided in this
 * view. */
1684 static int get_video_frame(VideoState *is, AVFrame *frame, AVPacket *pkt, int *serial)
1688 if (packet_queue_get(&is->videoq, pkt, 1, serial) < 0)
/* Seek/flush marker: drop decoder state instead of decoding. */
1691 if (pkt->data == flush_pkt.data) {
1692 avcodec_flush_buffers(is->video_st->codec);
1696 if(avcodec_decode_video2(is->video_st->codec, frame, &got_picture, pkt) < 0)
/* NULL packet with no output => decoder fully drained for this serial. */
1699 if (!got_picture && !pkt->data)
1700 is->video_finished = *serial;
/* Choose the timestamp source per the -drp option: best-effort (auto),
 * pkt_pts, or pkt_dts. */
1706 if (decoder_reorder_pts == -1) {
1707 frame->pts = av_frame_get_best_effort_timestamp(frame);
1708 } else if (decoder_reorder_pts) {
1709 frame->pts = frame->pkt_pts;
1711 frame->pts = frame->pkt_dts;
1714 if (frame->pts != AV_NOPTS_VALUE)
1715 dpts = av_q2d(is->video_st->time_base) * frame->pts;
1717 frame->sample_aspect_ratio = av_guess_sample_aspect_ratio(is->ic, is->video_st, frame);
/* Early dropping: discard the decoded frame before queueing if it is
 * already behind the master clock (accounting for filter delay), the
 * serial still matches and more packets are waiting. */
1719 if (framedrop>0 || (framedrop && get_master_sync_type(is) != AV_SYNC_VIDEO_MASTER)) {
1720 if (frame->pts != AV_NOPTS_VALUE) {
1721 double diff = dpts - get_master_clock(is);
1722 if (!isnan(diff) && fabs(diff) < AV_NOSYNC_THRESHOLD &&
1723 diff - is->frame_last_filter_delay < 0 &&
1724 *serial == is->vidclk.serial &&
1725 is->videoq.nb_packets) {
1726 is->frame_drops_early++;
1727 av_frame_unref(frame);
/* Link source_ctx -> [user filtergraph] -> sink_ctx and configure the
 * graph. If filtergraph is set it is parsed between "in" and "out" labels;
 * otherwise source and sink are linked directly. Returns 0 on success or
 * a negative AVERROR code. */
1739 static int configure_filtergraph(AVFilterGraph *graph, const char *filtergraph,
1740 AVFilterContext *source_ctx, AVFilterContext *sink_ctx)
/* Remember how many filters existed before parsing, for the reorder below. */
1743 int nb_filters = graph->nb_filters;
1744 AVFilterInOut *outputs = NULL, *inputs = NULL;
1747 outputs = avfilter_inout_alloc();
1748 inputs = avfilter_inout_alloc();
1749 if (!outputs || !inputs) {
1750 ret = AVERROR(ENOMEM);
/* "in" is what the user graph consumes (our source's output pad);
 * "out" is what it feeds (our sink's input pad). */
1754 outputs->name = av_strdup("in");
1755 outputs->filter_ctx = source_ctx;
1756 outputs->pad_idx = 0;
1757 outputs->next = NULL;
1759 inputs->name = av_strdup("out");
1760 inputs->filter_ctx = sink_ctx;
1761 inputs->pad_idx = 0;
1762 inputs->next = NULL;
1764 if ((ret = avfilter_graph_parse_ptr(graph, filtergraph, &inputs, &outputs, NULL)) < 0)
1767 if ((ret = avfilter_link(source_ctx, 0, sink_ctx, 0)) < 0)
1771 /* Reorder the filters to ensure that inputs of the custom filters are merged first */
1772 for (i = 0; i < graph->nb_filters - nb_filters; i++)
1773 FFSWAP(AVFilterContext*, graph->filters[i], graph->filters[i + nb_filters]);
1775 ret = avfilter_graph_config(graph, NULL);
/* Both lists are freed on every path; avfilter_graph_parse_ptr consumes
 * them into the graph on success. */
1777 avfilter_inout_free(&outputs);
1778 avfilter_inout_free(&inputs);
/* Build the video filter chain for the current frame parameters:
 * buffer source -> (rotation per metadata) -> crop-to-even -> user
 * vfilters -> buffersink constrained to YUV420P (what the SDL overlay
 * path expects). Stores the endpoints in is->in/out_video_filter. */
1782 static int configure_video_filters(AVFilterGraph *graph, VideoState *is, const char *vfilters, AVFrame *frame)
1784 static const enum AVPixelFormat pix_fmts[] = { AV_PIX_FMT_YUV420P, AV_PIX_FMT_NONE };
1785 char sws_flags_str[128];
1786 char buffersrc_args[256];
1788 AVFilterContext *filt_src = NULL, *filt_out = NULL, *last_filter = NULL;
1789 AVCodecContext *codec = is->video_st->codec;
1790 AVRational fr = av_guess_frame_rate(is->ic, is->video_st, NULL);
/* Propagate the user's sws flags to any auto-inserted scale filters. */
1792 av_opt_get_int(sws_opts, "sws_flags", 0, &sws_flags);
1793 snprintf(sws_flags_str, sizeof(sws_flags_str), "flags=%"PRId64, sws_flags);
1794 graph->scale_sws_opts = av_strdup(sws_flags_str);
/* Describe the input frames to the buffer source; FFMAX guards against a
 * zero SAR denominator. */
1796 snprintf(buffersrc_args, sizeof(buffersrc_args),
1797 "video_size=%dx%d:pix_fmt=%d:time_base=%d/%d:pixel_aspect=%d/%d",
1798 frame->width, frame->height, frame->format,
1799 is->video_st->time_base.num, is->video_st->time_base.den,
1800 codec->sample_aspect_ratio.num, FFMAX(codec->sample_aspect_ratio.den, 1));
1801 if (fr.num && fr.den)
1802 av_strlcatf(buffersrc_args, sizeof(buffersrc_args), ":frame_rate=%d/%d", fr.num, fr.den);
1804 if ((ret = avfilter_graph_create_filter(&filt_src,
1805 avfilter_get_by_name("buffer"),
1806 "ffplay_buffer", buffersrc_args, NULL,
1810 ret = avfilter_graph_create_filter(&filt_out,
1811 avfilter_get_by_name("buffersink"),
1812 "ffplay_buffersink", NULL, NULL, graph);
1816 if ((ret = av_opt_set_int_list(filt_out, "pix_fmts", pix_fmts, AV_PIX_FMT_NONE, AV_OPT_SEARCH_CHILDREN)) < 0)
1819 last_filter = filt_out;
1821 /* Note: this macro adds a filter before the lastly added filter, so the
1822 * processing order of the filters is in reverse */
1823 #define INSERT_FILT(name, arg) do { \
1824 AVFilterContext *filt_ctx; \
1826 ret = avfilter_graph_create_filter(&filt_ctx, \
1827 avfilter_get_by_name(name), \
1828 "ffplay_" name, arg, NULL, graph); \
1832 ret = avfilter_link(filt_ctx, 0, last_filter, 0); \
1836 last_filter = filt_ctx; \
1839 /* SDL YUV code is not handling odd width/height for some driver
1840 * combinations, therefore we crop the picture to an even width/height. */
1841 INSERT_FILT("crop", "floor(in_w/2)*2:floor(in_h/2)*2");
/* Honor the stream's "rotate" metadata: use transpose/flip for the
 * lossless 90/180/270 cases, a generic rotate filter otherwise. */
1844 AVDictionaryEntry *rotate_tag = av_dict_get(is->video_st->metadata, "rotate", NULL, 0);
1845 if (rotate_tag && *rotate_tag->value && strcmp(rotate_tag->value, "0")) {
1846 if (!strcmp(rotate_tag->value, "90")) {
1847 INSERT_FILT("transpose", "clock");
1848 } else if (!strcmp(rotate_tag->value, "180")) {
1849 INSERT_FILT("hflip", NULL);
1850 INSERT_FILT("vflip", NULL);
1851 } else if (!strcmp(rotate_tag->value, "270")) {
1852 INSERT_FILT("transpose", "cclock");
1854 char rotate_buf[64];
1855 snprintf(rotate_buf, sizeof(rotate_buf), "%s*PI/180", rotate_tag->value);
1856 INSERT_FILT("rotate", rotate_buf);
1861 if ((ret = configure_filtergraph(graph, vfilters, filt_src, last_filter)) < 0)
1864 is->in_video_filter = filt_src;
1865 is->out_video_filter = filt_out;
/* (Re)build the audio filter graph: abuffer source described by
 * is->audio_filter_src -> user afilters -> abuffersink forced to S16.
 * When force_output_format is set the sink is additionally pinned to the
 * device format in is->audio_tgt. Replaces is->agraph and stores the
 * endpoints in is->in/out_audio_filter. */
1871 static int configure_audio_filters(VideoState *is, const char *afilters, int force_output_format)
1873 static const enum AVSampleFormat sample_fmts[] = { AV_SAMPLE_FMT_S16, AV_SAMPLE_FMT_NONE };
/* -1 terminates each av_opt_set_int_list list; slot 0 is filled below. */
1874 int sample_rates[2] = { 0, -1 };
1875 int64_t channel_layouts[2] = { 0, -1 };
1876 int channels[2] = { 0, -1 };
1877 AVFilterContext *filt_asrc = NULL, *filt_asink = NULL;
1878 char aresample_swr_opts[512] = "";
1879 AVDictionaryEntry *e = NULL;
1880 char asrc_args[256];
1883 avfilter_graph_free(&is->agraph);
1884 if (!(is->agraph = avfilter_graph_alloc()))
1885 return AVERROR(ENOMEM);
/* Forward the user's swr options ("key=value:...") to auto-inserted
 * aresample filters; strip the trailing ':' left by the loop. */
1887 while ((e = av_dict_get(swr_opts, "", e, AV_DICT_IGNORE_SUFFIX)))
1888 av_strlcatf(aresample_swr_opts, sizeof(aresample_swr_opts), "%s=%s:", e->key, e->value);
1889 if (strlen(aresample_swr_opts))
1890 aresample_swr_opts[strlen(aresample_swr_opts)-1] = '\0';
1891 av_opt_set(is->agraph, "aresample_swr_opts", aresample_swr_opts, 0);
1893 ret = snprintf(asrc_args, sizeof(asrc_args),
1894 "sample_rate=%d:sample_fmt=%s:channels=%d:time_base=%d/%d",
1895 is->audio_filter_src.freq, av_get_sample_fmt_name(is->audio_filter_src.fmt),
1896 is->audio_filter_src.channels,
1897 1, is->audio_filter_src.freq);
1898 if (is->audio_filter_src.channel_layout)
1899 snprintf(asrc_args + ret, sizeof(asrc_args) - ret,
1900 ":channel_layout=0x%"PRIx64, is->audio_filter_src.channel_layout);
1902 ret = avfilter_graph_create_filter(&filt_asrc,
1903 avfilter_get_by_name("abuffer"), "ffplay_abuffer",
1904 asrc_args, NULL, is->agraph);
1909 ret = avfilter_graph_create_filter(&filt_asink,
1910 avfilter_get_by_name("abuffersink"), "ffplay_abuffersink",
1911 NULL, NULL, is->agraph);
1915 if ((ret = av_opt_set_int_list(filt_asink, "sample_fmts", sample_fmts, AV_SAMPLE_FMT_NONE, AV_OPT_SEARCH_CHILDREN)) < 0)
1917 if ((ret = av_opt_set_int(filt_asink, "all_channel_counts", 1, AV_OPT_SEARCH_CHILDREN)) < 0)
/* Pin the sink to the exact device layout/count/rate; all_channel_counts
 * must be cleared again or the channel constraints would be ignored. */
1920 if (force_output_format) {
1921 channel_layouts[0] = is->audio_tgt.channel_layout;
1922 channels [0] = is->audio_tgt.channels;
1923 sample_rates [0] = is->audio_tgt.freq;
1924 if ((ret = av_opt_set_int(filt_asink, "all_channel_counts", 0, AV_OPT_SEARCH_CHILDREN)) < 0)
1926 if ((ret = av_opt_set_int_list(filt_asink, "channel_layouts", channel_layouts, -1, AV_OPT_SEARCH_CHILDREN)) < 0)
1928 if ((ret = av_opt_set_int_list(filt_asink, "channel_counts" , channels , -1, AV_OPT_SEARCH_CHILDREN)) < 0)
1930 if ((ret = av_opt_set_int_list(filt_asink, "sample_rates" , sample_rates , -1, AV_OPT_SEARCH_CHILDREN)) < 0)
1935 if ((ret = configure_filtergraph(is->agraph, afilters, filt_asrc, filt_asink)) < 0)
1938 is->in_audio_filter = filt_asrc;
1939 is->out_audio_filter = filt_asink;
/* Error path: drop the half-built graph. */
1943 avfilter_graph_free(&is->agraph);
1946 #endif /* CONFIG_AVFILTER */
/* Decoder thread for the video stream: pulls packets, decodes frames,
 * pushes them through the (re)configured filter graph and queues the
 * filtered pictures for display. Runs until abort or fatal error. */
1948 static int video_thread(void *arg)
1950 AVPacket pkt = { 0 };
1951 VideoState *is = arg;
1952 AVFrame *frame = av_frame_alloc();
1957 AVRational tb = is->video_st->time_base;
1958 AVRational frame_rate = av_guess_frame_rate(is->ic, is->video_st, NULL);
1961 AVFilterGraph *graph = avfilter_graph_alloc();
1962 AVFilterContext *filt_out = NULL, *filt_in = NULL;
/* -2 is an invalid pix_fmt sentinel so the first frame always triggers a
 * graph (re)configure. */
1965 enum AVPixelFormat last_format = -2;
1966 int last_serial = -1;
1967 int last_vfilter_idx = 0;
1971 while (is->paused && !is->videoq.abort_request)
1974 av_free_packet(&pkt);
1976 ret = get_video_frame(is, frame, &pkt, &serial);
/* Rebuild the filter graph whenever the frame geometry/format, the queue
 * serial (seek) or the selected vfilter chain changes. */
1983 if ( last_w != frame->width
1984 || last_h != frame->height
1985 || last_format != frame->format
1986 || last_serial != serial
1987 || last_vfilter_idx != is->vfilter_idx) {
1988 av_log(NULL, AV_LOG_DEBUG,
1989 "Video frame changed from size:%dx%d format:%s serial:%d to size:%dx%d format:%s serial:%d\n",
1991 (const char *)av_x_if_null(av_get_pix_fmt_name(last_format), "none"), last_serial,
1992 frame->width, frame->height,
1993 (const char *)av_x_if_null(av_get_pix_fmt_name(frame->format), "none"), serial);
1994 avfilter_graph_free(&graph);
1995 graph = avfilter_graph_alloc();
1996 if ((ret = configure_video_filters(graph, is, vfilters_list ? vfilters_list[is->vfilter_idx] : NULL, frame)) < 0) {
/* Unrecoverable filter setup failure: ask the main loop to quit. */
1998 event.type = FF_QUIT_EVENT;
1999 event.user.data1 = is;
2000 SDL_PushEvent(&event);
2003 filt_in = is->in_video_filter;
2004 filt_out = is->out_video_filter;
2005 last_w = frame->width;
2006 last_h = frame->height;
2007 last_format = frame->format;
2008 last_serial = serial;
2009 last_vfilter_idx = is->vfilter_idx;
2010 frame_rate = filt_out->inputs[0]->frame_rate;
2013 ret = av_buffersrc_add_frame(filt_in, frame);
/* Measure how long frames spend inside the filter graph; implausibly
 * large values are treated as clock jumps and reset to 0. */
2018 is->frame_last_returned_time = av_gettime_relative() / 1000000.0;
2020 ret = av_buffersink_get_frame_flags(filt_out, frame, 0);
2022 if (ret == AVERROR_EOF)
2023 is->video_finished = serial;
2028 is->frame_last_filter_delay = av_gettime_relative() / 1000000.0 - is->frame_last_returned_time;
2029 if (fabs(is->frame_last_filter_delay) > AV_NOSYNC_THRESHOLD / 10.0)
2030 is->frame_last_filter_delay = 0;
2031 tb = filt_out->inputs[0]->time_base;
/* duration = 1/frame_rate in seconds (0 if the rate is unknown); pts is
 * converted from the sink time base to seconds. */
2033 duration = (frame_rate.num && frame_rate.den ? av_q2d((AVRational){frame_rate.den, frame_rate.num}) : 0);
2034 pts = (frame->pts == AV_NOPTS_VALUE) ? NAN : frame->pts * av_q2d(tb);
2035 ret = queue_picture(is, frame, pts, duration, av_frame_get_pkt_pos(frame), serial);
2036 av_frame_unref(frame);
2046 avfilter_graph_free(&graph);
2048 av_free_packet(&pkt);
2049 av_frame_free(&frame);
/* Decoder thread for the subtitle stream: pulls packets, decodes them into
 * the subpicture queue, and converts bitmap-subtitle palettes from RGBA to
 * CCIR YUVA for display. */
2053 static int subtitle_thread(void *arg)
2055 VideoState *is = arg;
2057 AVPacket pkt1, *pkt = &pkt1;
2062 int r, g, b, y, u, v, a;
2065 while (is->paused && !is->subtitleq.abort_request) {
2068 if (packet_queue_get(&is->subtitleq, pkt, 1, &serial) < 0)
/* Seek/flush marker: reset the subtitle decoder. */
2071 if (pkt->data == flush_pkt.data) {
2072 avcodec_flush_buffers(is->subtitle_st->codec);
/* Wait for a free slot in the subpicture queue (video_refresh signals
 * subpq_cond as it consumes entries). */
2075 SDL_LockMutex(is->subpq_mutex);
2076 while (is->subpq_size >= SUBPICTURE_QUEUE_SIZE &&
2077 !is->subtitleq.abort_request) {
2078 SDL_CondWait(is->subpq_cond, is->subpq_mutex);
2080 SDL_UnlockMutex(is->subpq_mutex);
2082 if (is->subtitleq.abort_request)
2085 sp = &is->subpq[is->subpq_windex];
2087 /* NOTE: ipts is the PTS of the _first_ picture beginning in
2088 this packet, if any */
2090 if (pkt->pts != AV_NOPTS_VALUE)
2091 pts = av_q2d(is->subtitle_st->time_base) * pkt->pts;
2093 avcodec_decode_subtitle2(is->subtitle_st->codec, &sp->sub,
2094 &got_subtitle, pkt);
/* format == 0 means a bitmap (graphical) subtitle. */
2095 if (got_subtitle && sp->sub.format == 0) {
/* Prefer the decoder-provided pts (in AV_TIME_BASE units) when set. */
2096 if (sp->sub.pts != AV_NOPTS_VALUE)
2097 pts = sp->sub.pts / (double)AV_TIME_BASE;
2099 sp->serial = serial;
/* Convert each rect's RGBA palette entries (pict.data[1]) in place to
 * CCIR-range YUVA, which the blend path consumes. */
2101 for (i = 0; i < sp->sub.num_rects; i++)
2103 for (j = 0; j < sp->sub.rects[i]->nb_colors; j++)
2105 RGBA_IN(r, g, b, a, (uint32_t*)sp->sub.rects[i]->pict.data[1] + j);
2106 y = RGB_TO_Y_CCIR(r, g, b);
2107 u = RGB_TO_U_CCIR(r, g, b, 0);
2108 v = RGB_TO_V_CCIR(r, g, b, 0);
2109 YUVA_OUT((uint32_t*)sp->sub.rects[i]->pict.data[1] + j, y, u, v, a);
2113 /* now we can update the picture count */
2114 if (++is->subpq_windex == SUBPICTURE_QUEUE_SIZE)
2115 is->subpq_windex = 0;
2116 SDL_LockMutex(is->subpq_mutex);
2118 SDL_UnlockMutex(is->subpq_mutex);
/* Non-bitmap subtitles are not displayed here: free them immediately. */
2119 } else if (got_subtitle) {
2120 avsubtitle_free(&sp->sub);
2122 av_free_packet(pkt);
2127 /* copy samples for viewing in editor window */
/* Appends decoded int16 samples into the circular sample_array used by the
 * waveform/RDFT visualization; the write index wraps at SAMPLE_ARRAY_SIZE. */
2128 static void update_sample_display(VideoState *is, short *samples, int samples_size)
2132 size = samples_size / sizeof(short);
/* len: contiguous room left before the end of the circular buffer. */
2134 len = SAMPLE_ARRAY_SIZE - is->sample_array_index;
2137 memcpy(is->sample_array + is->sample_array_index, samples, len * sizeof(short));
2139 is->sample_array_index += len;
2140 if (is->sample_array_index >= SAMPLE_ARRAY_SIZE)
2141 is->sample_array_index = 0;
2146 /* return the wanted number of samples to get better sync if sync_type is video
2147 * or external master clock */
/* nb_samples is the decoded frame's sample count; the return value is the
 * count the resampler should produce, nudged toward the master clock but
 * clamped to +/- SAMPLE_CORRECTION_PERCENT_MAX percent. */
2148 static int synchronize_audio(VideoState *is, int nb_samples)
2150 int wanted_nb_samples = nb_samples;
2152 /* if not master, then we try to remove or add samples to correct the clock */
2153 if (get_master_sync_type(is) != AV_SYNC_AUDIO_MASTER) {
2154 double diff, avg_diff;
2155 int min_nb_samples, max_nb_samples;
2157 diff = get_clock(&is->audclk) - get_master_clock(is);
2159 if (!isnan(diff) && fabs(diff) < AV_NOSYNC_THRESHOLD) {
/* Exponentially-weighted running estimate of the A-V difference; only
 * act once AUDIO_DIFF_AVG_NB measurements have accumulated. */
2160 is->audio_diff_cum = diff + is->audio_diff_avg_coef * is->audio_diff_cum;
2161 if (is->audio_diff_avg_count < AUDIO_DIFF_AVG_NB) {
2162 /* not enough measures to have a correct estimate */
2163 is->audio_diff_avg_count++;
2165 /* estimate the A-V difference */
2166 avg_diff = is->audio_diff_cum * (1.0 - is->audio_diff_avg_coef);
2168 if (fabs(avg_diff) >= is->audio_diff_threshold) {
2169 wanted_nb_samples = nb_samples + (int)(diff * is->audio_src.freq);
2170 min_nb_samples = ((nb_samples * (100 - SAMPLE_CORRECTION_PERCENT_MAX) / 100));
2171 max_nb_samples = ((nb_samples * (100 + SAMPLE_CORRECTION_PERCENT_MAX) / 100));
2172 wanted_nb_samples = FFMIN(FFMAX(wanted_nb_samples, min_nb_samples), max_nb_samples);
2174 av_dlog(NULL, "diff=%f adiff=%f sample_diff=%d apts=%0.3f %f\n",
2175 diff, avg_diff, wanted_nb_samples - nb_samples,
2176 is->audio_clock, is->audio_diff_threshold);
2179 /* too big difference : may be initial PTS errors, so
2181 is->audio_diff_avg_count = 0;
2182 is->audio_diff_cum = 0;
2186 return wanted_nb_samples;
2190 * Decode one audio frame and return its uncompressed size.
2192 * The processed audio frame is decoded, converted if required, and
2193 * stored in is->audio_buf, with size in bytes given by the return
/* NOTE(review): large parts of this function's body are elided in this
 * view (filter #ifdefs, error paths); comments annotate only the visible
 * statements. */
2196 static int audio_decode_frame(VideoState *is)
2198 AVPacket *pkt_temp = &is->audio_pkt_temp;
2199 AVPacket *pkt = &is->audio_pkt;
2200 AVCodecContext *dec = is->audio_st->codec;
2201 int len1, data_size, resampled_data_size;
2202 int64_t dec_channel_layout;
2204 av_unused double audio_clock0;
2205 int wanted_nb_samples;
2211 /* NOTE: the audio packet can contain several frames */
/* Loop while the current packet still has undecoded data (stream_index is
 * used as a "packet in progress" flag; -1 means exhausted) or filtered
 * frames are still pending in the audio graph. */
2212 while (pkt_temp->stream_index != -1 || is->audio_buf_frames_pending) {
2214 if (!(is->frame = av_frame_alloc()))
2215 return AVERROR(ENOMEM);
2217 av_frame_unref(is->frame);
/* A serial change means a seek happened: abandon this packet. */
2220 if (is->audioq.serial != is->audio_pkt_temp_serial)
2226 if (!is->audio_buf_frames_pending) {
2227 len1 = avcodec_decode_audio4(dec, is->frame, &got_frame, pkt_temp);
2229 /* if error, we skip the frame */
/* Advance past the consumed bytes; only the first decode call of a
 * packet may use its pts. */
2235 pkt_temp->pts = AV_NOPTS_VALUE;
2236 pkt_temp->data += len1;
2237 pkt_temp->size -= len1;
2238 if (pkt_temp->data && pkt_temp->size <= 0 || !pkt_temp->data && !got_frame)
2239 pkt_temp->stream_index = -1;
/* Drained the NULL (flush) packet with no output: stream finished. */
2240 if (!pkt_temp->data && !got_frame)
2241 is->audio_finished = is->audio_pkt_temp_serial;
/* Derive the frame pts in samples (tb = 1/sample_rate), falling back
 * from frame pts -> packet pts -> predicted next pts. */
2246 tb = (AVRational){1, is->frame->sample_rate};
2247 if (is->frame->pts != AV_NOPTS_VALUE)
2248 is->frame->pts = av_rescale_q(is->frame->pts, dec->time_base, tb);
2249 else if (is->frame->pkt_pts != AV_NOPTS_VALUE)
2250 is->frame->pts = av_rescale_q(is->frame->pkt_pts, is->audio_st->time_base, tb);
2251 else if (is->audio_frame_next_pts != AV_NOPTS_VALUE)
2253 is->frame->pts = av_rescale_q(is->audio_frame_next_pts, (AVRational){1, is->audio_filter_src.freq}, tb);
2255 is->frame->pts = av_rescale_q(is->audio_frame_next_pts, (AVRational){1, is->audio_src.freq}, tb);
2258 if (is->frame->pts != AV_NOPTS_VALUE)
2259 is->audio_frame_next_pts = is->frame->pts + is->frame->nb_samples;
2262 dec_channel_layout = get_valid_channel_layout(is->frame->channel_layout, av_frame_get_channels(is->frame));
/* Rebuild the audio filter graph when the decoded format, layout, rate
 * or queue serial differs from what the graph was configured for. */
2265 cmp_audio_fmts(is->audio_filter_src.fmt, is->audio_filter_src.channels,
2266 is->frame->format, av_frame_get_channels(is->frame)) ||
2267 is->audio_filter_src.channel_layout != dec_channel_layout ||
2268 is->audio_filter_src.freq != is->frame->sample_rate ||
2269 is->audio_pkt_temp_serial != is->audio_last_serial;
2272 char buf1[1024], buf2[1024];
2273 av_get_channel_layout_string(buf1, sizeof(buf1), -1, is->audio_filter_src.channel_layout);
2274 av_get_channel_layout_string(buf2, sizeof(buf2), -1, dec_channel_layout);
2275 av_log(NULL, AV_LOG_DEBUG,
2276 "Audio frame changed from rate:%d ch:%d fmt:%s layout:%s serial:%d to rate:%d ch:%d fmt:%s layout:%s serial:%d\n",
2277 is->audio_filter_src.freq, is->audio_filter_src.channels, av_get_sample_fmt_name(is->audio_filter_src.fmt), buf1, is->audio_last_serial,
2278 is->frame->sample_rate, av_frame_get_channels(is->frame), av_get_sample_fmt_name(is->frame->format), buf2, is->audio_pkt_temp_serial);
2280 is->audio_filter_src.fmt = is->frame->format;
2281 is->audio_filter_src.channels = av_frame_get_channels(is->frame);
2282 is->audio_filter_src.channel_layout = dec_channel_layout;
2283 is->audio_filter_src.freq = is->frame->sample_rate;
2284 is->audio_last_serial = is->audio_pkt_temp_serial;
2286 if ((ret = configure_audio_filters(is, afilters, 1)) < 0)
2290 if ((ret = av_buffersrc_add_frame(is->in_audio_filter, is->frame)) < 0)
/* Pull filtered frames; EAGAIN means the graph needs more input. */
2295 if ((ret = av_buffersink_get_frame_flags(is->out_audio_filter, is->frame, 0)) < 0) {
2296 if (ret == AVERROR(EAGAIN)) {
2297 is->audio_buf_frames_pending = 0;
2300 if (ret == AVERROR_EOF)
2301 is->audio_finished = is->audio_pkt_temp_serial;
2304 is->audio_buf_frames_pending = 1;
2305 tb = is->out_audio_filter->inputs[0]->time_base;
2308 data_size = av_samples_get_buffer_size(NULL, av_frame_get_channels(is->frame),
2309 is->frame->nb_samples,
2310 is->frame->format, 1);
/* Trust the frame's layout only if it matches the channel count. */
2312 dec_channel_layout =
2313 (is->frame->channel_layout && av_frame_get_channels(is->frame) == av_get_channel_layout_nb_channels(is->frame->channel_layout)) ?
2314 is->frame->channel_layout : av_get_default_channel_layout(av_frame_get_channels(is->frame));
2315 wanted_nb_samples = synchronize_audio(is, is->frame->nb_samples);
/* (Re)create the resampler when the source format changed, or when sync
 * correction requires sample insertion/deletion and no context exists. */
2317 if (is->frame->format != is->audio_src.fmt ||
2318 dec_channel_layout != is->audio_src.channel_layout ||
2319 is->frame->sample_rate != is->audio_src.freq ||
2320 (wanted_nb_samples != is->frame->nb_samples && !is->swr_ctx)) {
2321 swr_free(&is->swr_ctx);
2322 is->swr_ctx = swr_alloc_set_opts(NULL,
2323 is->audio_tgt.channel_layout, is->audio_tgt.fmt, is->audio_tgt.freq,
2324 dec_channel_layout, is->frame->format, is->frame->sample_rate,
2326 if (!is->swr_ctx || swr_init(is->swr_ctx) < 0) {
2327 av_log(NULL, AV_LOG_ERROR,
2328 "Cannot create sample rate converter for conversion of %d Hz %s %d channels to %d Hz %s %d channels!\n",
2329 is->frame->sample_rate, av_get_sample_fmt_name(is->frame->format), av_frame_get_channels(is->frame),
2330 is->audio_tgt.freq, av_get_sample_fmt_name(is->audio_tgt.fmt), is->audio_tgt.channels);
2333 is->audio_src.channel_layout = dec_channel_layout;
2334 is->audio_src.channels = av_frame_get_channels(is->frame);
2335 is->audio_src.freq = is->frame->sample_rate;
2336 is->audio_src.fmt = is->frame->format;
/* Resampling path: size the output for the rate ratio plus headroom. */
2340 const uint8_t **in = (const uint8_t **)is->frame->extended_data;
2341 uint8_t **out = &is->audio_buf1;
2342 int out_count = (int64_t)wanted_nb_samples * is->audio_tgt.freq / is->frame->sample_rate + 256;
2343 int out_size = av_samples_get_buffer_size(NULL, is->audio_tgt.channels, out_count, is->audio_tgt.fmt, 0);
2346 av_log(NULL, AV_LOG_ERROR, "av_samples_get_buffer_size() failed\n");
/* Apply the sync correction by stretching/shrinking via compensation. */
2349 if (wanted_nb_samples != is->frame->nb_samples) {
2350 if (swr_set_compensation(is->swr_ctx, (wanted_nb_samples - is->frame->nb_samples) * is->audio_tgt.freq / is->frame->sample_rate,
2351 wanted_nb_samples * is->audio_tgt.freq / is->frame->sample_rate) < 0) {
2352 av_log(NULL, AV_LOG_ERROR, "swr_set_compensation() failed\n");
2356 av_fast_malloc(&is->audio_buf1, &is->audio_buf1_size, out_size);
2357 if (!is->audio_buf1)
2358 return AVERROR(ENOMEM);
2359 len2 = swr_convert(is->swr_ctx, out, out_count, in, is->frame->nb_samples);
2361 av_log(NULL, AV_LOG_ERROR, "swr_convert() failed\n");
/* Output filled to the brim: samples may be buffered inside swr; reset
 * the context to avoid unbounded latency. */
2364 if (len2 == out_count) {
2365 av_log(NULL, AV_LOG_WARNING, "audio buffer is probably too small\n");
2366 swr_init(is->swr_ctx);
2368 is->audio_buf = is->audio_buf1;
2369 resampled_data_size = len2 * is->audio_tgt.channels * av_get_bytes_per_sample(is->audio_tgt.fmt);
/* No conversion needed: point audio_buf at the frame's own data. */
2371 is->audio_buf = is->frame->data[0];
2372 resampled_data_size = data_size;
2375 audio_clock0 = is->audio_clock;
2376 /* update the audio clock with the pts */
2377 if (is->frame->pts != AV_NOPTS_VALUE)
2378 is->audio_clock = is->frame->pts * av_q2d(tb) + (double) is->frame->nb_samples / is->frame->sample_rate;
2380 is->audio_clock = NAN;
2381 is->audio_clock_serial = is->audio_pkt_temp_serial;
2384 static double last_clock;
2385 printf("audio: delay=%0.3f clock=%0.3f clock0=%0.3f\n",
2386 is->audio_clock - last_clock,
2387 is->audio_clock, audio_clock0);
2388 last_clock = is->audio_clock;
2391 return resampled_data_size;
2394 /* free the current packet */
2396 av_free_packet(pkt);
2397 memset(pkt_temp, 0, sizeof(*pkt_temp));
2398 pkt_temp->stream_index = -1;
2400 if (is->audioq.abort_request) {
/* Queue empty: nudge the read thread to fetch more data. */
2404 if (is->audioq.nb_packets == 0)
2405 SDL_CondSignal(is->continue_read_thread);
2407 /* read next packet */
2408 if ((packet_queue_get(&is->audioq, pkt, 1, &is->audio_pkt_temp_serial)) < 0)
/* Seek/flush marker: reset decoder and pts prediction; for formats that
 * cannot seek precisely, restart prediction at the stream start time. */
2411 if (pkt->data == flush_pkt.data) {
2412 avcodec_flush_buffers(dec);
2413 is->audio_buf_frames_pending = 0;
2414 is->audio_frame_next_pts = AV_NOPTS_VALUE;
2415 if ((is->ic->iformat->flags & (AVFMT_NOBINSEARCH | AVFMT_NOGENSEARCH | AVFMT_NO_BYTE_SEEK)) && !is->ic->iformat->read_seek)
2416 is->audio_frame_next_pts = is->audio_st->start_time;
2423 /* prepare a new audio buffer */
/* SDL audio callback: fill `stream` with `len` bytes, refilling audio_buf
 * via audio_decode_frame() as needed, then update the audio clock from
 * how much data is still sitting in our and SDL's buffers. */
2424 static void sdl_audio_callback(void *opaque, Uint8 *stream, int len)
2426 VideoState *is = opaque;
2427 int audio_size, len1;
2429 audio_callback_time = av_gettime_relative();
2432 if (is->audio_buf_index >= is->audio_buf_size) {
2433 audio_size = audio_decode_frame(is);
2434 if (audio_size < 0) {
2435 /* if error, just output silence */
/* Round the silence size down to a whole frame to keep alignment. */
2436 is->audio_buf = is->silence_buf;
2437 is->audio_buf_size = sizeof(is->silence_buf) / is->audio_tgt.frame_size * is->audio_tgt.frame_size;
2439 if (is->show_mode != SHOW_MODE_VIDEO)
2440 update_sample_display(is, (int16_t *)is->audio_buf, audio_size);
2441 is->audio_buf_size = audio_size;
2443 is->audio_buf_index = 0;
2445 len1 = is->audio_buf_size - is->audio_buf_index;
2448 memcpy(stream, (uint8_t *)is->audio_buf + is->audio_buf_index, len1);
2451 is->audio_buf_index += len1;
2453 is->audio_write_buf_size = is->audio_buf_size - is->audio_buf_index;
2454 /* Let's assume the audio driver that is used by SDL has two periods. */
/* Clock = pts of the last decoded data minus the latency of the bytes not
 * yet played (our remainder + two SDL hardware buffers). */
2455 if (!isnan(is->audio_clock)) {
2456 set_clock_at(&is->audclk, is->audio_clock - (double)(2 * is->audio_hw_buf_size + is->audio_write_buf_size) / is->audio_tgt.bytes_per_sec, is->audio_clock_serial, audio_callback_time / 1000000.0);
2457 sync_clock_to_slave(&is->extclk, &is->audclk);
/* Open the SDL audio device with the requested layout/rate, falling back
 * through smaller channel counts and then lower sample rates until SDL
 * accepts a combination. On success fills *audio_hw_params with the
 * negotiated S16 format. Returns a negative value on failure (success
 * return path elided in this view). */
2461 static int audio_open(void *opaque, int64_t wanted_channel_layout, int wanted_nb_channels, int wanted_sample_rate, struct AudioParams *audio_hw_params)
2463 SDL_AudioSpec wanted_spec, spec;
/* Fallback table: next channel count to try, indexed by current count
 * (index 0/1 => 0 terminates the channel fallback). */
2465 static const int next_nb_channels[] = {0, 0, 1, 6, 2, 6, 4, 6};
2466 static const int next_sample_rates[] = {0, 44100, 48000, 96000, 192000};
2467 int next_sample_rate_idx = FF_ARRAY_ELEMS(next_sample_rates) - 1;
/* Environment override for the channel count. */
2469 env = SDL_getenv("SDL_AUDIO_CHANNELS");
2471 wanted_nb_channels = atoi(env);
2472 wanted_channel_layout = av_get_default_channel_layout(wanted_nb_channels);
2474 if (!wanted_channel_layout || wanted_nb_channels != av_get_channel_layout_nb_channels(wanted_channel_layout)) {
2475 wanted_channel_layout = av_get_default_channel_layout(wanted_nb_channels);
2476 wanted_channel_layout &= ~AV_CH_LAYOUT_STEREO_DOWNMIX;
2478 wanted_nb_channels = av_get_channel_layout_nb_channels(wanted_channel_layout);
2479 wanted_spec.channels = wanted_nb_channels;
2480 wanted_spec.freq = wanted_sample_rate;
2481 if (wanted_spec.freq <= 0 || wanted_spec.channels <= 0) {
2482 av_log(NULL, AV_LOG_ERROR, "Invalid sample rate or channel count!\n");
/* Start the rate fallback just below the requested frequency. */
2485 while (next_sample_rate_idx && next_sample_rates[next_sample_rate_idx] >= wanted_spec.freq)
2486 next_sample_rate_idx--;
2487 wanted_spec.format = AUDIO_S16SYS;
2488 wanted_spec.silence = 0;
2489 wanted_spec.samples = SDL_AUDIO_BUFFER_SIZE;
2490 wanted_spec.callback = sdl_audio_callback;
2491 wanted_spec.userdata = opaque;
/* Retry loop: reduce channels first, then drop to the next lower sample
 * rate with the original channel count; give up when rates are exhausted. */
2492 while (SDL_OpenAudio(&wanted_spec, &spec) < 0) {
2493 av_log(NULL, AV_LOG_WARNING, "SDL_OpenAudio (%d channels, %d Hz): %s\n",
2494 wanted_spec.channels, wanted_spec.freq, SDL_GetError());
2495 wanted_spec.channels = next_nb_channels[FFMIN(7, wanted_spec.channels)];
2496 if (!wanted_spec.channels) {
2497 wanted_spec.freq = next_sample_rates[next_sample_rate_idx--];
2498 wanted_spec.channels = wanted_nb_channels;
2499 if (!wanted_spec.freq) {
2500 av_log(NULL, AV_LOG_ERROR,
2501 "No more combinations to try, audio open failed\n");
2505 wanted_channel_layout = av_get_default_channel_layout(wanted_spec.channels);
/* Only S16 output is supported by the playback path. */
2507 if (spec.format != AUDIO_S16SYS) {
2508 av_log(NULL, AV_LOG_ERROR,
2509 "SDL advised audio format %d is not supported!\n", spec.format);
2512 if (spec.channels != wanted_spec.channels) {
2513 wanted_channel_layout = av_get_default_channel_layout(spec.channels);
2514 if (!wanted_channel_layout) {
2515 av_log(NULL, AV_LOG_ERROR,
2516 "SDL advised channel count %d is not supported!\n", spec.channels);
/* Record what the device actually accepted. */
2521 audio_hw_params->fmt = AV_SAMPLE_FMT_S16;
2522 audio_hw_params->freq = spec.freq;
2523 audio_hw_params->channel_layout = wanted_channel_layout;
2524 audio_hw_params->channels = spec.channels;
2525 audio_hw_params->frame_size = av_samples_get_buffer_size(NULL, audio_hw_params->channels, 1, audio_hw_params->fmt, 1);
2526 audio_hw_params->bytes_per_sec = av_samples_get_buffer_size(NULL, audio_hw_params->channels, audio_hw_params->freq, audio_hw_params->fmt, 1);
2527 if (audio_hw_params->bytes_per_sec <= 0 || audio_hw_params->frame_size <= 0) {
2528 av_log(NULL, AV_LOG_ERROR, "av_samples_get_buffer_size failed\n");
2534 /* open a given stream. Return 0 if OK */
/*
 * Finds a decoder for ic->streams[stream_index] (honoring any user-forced
 * codec name), applies lowres/fast/error-concealment settings, opens the
 * codec with per-stream options, then performs type-specific setup:
 * audio -> (optionally) audio filters + audio_open() + sync averaging
 * state; video -> start video_thread; subtitle -> start subtitle_thread.
 *
 * NOTE(review): elided listing — braces, "return -1" paths and some
 * #if CONFIG_AVFILTER guards are not visible here.
 */
2535 static int stream_component_open(VideoState *is, int stream_index)
2537 AVFormatContext *ic = is->ic;
2538 AVCodecContext *avctx;
2540 const char *forced_codec_name = NULL;
2542 AVDictionaryEntry *t = NULL;
2543 int sample_rate, nb_channels;
2544 int64_t channel_layout;
2546 int stream_lowres = lowres;
/* Reject out-of-range stream indices. */
2548 if (stream_index < 0 || stream_index >= ic->nb_streams)
2550 avctx = ic->streams[stream_index]->codec;
2552 codec = avcodec_find_decoder(avctx->codec_id);
/* Remember the last stream of each type (used by stream cycling) and
   pick up a user-forced decoder name, if any. */
2554 switch(avctx->codec_type){
2555 case AVMEDIA_TYPE_AUDIO : is->last_audio_stream = stream_index; forced_codec_name = audio_codec_name; break;
2556 case AVMEDIA_TYPE_SUBTITLE: is->last_subtitle_stream = stream_index; forced_codec_name = subtitle_codec_name; break;
2557 case AVMEDIA_TYPE_VIDEO : is->last_video_stream = stream_index; forced_codec_name = video_codec_name; break;
2559 if (forced_codec_name)
2560 codec = avcodec_find_decoder_by_name(forced_codec_name);
/* No decoder found: warn with whichever identifier we searched by. */
2562 if (forced_codec_name) av_log(NULL, AV_LOG_WARNING,
2563 "No codec could be found with name '%s'\n", forced_codec_name);
2564 else av_log(NULL, AV_LOG_WARNING,
2565 "No codec could be found with id %d\n", avctx->codec_id);
2569 avctx->codec_id = codec->id;
2570 avctx->workaround_bugs = workaround_bugs;
/* Clamp the requested lowres to what this decoder supports. */
2571 if(stream_lowres > av_codec_get_max_lowres(codec)){
2572 av_log(avctx, AV_LOG_WARNING, "The maximum value for lowres supported by the decoder is %d\n",
2573 av_codec_get_max_lowres(codec));
2574 stream_lowres = av_codec_get_max_lowres(codec);
2576 av_codec_set_lowres(avctx, stream_lowres);
2577 avctx->error_concealment = error_concealment;
2579 if(stream_lowres) avctx->flags |= CODEC_FLAG_EMU_EDGE;
2580 if (fast) avctx->flags2 |= CODEC_FLAG2_FAST;
2581 if(codec->capabilities & CODEC_CAP_DR1)
2582 avctx->flags |= CODEC_FLAG_EMU_EDGE;
/* Build per-stream codec options; default to threaded decoding and
   reference-counted frames for audio/video. */
2584 opts = filter_codec_opts(codec_opts, avctx->codec_id, ic, ic->streams[stream_index], codec);
2585 if (!av_dict_get(opts, "threads", NULL, 0))
2586 av_dict_set(&opts, "threads", "auto", 0);
2588 av_dict_set(&opts, "lowres", av_asprintf("%d", stream_lowres), AV_DICT_DONT_STRDUP_VAL);
2589 if (avctx->codec_type == AVMEDIA_TYPE_VIDEO || avctx->codec_type == AVMEDIA_TYPE_AUDIO)
2590 av_dict_set(&opts, "refcounted_frames", "1", 0);
2591 if (avcodec_open2(avctx, codec, &opts) < 0)
/* Any option left in the dict was not consumed by the codec -> error. */
2593 if ((t = av_dict_get(opts, "", NULL, AV_DICT_IGNORE_SUFFIX))) {
2594 av_log(NULL, AV_LOG_ERROR, "Option %s not found.\n", t->key);
2595 return AVERROR_OPTION_NOT_FOUND;
2598 ic->streams[stream_index]->discard = AVDISCARD_DEFAULT;
2599 switch (avctx->codec_type) {
2600 case AVMEDIA_TYPE_AUDIO:
/* Feed decoder output parameters into the audio filter source, then take
   the negotiated output of the filter chain as the playback format.
   (Presumably inside #if CONFIG_AVFILTER — elided here; verify.) */
2605 is->audio_filter_src.freq = avctx->sample_rate;
2606 is->audio_filter_src.channels = avctx->channels;
2607 is->audio_filter_src.channel_layout = get_valid_channel_layout(avctx->channel_layout, avctx->channels);
2608 is->audio_filter_src.fmt = avctx->sample_fmt;
2609 if ((ret = configure_audio_filters(is, afilters, 0)) < 0)
2611 link = is->out_audio_filter->inputs[0];
2612 sample_rate = link->sample_rate;
2613 nb_channels = link->channels;
2614 channel_layout = link->channel_layout;
/* Non-filter path: use the decoder's own parameters. */
2617 sample_rate = avctx->sample_rate;
2618 nb_channels = avctx->channels;
2619 channel_layout = avctx->channel_layout;
2622 /* prepare audio output */
2623 if ((ret = audio_open(is, channel_layout, nb_channels, sample_rate, &is->audio_tgt)) < 0)
/* audio_open() returns the SDL hardware buffer size in bytes on success. */
2625 is->audio_hw_buf_size = ret;
2626 is->audio_src = is->audio_tgt;
2627 is->audio_buf_size = 0;
2628 is->audio_buf_index = 0;
2630 /* init averaging filter */
2631 is->audio_diff_avg_coef = exp(log(0.01) / AUDIO_DIFF_AVG_NB);
2632 is->audio_diff_avg_count = 0;
2633 /* since we do not have a precise anough audio fifo fullness,
2634 we correct audio sync only if larger than this threshold */
2635 is->audio_diff_threshold = 2.0 * is->audio_hw_buf_size / is->audio_tgt.bytes_per_sec;
2637 memset(&is->audio_pkt, 0, sizeof(is->audio_pkt));
2638 memset(&is->audio_pkt_temp, 0, sizeof(is->audio_pkt_temp));
2639 is->audio_pkt_temp.stream_index = -1;
2641 is->audio_stream = stream_index;
2642 is->audio_st = ic->streams[stream_index];
2644 packet_queue_start(&is->audioq);
2647 case AVMEDIA_TYPE_VIDEO:
2648 is->video_stream = stream_index;
2649 is->video_st = ic->streams[stream_index];
2651 packet_queue_start(&is->videoq);
/* Decoding happens on a dedicated SDL thread. */
2652 is->video_tid = SDL_CreateThread(video_thread, is);
/* Ask read_thread to queue any attached picture (e.g. cover art). */
2653 is->queue_attachments_req = 1;
2655 case AVMEDIA_TYPE_SUBTITLE:
2656 is->subtitle_stream = stream_index;
2657 is->subtitle_st = ic->streams[stream_index];
2658 packet_queue_start(&is->subtitleq);
2660 is->subtitle_tid = SDL_CreateThread(subtitle_thread, is);
/*
 * stream_component_close(): tear down one open stream.
 *
 * Aborts the stream's packet queue, wakes and joins the decoder thread
 * (video/subtitle), flushes the queue, releases per-stream resources,
 * marks the stream as discarded, closes the codec, and finally clears the
 * VideoState references.  NOTE(review): elided listing — SDL_CloseAudio()
 * and several braces/breaks are not visible here.
 */
2668 static void stream_component_close(VideoState *is, int stream_index)
2670 AVFormatContext *ic = is->ic;
2671 AVCodecContext *avctx;
/* Ignore invalid indices (stream may already be closed). */
2673 if (stream_index < 0 || stream_index >= ic->nb_streams)
2675 avctx = ic->streams[stream_index]->codec;
2677 switch (avctx->codec_type) {
2678 case AVMEDIA_TYPE_AUDIO:
2679 packet_queue_abort(&is->audioq);
2683 packet_queue_flush(&is->audioq);
2684 av_free_packet(&is->audio_pkt);
2685 swr_free(&is->swr_ctx);
2686 av_freep(&is->audio_buf1);
2687 is->audio_buf1_size = 0;
/* audio_buf may alias audio_buf1 or a frame buffer — just drop the pointer. */
2688 is->audio_buf = NULL;
2689 av_frame_free(&is->frame);
/* Free the RDFT state used by the spectrum display mode. */
2692 av_rdft_end(is->rdft);
2693 av_freep(&is->rdft_data);
2698 avfilter_graph_free(&is->agraph);
2701 case AVMEDIA_TYPE_VIDEO:
2702 packet_queue_abort(&is->videoq);
2704 /* note: we also signal this mutex to make sure we deblock the
2705 video thread in all cases */
2706 SDL_LockMutex(is->pictq_mutex);
2707 SDL_CondSignal(is->pictq_cond);
2708 SDL_UnlockMutex(is->pictq_mutex);
2710 SDL_WaitThread(is->video_tid, NULL);
2712 packet_queue_flush(&is->videoq);
2714 case AVMEDIA_TYPE_SUBTITLE:
2715 packet_queue_abort(&is->subtitleq);
2717 /* note: we also signal this mutex to make sure we deblock the
2718 video thread in all cases */
2719 SDL_LockMutex(is->subpq_mutex);
2720 SDL_CondSignal(is->subpq_cond);
2721 SDL_UnlockMutex(is->subpq_mutex);
2723 SDL_WaitThread(is->subtitle_tid, NULL);
2725 packet_queue_flush(&is->subtitleq);
/* Common teardown: stop demuxing this stream and close its codec. */
2731 ic->streams[stream_index]->discard = AVDISCARD_ALL;
2732 avcodec_close(avctx);
2733 switch (avctx->codec_type) {
2734 case AVMEDIA_TYPE_AUDIO:
2735 is->audio_st = NULL;
2736 is->audio_stream = -1;
2738 case AVMEDIA_TYPE_VIDEO:
2739 is->video_st = NULL;
2740 is->video_stream = -1;
2742 case AVMEDIA_TYPE_SUBTITLE:
2743 is->subtitle_st = NULL;
2744 is->subtitle_stream = -1;
/* AVIOInterruptCallback: lets blocking libavformat I/O bail out as soon
 * as the player requests shutdown (non-zero return aborts the I/O). */
2751 static int decode_interrupt_cb(void *ctx)
2753 VideoState *is = ctx;
2754 return is->abort_request;
/* Heuristic: treat the input as a realtime (live) stream if the demuxer
 * is rtp/rtsp/sdp, or if the URL scheme is rtp:/udp:.  Realtime inputs
 * get an unbounded packet buffer and external-clock-friendly handling.
 * NOTE(review): elided listing — the return statements are not visible. */
2757 static int is_realtime(AVFormatContext *s)
2759 if( !strcmp(s->iformat->name, "rtp")
2760 || !strcmp(s->iformat->name, "rtsp")
2761 || !strcmp(s->iformat->name, "sdp")
2765 if(s->pb && ( !strncmp(s->filename, "rtp:", 4)
2766 || !strncmp(s->filename, "udp:", 4)
2773 /* this thread gets the stream from the disk or the network */
/*
 * read_thread(): demuxer thread.
 *
 * Opens the input, probes stream info, selects the best audio/video/
 * subtitle streams, opens the corresponding components, then loops:
 * handle pause/seek requests, queue the attached picture if requested,
 * throttle when the packet queues are full, detect EOF/looping, and
 * dispatch each demuxed packet to the right queue.  On exit it closes
 * all components and pushes an FF_QUIT_EVENT to the GUI loop.
 *
 * NOTE(review): elided listing — error labels ("fail:"), several braces,
 * the SDL_Delay() calls and some condition lines are not visible here.
 */
2774 static int read_thread(void *arg)
2776 VideoState *is = arg;
2777 AVFormatContext *ic = NULL;
2779 int st_index[AVMEDIA_TYPE_NB];
2780 AVPacket pkt1, *pkt = &pkt1;
2782 int64_t stream_start_time;
2783 int pkt_in_play_range = 0;
2784 AVDictionaryEntry *t;
2785 AVDictionary **opts;
2786 int orig_nb_streams;
/* Private mutex used only to time-bound waits on continue_read_thread. */
2787 SDL_mutex *wait_mutex = SDL_CreateMutex();
2789 memset(st_index, -1, sizeof(st_index));
2790 is->last_video_stream = is->video_stream = -1;
2791 is->last_audio_stream = is->audio_stream = -1;
2792 is->last_subtitle_stream = is->subtitle_stream = -1;
/* Install the abort-aware interrupt callback before any blocking I/O. */
2794 ic = avformat_alloc_context();
2795 ic->interrupt_callback.callback = decode_interrupt_cb;
2796 ic->interrupt_callback.opaque = is;
2797 err = avformat_open_input(&ic, is->filename, is->iformat, &format_opts);
2799 print_error(is->filename, err);
/* Any leftover format option means the user passed an unknown one. */
2803 if ((t = av_dict_get(format_opts, "", NULL, AV_DICT_IGNORE_SUFFIX))) {
2804 av_log(NULL, AV_LOG_ERROR, "Option %s not found.\n", t->key);
2805 ret = AVERROR_OPTION_NOT_FOUND;
2811 ic->flags |= AVFMT_FLAG_GENPTS;
2813 av_format_inject_global_side_data(ic);
/* Probe codec parameters with per-stream option dictionaries. */
2815 opts = setup_find_stream_info_opts(ic, codec_opts);
2816 orig_nb_streams = ic->nb_streams;
2818 err = avformat_find_stream_info(ic, opts);
2820 av_log(NULL, AV_LOG_WARNING,
2821 "%s: could not find codec parameters\n", is->filename);
2825 for (i = 0; i < orig_nb_streams; i++)
2826 av_dict_free(&opts[i]);
2830 ic->pb->eof_reached = 0; // FIXME hack, ffplay maybe should not use url_feof() to test for the end
/* Auto mode: seek by bytes for discontinuous-timestamp formats (not ogg). */
2832 if (seek_by_bytes < 0)
2833 seek_by_bytes = !!(ic->iformat->flags & AVFMT_TS_DISCONT) && strcmp("ogg", ic->iformat->name);
2835 is->max_frame_duration = (ic->iformat->flags & AVFMT_TS_DISCONT) ? 10.0 : 3600.0;
2837 if (!window_title && (t = av_dict_get(ic->metadata, "title", NULL, 0)))
2838 window_title = av_asprintf("%s - %s", t->value, input_filename);
2840 /* if seeking requested, we execute it */
2841 if (start_time != AV_NOPTS_VALUE) {
2844 timestamp = start_time;
2845 /* add the stream start time */
2846 if (ic->start_time != AV_NOPTS_VALUE)
2847 timestamp += ic->start_time;
2848 ret = avformat_seek_file(ic, -1, INT64_MIN, timestamp, INT64_MAX, 0);
2850 av_log(NULL, AV_LOG_WARNING, "%s: could not seek to position %0.3f\n",
2851 is->filename, (double)timestamp / AV_TIME_BASE);
2855 is->realtime = is_realtime(ic);
/* Discard everything by default; stream_component_open re-enables. */
2857 for (i = 0; i < ic->nb_streams; i++)
2858 ic->streams[i]->discard = AVDISCARD_ALL;
/* Pick the best stream of each type; audio selection is made relative to
   the chosen video stream, subtitles relative to audio (or video). */
2860 st_index[AVMEDIA_TYPE_VIDEO] =
2861 av_find_best_stream(ic, AVMEDIA_TYPE_VIDEO,
2862 wanted_stream[AVMEDIA_TYPE_VIDEO], -1, NULL, 0);
2864 st_index[AVMEDIA_TYPE_AUDIO] =
2865 av_find_best_stream(ic, AVMEDIA_TYPE_AUDIO,
2866 wanted_stream[AVMEDIA_TYPE_AUDIO],
2867 st_index[AVMEDIA_TYPE_VIDEO],
2869 if (!video_disable && !subtitle_disable)
2870 st_index[AVMEDIA_TYPE_SUBTITLE] =
2871 av_find_best_stream(ic, AVMEDIA_TYPE_SUBTITLE,
2872 wanted_stream[AVMEDIA_TYPE_SUBTITLE],
2873 (st_index[AVMEDIA_TYPE_AUDIO] >= 0 ?
2874 st_index[AVMEDIA_TYPE_AUDIO] :
2875 st_index[AVMEDIA_TYPE_VIDEO]),
2878 av_dump_format(ic, 0, is->filename, 0);
2881 is->show_mode = show_mode;
/* Size the window from the video stream's dimensions and aspect ratio
   before any frame is decoded. */
2882 if (st_index[AVMEDIA_TYPE_VIDEO] >= 0) {
2883 AVStream *st = ic->streams[st_index[AVMEDIA_TYPE_VIDEO]];
2884 AVCodecContext *avctx = st->codec;
2885 VideoPicture vp = {0};
2886 vp.width = avctx->width;
2887 vp.height = avctx->height;
2888 vp.sar = av_guess_sample_aspect_ratio(ic, st, NULL);
2890 set_default_window_size(&vp);
2893 /* open the streams */
2894 if (st_index[AVMEDIA_TYPE_AUDIO] >= 0) {
2895 stream_component_open(is, st_index[AVMEDIA_TYPE_AUDIO]);
2899 if (st_index[AVMEDIA_TYPE_VIDEO] >= 0) {
2900 ret = stream_component_open(is, st_index[AVMEDIA_TYPE_VIDEO]);
/* No usable video -> fall back to the RDFT (spectrum) visualization. */
2902 if (is->show_mode == SHOW_MODE_NONE)
2903 is->show_mode = ret >= 0 ? SHOW_MODE_VIDEO : SHOW_MODE_RDFT;
2905 if (st_index[AVMEDIA_TYPE_SUBTITLE] >= 0) {
2906 stream_component_open(is, st_index[AVMEDIA_TYPE_SUBTITLE]);
2909 if (is->video_stream < 0 && is->audio_stream < 0) {
2910 av_log(NULL, AV_LOG_FATAL, "Failed to open file '%s' or configure filtergraph\n",
/* Realtime streams default to an unbounded packet buffer. */
2916 if (infinite_buffer < 0 && is->realtime)
2917 infinite_buffer = 1;
/* ---- main demux loop (loop construct elided in this listing) ---- */
2920 if (is->abort_request)
/* Propagate pause/resume to the demuxer (relevant for RTSP et al.). */
2922 if (is->paused != is->last_paused) {
2923 is->last_paused = is->paused;
2925 is->read_pause_return = av_read_pause(ic);
2929 #if CONFIG_RTSP_DEMUXER || CONFIG_MMSH_PROTOCOL
2931 (!strcmp(ic->iformat->name, "rtsp") ||
2932 (ic->pb && !strncmp(input_filename, "mmsh:", 5)))) {
2933 /* wait 10 ms to avoid trying to get another packet */
/* ---- seek request handling ---- */
2940 int64_t seek_target = is->seek_pos;
2941 int64_t seek_min = is->seek_rel > 0 ? seek_target - is->seek_rel + 2: INT64_MIN;
2942 int64_t seek_max = is->seek_rel < 0 ? seek_target - is->seek_rel - 2: INT64_MAX;
2943 // FIXME the +-2 is due to rounding being not done in the correct direction in generation
2944 // of the seek_pos/seek_rel variables
2946 ret = avformat_seek_file(is->ic, -1, seek_min, seek_target, seek_max, is->seek_flags);
2948 av_log(NULL, AV_LOG_ERROR,
2949 "%s: error while seeking\n", is->ic->filename);
/* After a successful seek, flush every queue and inject flush_pkt so the
   decoders reset their state and serials advance. */
2951 if (is->audio_stream >= 0) {
2952 packet_queue_flush(&is->audioq);
2953 packet_queue_put(&is->audioq, &flush_pkt);
2955 if (is->subtitle_stream >= 0) {
2956 packet_queue_flush(&is->subtitleq);
2957 packet_queue_put(&is->subtitleq, &flush_pkt);
2959 if (is->video_stream >= 0) {
2960 packet_queue_flush(&is->videoq);
2961 packet_queue_put(&is->videoq, &flush_pkt);
/* Byte seeks have no reliable timestamp: invalidate the external clock. */
2963 if (is->seek_flags & AVSEEK_FLAG_BYTE) {
2964 set_clock(&is->extclk, NAN, 0);
2966 set_clock(&is->extclk, seek_target / (double)AV_TIME_BASE, 0);
2970 is->queue_attachments_req = 1;
2973 step_to_next_frame(is);
/* Queue the attached picture (cover art) once per request.
   NOTE(review): "©" below is an extraction artifact of "&copy" — the
   address of a local AVPacket named "copy" declared on an elided line. */
2975 if (is->queue_attachments_req) {
2976 if (is->video_st && is->video_st->disposition & AV_DISPOSITION_ATTACHED_PIC) {
2978 if ((ret = av_copy_packet(©, &is->video_st->attached_pic)) < 0)
2980 packet_queue_put(&is->videoq, ©);
2981 packet_queue_put_nullpacket(&is->videoq, is->video_stream);
2983 is->queue_attachments_req = 0;
2986 /* if the queue are full, no need to read more */
2987 if (infinite_buffer<1 &&
2988 (is->audioq.size + is->videoq.size + is->subtitleq.size > MAX_QUEUE_SIZE
2989 || ( (is->audioq .nb_packets > MIN_FRAMES || is->audio_stream < 0 || is->audioq.abort_request)
2990 && (is->videoq .nb_packets > MIN_FRAMES || is->video_stream < 0 || is->videoq.abort_request
2991 || (is->video_st->disposition & AV_DISPOSITION_ATTACHED_PIC))
2992 && (is->subtitleq.nb_packets > MIN_FRAMES || is->subtitle_stream < 0 || is->subtitleq.abort_request)))) {
/* Block (up to 10 ms) until a consumer signals it wants more data. */
2994 SDL_LockMutex(wait_mutex);
2995 SDL_CondWaitTimeout(is->continue_read_thread, wait_mutex, 10);
2996 SDL_UnlockMutex(wait_mutex);
/* Playback finished on all open streams: loop or auto-exit. */
3000 (!is->audio_st || is->audio_finished == is->audioq.serial) &&
3001 (!is->video_st || (is->video_finished == is->videoq.serial && is->pictq_size == 0))) {
3002 if (loop != 1 && (!loop || --loop)) {
3003 stream_seek(is, start_time != AV_NOPTS_VALUE ? start_time : 0, 0, 0);
3004 } else if (autoexit) {
/* On EOF, push a null packet per stream so decoders can drain. */
3010 if (is->video_stream >= 0)
3011 packet_queue_put_nullpacket(&is->videoq, is->video_stream);
3012 if (is->audio_stream >= 0)
3013 packet_queue_put_nullpacket(&is->audioq, is->audio_stream);
3014 if (is->subtitle_stream >= 0)
3015 packet_queue_put_nullpacket(&is->subtitleq, is->subtitle_stream);
3020 ret = av_read_frame(ic, pkt);
3022 if (ret == AVERROR_EOF || url_feof(ic->pb))
3024 if (ic->pb && ic->pb->error)
/* Transient read failure: back off briefly and retry. */
3026 SDL_LockMutex(wait_mutex);
3027 SDL_CondWaitTimeout(is->continue_read_thread, wait_mutex, 10);
3028 SDL_UnlockMutex(wait_mutex);
3031 /* check if packet is in play range specified by user, then queue, otherwise discard */
3032 stream_start_time = ic->streams[pkt->stream_index]->start_time;
3033 pkt_in_play_range = duration == AV_NOPTS_VALUE ||
3034 (pkt->pts - (stream_start_time != AV_NOPTS_VALUE ? stream_start_time : 0)) *
3035 av_q2d(ic->streams[pkt->stream_index]->time_base) -
3036 (double)(start_time != AV_NOPTS_VALUE ? start_time : 0) / 1000000
3037 <= ((double)duration / 1000000);
3038 if (pkt->stream_index == is->audio_stream && pkt_in_play_range) {
3039 packet_queue_put(&is->audioq, pkt);
3040 } else if (pkt->stream_index == is->video_stream && pkt_in_play_range
3041 && !(is->video_st->disposition & AV_DISPOSITION_ATTACHED_PIC)) {
3042 packet_queue_put(&is->videoq, pkt);
3043 } else if (pkt->stream_index == is->subtitle_stream && pkt_in_play_range) {
3044 packet_queue_put(&is->subtitleq, pkt);
3046 av_free_packet(pkt);
3049 /* wait until the end */
3050 while (!is->abort_request) {
3056 /* close each stream */
3057 if (is->audio_stream >= 0)
3058 stream_component_close(is, is->audio_stream);
3059 if (is->video_stream >= 0)
3060 stream_component_close(is, is->video_stream);
3061 if (is->subtitle_stream >= 0)
3062 stream_component_close(is, is->subtitle_stream);
3064 avformat_close_input(&is->ic);
/* Tell the event loop to shut the player down. */
3070 event.type = FF_QUIT_EVENT;
3071 event.user.data1 = is;
3072 SDL_PushEvent(&event);
3074 SDL_DestroyMutex(wait_mutex);
/*
 * stream_open(): allocate and initialize a VideoState and spawn the
 * read_thread that does all demuxing.  Returns the new VideoState, or
 * (per the elided failure paths) NULL on allocation/thread failure.
 */
3078 static VideoState *stream_open(const char *filename, AVInputFormat *iformat)
3082 is = av_mallocz(sizeof(VideoState));
3085 av_strlcpy(is->filename, filename, sizeof(is->filename));
3086 is->iformat = iformat;
3090 /* start video display */
3091 is->pictq_mutex = SDL_CreateMutex();
3092 is->pictq_cond = SDL_CreateCond();
3094 is->subpq_mutex = SDL_CreateMutex();
3095 is->subpq_cond = SDL_CreateCond();
3097 packet_queue_init(&is->videoq);
3098 packet_queue_init(&is->audioq);
3099 packet_queue_init(&is->subtitleq);
3101 is->continue_read_thread = SDL_CreateCond();
/* Each clock is paired with the serial of the queue it tracks; the
   external clock is self-referential. */
3103 init_clock(&is->vidclk, &is->videoq.serial);
3104 init_clock(&is->audclk, &is->audioq.serial);
3105 init_clock(&is->extclk, &is->extclk.serial);
3106 is->audio_clock_serial = -1;
3107 is->audio_last_serial = -1;
3108 is->av_sync_type = av_sync_type;
3109 is->read_tid = SDL_CreateThread(read_thread, is);
3110 if (!is->read_tid) {
/*
 * stream_cycle_channel(): switch to the next stream of the given type.
 *
 * Scans circularly from the last-used stream index, restricted to the
 * current program when one is associated with the video stream.  A
 * candidate must match codec_type and (for audio) have a usable sample
 * rate and channel count.  Subtitles may cycle to "off" (-1).
 * NOTE(review): elided listing — the scan loop header, "goto the_end"
 * and several braces are not visible here.
 */
3117 static void stream_cycle_channel(VideoState *is, int codec_type)
3119 AVFormatContext *ic = is->ic;
3120 int start_index, stream_index;
3123 AVProgram *p = NULL;
3124 int nb_streams = is->ic->nb_streams;
3126 if (codec_type == AVMEDIA_TYPE_VIDEO) {
3127 start_index = is->last_video_stream;
3128 old_index = is->video_stream;
3129 } else if (codec_type == AVMEDIA_TYPE_AUDIO) {
3130 start_index = is->last_audio_stream;
3131 old_index = is->audio_stream;
3133 start_index = is->last_subtitle_stream;
3134 old_index = is->subtitle_stream;
3136 stream_index = start_index;
/* If a program contains the current video stream, cycle inside it only,
   translating stream_index into an index within p->stream_index[]. */
3138 if (codec_type != AVMEDIA_TYPE_VIDEO && is->video_stream != -1) {
3139 p = av_find_program_from_stream(ic, NULL, is->video_stream);
3141 nb_streams = p->nb_stream_indexes;
3142 for (start_index = 0; start_index < nb_streams; start_index++)
3143 if (p->stream_index[start_index] == stream_index)
3145 if (start_index == nb_streams)
3147 stream_index = start_index;
/* Circular scan (loop construct elided); wrapping past the end means
   "no subtitle" when cycling subtitles. */
3152 if (++stream_index >= nb_streams)
3154 if (codec_type == AVMEDIA_TYPE_SUBTITLE)
3157 is->last_subtitle_stream = -1;
3160 if (start_index == -1)
3164 if (stream_index == start_index)
3166 st = is->ic->streams[p ? p->stream_index[stream_index] : stream_index];
3167 if (st->codec->codec_type == codec_type) {
3168 /* check that parameters are OK */
3169 switch (codec_type) {
3170 case AVMEDIA_TYPE_AUDIO:
3171 if (st->codec->sample_rate != 0 &&
3172 st->codec->channels != 0)
3175 case AVMEDIA_TYPE_VIDEO:
3176 case AVMEDIA_TYPE_SUBTITLE:
/* Map the program-relative index back to an absolute stream index. */
3184 if (p && stream_index != -1)
3185 stream_index = p->stream_index[stream_index];
3186 av_log(NULL, AV_LOG_INFO, "Switch %s stream from #%d to #%d\n",
3187 av_get_media_type_string(codec_type),
3191 stream_component_close(is, old_index);
3192 stream_component_open(is, stream_index);
/* Toggle fullscreen and re-open the video surface.  On OS X with SDL
 * >= 1.2.14 the overlays must be reallocated after the mode change. */
3196 static void toggle_full_screen(VideoState *is)
3198 #if defined(__APPLE__) && SDL_VERSION_ATLEAST(1, 2, 14)
3199 /* OS X needs to reallocate the SDL overlays */
3201 for (i = 0; i < VIDEO_PICTURE_QUEUE_SIZE; i++)
3202 is->pictq[i].reallocate = 1;
3204 is_full_screen = !is_full_screen;
3205 video_open(is, 1, NULL);
/* Advance to the next usable show mode (video / waves / RDFT), skipping
 * modes whose required stream (video or audio) is absent.  On a change,
 * clear the display area to black and force a refresh. */
3208 static void toggle_audio_display(VideoState *is)
3210 int bgcolor = SDL_MapRGB(screen->format, 0x00, 0x00, 0x00);
3211 int next = is->show_mode;
3213 next = (next + 1) % SHOW_MODE_NB;
3214 } while (next != is->show_mode && (next == SHOW_MODE_VIDEO && !is->video_st || next != SHOW_MODE_VIDEO && !is->audio_st));
3215 if (is->show_mode != next) {
3216 fill_rectangle(screen,
3217 is->xleft, is->ytop, is->width, is->height,
3219 is->force_refresh = 1;
3220 is->show_mode = next;
/* Pump the display until an SDL event arrives: hide the cursor after
 * CURSOR_HIDE_DELAY, sleep for the remaining frame time, then refresh the
 * video when not paused (or when a refresh was forced). */
3224 static void refresh_loop_wait_event(VideoState *is, SDL_Event *event) {
3225 double remaining_time = 0.0;
3227 while (!SDL_PeepEvents(event, 1, SDL_GETEVENT, SDL_ALLEVENTS)) {
3228 if (!cursor_hidden && av_gettime_relative() - cursor_last_shown > CURSOR_HIDE_DELAY) {
3232 if (remaining_time > 0.0)
3233 av_usleep((int64_t)(remaining_time * 1000000.0));
/* video_refresh() lowers remaining_time when the next frame is due sooner. */
3234 remaining_time = REFRESH_RATE;
3235 if (is->show_mode != SHOW_MODE_NONE && (!is->paused || is->force_refresh))
3236 video_refresh(is, &remaining_time);
/* Seek to the start of the chapter `incr` steps away from the current
 * playback position (incr applied on an elided line).  No-op when the
 * container has no chapters. */
3241 static void seek_chapter(VideoState *is, int incr)
3243 int64_t pos = get_master_clock(is) * AV_TIME_BASE;
3246 if (!is->ic->nb_chapters)
3249 /* find the current chapter */
3250 for (i = 0; i < is->ic->nb_chapters; i++) {
3251 AVChapter *ch = is->ic->chapters[i];
3252 if (av_compare_ts(pos, AV_TIME_BASE_Q, ch->start, ch->time_base) < 0) {
/* Clamp: past the last chapter means nothing to seek to. */
3260 if (i >= is->ic->nb_chapters)
3263 av_log(NULL, AV_LOG_VERBOSE, "Seeking to chapter %d.\n", i);
3264 stream_seek(is, av_rescale_q(is->ic->chapters[i]->start, is->ic->chapters[i]->time_base,
3265 AV_TIME_BASE_Q), 0, 0);
3268 /* handle an event sent by the GUI */
/*
 * event_loop(): the main-thread SDL event loop.
 *
 * Dispatches keyboard shortcuts (pause, fullscreen, stream cycling,
 * chapter/time seeks), mouse seeks (click position mapped to a fraction
 * of the duration), window resize/expose, and the FF_ALLOC/FF_QUIT
 * events posted by the worker threads.
 * NOTE(review): elided listing — the outer for(;;), many case labels,
 * breaks and the incr assignments for arrow keys are not visible here.
 */
3269 static void event_loop(VideoState *cur_stream)
3272 double incr, pos, frac;
3276 refresh_loop_wait_event(cur_stream, &event);
3277 switch (event.type) {
3279 if (exit_on_keydown) {
3280 do_exit(cur_stream);
3283 switch (event.key.keysym.sym) {
3286 do_exit(cur_stream);
3289 toggle_full_screen(cur_stream);
3290 cur_stream->force_refresh = 1;
3294 toggle_pause(cur_stream);
3296 case SDLK_s: // S: Step to next frame
3297 step_to_next_frame(cur_stream);
3300 stream_cycle_channel(cur_stream, AVMEDIA_TYPE_AUDIO);
3303 stream_cycle_channel(cur_stream, AVMEDIA_TYPE_VIDEO);
/* Cycle all three stream types at once (the 'c' key, label elided). */
3306 stream_cycle_channel(cur_stream, AVMEDIA_TYPE_VIDEO);
3307 stream_cycle_channel(cur_stream, AVMEDIA_TYPE_AUDIO);
3308 stream_cycle_channel(cur_stream, AVMEDIA_TYPE_SUBTITLE);
3311 stream_cycle_channel(cur_stream, AVMEDIA_TYPE_SUBTITLE);
/* 'w': advance the video filter chain, or cycle show modes. */
3315 if (cur_stream->show_mode == SHOW_MODE_VIDEO && cur_stream->vfilter_idx < nb_vfilters - 1) {
3316 if (++cur_stream->vfilter_idx >= nb_vfilters)
3317 cur_stream->vfilter_idx = 0;
3319 cur_stream->vfilter_idx = 0;
3320 toggle_audio_display(cur_stream);
3323 toggle_audio_display(cur_stream);
/* Page up/down: chapter seeks when the file has chapters. */
3327 if (cur_stream->ic->nb_chapters <= 1) {
3331 seek_chapter(cur_stream, 1);
3334 if (cur_stream->ic->nb_chapters <= 1) {
3338 seek_chapter(cur_stream, -1);
/* Arrow-key seeks: by bytes (estimated from bitrate) or by time. */
3352 if (seek_by_bytes) {
3353 if (cur_stream->video_stream >= 0 && cur_stream->video_current_pos >= 0) {
3354 pos = cur_stream->video_current_pos;
3355 } else if (cur_stream->audio_stream >= 0 && cur_stream->audio_pkt.pos >= 0) {
3356 pos = cur_stream->audio_pkt.pos;
3358 pos = avio_tell(cur_stream->ic->pb);
3359 if (cur_stream->ic->bit_rate)
3360 incr *= cur_stream->ic->bit_rate / 8.0;
3364 stream_seek(cur_stream, pos, incr, 1);
3366 pos = get_master_clock(cur_stream);
3368 pos = (double)cur_stream->seek_pos / AV_TIME_BASE;
/* Never seek before the container's start time. */
3370 if (cur_stream->ic->start_time != AV_NOPTS_VALUE && pos < cur_stream->ic->start_time / (double)AV_TIME_BASE)
3371 pos = cur_stream->ic->start_time / (double)AV_TIME_BASE;
3372 stream_seek(cur_stream, (int64_t)(pos * AV_TIME_BASE), (int64_t)(incr * AV_TIME_BASE), 0);
3379 case SDL_VIDEOEXPOSE:
3380 cur_stream->force_refresh = 1;
3382 case SDL_MOUSEBUTTONDOWN:
3383 if (exit_on_mousedown) {
3384 do_exit(cur_stream);
3387 case SDL_MOUSEMOTION:
3388 if (cursor_hidden) {
3392 cursor_last_shown = av_gettime_relative();
3393 if (event.type == SDL_MOUSEBUTTONDOWN) {
3396 if (event.motion.state != SDL_PRESSED)
/* Click/drag seek: horizontal position -> fraction of file size or
   of total duration. */
3400 if (seek_by_bytes || cur_stream->ic->duration <= 0) {
3401 uint64_t size = avio_size(cur_stream->ic->pb);
3402 stream_seek(cur_stream, size*x/cur_stream->width, 0, 1);
3406 int tns, thh, tmm, tss;
3407 tns = cur_stream->ic->duration / 1000000LL;
3409 tmm = (tns % 3600) / 60;
3411 frac = x / cur_stream->width;
3414 mm = (ns % 3600) / 60;
3416 av_log(NULL, AV_LOG_INFO,
3417 "Seek to %2.0f%% (%2d:%02d:%02d) of total duration (%2d:%02d:%02d) \n", frac*100,
3418 hh, mm, ss, thh, tmm, tss);
3419 ts = frac * cur_stream->ic->duration;
3420 if (cur_stream->ic->start_time != AV_NOPTS_VALUE)
3421 ts += cur_stream->ic->start_time;
3422 stream_seek(cur_stream, ts, 0, 0);
/* Width clamp to 16383: SDL/overlay limitation workaround. */
3425 case SDL_VIDEORESIZE:
3426 screen = SDL_SetVideoMode(FFMIN(16383, event.resize.w), event.resize.h, 0,
3427 SDL_HWSURFACE|SDL_RESIZABLE|SDL_ASYNCBLIT|SDL_HWACCEL);
3429 av_log(NULL, AV_LOG_FATAL, "Failed to set video mode\n");
3430 do_exit(cur_stream);
3432 screen_width = cur_stream->width = screen->w;
3433 screen_height = cur_stream->height = screen->h;
3434 cur_stream->force_refresh = 1;
3438 do_exit(cur_stream);
3440 case FF_ALLOC_EVENT:
3441 alloc_picture(event.user.data1);
/* Deprecated -s option: forwards to the generic "video_size" option. */
3449 static int opt_frame_size(void *optctx, const char *opt, const char *arg)
3451 av_log(NULL, AV_LOG_WARNING, "Option -s is deprecated, use -video_size.\n");
3452 return opt_default(NULL, "video_size", arg);
/* -x: force the displayed width (positive integer). */
3455 static int opt_width(void *optctx, const char *opt, const char *arg)
3457 screen_width = parse_number_or_die(opt, arg, OPT_INT64, 1, INT_MAX);
/* -y: force the displayed height (positive integer). */
3461 static int opt_height(void *optctx, const char *opt, const char *arg)
3463 screen_height = parse_number_or_die(opt, arg, OPT_INT64, 1, INT_MAX);
/* -f: force the input demuxer by name; fatal error if unknown. */
3467 static int opt_format(void *optctx, const char *opt, const char *arg)
3469 file_iformat = av_find_input_format(arg);
3470 if (!file_iformat) {
3471 av_log(NULL, AV_LOG_FATAL, "Unknown input format: %s\n", arg);
3472 return AVERROR(EINVAL);
/* Deprecated -pix_fmt option: forwards to the generic "pixel_format". */
3477 static int opt_frame_pix_fmt(void *optctx, const char *opt, const char *arg)
3479 av_log(NULL, AV_LOG_WARNING, "Option -pix_fmt is deprecated, use -pixel_format.\n");
3480 return opt_default(NULL, "pixel_format", arg);
/* -sync: choose the A/V sync master clock: "audio", "video" or "ext". */
3483 static int opt_sync(void *optctx, const char *opt, const char *arg)
3485 if (!strcmp(arg, "audio"))
3486 av_sync_type = AV_SYNC_AUDIO_MASTER;
3487 else if (!strcmp(arg, "video"))
3488 av_sync_type = AV_SYNC_VIDEO_MASTER;
3489 else if (!strcmp(arg, "ext"))
3490 av_sync_type = AV_SYNC_EXTERNAL_CLOCK;
3492 av_log(NULL, AV_LOG_ERROR, "Unknown value for %s: %s\n", opt, arg);
/* -ss: set the initial playback position (time string parsed by cmdutils). */
3498 static int opt_seek(void *optctx, const char *opt, const char *arg)
3500 start_time = parse_time_or_die(opt, arg, 1);
/* -t: limit playback to the given duration (time string). */
3504 static int opt_duration(void *optctx, const char *opt, const char *arg)
3506 duration = parse_time_or_die(opt, arg, 1);
/* -showmode: select display mode by name ("video"/"waves"/"rdft") or by
 * numeric index into the ShowMode enum. */
3510 static int opt_show_mode(void *optctx, const char *opt, const char *arg)
3512 show_mode = !strcmp(arg, "video") ? SHOW_MODE_VIDEO :
3513 !strcmp(arg, "waves") ? SHOW_MODE_WAVES :
3514 !strcmp(arg, "rdft" ) ? SHOW_MODE_RDFT :
3515 parse_number_or_die(opt, arg, OPT_INT, 0, SHOW_MODE_NB-1);
/* Positional-argument handler: record the input filename; "-" means
 * stdin (mapped on an elided line).  A second filename is fatal. */
3519 static void opt_input_file(void *optctx, const char *filename)
3521 if (input_filename) {
3522 av_log(NULL, AV_LOG_FATAL,
3523 "Argument '%s' provided as input filename, but '%s' was already specified.\n",
3524 filename, input_filename);
3527 if (!strcmp(filename, "-"))
3529 input_filename = filename;
/* -codec:<spec>: force a decoder for one media type; <spec> is a single
 * media specifier character after the colon ('a', 's' or 'v'). */
3532 static int opt_codec(void *optctx, const char *opt, const char *arg)
3534 const char *spec = strchr(opt, ':');
3536 av_log(NULL, AV_LOG_ERROR,
3537 "No media specifier was specified in '%s' in option '%s'\n",
3539 return AVERROR(EINVAL);
3543 case 'a' : audio_codec_name = arg; break;
3544 case 's' : subtitle_codec_name = arg; break;
3545 case 'v' : video_codec_name = arg; break;
3547 av_log(NULL, AV_LOG_ERROR,
3548 "Invalid media specifier '%s' in option '%s'\n", spec, opt);
3549 return AVERROR(EINVAL);
/* Command-line option table consumed by cmdutils' parse_options().
 * Each entry: name, flag bits (HAS_ARG, OPT_BOOL, OPT_INT, ...), either a
 * variable pointer or a .func_arg callback, help text, and an optional
 * argument name.  The common options (-h, -v, ...) come from the include. */
3556 static const OptionDef options[] = {
3557 #include "cmdutils_common_opts.h"
3558 { "x", HAS_ARG, { .func_arg = opt_width }, "force displayed width", "width" },
3559 { "y", HAS_ARG, { .func_arg = opt_height }, "force displayed height", "height" },
3560 { "s", HAS_ARG | OPT_VIDEO, { .func_arg = opt_frame_size }, "set frame size (WxH or abbreviation)", "size" },
3561 { "fs", OPT_BOOL, { &is_full_screen }, "force full screen" },
3562 { "an", OPT_BOOL, { &audio_disable }, "disable audio" },
3563 { "vn", OPT_BOOL, { &video_disable }, "disable video" },
3564 { "sn", OPT_BOOL, { &subtitle_disable }, "disable subtitling" },
3565 { "ast", OPT_INT | HAS_ARG | OPT_EXPERT, { &wanted_stream[AVMEDIA_TYPE_AUDIO] }, "select desired audio stream", "stream_number" },
3566 { "vst", OPT_INT | HAS_ARG | OPT_EXPERT, { &wanted_stream[AVMEDIA_TYPE_VIDEO] }, "select desired video stream", "stream_number" },
3567 { "sst", OPT_INT | HAS_ARG | OPT_EXPERT, { &wanted_stream[AVMEDIA_TYPE_SUBTITLE] }, "select desired subtitle stream", "stream_number" },
3568 { "ss", HAS_ARG, { .func_arg = opt_seek }, "seek to a given position in seconds", "pos" },
3569 { "t", HAS_ARG, { .func_arg = opt_duration }, "play \"duration\" seconds of audio/video", "duration" },
3570 { "bytes", OPT_INT | HAS_ARG, { &seek_by_bytes }, "seek by bytes 0=off 1=on -1=auto", "val" },
3571 { "nodisp", OPT_BOOL, { &display_disable }, "disable graphical display" },
3572 { "f", HAS_ARG, { .func_arg = opt_format }, "force format", "fmt" },
3573 { "pix_fmt", HAS_ARG | OPT_EXPERT | OPT_VIDEO, { .func_arg = opt_frame_pix_fmt }, "set pixel format", "format" },
3574 { "stats", OPT_BOOL | OPT_EXPERT, { &show_status }, "show status", "" },
3575 { "bug", OPT_INT | HAS_ARG | OPT_EXPERT, { &workaround_bugs }, "workaround bugs", "" },
3576 { "fast", OPT_BOOL | OPT_EXPERT, { &fast }, "non spec compliant optimizations", "" },
3577 { "genpts", OPT_BOOL | OPT_EXPERT, { &genpts }, "generate pts", "" },
3578 { "drp", OPT_INT | HAS_ARG | OPT_EXPERT, { &decoder_reorder_pts }, "let decoder reorder pts 0=off 1=on -1=auto", ""},
3579 { "lowres", OPT_INT | HAS_ARG | OPT_EXPERT, { &lowres }, "", "" },
3580 { "ec", OPT_INT | HAS_ARG | OPT_EXPERT, { &error_concealment }, "set error concealment options", "bit_mask" },
3581 { "sync", HAS_ARG | OPT_EXPERT, { .func_arg = opt_sync }, "set audio-video sync. type (type=audio/video/ext)", "type" },
3582 { "autoexit", OPT_BOOL | OPT_EXPERT, { &autoexit }, "exit at the end", "" },
3583 { "exitonkeydown", OPT_BOOL | OPT_EXPERT, { &exit_on_keydown }, "exit on key down", "" },
3584 { "exitonmousedown", OPT_BOOL | OPT_EXPERT, { &exit_on_mousedown }, "exit on mouse down", "" },
3585 { "loop", OPT_INT | HAS_ARG | OPT_EXPERT, { &loop }, "set number of times the playback shall be looped", "loop count" },
3586 { "framedrop", OPT_BOOL | OPT_EXPERT, { &framedrop }, "drop frames when cpu is too slow", "" },
3587 { "infbuf", OPT_BOOL | OPT_EXPERT, { &infinite_buffer }, "don't limit the input buffer size (useful with realtime streams)", "" },
3588 { "window_title", OPT_STRING | HAS_ARG, { &window_title }, "set window title", "window title" },
3590 { "vf", OPT_EXPERT | HAS_ARG, { .func_arg = opt_add_vfilter }, "set video filters", "filter_graph" },
3591 { "af", OPT_STRING | HAS_ARG, { &afilters }, "set audio filters", "filter_graph" },
3593 { "rdftspeed", OPT_INT | HAS_ARG| OPT_AUDIO | OPT_EXPERT, { &rdftspeed }, "rdft speed", "msecs" },
3594 { "showmode", HAS_ARG, { .func_arg = opt_show_mode}, "select show mode (0 = video, 1 = waves, 2 = RDFT)", "mode" },
3595 { "default", HAS_ARG | OPT_AUDIO | OPT_VIDEO | OPT_EXPERT, { .func_arg = opt_default }, "generic catch all option", "" },
3596 { "i", OPT_BOOL, { &dummy}, "read specified file", "input_file"},
3597 { "codec", HAS_ARG, { .func_arg = opt_codec}, "force decoder", "decoder_name" },
3598 { "acodec", HAS_ARG | OPT_STRING | OPT_EXPERT, { &audio_codec_name }, "force audio decoder", "decoder_name" },
3599 { "scodec", HAS_ARG | OPT_STRING | OPT_EXPERT, { &subtitle_codec_name }, "force subtitle decoder", "decoder_name" },
3600 { "vcodec", HAS_ARG | OPT_STRING | OPT_EXPERT, { &video_codec_name }, "force video decoder", "decoder_name" },
3601 { "autorotate", OPT_BOOL, { &autorotate }, "automatically rotate video", "" },
/* Print a one-line description and the basic command-line syntax.
 * Logged at AV_LOG_INFO so it respects the user's loglevel settings. */
3605 static void show_usage(void)
3607     av_log(NULL, AV_LOG_INFO, "Simple media player\n");
3608     av_log(NULL, AV_LOG_INFO, "usage: %s [options] input_file\n", program_name);
3609     av_log(NULL, AV_LOG_INFO, "\n");
/* Callback invoked by cmdutils for "-h" with no specific topic.
 * Prints usage, the option tables (main vs. advanced, split on the
 * OPT_EXPERT flag), the decoding-related AVOptions of the codec/format
 * (and scaler or filter) layers, and finally the interactive key bindings.
 * The opt/arg parameters are part of the generic help-callback signature
 * and are unused here. */
3612 void show_help_default(const char *opt, const char *arg)
3614     av_log_set_callback(log_callback_help);
3616     show_help_options(options, "Main options:", 0, OPT_EXPERT, 0);
3617     show_help_options(options, "Advanced options:", OPT_EXPERT, 0, 0);
3619     show_help_children(avcodec_get_class(), AV_OPT_FLAG_DECODING_PARAM);
3620     show_help_children(avformat_get_class(), AV_OPT_FLAG_DECODING_PARAM);
/* Without libavfilter the scaler is used directly, so show its options;
 * otherwise the filter classes are shown instead (the #else branch is
 * not visible in this excerpt — confirm against the full file). */
3621 #if !CONFIG_AVFILTER
3622     show_help_children(sws_get_class(), AV_OPT_FLAG_ENCODING_PARAM);
3624     show_help_children(avfilter_get_class(), AV_OPT_FLAG_FILTERING_PARAM);
/* Interactive key bindings available during playback. */
3626     printf("\nWhile playing:\n"
3628            "f                   toggle full screen\n"
3630            "a                   cycle audio channel in the current program\n"
3631            "v                   cycle video channel\n"
3632            "t                   cycle subtitle channel in the current program\n"
3634            "w                   cycle video filters or show modes\n"
3635            "s                   activate frame-step mode\n"
3636            "left/right          seek backward/forward 10 seconds\n"
3637            "down/up             seek backward/forward 1 minute\n"
3638            "page down/page up   seek backward/forward 10 minutes\n"
3639            "mouse click         seek to percentage in file corresponding to fraction of width\n"
/* Lock manager callback registered with av_lockmgr_register(): wraps an
 * SDL mutex behind the AVLockOp protocol so libavcodec can serialize
 * non-thread-safe operations. Returns 0 on success, nonzero on failure
 * (the !! normalizes SDL's return codes to 0/1). */
3643 static int lockmgr(void **mtx, enum AVLockOp op)
/* Allocate a new mutex and store it through *mtx. */
3646    case AV_LOCK_CREATE:
3647       *mtx = SDL_CreateMutex();
3651    case AV_LOCK_OBTAIN:
3652       return !!SDL_LockMutex(*mtx);
3653    case AV_LOCK_RELEASE:
3654       return !!SDL_UnlockMutex(*mtx);
/* Free the mutex; its pointer value is not cleared by the caller. */
3655    case AV_LOCK_DESTROY:
3656       SDL_DestroyMutex(*mtx);
3662 /* Called from the main */
3663 int main(int argc, char **argv)
3667 char dummy_videodriver[] = "SDL_VIDEODRIVER=dummy";
3669 av_log_set_flags(AV_LOG_SKIP_REPEATED);
3670 parse_loglevel(argc, argv, options);
3672 /* register all codecs, demux and protocols */
3674 avdevice_register_all();
3677 avfilter_register_all();
3680 avformat_network_init();
3684 signal(SIGINT , sigterm_handler); /* Interrupt (ANSI). */
3685 signal(SIGTERM, sigterm_handler); /* Termination (ANSI). */
3687 show_banner(argc, argv, options);
3689 parse_options(NULL, argc, argv, options, opt_input_file);
3691 if (!input_filename) {
3693 av_log(NULL, AV_LOG_FATAL, "An input file must be specified\n");
3694 av_log(NULL, AV_LOG_FATAL,
3695 "Use -h to get full help or, even better, run 'man %s'\n", program_name);
3699 if (display_disable) {
3702 flags = SDL_INIT_VIDEO | SDL_INIT_AUDIO | SDL_INIT_TIMER;
3704 flags &= ~SDL_INIT_AUDIO;
3705 if (display_disable)
3706 SDL_putenv(dummy_videodriver); /* For the event queue, we always need a video driver. */
3707 #if !defined(_WIN32) && !defined(__APPLE__)
3708 flags |= SDL_INIT_EVENTTHREAD; /* Not supported on Windows or Mac OS X */
3710 if (SDL_Init (flags)) {
3711 av_log(NULL, AV_LOG_FATAL, "Could not initialize SDL - %s\n", SDL_GetError());
3712 av_log(NULL, AV_LOG_FATAL, "(Did you set the DISPLAY variable?)\n");
3716 if (!display_disable) {
3717 const SDL_VideoInfo *vi = SDL_GetVideoInfo();
3718 fs_screen_width = vi->current_w;
3719 fs_screen_height = vi->current_h;
3722 SDL_EventState(SDL_ACTIVEEVENT, SDL_IGNORE);
3723 SDL_EventState(SDL_SYSWMEVENT, SDL_IGNORE);
3724 SDL_EventState(SDL_USEREVENT, SDL_IGNORE);
3726 if (av_lockmgr_register(lockmgr)) {
3727 av_log(NULL, AV_LOG_FATAL, "Could not initialize lock manager!\n");
3731 av_init_packet(&flush_pkt);
3732 flush_pkt.data = (uint8_t *)&flush_pkt;
3734 is = stream_open(input_filename, file_iformat);
3736 av_log(NULL, AV_LOG_FATAL, "Failed to initialize VideoState!\n");