diff --git a/ffplay.c b/ffplay.c
index b41820c51bd854826da93e887a2036a7316eba92..7ebe6f3b445a9dcc5b1d6af7571929055e285352 100644
--- a/ffplay.c
+++ b/ffplay.c
@@ -102,6 +102,7 @@ typedef struct VideoPicture {
     SDL_Overlay *bmp;
     int width, height; /* source height & width */
     int allocated;
+    int reallocate;
     enum PixelFormat pix_fmt;
 
 #if CONFIG_AVFILTER
@@ -150,11 +151,10 @@ typedef struct VideoState {
     AVStream *audio_st;
     PacketQueue audioq;
     int audio_hw_buf_size;
-    /* samples output by the codec. we reserve more space for avsync
-       compensation, resampling and format conversion */
-    DECLARE_ALIGNED(16,uint8_t,audio_buf1)[AVCODEC_MAX_AUDIO_FRAME_SIZE * 4];
     DECLARE_ALIGNED(16,uint8_t,audio_buf2)[AVCODEC_MAX_AUDIO_FRAME_SIZE * 4];
+    uint8_t silence_buf[SDL_AUDIO_BUFFER_SIZE];
     uint8_t *audio_buf;
+    uint8_t *audio_buf1;
     unsigned int audio_buf_size; /* in bytes */
     int audio_buf_index; /* in bytes */
     int audio_write_buf_size;
@@ -171,6 +171,9 @@ typedef struct VideoState {
     struct SwrContext *swr_ctx;
     double audio_current_pts;
     double audio_current_pts_drift;
+    int frame_drops_early;
+    int frame_drops_late;
+    AVFrame *frame;
 
     enum ShowMode {
         SHOW_MODE_NONE = -1, SHOW_MODE_VIDEO = 0, SHOW_MODE_WAVES, SHOW_MODE_RDFT, SHOW_MODE_NB
@@ -196,6 +199,10 @@ typedef struct VideoState {
     double frame_timer;
     double frame_last_pts;
     double frame_last_duration;
+    double frame_last_dropped_pts;
+    double frame_last_returned_time;
+    double frame_last_filter_delay;
+    int64_t frame_last_dropped_pos;
     double video_clock;                          ///<pts of last decoded frame / predicted pts of next decoded frame
     int video_stream;
     AVStream *video_st;
@@ -283,7 +290,7 @@ static AVPacket flush_pkt;
 
 static SDL_Surface *screen;
 
-void exit_program(int ret)
+void av_noreturn exit_program(int ret)
 {
     exit(ret);
 }
@@ -907,6 +914,7 @@ static void do_exit(VideoState *is)
 #if CONFIG_AVFILTER
     avfilter_uninit();
 #endif
+    avformat_network_deinit();
     if (show_status)
         printf("\n");
     SDL_Quit();
@@ -1101,20 +1109,35 @@ static void pictq_next_picture(VideoState *is) {
     SDL_UnlockMutex(is->pictq_mutex);
 }
 
+static void update_video_pts(VideoState *is, double pts, int64_t pos) {
+    double time = av_gettime() / 1000000.0;
+    /* update current video pts */
+    is->video_current_pts = pts;
+    is->video_current_pts_drift = is->video_current_pts - time;
+    is->video_current_pos = pos;
+    is->frame_last_pts = pts;
+}
+
 /* called to display each frame */
 static void video_refresh(void *opaque)
 {
     VideoState *is = opaque;
     VideoPicture *vp;
+    double time;
 
     SubPicture *sp, *sp2;
 
     if (is->video_st) {
 retry:
         if (is->pictq_size == 0) {
+            SDL_LockMutex(is->pictq_mutex);
+            if (is->frame_last_dropped_pts != AV_NOPTS_VALUE && is->frame_last_dropped_pts > is->frame_last_pts) {
+                update_video_pts(is, is->frame_last_dropped_pts, is->frame_last_dropped_pos);
+                is->frame_last_dropped_pts = AV_NOPTS_VALUE;
+            }
+            SDL_UnlockMutex(is->pictq_mutex);
             //nothing to do, no picture to display in the queue
         } else {
-            double time= av_gettime()/1000000.0;
             double last_duration, duration, delay;
             /* dequeue the picture */
             vp = &is->pictq[is->pictq_rindex];
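
For context, a minimal sketch (not part of this patch) of how a drift-based clock like the one update_video_pts() maintains is read back elsewhere: the stored drift is pts minus the wallclock at set time, so adding the current wallclock recovers the running pts. `read_drift_clock` and `now` (current time in seconds, e.g. av_gettime() / 1000000.0) are illustrative names only.

/* Illustrative sketch of reading back a drift-based clock. */
static double read_drift_clock(double current_pts, double current_pts_drift,
                               int paused, double now)
{
    /* while paused the clock is frozen at the last set pts */
    return paused ? current_pts : current_pts_drift + now;
}
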
@@ -1132,17 +1155,16 @@ retry:
             }
             delay = compute_target_delay(is->frame_last_duration, is);
 
+            time= av_gettime()/1000000.0;
             if(time < is->frame_timer + delay)
                 return;
 
-            is->frame_last_pts = vp->pts;
             if (delay > 0)
                 is->frame_timer += delay * FFMAX(1, floor((time-is->frame_timer) / delay));
 
-            /* update current video pts */
-            is->video_current_pts = vp->pts;
-            is->video_current_pts_drift = is->video_current_pts - time;
-            is->video_current_pos = vp->pos;
+            SDL_LockMutex(is->pictq_mutex);
+            update_video_pts(is, vp->pts, vp->pos);
+            SDL_UnlockMutex(is->pictq_mutex);
 
             if(is->pictq_size > 1) {
                  VideoPicture *nextvp= &is->pictq[(is->pictq_rindex+1)%VIDEO_PICTURE_QUEUE_SIZE];
@@ -1153,6 +1175,7 @@ retry:
 
             if((framedrop>0 || (framedrop && is->audio_st)) && time > is->frame_timer + duration){
                 if(is->pictq_size > 1){
+                    is->frame_drops_late++;
                     pictq_next_picture(is);
                     goto retry;
                 }
@@ -1238,9 +1261,10 @@ retry:
             av_diff = 0;
             if (is->audio_st && is->video_st)
                 av_diff = get_audio_clock(is) - get_video_clock(is);
-            printf("%7.2f A-V:%7.3f aq=%5dKB vq=%5dKB sq=%5dB f=%"PRId64"/%"PRId64"   \r",
+            printf("%7.2f A-V:%7.3f fd=%4d aq=%5dKB vq=%5dKB sq=%5dB f=%"PRId64"/%"PRId64"   \r",
                    get_master_clock(is),
                    av_diff,
+                   is->frame_drops_early + is->frame_drops_late,
                    aqsize / 1024,
                    vqsize / 1024,
                    sqsize,
@@ -1338,7 +1362,7 @@ static int queue_picture(VideoState *is, AVFrame *src_frame, double pts1, int64_
     vp->duration = frame_delay;
 
     /* alloc or resize hardware picture buffer */
-    if (!vp->bmp ||
+    if (!vp->bmp || vp->reallocate ||
 #if CONFIG_AVFILTER
         vp->width  != is->out_video_filter->inputs[0]->w ||
         vp->height != is->out_video_filter->inputs[0]->h) {
@@ -1348,7 +1372,8 @@ static int queue_picture(VideoState *is, AVFrame *src_frame, double pts1, int64_
 #endif
         SDL_Event event;
 
-        vp->allocated = 0;
+        vp->allocated  = 0;
+        vp->reallocate = 0;
 
         /* the allocation must be done in the main thread to avoid
            locking problems */
@@ -1446,19 +1471,22 @@ static int get_video_frame(VideoState *is, AVFrame *frame, int64_t *pts, AVPacke
             SDL_CondWait(is->pictq_cond, is->pictq_mutex);
         }
         is->video_current_pos = -1;
-        SDL_UnlockMutex(is->pictq_mutex);
-
         is->frame_last_pts = AV_NOPTS_VALUE;
         is->frame_last_duration = 0;
         is->frame_timer = (double)av_gettime() / 1000000.0;
+        is->frame_last_dropped_pts = AV_NOPTS_VALUE;
+        SDL_UnlockMutex(is->pictq_mutex);
+
         return 0;
     }
 
     avcodec_decode_video2(is->video_st->codec, frame, &got_picture, pkt);
 
     if (got_picture) {
+        int ret = 1;
+
         if (decoder_reorder_pts == -1) {
-            *pts = frame->best_effort_timestamp;
+            *pts = *(int64_t*)av_opt_ptr(avcodec_get_frame_class(), frame, "best_effort_timestamp");
         } else if (decoder_reorder_pts) {
             *pts = frame->pkt_pts;
         } else {
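
As an aside, a hedged sketch of what the av_opt_ptr() lookup above amounts to: resolving the "best_effort_timestamp" AVOption on the AVFrame class and dereferencing the field it points at, presumably used instead of direct field access for portability across library builds. `best_effort_ts` is an illustrative helper, not ffplay code.

#include <libavcodec/avcodec.h>
#include <libavutil/opt.h>

/* Illustrative: fetch AVFrame.best_effort_timestamp through the AVOptions API. */
static int64_t best_effort_ts(AVFrame *frame)
{
    int64_t *p = av_opt_ptr(avcodec_get_frame_class(), frame, "best_effort_timestamp");
    return p ? *p : AV_NOPTS_VALUE;
}
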
@@ -1469,8 +1497,29 @@ static int get_video_frame(VideoState *is, AVFrame *frame, int64_t *pts, AVPacke
             *pts = 0;
         }
 
-        return 1;
+        if (((is->av_sync_type == AV_SYNC_AUDIO_MASTER && is->audio_st) || is->av_sync_type == AV_SYNC_EXTERNAL_CLOCK) &&
+             (framedrop>0 || (framedrop && is->audio_st))) {
+            SDL_LockMutex(is->pictq_mutex);
+            if (is->frame_last_pts != AV_NOPTS_VALUE && *pts) {
+                double clockdiff = get_video_clock(is) - get_master_clock(is);
+                double dpts = av_q2d(is->video_st->time_base) * *pts;
+                double ptsdiff = dpts - is->frame_last_pts;
+                if (fabs(clockdiff) < AV_NOSYNC_THRESHOLD &&
+                     ptsdiff > 0 && ptsdiff < AV_NOSYNC_THRESHOLD &&
+                     clockdiff + ptsdiff - is->frame_last_filter_delay < 0) {
+                    is->frame_last_dropped_pos = pkt->pos;
+                    is->frame_last_dropped_pts = dpts;
+                    is->frame_drops_early++;
+                    ret = 0;
+                }
+            }
+            SDL_UnlockMutex(is->pictq_mutex);
+        }
+
+        if (ret)
+            is->frame_last_returned_time = av_gettime() / 1000000.0;
 
+        return ret;
     }
     return 0;
 }
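
The early-drop test added above boils down to the predicate sketched below: a decoded frame is discarded before filtering/display when, even after advancing by its pts step, the video clock would still trail the master clock by more than the measured filter delay. `should_drop_early` is an illustrative helper; AV_NOSYNC_THRESHOLD is redefined here only for self-containment, assuming ffplay's 10-second value.

#include <math.h>

#define AV_NOSYNC_THRESHOLD 10.0   /* assumed to match ffplay's threshold, in seconds */

/* Illustrative restatement of the early frame-drop condition. */
static int should_drop_early(double clockdiff, double ptsdiff, double filter_delay)
{
    return fabs(clockdiff) < AV_NOSYNC_THRESHOLD &&        /* clocks not wildly apart   */
           ptsdiff > 0 && ptsdiff < AV_NOSYNC_THRESHOLD && /* sane forward pts step     */
           clockdiff + ptsdiff - filter_delay < 0;         /* still behind after a step */
}
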
@@ -1513,7 +1562,10 @@ static int input_get_buffer(AVCodecContext *codec, AVFrame *pic)
     edge = codec->flags & CODEC_FLAG_EMU_EDGE ? 0 : avcodec_get_edge_width();
     w += edge << 1;
     h += edge << 1;
-
+    if (codec->pix_fmt != ctx->outputs[0]->format) {
+        av_log(codec, AV_LOG_ERROR, "Pixel format mismatches %d %d\n", codec->pix_fmt, ctx->outputs[0]->format);
+        return -1;
+    }
     if(!(ref = avfilter_get_video_buffer(ctx->outputs[0], perms, w, h)))
         return -1;
 
@@ -1794,6 +1846,10 @@ static int video_thread(void *arg)
 
         if (ret < 0) goto the_end;
 
+        is->frame_last_filter_delay = av_gettime() / 1000000.0 - is->frame_last_returned_time;
+        if (fabs(is->frame_last_filter_delay) > AV_NOSYNC_THRESHOLD / 10.0)
+            is->frame_last_filter_delay = 0;
+
 #if CONFIG_AVFILTER
         if (!picref)
             continue;
@@ -1942,8 +1998,8 @@ static int synchronize_audio(VideoState *is, short *samples,
                     max_size = ((nb_samples * (100 + SAMPLE_CORRECTION_PERCENT_MAX)) / 100) * n;
                     if (wanted_size < min_size)
                         wanted_size = min_size;
-                    else if (wanted_size > FFMIN3(max_size, sizeof(is->audio_buf1), sizeof(is->audio_buf2)))
-                        wanted_size = FFMIN3(max_size, sizeof(is->audio_buf1), sizeof(is->audio_buf2));
+                    else if (wanted_size > FFMIN3(max_size, samples_size, sizeof(is->audio_buf2)))
+                        wanted_size = FFMIN3(max_size, samples_size, sizeof(is->audio_buf2));
 
                     /* add or remove samples to correct the synchro */
                     if (wanted_size < samples_size) {
@@ -1987,7 +2043,7 @@ static int audio_decode_frame(VideoState *is, double *pts_ptr)
     AVPacket *pkt = &is->audio_pkt;
     AVCodecContext *dec= is->audio_st->codec;
     int len1, len2, data_size, resampled_data_size;
-    int64_t dec_channel_layout;
+    int64_t dec_channel_layout, got_frame;
     double pts;
     int new_packet = 0;
     int flush_complete = 0;
@@ -1995,13 +2051,16 @@ static int audio_decode_frame(VideoState *is, double *pts_ptr)
     for(;;) {
         /* NOTE: the audio packet can contain several frames */
         while (pkt_temp->size > 0 || (!pkt_temp->data && new_packet)) {
+            if (!is->frame) {
+                if (!(is->frame = avcodec_alloc_frame()))
+                    return AVERROR(ENOMEM);
+            } else
+                avcodec_get_frame_defaults(is->frame);
+
             if (flush_complete)
                 break;
             new_packet = 0;
-            data_size = sizeof(is->audio_buf1);
-            len1 = avcodec_decode_audio3(dec,
-                                        (int16_t *)is->audio_buf1, &data_size,
-                                        pkt_temp);
+            len1 = avcodec_decode_audio4(dec, is->frame, &got_frame, pkt_temp);
             if (len1 < 0) {
                 /* if error, we skip the frame */
                 pkt_temp->size = 0;
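
A minimal sketch of the avcodec_decode_audio4() pattern this hunk switches to: the decoder fills an AVFrame and the decoded byte count is derived afterwards with av_samples_get_buffer_size(). `decode_audio_packet` is an illustrative helper with error handling trimmed, not ffplay code.

#include <libavcodec/avcodec.h>
#include <libavutil/samplefmt.h>

/* Illustrative: decode one audio packet into an AVFrame and return the
 * size in bytes of the samples now held in frame->data[0]. */
static int decode_audio_packet(AVCodecContext *dec, AVFrame *frame, AVPacket *pkt)
{
    int got_frame = 0;
    int len = avcodec_decode_audio4(dec, frame, &got_frame, pkt);
    if (len < 0)
        return len;   /* decode error: caller skips the rest of the packet */
    if (!got_frame)
        return 0;     /* decoder needs more input (or is flushing) */
    return av_samples_get_buffer_size(NULL, dec->channels,
                                      frame->nb_samples, dec->sample_fmt, 1);
}
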
@@ -2011,21 +2070,25 @@ static int audio_decode_frame(VideoState *is, double *pts_ptr)
             pkt_temp->data += len1;
             pkt_temp->size -= len1;
 
-            if (data_size <= 0) {
+            if (!got_frame) {
                 /* stop sending empty packets if the decoder is finished */
                 if (!pkt_temp->data && dec->codec->capabilities & CODEC_CAP_DELAY)
                     flush_complete = 1;
                 continue;
             }
+            data_size = av_samples_get_buffer_size(NULL, dec->channels,
+                                                   is->frame->nb_samples,
+                                                   dec->sample_fmt, 1);
 
             dec_channel_layout = (dec->channel_layout && dec->channels == av_get_channel_layout_nb_channels(dec->channel_layout)) ? dec->channel_layout : av_get_default_channel_layout(dec->channels);
 
             if (dec->sample_fmt != is->audio_src_fmt || dec_channel_layout != is->audio_src_channel_layout || dec->sample_rate != is->audio_src_freq) {
                 if (is->swr_ctx)
                     swr_free(&is->swr_ctx);
-                is->swr_ctx = swr_alloc2(NULL, is->audio_tgt_channel_layout, is->audio_tgt_fmt, is->audio_tgt_freq,
-                                               dec_channel_layout,          dec->sample_fmt,   dec->sample_rate,
-                                               0, NULL);
+                is->swr_ctx = swr_alloc_set_opts(NULL,
+                                                 is->audio_tgt_channel_layout, is->audio_tgt_fmt, is->audio_tgt_freq,
+                                                 dec_channel_layout,           dec->sample_fmt,   dec->sample_rate,
+                                                 0, NULL);
                 if (!is->swr_ctx || swr_init(is->swr_ctx) < 0) {
                     fprintf(stderr, "Cannot create sample rate converter for conversion of %d Hz %s %d channels to %d Hz %s %d channels!\n",
                         dec->sample_rate,
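
A self-contained sketch of the swr_alloc_set_opts() setup mirrored above: the resampler is allocated and configured in one call, then initialized with swr_init(). `make_resampler` is an illustrative helper, not part of ffplay.

#include <libswresample/swresample.h>
#include <libavutil/samplefmt.h>

/* Illustrative: build a ready-to-use SwrContext for the given in/out formats. */
static struct SwrContext *make_resampler(int64_t out_layout, enum AVSampleFormat out_fmt, int out_rate,
                                         int64_t in_layout,  enum AVSampleFormat in_fmt,  int in_rate)
{
    struct SwrContext *swr = swr_alloc_set_opts(NULL,
                                                out_layout, out_fmt, out_rate,
                                                in_layout,  in_fmt,  in_rate,
                                                0, NULL);
    if (!swr || swr_init(swr) < 0) {
        swr_free(&swr);
        return NULL;
    }
    return swr;
}
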
@@ -2044,7 +2107,7 @@ static int audio_decode_frame(VideoState *is, double *pts_ptr)
 
             resampled_data_size = data_size;
             if (is->swr_ctx) {
-                const uint8_t *in[] = {is->audio_buf1};
+                const uint8_t *in[] = { is->frame->data[0] };
                 uint8_t *out[] = {is->audio_buf2};
                 len2 = swr_convert(is->swr_ctx, out, sizeof(is->audio_buf2) / is->audio_tgt_channels / av_get_bytes_per_sample(is->audio_tgt_fmt),
                                                 in, data_size / dec->channels / av_get_bytes_per_sample(dec->sample_fmt));
@@ -2059,7 +2122,7 @@ static int audio_decode_frame(VideoState *is, double *pts_ptr)
                 is->audio_buf = is->audio_buf2;
                 resampled_data_size = len2 * is->audio_tgt_channels * av_get_bytes_per_sample(is->audio_tgt_fmt);
             } else {
-                is->audio_buf= is->audio_buf1;
+                is->audio_buf = is->frame->data[0];
             }
 
             /* if no pts, then compute it */
@@ -2093,8 +2156,7 @@ static int audio_decode_frame(VideoState *is, double *pts_ptr)
         if (pkt->data == flush_pkt.data)
             avcodec_flush_buffers(dec);
 
-        pkt_temp->data = pkt->data;
-        pkt_temp->size = pkt->size;
+        *pkt_temp = *pkt;
 
         /* update the audio clock with the pts, if available */
         if (pkt->pts != AV_NOPTS_VALUE) {
@@ -2118,9 +2180,8 @@ static void sdl_audio_callback(void *opaque, Uint8 *stream, int len)
            audio_size = audio_decode_frame(is, &pts);
            if (audio_size < 0) {
                 /* if error, just output silence */
-               is->audio_buf = is->audio_buf1;
-               is->audio_buf_size = 256 * is->audio_tgt_channels * av_get_bytes_per_sample(is->audio_tgt_fmt);
-               memset(is->audio_buf, 0, is->audio_buf_size);
+               is->audio_buf      = is->silence_buf;
+               is->audio_buf_size = sizeof(is->silence_buf);
            } else {
                if (is->show_mode != SHOW_MODE_VIDEO)
                    update_sample_display(is, (int16_t *)is->audio_buf, audio_size);
@@ -2160,9 +2221,9 @@ static int stream_component_open(VideoState *is, int stream_index)
         return -1;
     avctx = ic->streams[stream_index]->codec;
 
-    opts = filter_codec_opts(codec_opts, avctx->codec_id, ic, ic->streams[stream_index]);
-
     codec = avcodec_find_decoder(avctx->codec_id);
+    opts = filter_codec_opts(codec_opts, codec, ic, ic->streams[stream_index]);
+
     switch(avctx->codec_type){
         case AVMEDIA_TYPE_AUDIO   : if(audio_codec_name   ) codec= avcodec_find_decoder_by_name(   audio_codec_name); break;
         case AVMEDIA_TYPE_SUBTITLE: if(subtitle_codec_name) codec= avcodec_find_decoder_by_name(subtitle_codec_name); break;
@@ -2173,7 +2234,12 @@ static int stream_component_open(VideoState *is, int stream_index)
 
     avctx->workaround_bugs = workaround_bugs;
     avctx->lowres = lowres;
-    if(lowres) avctx->flags |= CODEC_FLAG_EMU_EDGE;
+    if(avctx->lowres > codec->max_lowres){
+        av_log(avctx, AV_LOG_WARNING, "The maximum value for lowres supported by the decoder is %d\n",
+                codec->max_lowres);
+        avctx->lowres= codec->max_lowres;
+    }
+    if(avctx->lowres) avctx->flags |= CODEC_FLAG_EMU_EDGE;
     avctx->idct_algo= idct;
     if(fast) avctx->flags2 |= CODEC_FLAG2_FAST;
     avctx->skip_frame= skip_frame;
@@ -2291,10 +2357,15 @@ static void stream_component_close(VideoState *is, int stream_index)
         if (is->swr_ctx)
             swr_free(&is->swr_ctx);
         av_free_packet(&is->audio_pkt);
+        av_freep(&is->audio_buf1);
+        is->audio_buf = NULL;
+        av_freep(&is->frame);
 
         if (is->rdft) {
             av_rdft_end(is->rdft);
             av_freep(&is->rdft_data);
+            is->rdft = NULL;
+            is->rdft_bits = 0;
         }
         break;
     case AVMEDIA_TYPE_VIDEO:
@@ -2353,7 +2424,7 @@ static void stream_component_close(VideoState *is, int stream_index)
    variable instead of a thread local variable */
 static VideoState *global_video_state;
 
-static int decode_interrupt_cb(void)
+static int decode_interrupt_cb(void *ctx)
 {
     return (global_video_state && global_video_state->abort_request);
 }
@@ -2378,8 +2449,9 @@ static int read_thread(void *arg)
     is->subtitle_stream = -1;
 
     global_video_state = is;
-    avio_set_interrupt_cb(decode_interrupt_cb);
 
+    ic = avformat_alloc_context();
+    ic->interrupt_callback.callback = decode_interrupt_cb;
     err = avformat_open_input(&ic, is->filename, is->iformat, &format_opts);
     if (err < 0) {
         print_error(is->filename, err);
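
A hedged sketch of the per-context interrupt mechanism that replaces the old global avio_set_interrupt_cb(): a non-zero return from the callback aborts blocking I/O inside libavformat. `abort_requested` and `open_with_interrupt` are illustrative names, not ffplay code.

#include <libavformat/avformat.h>

static volatile int abort_requested;   /* illustrative flag set elsewhere */

/* Illustrative: interrupt callback consulted by libavformat during blocking I/O. */
static int interrupt_cb(void *opaque)
{
    return abort_requested;
}

/* Illustrative: attach the callback before opening the input. */
static int open_with_interrupt(AVFormatContext **ic, const char *url)
{
    *ic = avformat_alloc_context();
    if (!*ic)
        return AVERROR(ENOMEM);
    (*ic)->interrupt_callback.callback = interrupt_cb;
    (*ic)->interrupt_callback.opaque   = NULL;
    return avformat_open_input(ic, url, NULL, NULL);
}
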
@@ -2491,8 +2563,10 @@ static int read_thread(void *arg)
             else
                 av_read_play(ic);
         }
-#if CONFIG_RTSP_DEMUXER
-        if (is->paused && !strcmp(ic->iformat->name, "rtsp")) {
+#if CONFIG_RTSP_DEMUXER || CONFIG_MMSH_PROTOCOL
+        if (is->paused &&
+                (!strcmp(ic->iformat->name, "rtsp") ||
+                 (ic->pb && !strncmp(input_filename, "mmsh:", 5)))) {
             /* wait 10 ms to avoid trying to get another packet */
             /* XXX: horrible */
             SDL_Delay(10);
@@ -2703,6 +2777,12 @@ static void stream_cycle_channel(VideoState *is, int codec_type)
 static void toggle_full_screen(VideoState *is)
 {
     is_full_screen = !is_full_screen;
+#if defined(__APPLE__) && SDL_VERSION_ATLEAST(1, 2, 14)
+    /* OS X needs to reallocate the SDL overlays */
+    for (int i = 0; i < VIDEO_PICTURE_QUEUE_SIZE; i++) {
+        is->pictq[i].reallocate = 1;
+    }
+#endif
     video_open(is);
 }
 
@@ -3083,6 +3163,7 @@ int main(int argc, char **argv)
     avfilter_register_all();
 #endif
     av_register_all();
+    avformat_network_init();
 
     init_opts();
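
Finally, a minimal sketch of the init/deinit pairing introduced by this patch: network support is brought up once at startup and torn down again in do_exit(). The skeleton main() below is illustrative only.

#include <libavformat/avformat.h>

int main(void)
{
    av_register_all();
    avformat_network_init();     /* enable network protocols once at startup */

    /* ... open inputs and run the player ... */

    avformat_network_deinit();   /* matching teardown before exiting */
    return 0;
}
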