diff --git a/ffplay.c b/ffplay.c
index e65226a45391fe9c13f540ad7e61d30cc226b9e4..c85c9ddc1a8b473f507ca198362a9028b1938bff 100644
--- a/ffplay.c
+++ b/ffplay.c
  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
  */
 
+#define _XOPEN_SOURCE 600
+
 #include "config.h"
 #include <inttypes.h>
 #include <math.h>
 #include <limits.h>
 #include "libavutil/avstring.h"
+#include "libavutil/colorspace.h"
 #include "libavutil/pixdesc.h"
+#include "libavcore/imgutils.h"
+#include "libavcore/parseutils.h"
 #include "libavformat/avformat.h"
 #include "libavdevice/avdevice.h"
 #include "libswscale/swscale.h"
 #include "libavcodec/audioconvert.h"
-#include "libavcodec/colorspace.h"
 #include "libavcodec/opt.h"
 #include "libavcodec/avfft.h"
 
@@ -104,7 +108,7 @@ typedef struct VideoPicture {
     enum PixelFormat pix_fmt;
 
 #if CONFIG_AVFILTER
-    AVFilterPicRef *picref;
+    AVFilterBufferRef *picref;
 #endif
 } VideoPicture;
 
@@ -260,6 +264,8 @@ static int error_recognition = FF_ER_CAREFUL;
 static int error_concealment = 3;
 static int decoder_reorder_pts= -1;
 static int autoexit;
+static int exit_on_keydown;
+static int exit_on_mousedown;
 static int loop=1;
 static int framedrop=1;
 
@@ -675,18 +681,7 @@ static void blend_subrect(AVPicture *dst, const AVSubtitleRect *rect, int imgw,
 
 static void free_subpicture(SubPicture *sp)
 {
-    int i;
-
-    for (i = 0; i < sp->sub.num_rects; i++)
-    {
-        av_freep(&sp->sub.rects[i]->pict.data[0]);
-        av_freep(&sp->sub.rects[i]->pict.data[1]);
-        av_freep(&sp->sub.rects[i]);
-    }
-
-    av_free(sp->sub.rects);
-
-    memset(&sp->sub, 0, sizeof(AVSubtitle));
+    avsubtitle_free(&sp->sub);
 }
 
 static void video_image_display(VideoState *is)
@@ -702,10 +697,10 @@ static void video_image_display(VideoState *is)
     vp = &is->pictq[is->pictq_rindex];
     if (vp->bmp) {
 #if CONFIG_AVFILTER
-         if (vp->picref->pixel_aspect.num == 0)
+         if (vp->picref->video->pixel_aspect.num == 0)
              aspect_ratio = 0;
          else
-             aspect_ratio = av_q2d(vp->picref->pixel_aspect);
+             aspect_ratio = av_q2d(vp->picref->video->pixel_aspect);
 #else
 
         /* XXX: use variable in the frame */
@@ -941,7 +936,8 @@ static void video_audio_display(VideoState *s)
             for(y=0; y<s->height; y++){
                 double w= 1/sqrt(nb_freq);
                 int a= sqrt(w*sqrt(data[0][2*y+0]*data[0][2*y+0] + data[0][2*y+1]*data[0][2*y+1]));
-                int b= sqrt(w*sqrt(data[1][2*y+0]*data[1][2*y+0] + data[1][2*y+1]*data[1][2*y+1]));
+                int b= (nb_display_channels == 2 ) ? sqrt(w*sqrt(data[1][2*y+0]*data[1][2*y+0]
+                       + data[1][2*y+1]*data[1][2*y+1])) : a;
                 a= FFMIN(a,255);
                 b= FFMIN(b,255);
                 fgcolor = SDL_MapRGB(screen->format, a, b, (a+b)/2);
@@ -1297,6 +1293,60 @@ retry:
     }
 }
 
+static void stream_close(VideoState *is)
+{
+    VideoPicture *vp;
+    int i;
+    /* XXX: use a special url_shutdown call to abort parse cleanly */
+    is->abort_request = 1;
+    SDL_WaitThread(is->parse_tid, NULL);
+    SDL_WaitThread(is->refresh_tid, NULL);
+
+    /* free all pictures */
+    for(i=0;i<VIDEO_PICTURE_QUEUE_SIZE; i++) {
+        vp = &is->pictq[i];
+#if CONFIG_AVFILTER
+        if (vp->picref) {
+            avfilter_unref_buffer(vp->picref);
+            vp->picref = NULL;
+        }
+#endif
+        if (vp->bmp) {
+            SDL_FreeYUVOverlay(vp->bmp);
+            vp->bmp = NULL;
+        }
+    }
+    SDL_DestroyMutex(is->pictq_mutex);
+    SDL_DestroyCond(is->pictq_cond);
+    SDL_DestroyMutex(is->subpq_mutex);
+    SDL_DestroyCond(is->subpq_cond);
+#if !CONFIG_AVFILTER
+    if (is->img_convert_ctx)
+        sws_freeContext(is->img_convert_ctx);
+#endif
+    av_free(is);
+}
+
+static void do_exit(void)
+{
+    int i;
+    if (cur_stream) {
+        stream_close(cur_stream);
+        cur_stream = NULL;
+    }
+    for (i = 0; i < AVMEDIA_TYPE_NB; i++)
+        av_free(avcodec_opts[i]);
+    av_free(avformat_opts);
+    av_free(sws_opts);
+#if CONFIG_AVFILTER
+    avfilter_uninit();
+#endif
+    if (show_status)
+        printf("\n");
+    SDL_Quit();
+    exit(0);
+}
+
 /* allocate a picture (needs to do that in main thread to avoid
    potential locking problems */
 static void alloc_picture(void *opaque)
@@ -1311,7 +1361,7 @@ static void alloc_picture(void *opaque)
 
 #if CONFIG_AVFILTER
     if (vp->picref)
-        avfilter_unref_pic(vp->picref);
+        avfilter_unref_buffer(vp->picref);
     vp->picref = NULL;
 
     vp->width   = is->out_video_filter->inputs[0]->w;
@@ -1326,6 +1376,14 @@ static void alloc_picture(void *opaque)
     vp->bmp = SDL_CreateYUVOverlay(vp->width, vp->height,
                                    SDL_YV12_OVERLAY,
                                    screen);
+    if (!vp->bmp || vp->bmp->pitches[0] < vp->width) {
+        /* SDL allocates a buffer smaller than requested if the video
+         * overlay hardware is unable to support the requested size. */
+        fprintf(stderr, "Error: the video system does not support an image\n"
+                        "size of %dx%d pixels. Try using -lowres or -vf \"scale=w:h\"\n"
+                        "to reduce the image size.\n", vp->width, vp->height );
+        do_exit();
+    }
 
     SDL_LockMutex(is->pictq_mutex);
     vp->allocated = 1;
@@ -1396,7 +1454,7 @@ static int queue_picture(VideoState *is, AVFrame *src_frame, double pts, int64_t
         AVPicture pict;
 #if CONFIG_AVFILTER
         if(vp->picref)
-            avfilter_unref_pic(vp->picref);
+            avfilter_unref_buffer(vp->picref);
         vp->picref = src_frame->opaque;
 #endif
 
@@ -1567,7 +1625,7 @@ typedef struct {
 static int input_get_buffer(AVCodecContext *codec, AVFrame *pic)
 {
     AVFilterContext *ctx = codec->opaque;
-    AVFilterPicRef  *ref;
+    AVFilterBufferRef  *ref;
     int perms = AV_PERM_WRITE;
     int i, w, h, stride[4];
     unsigned edge;
@@ -1589,11 +1647,11 @@ static int input_get_buffer(AVCodecContext *codec, AVFrame *pic)
     if(!(ref = avfilter_get_video_buffer(ctx->outputs[0], perms, w, h)))
         return -1;
 
-    ref->w = codec->width;
-    ref->h = codec->height;
-    for(i = 0; i < 3; i ++) {
-        unsigned hshift = i == 0 ? 0 : av_pix_fmt_descriptors[ref->pic->format].log2_chroma_w;
-        unsigned vshift = i == 0 ? 0 : av_pix_fmt_descriptors[ref->pic->format].log2_chroma_h;
+    ref->video->w = codec->width;
+    ref->video->h = codec->height;
+    for(i = 0; i < 4; i ++) {
+        unsigned hshift = (i == 1 || i == 2) ? av_pix_fmt_descriptors[ref->format].log2_chroma_w : 0;
+        unsigned vshift = (i == 1 || i == 2) ? av_pix_fmt_descriptors[ref->format].log2_chroma_h : 0;
 
         if (ref->data[i]) {
             ref->data[i]    += (edge >> hshift) + ((edge * ref->linesize[i]) >> vshift);
@@ -1611,20 +1669,20 @@ static int input_get_buffer(AVCodecContext *codec, AVFrame *pic)
 static void input_release_buffer(AVCodecContext *codec, AVFrame *pic)
 {
     memset(pic->data, 0, sizeof(pic->data));
-    avfilter_unref_pic(pic->opaque);
+    avfilter_unref_buffer(pic->opaque);
 }
 
 static int input_reget_buffer(AVCodecContext *codec, AVFrame *pic)
 {
-    AVFilterPicRef *ref = pic->opaque;
+    AVFilterBufferRef *ref = pic->opaque;
 
     if (pic->data[0] == NULL) {
         pic->buffer_hints |= FF_BUFFER_HINTS_READABLE;
         return codec->get_buffer(codec, pic);
     }
 
-    if ((codec->width != ref->w) || (codec->height != ref->h) ||
-        (codec->pix_fmt != ref->pic->format)) {
+    if ((codec->width != ref->video->w) || (codec->height != ref->video->h) ||
+        (codec->pix_fmt != ref->format)) {
         av_log(codec, AV_LOG_ERROR, "Picture properties changed.\n");
         return -1;
     }
@@ -1663,7 +1721,7 @@ static void input_uninit(AVFilterContext *ctx)
 static int input_request_frame(AVFilterLink *link)
 {
     FilterPriv *priv = link->src->priv;
-    AVFilterPicRef *picref;
+    AVFilterBufferRef *picref;
     int64_t pts = 0;
     AVPacket pkt;
     int ret;
@@ -1674,17 +1732,18 @@ static int input_request_frame(AVFilterLink *link)
         return -1;
 
     if(priv->use_dr1) {
-        picref = avfilter_ref_pic(priv->frame->opaque, ~0);
+        picref = avfilter_ref_buffer(priv->frame->opaque, ~0);
     } else {
         picref = avfilter_get_video_buffer(link, AV_PERM_WRITE, link->w, link->h);
-        av_picture_copy((AVPicture *)&picref->data, (AVPicture *)priv->frame,
-                        picref->pic->format, link->w, link->h);
+        av_image_copy(picref->data, picref->linesize,
+                      priv->frame->data, priv->frame->linesize,
+                      picref->format, link->w, link->h);
     }
     av_free_packet(&pkt);
 
     picref->pts = pts;
     picref->pos = pkt.pos;
-    picref->pixel_aspect = priv->is->video_st->codec->sample_aspect_ratio;
+    picref->video->pixel_aspect = priv->is->video_st->codec->sample_aspect_ratio;
     avfilter_start_frame(link, picref);
     avfilter_draw_slice(link, 0, link->h, 1);
     avfilter_end_frame(link);
@@ -1748,13 +1807,13 @@ static int output_query_formats(AVFilterContext *ctx)
 static int get_filtered_video_frame(AVFilterContext *ctx, AVFrame *frame,
                                     int64_t *pts, int64_t *pos)
 {
-    AVFilterPicRef *pic;
+    AVFilterBufferRef *pic;
 
     if(avfilter_request_frame(ctx->inputs[0]))
         return -1;
-    if(!(pic = ctx->inputs[0]->cur_pic))
+    if(!(pic = ctx->inputs[0]->cur_buf))
         return -1;
-    ctx->inputs[0]->cur_pic = NULL;
+    ctx->inputs[0]->cur_buf = NULL;
 
     frame->opaque = pic;
     *pts          = pic->pts;
@@ -1797,8 +1856,8 @@ static int video_thread(void *arg)
     snprintf(sws_flags_str, sizeof(sws_flags_str), "flags=%d", sws_flags);
     graph->scale_sws_opts = av_strdup(sws_flags_str);
 
-    if(!(filt_src = avfilter_open(&input_filter,  "src")))   goto the_end;
-    if(!(filt_out = avfilter_open(&output_filter, "out")))   goto the_end;
+    if (avfilter_open(&filt_src, &input_filter,  "src") < 0) goto the_end;
+    if (avfilter_open(&filt_out, &output_filter, "out") < 0) goto the_end;
 
     if(avfilter_init_filter(filt_src, NULL, is))             goto the_end;
     if(avfilter_init_filter(filt_out, NULL, frame))          goto the_end;
@@ -2665,40 +2724,6 @@ static VideoState *stream_open(const char *filename, AVInputFormat *iformat)
     return is;
 }
 
-static void stream_close(VideoState *is)
-{
-    VideoPicture *vp;
-    int i;
-    /* XXX: use a special url_shutdown call to abort parse cleanly */
-    is->abort_request = 1;
-    SDL_WaitThread(is->parse_tid, NULL);
-    SDL_WaitThread(is->refresh_tid, NULL);
-
-    /* free all pictures */
-    for(i=0;i<VIDEO_PICTURE_QUEUE_SIZE; i++) {
-        vp = &is->pictq[i];
-#if CONFIG_AVFILTER
-        if (vp->picref) {
-            avfilter_unref_pic(vp->picref);
-            vp->picref = NULL;
-        }
-#endif
-        if (vp->bmp) {
-            SDL_FreeYUVOverlay(vp->bmp);
-            vp->bmp = NULL;
-        }
-    }
-    SDL_DestroyMutex(is->pictq_mutex);
-    SDL_DestroyCond(is->pictq_cond);
-    SDL_DestroyMutex(is->subpq_mutex);
-    SDL_DestroyCond(is->subpq_cond);
-#if !CONFIG_AVFILTER
-    if (is->img_convert_ctx)
-        sws_freeContext(is->img_convert_ctx);
-#endif
-    av_free(is);
-}
-
 static void stream_cycle_channel(VideoState *is, int codec_type)
 {
     AVFormatContext *ic = is->ic;
@@ -2776,26 +2801,6 @@ static void step_to_next_frame(void)
     step = 1;
 }
 
-static void do_exit(void)
-{
-    int i;
-    if (cur_stream) {
-        stream_close(cur_stream);
-        cur_stream = NULL;
-    }
-    for (i = 0; i < AVMEDIA_TYPE_NB; i++)
-        av_free(avcodec_opts[i]);
-    av_free(avformat_opts);
-    av_free(sws_opts);
-#if CONFIG_AVFILTER
-    avfilter_uninit();
-#endif
-    if (show_status)
-        printf("\n");
-    SDL_Quit();
-    exit(0);
-}
-
 static void toggle_audio_display(void)
 {
     if (cur_stream) {
@@ -2819,6 +2824,10 @@ static void event_loop(void)
         SDL_WaitEvent(&event);
         switch(event.type) {
         case SDL_KEYDOWN:
+            if (exit_on_keydown) {
+                do_exit();
+                break;
+            }
             switch(event.key.keysym.sym) {
             case SDLK_ESCAPE:
             case SDLK_q:
@@ -2887,6 +2896,10 @@ static void event_loop(void)
             }
             break;
         case SDL_MOUSEBUTTONDOWN:
+            if (exit_on_mousedown) {
+                do_exit();
+                break;
+            }
         case SDL_MOUSEMOTION:
             if(event.type ==SDL_MOUSEBUTTONDOWN){
                 x= event.button.x;
@@ -2949,7 +2962,7 @@ static void event_loop(void)
 
 static void opt_frame_size(const char *arg)
 {
-    if (av_parse_video_frame_size(&frame_width, &frame_height, arg) < 0) {
+    if (av_parse_video_size(&frame_width, &frame_height, arg) < 0) {
         fprintf(stderr, "Incorrect frame size\n");
         exit(1);
     }
@@ -3068,6 +3081,8 @@ static const OptionDef options[] = {
     { "sync", HAS_ARG | OPT_FUNC2 | OPT_EXPERT, {(void*)opt_sync}, "set audio-video sync. type (type=audio/video/ext)", "type" },
     { "threads", HAS_ARG | OPT_FUNC2 | OPT_EXPERT, {(void*)opt_thread_count}, "thread count", "count" },
     { "autoexit", OPT_BOOL | OPT_EXPERT, {(void*)&autoexit}, "exit at the end", "" },
+    { "exitonkeydown", OPT_BOOL | OPT_EXPERT, {(void*)&exit_on_keydown}, "exit on key down", "" },
+    { "exitonmousedown", OPT_BOOL | OPT_EXPERT, {(void*)&exit_on_mousedown}, "exit on mouse down", "" },
     { "loop", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&loop}, "set number of times the playback shall be looped", "loop count" },
     { "framedrop", OPT_BOOL | OPT_EXPERT, {(void*)&framedrop}, "drop frames when cpu is too slow", "" },
     { "window_title", OPT_STRING | HAS_ARG, {(void*)&window_title}, "set window title", "window title" },