index 546fb905663356756f881cafe78e1a1dbba62244..56ec7316e5c7e71bc06074589da64ae55971d1ed 100644
--- a/avplay.c
+++ b/avplay.c
@@ -299,7 +299,7 @@ static void packet_queue_flush(PacketQueue *q)
     SDL_LockMutex(q->mutex);
     for (pkt = q->first_pkt; pkt != NULL; pkt = pkt1) {
         pkt1 = pkt->next;
-        av_free_packet(&pkt->pkt);
+        av_packet_unref(&pkt->pkt);
         av_freep(&pkt);
     }
     q->last_pkt = NULL;
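
The same one-line substitution recurs throughout the file: the deprecated av_free_packet() is replaced by av_packet_unref(), which drops the packet's buffer reference and resets the AVPacket structure. A minimal sketch of the pattern outside avplay's PacketQueue (drain_queue is an illustrative name, not code from this diff):

    #include <libavformat/avformat.h>
    #include <libavutil/mem.h>

    /* Free every node of a simple packet list with the reference-counted API.
     * av_packet_unref() releases this reference to the payload, which is what
     * av_free_packet() used to do for non-refcounted packets. */
    static void drain_queue(AVPacketList *head)
    {
        AVPacketList *cur, *next;
        for (cur = head; cur; cur = next) {
            next = cur->next;
            av_packet_unref(&cur->pkt);   /* drop the data reference   */
            av_freep(&cur);               /* free the list node itself */
        }
    }
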
@@ -320,10 +320,6 @@ static int packet_queue_put(PacketQueue *q, AVPacket *pkt)
 {
     AVPacketList *pkt1;
 
-    /* duplicate the packet */
-    if (pkt != &flush_pkt && av_dup_packet(pkt) < 0)
-        return -1;
-
     pkt1 = av_malloc(sizeof(AVPacketList));
     if (!pkt1)
         return -1;
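
The av_dup_packet() call disappears because packets coming out of av_read_frame() are reference-counted in this API, so packet_queue_put() can simply take over the caller's reference with the plain struct copy that follows. A hedged sketch of how a caller that still needs the packet afterwards could behave (the names follow avplay's, but this is not code from the diff):

    /* Take an extra reference before queueing so 'pkt' stays usable. */
    AVPacket copy;
    if (av_packet_ref(&copy, pkt) < 0)        /* new reference, same data     */
        return -1;
    if (packet_queue_put(q, &copy) < 0) {     /* queue owns 'copy' on success */
        av_packet_unref(&copy);
        return -1;
    }
    /* ... 'pkt' is still valid here ... */
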
@@ -435,7 +431,8 @@ static inline void fill_rectangle(SDL_Surface *screen,
 
 #define BPP 1
 
-static void blend_subrect(AVPicture *dst, const AVSubtitleRect *rect, int imgw, int imgh)
+static void blend_subrect(uint8_t *dst[4], uint16_t dst_linesize[4],
+                          const AVSubtitleRect *rect, int imgw, int imgh)
 {
     int wrap, wrap3, width2, skip2;
     int y, u, v, a, u1, v1, a1, w, h;
@@ -448,16 +445,17 @@ static void blend_subrect(AVPicture *dst, const AVSubtitleRect *rect, int imgw,
     dsth = av_clip(rect->h, 0, imgh);
     dstx = av_clip(rect->x, 0, imgw - dstw);
     dsty = av_clip(rect->y, 0, imgh - dsth);
-    lum = dst->data[0] + dsty * dst->linesize[0];
-    cb  = dst->data[1] + (dsty >> 1) * dst->linesize[1];
-    cr  = dst->data[2] + (dsty >> 1) * dst->linesize[2];
+    /* sdl has U and V inverted */
+    lum = dst[0] +  dsty       * dst_linesize[0];
+    cb  = dst[2] + (dsty >> 1) * dst_linesize[2];
+    cr  = dst[1] + (dsty >> 1) * dst_linesize[1];
 
     width2 = ((dstw + 1) >> 1) + (dstx & ~dstw & 1);
     skip2 = dstx >> 1;
-    wrap = dst->linesize[0];
-    wrap3 = rect->pict.linesize[0];
-    p = rect->pict.data[0];
-    pal = (const uint32_t *)rect->pict.data[1];  /* Now in YCrCb! */
+    wrap = dst_linesize[0];
+    wrap3 = rect->linesize[0];
+    p = rect->data[0];
+    pal = (const uint32_t *)rect->data[1];  /* Now in YCrCb! */
 
     if (dsty & 1) {
         lum += dstx;
@@ -503,8 +501,8 @@ static void blend_subrect(AVPicture *dst, const AVSubtitleRect *rect, int imgw,
         }
         p += wrap3 - dstw * BPP;
         lum += wrap - dstw - dstx;
-        cb += dst->linesize[1] - width2 - skip2;
-        cr += dst->linesize[2] - width2 - skip2;
+        cb += dst_linesize[2] - width2 - skip2;
+        cr += dst_linesize[1] - width2 - skip2;
     }
     for (h = dsth - (dsty & 1); h >= 2; h -= 2) {
         lum += dstx;
@@ -588,8 +586,8 @@ static void blend_subrect(AVPicture *dst, const AVSubtitleRect *rect, int imgw,
         }
         p += wrap3 + (wrap3 - dstw * BPP);
         lum += wrap + (wrap - dstw - dstx);
-        cb += dst->linesize[1] - width2 - skip2;
-        cr += dst->linesize[2] - width2 - skip2;
+        cb += dst_linesize[2] - width2 - skip2;
+        cr += dst_linesize[1] - width2 - skip2;
     }
     /* handle odd height */
     if (h) {
@@ -644,7 +642,6 @@ static void video_image_display(VideoState *is)
 {
     VideoPicture *vp;
     SubPicture *sp;
-    AVPicture pict;
     float aspect_ratio;
     int width, height, x, y;
     SDL_Rect rect;
@@ -681,17 +678,9 @@ static void video_image_display(VideoState *is)
                 {
                     SDL_LockYUVOverlay (vp->bmp);
 
-                    pict.data[0] = vp->bmp->pixels[0];
-                    pict.data[1] = vp->bmp->pixels[2];
-                    pict.data[2] = vp->bmp->pixels[1];
-
-                    pict.linesize[0] = vp->bmp->pitches[0];
-                    pict.linesize[1] = vp->bmp->pitches[2];
-                    pict.linesize[2] = vp->bmp->pitches[1];
-
                     for (i = 0; i < sp->sub.num_rects; i++)
-                        blend_subrect(&pict, sp->sub.rects[i],
-                                      vp->bmp->w, vp->bmp->h);
+                        blend_subrect(vp->bmp->pixels, vp->bmp->pitches,
+                                      sp->sub.rects[i], vp->bmp->w, vp->bmp->h);
 
                     SDL_UnlockYUVOverlay (vp->bmp);
                 }
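
With the temporary AVPicture gone, blend_subrect() is handed the SDL overlay's plane pointers and pitches directly, and the U/V swap that used to happen while filling that AVPicture now lives inside blend_subrect() itself (see the "sdl has U and V inverted" comment above). A sketch of what the overlay provides, assuming the SDL 1.2 SDL_YV12_OVERLAY that avplay uses:

    /* SDL 1.2 exposes the planes as Uint8 **pixels and the strides as
     * Uint16 *pitches, hence the new (uint8_t *dst[4], uint16_t dst_linesize[4])
     * signature.  For a YV12 overlay the chroma planes come in V, U order. */
    SDL_Overlay *bmp = vp->bmp;
    uint8_t  *y_plane = bmp->pixels[0];
    uint8_t  *v_plane = bmp->pixels[1];   /* V before U in SDL's layout */
    uint8_t  *u_plane = bmp->pixels[2];
    uint16_t  y_pitch = bmp->pitches[0];
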
@@ -1303,9 +1292,7 @@ static void alloc_picture(void *opaque)
 static int queue_picture(VideoState *is, AVFrame *src_frame, double pts, int64_t pos)
 {
     VideoPicture *vp;
-#if CONFIG_AVFILTER
-    AVPicture pict_src;
-#else
+#if !CONFIG_AVFILTER
     int dst_pix_fmt = AV_PIX_FMT_YUV420P;
 #endif
     /* wait until we have space to put a new picture */
@@ -1360,31 +1347,24 @@ static int queue_picture(VideoState *is, AVFrame *src_frame, double pts, int64_t
 
     /* if the frame is not skipped, then display it */
     if (vp->bmp) {
-        AVPicture pict = { { 0 } };
+        uint8_t *data[4];
+        int linesize[4];
 
         /* get a pointer on the bitmap */
         SDL_LockYUVOverlay (vp->bmp);
 
-        pict.data[0] = vp->bmp->pixels[0];
-        pict.data[1] = vp->bmp->pixels[2];
-        pict.data[2] = vp->bmp->pixels[1];
+        data[0] = vp->bmp->pixels[0];
+        data[1] = vp->bmp->pixels[2];
+        data[2] = vp->bmp->pixels[1];
 
-        pict.linesize[0] = vp->bmp->pitches[0];
-        pict.linesize[1] = vp->bmp->pitches[2];
-        pict.linesize[2] = vp->bmp->pitches[1];
+        linesize[0] = vp->bmp->pitches[0];
+        linesize[1] = vp->bmp->pitches[2];
+        linesize[2] = vp->bmp->pitches[1];
 
 #if CONFIG_AVFILTER
-        pict_src.data[0] = src_frame->data[0];
-        pict_src.data[1] = src_frame->data[1];
-        pict_src.data[2] = src_frame->data[2];
-
-        pict_src.linesize[0] = src_frame->linesize[0];
-        pict_src.linesize[1] = src_frame->linesize[1];
-        pict_src.linesize[2] = src_frame->linesize[2];
-
         // FIXME use direct rendering
-        av_picture_copy(&pict, &pict_src,
-                        vp->pix_fmt, vp->width, vp->height);
+        av_image_copy(data, linesize, src_frame->data, src_frame->linesize,
+                      vp->pix_fmt, vp->width, vp->height);
 #else
         av_opt_get_int(sws_opts, "sws_flags", 0, &sws_flags);
         is->img_convert_ctx = sws_getCachedContext(is->img_convert_ctx,
@@ -1395,7 +1375,7 @@ static int queue_picture(VideoState *is, AVFrame *src_frame, double pts, int64_t
             exit(1);
         }
         sws_scale(is->img_convert_ctx, src_frame->data, src_frame->linesize,
-                  0, vp->height, pict.data, pict.linesize);
+                  0, vp->height, data, linesize);
 #endif
         /* update the bitmap content */
         SDL_UnlockYUVOverlay(vp->bmp);
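
Both branches of queue_picture() now write straight into local data[]/linesize[] arrays: the avfilter build copies the decoded frame with av_image_copy() from libavutil/imgutils.h (the replacement for the deprecated av_picture_copy()), and the non-avfilter build passes the same arrays to sws_scale(). A self-contained sketch of that copy step under the same assumptions (copy_frame_image and the 32-byte alignment are illustrative choices, not code from the diff):

    #include <libavutil/frame.h>
    #include <libavutil/imgutils.h>

    /* Allocate a destination image and copy a decoded frame into it without
     * going through an AVPicture. */
    static int copy_frame_image(uint8_t *dst[4], int dst_linesize[4],
                                const AVFrame *src)
    {
        enum AVPixelFormat fmt = src->format;
        int ret = av_image_alloc(dst, dst_linesize, src->width, src->height,
                                 fmt, 32);
        if (ret < 0)
            return ret;
        av_image_copy(dst, dst_linesize,
                      (const uint8_t **)src->data, src->linesize,
                      fmt, src->width, src->height);
        return 0;
    }
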
@@ -1637,7 +1617,7 @@ static int video_thread(void *arg)
         while (is->paused && !is->videoq.abort_request)
             SDL_Delay(10);
 
-        av_free_packet(&pkt);
+        av_packet_unref(&pkt);
 
         ret = get_video_frame(is, frame, &pts_int, &pkt);
         if (ret < 0)
@@ -1704,7 +1684,7 @@ static int video_thread(void *arg)
     av_freep(&vfilters);
     avfilter_graph_free(&graph);
 #endif
-    av_free_packet(&pkt);
+    av_packet_unref(&pkt);
     av_frame_free(&frame);
     return 0;
 }
@@ -1758,11 +1738,11 @@ static int subtitle_thread(void *arg)
             {
                 for (j = 0; j < sp->sub.rects[i]->nb_colors; j++)
                 {
-                    RGBA_IN(r, g, b, a, (uint32_t*)sp->sub.rects[i]->pict.data[1] + j);
+                    RGBA_IN(r, g, b, a, (uint32_t *)sp->sub.rects[i]->data[1] + j);
                     y = RGB_TO_Y_CCIR(r, g, b);
                     u = RGB_TO_U_CCIR(r, g, b, 0);
                     v = RGB_TO_V_CCIR(r, g, b, 0);
-                    YUVA_OUT((uint32_t*)sp->sub.rects[i]->pict.data[1] + j, y, u, v, a);
+                    YUVA_OUT((uint32_t *)sp->sub.rects[i]->data[1] + j, y, u, v, a);
                 }
             }
 
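
AVSubtitleRect likewise loses its embedded AVPicture: the bitmap and its palette are reached through data[] and linesize[] directly, which is what the RGBA-to-YCbCr conversion loop above is indexing. A sketch of reading a SUBTITLE_BITMAP rectangle through the new fields (the variable names are illustrative):

    const AVSubtitleRect *r = sp->sub.rects[i];
    const uint8_t  *indices = r->data[0];                   /* w x h palette indices    */
    const uint32_t *palette = (const uint32_t *)r->data[1]; /* nb_colors packed entries */
    int              pitch  = r->linesize[0];               /* stride of the index plane */
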
@@ -1773,7 +1753,7 @@ static int subtitle_thread(void *arg)
             is->subpq_size++;
             SDL_UnlockMutex(is->subpq_mutex);
         }
-        av_free_packet(pkt);
+        av_packet_unref(pkt);
     }
     return 0;
 }
@@ -2004,7 +1984,7 @@ static int audio_decode_frame(VideoState *is, double *pts_ptr)
 
         /* free the current packet */
         if (pkt->data)
-            av_free_packet(pkt);
+            av_packet_unref(pkt);
         memset(pkt_temp, 0, sizeof(*pkt_temp));
 
         if (is->paused || is->audioq.abort_request) {
@@ -2201,7 +2181,7 @@ static void stream_component_close(VideoState *is, int stream_index)
         SDL_CloseAudio();
 
         packet_queue_end(&is->audioq);
-        av_free_packet(&is->audio_pkt);
+        av_packet_unref(&is->audio_pkt);
         if (is->avr)
             avresample_free(&is->avr);
         av_freep(&is->audio_buf1);
@@ -2510,7 +2490,7 @@ static int decode_thread(void *arg)
         } else if (pkt->stream_index == is->subtitle_stream && pkt_in_play_range) {
             packet_queue_put(&is->subtitleq, pkt);
         } else {
-            av_free_packet(pkt);
+            av_packet_unref(pkt);
         }
     }
     /* wait until the end */