h264dec: track the last seen value of x264_build
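For the change named in the title, this file only gains the new h->x264_build field and its -1 initialization in h264_init_context(); the code that keeps the field up to date lives in the slice-header path, which is not part of this blob. A minimal sketch of what that update presumably looks like, assuming the value is taken from the unregistered user-data SEI (the h->sei.unregistered.x264_build field is an assumption here, not shown in this diff):

    /* hypothetical sketch, not part of this diff: remember the last x264
     * build number seen in an SEI, so encoder bug workarounds keep working
     * even after the per-access-unit SEI state is reset */
    if (h->sei.unregistered.x264_build >= 0)
        h->x264_build = h->sei.unregistered.x264_build;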
diff --git a/libavcodec/h264dec.c b/libavcodec/h264dec.c
index 1086eab8d3fc0a99b2bdee9a1463bdc091c386cf..7a8293efa5f8ac9341c3c864666eebb9e1cd9a92 100644
--- a/libavcodec/h264dec.c
+++ b/libavcodec/h264dec.c
@@ -36,6 +36,7 @@
 #include "cabac_functions.h"
 #include "error_resilience.h"
 #include "avcodec.h"
+#include "golomb_legacy.h"
 #include "h264.h"
 #include "h264dec.h"
 #include "h2645_parse.h"
@@ -43,7 +44,6 @@
 #include "h264chroma.h"
 #include "h264_mvpred.h"
 #include "h264_ps.h"
-#include "golomb.h"
 #include "mathops.h"
 #include "me_cmp.h"
 #include "mpegutils.h"
@@ -285,11 +285,15 @@ static int h264_init_context(AVCodecContext *avctx, H264Context *h)
 
     h->avctx                 = avctx;
 
+    h->width_from_caller     = avctx->width;
+    h->height_from_caller    = avctx->height;
+
     h->picture_structure     = PICT_FRAME;
     h->workaround_bugs       = avctx->workaround_bugs;
     h->flags                 = avctx->flags;
     h->poc.prev_poc_msb      = 1 << 16;
     h->recovery_frame        = -1;
+    h->x264_build            = -1;
     h->frame_recovered       = 0;
 
     h->next_outputed_poc = INT_MIN;
@@ -361,7 +365,7 @@ static av_cold int h264_decode_end(AVCodecContext *avctx)
 
 static AVOnce h264_vlc_init = AV_ONCE_INIT;
 
-av_cold int ff_h264_decode_init(AVCodecContext *avctx)
+static av_cold int h264_decode_init(AVCodecContext *avctx)
 {
     H264Context *h = avctx->priv_data;
     int ret;
@@ -449,7 +453,6 @@ void ff_h264_flush_change(H264Context *h)
     if (h->cur_pic_ptr)
         h->cur_pic_ptr->reference = 0;
     h->first_field = 0;
-    ff_h264_sei_uninit(&h->sei);
     h->recovery_frame = -1;
     h->frame_recovered = 0;
 }
@@ -463,6 +466,7 @@ static void flush_dpb(AVCodecContext *avctx)
     memset(h->delayed_pic, 0, sizeof(h->delayed_pic));
 
     ff_h264_flush_change(h);
+    ff_h264_sei_uninit(&h->sei);
 
     for (i = 0; i < H264_MAX_PICTURE_COUNT; i++)
         ff_h264_unref_picture(h, &h->DPB[i]);
@@ -478,7 +482,7 @@ static void flush_dpb(AVCodecContext *avctx)
 static int get_last_needed_nal(H264Context *h)
 {
     int nals_needed = 0;
-    int i;
+    int i, ret;
 
     for (i = 0; i < h->pkt.nb_nals; i++) {
         H2645NAL *nal = &h->pkt.nals[i];
@@ -496,7 +500,14 @@ static int get_last_needed_nal(H264Context *h)
         case H264_NAL_DPA:
         case H264_NAL_IDR_SLICE:
         case H264_NAL_SLICE:
-            init_get_bits(&gb, nal->data + 1, (nal->size - 1) * 8);
+            ret = init_get_bits8(&gb, nal->data + 1, nal->size - 1);
+            if (ret < 0) {
+                av_log(h->avctx, AV_LOG_ERROR, "Invalid zero-sized VCL NAL unit\n");
+                if (h->avctx->err_recognition & AV_EF_EXPLODE)
+                    return ret;
+
+                break;
+            }
             if (!get_ue_golomb(&gb))
                 nals_needed = i;
         }
@@ -513,6 +524,7 @@ static int decode_nal_units(H264Context *h, const uint8_t *buf, int buf_size)
 
     if (!(avctx->flags2 & AV_CODEC_FLAG2_CHUNKS)) {
         h->current_slice = 0;
+        h->field_started = 0;
         if (!h->first_field)
             h->cur_pic_ptr = NULL;
         ff_h264_sei_uninit(&h->sei);
@@ -523,7 +535,24 @@ static int decode_nal_units(H264Context *h, const uint8_t *buf, int buf_size)
     if (ret < 0) {
         av_log(avctx, AV_LOG_ERROR,
                "Error splitting the input into NAL units.\n");
-        return ret;
+
+        /* There are samples in the wild with mp4-style extradata, but Annex B
+         * data in the packets. If we fail parsing the packet as mp4, try it again
+         * as Annex B. */
+        if (h->is_avc && !(avctx->err_recognition & AV_EF_EXPLODE)) {
+            int err = ff_h2645_packet_split(&h->pkt, buf, buf_size, avctx, 0, 0,
+                                            avctx->codec_id);
+            if (err >= 0) {
+                av_log(avctx, AV_LOG_WARNING,
+                       "The stream seems to contain AVCC extradata with Annex B "
+                       "formatted data, which is invalid.");
+                h->is_avc = 0;
+                ret       = 0;
+            }
+        }
+
+        if (ret < 0)
+            return ret;
     }
 
     if (avctx->active_thread_type & FF_THREAD_FRAME)
@@ -549,17 +578,18 @@ static int decode_nal_units(H264Context *h, const uint8_t *buf, int buf_size)
             if ((err = ff_h264_queue_decode_slice(h, nal)))
                 break;
 
-            if (avctx->active_thread_type & FF_THREAD_FRAME && !h->avctx->hwaccel &&
-                i >= nals_needed) {
+            if (avctx->active_thread_type & FF_THREAD_FRAME &&
+                i >= nals_needed && !h->setup_finished && h->cur_pic_ptr) {
                 ff_thread_finish_setup(avctx);
                 h->setup_finished = 1;
             }
 
             max_slice_ctx = avctx->hwaccel ? 1 : h->nb_slice_ctx;
             if (h->nb_slice_ctx_queued == max_slice_ctx) {
-                if (avctx->hwaccel)
+                if (avctx->hwaccel) {
                     ret = avctx->hwaccel->decode_slice(avctx, nal->raw_data, nal->raw_size);
-                else
+                    h->nb_slice_ctx_queued = 0;
+                } else
                     ret = ff_h264_execute_decode_slices(h);
                 if (ret < 0 && (h->avctx->err_recognition & AV_EF_EXPLODE))
                     goto end;
@@ -633,26 +663,6 @@ static int get_consumed_bytes(int pos, int buf_size)
     return pos;
 }
 
-static int output_frame(H264Context *h, AVFrame *dst, AVFrame *src)
-{
-    int i;
-    int ret = av_frame_ref(dst, src);
-    if (ret < 0)
-        return ret;
-
-    if (!h->ps.sps || !h->ps.sps->crop)
-        return 0;
-
-    for (i = 0; i < 3; i++) {
-        int hshift = (i > 0) ? h->chroma_x_shift : 0;
-        int vshift = (i > 0) ? h->chroma_y_shift : 0;
-        int off    = ((h->ps.sps->crop_left >> hshift) << h->pixel_shift) +
-                     (h->ps.sps->crop_top >> vshift) * dst->linesize[i];
-        dst->data[i] += off;
-    }
-    return 0;
-}
-
 static int h264_decode_frame(AVCodecContext *avctx, void *data,
                              int *got_frame, AVPacket *avpkt)
 {
@@ -694,7 +704,7 @@ out:
             h->delayed_pic[i] = h->delayed_pic[i + 1];
 
         if (out) {
-            ret = output_frame(h, pict, out->f);
+            ret = av_frame_ref(pict, out->f);
             if (ret < 0)
                 return ret;
             *got_frame = 1;
@@ -732,11 +742,12 @@ out:
 
     if (!(avctx->flags2 & AV_CODEC_FLAG2_CHUNKS) ||
         (h->mb_y >= h->mb_height && h->mb_height)) {
-        ff_h264_field_end(h, &h->slice_ctx[0], 0);
+        if (h->field_started)
+            ff_h264_field_end(h, &h->slice_ctx[0], 0);
 
         *got_frame = 0;
         if (h->output_frame->buf[0]) {
-            ret = output_frame(h, pict, h->output_frame) ;
+            ret = av_frame_ref(pict, h->output_frame);
             av_frame_unref(h->output_frame);
             if (ret < 0)
                 return ret;
@@ -769,13 +780,13 @@ AVCodec ff_h264_decoder = {
     .type                  = AVMEDIA_TYPE_VIDEO,
     .id                    = AV_CODEC_ID_H264,
     .priv_data_size        = sizeof(H264Context),
-    .init                  = ff_h264_decode_init,
+    .init                  = h264_decode_init,
     .close                 = h264_decode_end,
     .decode                = h264_decode_frame,
     .capabilities          = /*AV_CODEC_CAP_DRAW_HORIZ_BAND |*/ AV_CODEC_CAP_DR1 |
                              AV_CODEC_CAP_DELAY | AV_CODEC_CAP_SLICE_THREADS |
                              AV_CODEC_CAP_FRAME_THREADS,
-    .caps_internal         = FF_CODEC_CAP_INIT_THREADSAFE,
+    .caps_internal         = FF_CODEC_CAP_INIT_THREADSAFE | FF_CODEC_CAP_EXPORTS_CROPPING,
     .flush                 = flush_dpb,
     .init_thread_copy      = ONLY_IF_THREADS_ENABLED(decode_init_thread_copy),
     .update_thread_context = ONLY_IF_THREADS_ENABLED(ff_h264_update_thread_context),
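Note on the cropping changes above: the removed output_frame() helper used to shift the plane pointers by the SPS crop offsets itself; with FF_CODEC_CAP_EXPORTS_CROPPING the decoder instead returns uncropped frames and exports the crop window through the generic AVFrame crop_top/bottom/left/right fields, to be applied either by libavcodec when avctx->apply_cropping is enabled or by the caller. A minimal caller-side sketch, assuming the standard av_frame_apply_cropping() helper from libavutil/frame.h:

    /* sketch: apply the decoder-exported crop window to the frame in place,
     * adjusting the data pointers and width/height accordingly */
    ret = av_frame_apply_cropping(frame, 0);
    if (ret < 0)
        return ret;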