diff --git a/libavcodec/svq3.c b/libavcodec/svq3.c
index d28b2d2f19bec5af9a0105e0e6382dace4646374..80bc46abdb76cf9a88658f690c3d7661fc11f291 100644
--- a/libavcodec/svq3.c
+++ b/libavcodec/svq3.c
@@ -313,26 +313,26 @@ static inline void svq3_mc_dir_part(SVQ3Context *s,
     }
 
     /* form component predictions */
-    dest = h->cur_pic.f.data[0] + x + y * h->linesize;
-    src  = pic->f.data[0] + mx + my * h->linesize;
+    dest = h->cur_pic.f->data[0] + x + y * sl->linesize;
+    src  = pic->f->data[0] + mx + my * sl->linesize;
 
     if (emu) {
         h->vdsp.emulated_edge_mc(sl->edge_emu_buffer, src,
-                                 h->linesize, h->linesize,
+                                 sl->linesize, sl->linesize,
                                  width + 1, height + 1,
                                  mx, my, s->h_edge_pos, s->v_edge_pos);
         src = sl->edge_emu_buffer;
     }
     if (thirdpel)
         (avg ? s->tdsp.avg_tpel_pixels_tab
-             : s->tdsp.put_tpel_pixels_tab)[dxy](dest, src, h->linesize,
+             : s->tdsp.put_tpel_pixels_tab)[dxy](dest, src, sl->linesize,
                                                  width, height);
     else
         (avg ? s->hdsp.avg_pixels_tab
-             : s->hdsp.put_pixels_tab)[blocksize][dxy](dest, src, h->linesize,
+             : s->hdsp.put_pixels_tab)[blocksize][dxy](dest, src, sl->linesize,
                                                        height);
 
-    if (!(h->flags & CODEC_FLAG_GRAY)) {
+    if (!(h->flags & AV_CODEC_FLAG_GRAY)) {
         mx     = mx + (mx < (int) x) >> 1;
         my     = my + (my < (int) y) >> 1;
         width  = width  >> 1;
@@ -340,12 +340,12 @@ static inline void svq3_mc_dir_part(SVQ3Context *s,
         blocksize++;
 
         for (i = 1; i < 3; i++) {
-            dest = h->cur_pic.f.data[i] + (x >> 1) + (y >> 1) * h->uvlinesize;
-            src  = pic->f.data[i] + mx + my * h->uvlinesize;
+            dest = h->cur_pic.f->data[i] + (x >> 1) + (y >> 1) * sl->uvlinesize;
+            src  = pic->f->data[i] + mx + my * sl->uvlinesize;
 
             if (emu) {
                 h->vdsp.emulated_edge_mc(sl->edge_emu_buffer, src,
-                                         h->uvlinesize, h->uvlinesize,
+                                         sl->uvlinesize, sl->uvlinesize,
                                          width + 1, height + 1,
                                          mx, my, (s->h_edge_pos >> 1),
                                          s->v_edge_pos >> 1);
@@ -354,12 +354,12 @@ static inline void svq3_mc_dir_part(SVQ3Context *s,
             if (thirdpel)
                 (avg ? s->tdsp.avg_tpel_pixels_tab
                      : s->tdsp.put_tpel_pixels_tab)[dxy](dest, src,
-                                                         h->uvlinesize,
+                                                         sl->uvlinesize,
                                                          width, height);
             else
                 (avg ? s->hdsp.avg_pixels_tab
                      : s->hdsp.put_pixels_tab)[blocksize][dxy](dest, src,
-                                                               h->uvlinesize,
+                                                               sl->uvlinesize,
                                                                height);
         }
     }
@@ -884,9 +884,30 @@ static av_cold int svq3_decode_init(AVCodecContext *avctx)
         return AVERROR(ENOMEM);
     }
 
+    s->cur_pic->f  = av_frame_alloc();
+    s->last_pic->f = av_frame_alloc();
+    s->next_pic->f = av_frame_alloc();
+    if (!s->cur_pic->f || !s->last_pic->f || !s->next_pic->f)
+        return AVERROR(ENOMEM);
+
     if (ff_h264_decode_init(avctx) < 0)
         return -1;
 
+    // we will overwrite it later during decoding
+    av_frame_free(&h->cur_pic.f);
+
+    ff_h264dsp_init(&h->h264dsp, 8, 1);
+    ff_h264chroma_init(&h->h264chroma, 8);
+    ff_h264qpel_init(&h->h264qpel, 8);
+    ff_h264_pred_init(&h->hpc, AV_CODEC_ID_SVQ3, 8, 1);
+    ff_videodsp_init(&h->vdsp, 8);
+
+    memset(h->pps.scaling_matrix4, 16, 6 * 16 * sizeof(uint8_t));
+    memset(h->pps.scaling_matrix8, 16, 2 * 64 * sizeof(uint8_t));
+
+    h->sps.bit_depth_luma = 8;
+    h->chroma_format_idc = 1;
+
     ff_hpeldsp_init(&s->hdsp, avctx->flags);
     ff_tpeldsp_init(&s->tdsp);
 
@@ -1054,7 +1075,7 @@ static void free_picture(AVCodecContext *avctx, H264Picture *pic)
     }
     av_buffer_unref(&pic->mb_type_buf);
 
-    av_frame_unref(&pic->f);
+    av_frame_unref(pic->f);
 }
 
 static int get_buffer(AVCodecContext *avctx, H264Picture *pic)
@@ -1090,19 +1111,19 @@ static int get_buffer(AVCodecContext *avctx, H264Picture *pic)
     }
     pic->reference = !(h->pict_type == AV_PICTURE_TYPE_B);
 
-    ret = ff_get_buffer(avctx, &pic->f,
+    ret = ff_get_buffer(avctx, pic->f,
                         pic->reference ? AV_GET_BUFFER_FLAG_REF : 0);
     if (ret < 0)
         goto fail;
 
     if (!sl->edge_emu_buffer) {
-        sl->edge_emu_buffer = av_mallocz(pic->f.linesize[0] * 17);
+        sl->edge_emu_buffer = av_mallocz(pic->f->linesize[0] * 17);
         if (!sl->edge_emu_buffer)
             return AVERROR(ENOMEM);
     }
 
-    h->linesize   = pic->f.linesize[0];
-    h->uvlinesize = pic->f.linesize[1];
+    sl->linesize   = pic->f->linesize[0];
+    sl->uvlinesize = pic->f->linesize[1];
 
     return 0;
 fail:
@@ -1122,8 +1143,8 @@ static int svq3_decode_frame(AVCodecContext *avctx, void *data,
 
     /* special case for last picture */
     if (buf_size == 0) {
-        if (s->next_pic->f.data[0] && !h->low_delay && !s->last_frame_output) {
-            ret = av_frame_ref(data, &s->next_pic->f);
+        if (s->next_pic->f->data[0] && !h->low_delay && !s->last_frame_output) {
+            ret = av_frame_ref(data, s->next_pic->f);
             if (ret < 0)
                 return ret;
             s->last_frame_output = 1;
@@ -1144,57 +1165,53 @@ static int svq3_decode_frame(AVCodecContext *avctx, void *data,
     if (h->pict_type != AV_PICTURE_TYPE_B)
         FFSWAP(H264Picture*, s->next_pic, s->last_pic);
 
-    av_frame_unref(&s->cur_pic->f);
+    av_frame_unref(s->cur_pic->f);
 
     /* for skipping the frame */
-    s->cur_pic->f.pict_type = h->pict_type;
-    s->cur_pic->f.key_frame = (h->pict_type == AV_PICTURE_TYPE_I);
+    s->cur_pic->f->pict_type = h->pict_type;
+    s->cur_pic->f->key_frame = (h->pict_type == AV_PICTURE_TYPE_I);
 
     ret = get_buffer(avctx, s->cur_pic);
     if (ret < 0)
         return ret;
 
     h->cur_pic_ptr = s->cur_pic;
-    av_frame_unref(&h->cur_pic.f);
     h->cur_pic     = *s->cur_pic;
-    ret = av_frame_ref(&h->cur_pic.f, &s->cur_pic->f);
-    if (ret < 0)
-        return ret;
 
     for (i = 0; i < 16; i++) {
-        h->block_offset[i]           = (4 * ((scan8[i] - scan8[0]) & 7)) + 4 * h->linesize * ((scan8[i] - scan8[0]) >> 3);
-        h->block_offset[48 + i]      = (4 * ((scan8[i] - scan8[0]) & 7)) + 8 * h->linesize * ((scan8[i] - scan8[0]) >> 3);
+        h->block_offset[i]           = (4 * ((scan8[i] - scan8[0]) & 7)) + 4 * sl->linesize * ((scan8[i] - scan8[0]) >> 3);
+        h->block_offset[48 + i]      = (4 * ((scan8[i] - scan8[0]) & 7)) + 8 * sl->linesize * ((scan8[i] - scan8[0]) >> 3);
     }
     for (i = 0; i < 16; i++) {
         h->block_offset[16 + i]      =
-        h->block_offset[32 + i]      = (4 * ((scan8[i] - scan8[0]) & 7)) + 4 * h->uvlinesize * ((scan8[i] - scan8[0]) >> 3);
+        h->block_offset[32 + i]      = (4 * ((scan8[i] - scan8[0]) & 7)) + 4 * sl->uvlinesize * ((scan8[i] - scan8[0]) >> 3);
         h->block_offset[48 + 16 + i] =
-        h->block_offset[48 + 32 + i] = (4 * ((scan8[i] - scan8[0]) & 7)) + 8 * h->uvlinesize * ((scan8[i] - scan8[0]) >> 3);
+        h->block_offset[48 + 32 + i] = (4 * ((scan8[i] - scan8[0]) & 7)) + 8 * sl->uvlinesize * ((scan8[i] - scan8[0]) >> 3);
     }
 
     if (h->pict_type != AV_PICTURE_TYPE_I) {
-        if (!s->last_pic->f.data[0]) {
+        if (!s->last_pic->f->data[0]) {
             av_log(avctx, AV_LOG_ERROR, "Missing reference frame.\n");
             ret = get_buffer(avctx, s->last_pic);
             if (ret < 0)
                 return ret;
-            memset(s->last_pic->f.data[0], 0, avctx->height * s->last_pic->f.linesize[0]);
-            memset(s->last_pic->f.data[1], 0x80, (avctx->height / 2) *
-                   s->last_pic->f.linesize[1]);
-            memset(s->last_pic->f.data[2], 0x80, (avctx->height / 2) *
-                   s->last_pic->f.linesize[2]);
+            memset(s->last_pic->f->data[0], 0, avctx->height * s->last_pic->f->linesize[0]);
+            memset(s->last_pic->f->data[1], 0x80, (avctx->height / 2) *
+                   s->last_pic->f->linesize[1]);
+            memset(s->last_pic->f->data[2], 0x80, (avctx->height / 2) *
+                   s->last_pic->f->linesize[2]);
         }
 
-        if (h->pict_type == AV_PICTURE_TYPE_B && !s->next_pic->f.data[0]) {
+        if (h->pict_type == AV_PICTURE_TYPE_B && !s->next_pic->f->data[0]) {
             av_log(avctx, AV_LOG_ERROR, "Missing reference frame.\n");
             ret = get_buffer(avctx, s->next_pic);
             if (ret < 0)
                 return ret;
-            memset(s->next_pic->f.data[0], 0, avctx->height * s->next_pic->f.linesize[0]);
-            memset(s->next_pic->f.data[1], 0x80, (avctx->height / 2) *
-                   s->next_pic->f.linesize[1]);
-            memset(s->next_pic->f.data[2], 0x80, (avctx->height / 2) *
-                   s->next_pic->f.linesize[2]);
+            memset(s->next_pic->f->data[0], 0, avctx->height * s->next_pic->f->linesize[0]);
+            memset(s->next_pic->f->data[1], 0x80, (avctx->height / 2) *
+                   s->next_pic->f->linesize[1]);
+            memset(s->next_pic->f->data[2], 0x80, (avctx->height / 2) *
+                   s->next_pic->f->linesize[2]);
         }
     }
 
@@ -1284,27 +1301,27 @@ static int svq3_decode_frame(AVCodecContext *avctx, void *data,
                     (h->pict_type == AV_PICTURE_TYPE_P && mb_type < 8) ? (mb_type - 1) : -1;
         }
 
-        ff_draw_horiz_band(avctx, &s->cur_pic->f,
-                           s->last_pic->f.data[0] ? &s->last_pic->f : NULL,
+        ff_draw_horiz_band(avctx, s->cur_pic->f,
+                           s->last_pic->f->data[0] ? s->last_pic->f : NULL,
                            16 * sl->mb_y, 16, h->picture_structure, 0,
                            h->low_delay);
     }
 
     if (h->pict_type == AV_PICTURE_TYPE_B || h->low_delay)
-        ret = av_frame_ref(data, &s->cur_pic->f);
-    else if (s->last_pic->f.data[0])
-        ret = av_frame_ref(data, &s->last_pic->f);
+        ret = av_frame_ref(data, s->cur_pic->f);
+    else if (s->last_pic->f->data[0])
+        ret = av_frame_ref(data, s->last_pic->f);
     if (ret < 0)
         return ret;
 
     /* Do not output the last pic after seeking. */
-    if (s->last_pic->f.data[0] || h->low_delay)
+    if (s->last_pic->f->data[0] || h->low_delay)
         *got_frame = 1;
 
     if (h->pict_type != AV_PICTURE_TYPE_B) {
         FFSWAP(H264Picture*, s->cur_pic, s->next_pic);
     } else {
-        av_frame_unref(&s->cur_pic->f);
+        av_frame_unref(s->cur_pic->f);
     }
 
     return buf_size;
@@ -1318,11 +1335,14 @@ static av_cold int svq3_decode_end(AVCodecContext *avctx)
     free_picture(avctx, s->cur_pic);
     free_picture(avctx, s->next_pic);
     free_picture(avctx, s->last_pic);
+    av_frame_free(&s->cur_pic->f);
+    av_frame_free(&s->next_pic->f);
+    av_frame_free(&s->last_pic->f);
     av_freep(&s->cur_pic);
     av_freep(&s->next_pic);
     av_freep(&s->last_pic);
 
-    av_frame_unref(&h->cur_pic.f);
+    memset(&h->cur_pic, 0, sizeof(h->cur_pic));
 
     ff_h264_free_context(h);
 
@@ -1338,9 +1358,9 @@ AVCodec ff_svq3_decoder = {
     .init           = svq3_decode_init,
     .close          = svq3_decode_end,
     .decode         = svq3_decode_frame,
-    .capabilities   = CODEC_CAP_DRAW_HORIZ_BAND |
-                      CODEC_CAP_DR1             |
-                      CODEC_CAP_DELAY,
+    .capabilities   = AV_CODEC_CAP_DRAW_HORIZ_BAND |
+                      AV_CODEC_CAP_DR1             |
+                      AV_CODEC_CAP_DELAY,
     .pix_fmts       = (const enum AVPixelFormat[]) { AV_PIX_FMT_YUVJ420P,
                                                      AV_PIX_FMT_NONE},
 };