diff --git a/libavcodec/vdpauvideo.c b/libavcodec/vdpauvideo.c
index ab4a04868efef6179909041c419ee602f5ecec01..8eb806a1a03eab2056a9dac1d0e7dc98c207be92 100644
--- a/libavcodec/vdpauvideo.c
+++ b/libavcodec/vdpauvideo.c
@@ -2,7 +2,7 @@
  * Video Decode and Presentation API for UNIX (VDPAU) is used for
  * HW decode acceleration for MPEG-1/2, H.264 and VC-1.
  *
- * Copyright (c) 2008 NVIDIA.
+ * Copyright (c) 2008 NVIDIA
  *
  * This file is part of FFmpeg.
  *
 #include <limits.h>
 #include "avcodec.h"
 #include "h264.h"
+#include "vc1.h"
 
 #undef NDEBUG
 #include <assert.h>
 
-#include "vdpau_render.h"
+#include "vdpau.h"
 #include "vdpau_internal.h"
 
 /**
@@ -37,9 +38,9 @@
  * @{
  */
 
-static void VDPAU_h264_set_reference_frames(H264Context *h)
+void ff_vdpau_h264_set_reference_frames(MpegEncContext *s)
 {
-    MpegEncContext * s = &h->s;
+    H264Context *h = s->avctx->priv_data;
     struct vdpau_render_state * render, * render_ref;
     VdpReferenceFrameH264 * rf, * rf2;
     Picture * pic;
@@ -106,32 +107,29 @@ static void VDPAU_h264_set_reference_frames(H264Context *h)
     }
 }
 
-void ff_VDPAU_h264_add_data_chunk(H264Context *h, const uint8_t *buf, int buf_size)
+void ff_vdpau_add_data_chunk(MpegEncContext *s,
+                             const uint8_t *buf, int buf_size)
 {
-    MpegEncContext * s = &h->s;
     struct vdpau_render_state * render;
 
     render = (struct vdpau_render_state*)s->current_picture_ptr->data[0];
     assert(render);
 
-    if (!render->bitstreamBuffersUsed)
-        VDPAU_h264_set_reference_frames(h);
-
-    render->bitstreamBuffers= av_fast_realloc(
-        render->bitstreamBuffers,
-        &render->bitstreamBuffersAllocated,
-        sizeof(*render->bitstreamBuffers)*(render->bitstreamBuffersUsed + 1)
+    render->bitstream_buffers= av_fast_realloc(
+        render->bitstream_buffers,
+        &render->bitstream_buffers_allocated,
+        sizeof(*render->bitstream_buffers)*(render->bitstream_buffers_used + 1)
     );
 
-    render->bitstreamBuffers[render->bitstreamBuffersUsed].struct_version  = VDP_BITSTREAM_BUFFER_VERSION;
-    render->bitstreamBuffers[render->bitstreamBuffersUsed].bitstream       = buf;
-    render->bitstreamBuffers[render->bitstreamBuffersUsed].bitstream_bytes = buf_size;
-    render->bitstreamBuffersUsed++;
+    render->bitstream_buffers[render->bitstream_buffers_used].struct_version  = VDP_BITSTREAM_BUFFER_VERSION;
+    render->bitstream_buffers[render->bitstream_buffers_used].bitstream       = buf;
+    render->bitstream_buffers[render->bitstream_buffers_used].bitstream_bytes = buf_size;
+    render->bitstream_buffers_used++;
 }
 
-void ff_VDPAU_h264_picture_complete(H264Context *h)
+void ff_vdpau_h264_picture_complete(MpegEncContext *s)
 {
-    MpegEncContext * s = &h->s;
+    H264Context *h = s->avctx->priv_data;
     struct vdpau_render_state * render;
 
     render = (struct vdpau_render_state*)s->current_picture_ptr->data[0];
@@ -177,7 +175,131 @@ void ff_VDPAU_h264_picture_complete(H264Context *h)
     memcpy(render->info.h264.scaling_lists_8x8, h->pps.scaling_matrix8, sizeof(render->info.h264.scaling_lists_8x8));
 
     ff_draw_horiz_band(s, 0, s->avctx->height);
-    render->bitstreamBuffersUsed = 0;
+    render->bitstream_buffers_used = 0;
+}
+
+void ff_vdpau_mpeg_picture_complete(MpegEncContext *s, const uint8_t *buf,
+                                    int buf_size, int slice_count)
+{
+    struct vdpau_render_state * render, * last, * next;
+    int i;
+
+    render = (struct vdpau_render_state*)s->current_picture_ptr->data[0];
+    assert(render);
+
+    /* fill VdpPictureInfoMPEG1Or2 struct */
+    render->info.mpeg.picture_structure          = s->picture_structure;
+    render->info.mpeg.picture_coding_type        = s->pict_type;
+    render->info.mpeg.intra_dc_precision         = s->intra_dc_precision;
+    render->info.mpeg.frame_pred_frame_dct       = s->frame_pred_frame_dct;
+    render->info.mpeg.concealment_motion_vectors = s->concealment_motion_vectors;
+    render->info.mpeg.intra_vlc_format           = s->intra_vlc_format;
+    render->info.mpeg.alternate_scan             = s->alternate_scan;
+    render->info.mpeg.q_scale_type               = s->q_scale_type;
+    render->info.mpeg.top_field_first            = s->top_field_first;
+    render->info.mpeg.full_pel_forward_vector    = s->full_pel[0]; // MPEG-1 only.  Set 0 for MPEG-2
+    render->info.mpeg.full_pel_backward_vector   = s->full_pel[1]; // MPEG-1 only.  Set 0 for MPEG-2
+    render->info.mpeg.f_code[0][0]               = s->mpeg_f_code[0][0]; // For MPEG-1 fill both horiz. & vert.
+    render->info.mpeg.f_code[0][1]               = s->mpeg_f_code[0][1];
+    render->info.mpeg.f_code[1][0]               = s->mpeg_f_code[1][0];
+    render->info.mpeg.f_code[1][1]               = s->mpeg_f_code[1][1];
+    for (i = 0; i < 64; ++i) {
+        render->info.mpeg.intra_quantizer_matrix[i]     = s->intra_matrix[i];
+        render->info.mpeg.non_intra_quantizer_matrix[i] = s->inter_matrix[i];
+    }
+
+    render->info.mpeg.forward_reference          = VDP_INVALID_HANDLE;
+    render->info.mpeg.backward_reference         = VDP_INVALID_HANDLE;
+
+    switch(s->pict_type){
+    case  FF_B_TYPE:
+        next = (struct vdpau_render_state*)s->next_picture.data[0];
+        assert(next);
+        render->info.mpeg.backward_reference     = next->surface;
+        // no break here, fall through to also set forward prediction
+    case  FF_P_TYPE:
+        last = (struct vdpau_render_state*)s->last_picture.data[0];
+        if (!last) // FIXME: Does this test make sense?
+            last = render; // predict second field from the first
+        render->info.mpeg.forward_reference      = last->surface;
+    }
+
+    ff_vdpau_add_data_chunk(s, buf, buf_size);
+
+    render->info.mpeg.slice_count                = slice_count;
+
+    if (slice_count)
+        ff_draw_horiz_band(s, 0, s->avctx->height);
+    render->bitstream_buffers_used               = 0;
+}
+
+void ff_vdpau_vc1_decode_picture(MpegEncContext *s, const uint8_t *buf,
+                                 int buf_size)
+{
+    VC1Context *v = s->avctx->priv_data;
+    struct vdpau_render_state * render, * last, * next;
+
+    render = (struct vdpau_render_state*)s->current_picture.data[0];
+    assert(render);
+
+    /* fill VdpPictureInfoVC1 struct */
+    render->info.vc1.frame_coding_mode  = v->fcm;
+    render->info.vc1.postprocflag       = v->postprocflag;
+    render->info.vc1.pulldown           = v->broadcast;
+    render->info.vc1.interlace          = v->interlace;
+    render->info.vc1.tfcntrflag         = v->tfcntrflag;
+    render->info.vc1.finterpflag        = v->finterpflag;
+    render->info.vc1.psf                = v->psf;
+    render->info.vc1.dquant             = v->dquant;
+    render->info.vc1.panscan_flag       = v->panscanflag;
+    render->info.vc1.refdist_flag       = v->refdist_flag;
+    render->info.vc1.quantizer          = v->quantizer_mode;
+    render->info.vc1.extended_mv        = v->extended_mv;
+    render->info.vc1.extended_dmv       = v->extended_dmv;
+    render->info.vc1.overlap            = v->overlap;
+    render->info.vc1.vstransform        = v->vstransform;
+    render->info.vc1.loopfilter         = v->s.loop_filter;
+    render->info.vc1.fastuvmc           = v->fastuvmc;
+    render->info.vc1.range_mapy_flag    = v->range_mapy_flag;
+    render->info.vc1.range_mapy         = v->range_mapy;
+    render->info.vc1.range_mapuv_flag   = v->range_mapuv_flag;
+    render->info.vc1.range_mapuv        = v->range_mapuv;
+    /* Specific to simple/main profile only */
+    render->info.vc1.multires           = v->multires;
+    render->info.vc1.syncmarker         = v->s.resync_marker;
+    render->info.vc1.rangered           = v->rangered;
+    render->info.vc1.maxbframes         = v->s.max_b_frames;
+
+    render->info.vc1.deblockEnable      = v->postprocflag & 1;
+    render->info.vc1.pquant             = v->pq;
+
+    render->info.vc1.forward_reference  = VDP_INVALID_HANDLE;
+    render->info.vc1.backward_reference = VDP_INVALID_HANDLE;
+
+    if (v->bi_type)
+        render->info.vc1.picture_type = 4;
+    else
+        render->info.vc1.picture_type = s->pict_type - 1 + s->pict_type / 3;
+
+    switch(s->pict_type){
+    case  FF_B_TYPE:
+        next = (struct vdpau_render_state*)s->next_picture.data[0];
+        assert(next);
+        render->info.vc1.backward_reference = next->surface;
+        // no break here, going to set forward prediction
+    case  FF_P_TYPE:
+        last = (struct vdpau_render_state*)s->last_picture.data[0];
+        if (!last) // FIXME: Does this test make sense?
+            last = render; // predict second field from the first
+        render->info.vc1.forward_reference = last->surface;
+    }
+
+    ff_vdpau_add_data_chunk(s, buf, buf_size);
+
+    render->info.vc1.slice_count          = 1;
+
+    ff_draw_horiz_band(s, 0, s->avctx->height);
+    render->bitstream_buffers_used        = 0;
 }
 
 /* @}*/
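
Caller-side note (not part of the patch): since reference-frame setup is no longer triggered implicitly from the first ff_vdpau_add_data_chunk() call, an H.264 decoder running in VDPAU mode now has to drive the three helpers itself. Below is a minimal sketch of that per-picture sequence; the nals/nal_sizes arrays, the 3-byte start code and the include set are assumptions about the surrounding decoder, not code from this commit.

#include "mpegvideo.h"
#include "vdpau_internal.h"

/* Hypothetical per-picture driver for the helpers added/renamed above. */
static void h264_vdpau_submit_picture(MpegEncContext *s,
                                      const uint8_t *const *nals,
                                      const int *nal_sizes, int nb_nals)
{
    static const uint8_t start_code[] = { 0x00, 0x00, 0x01 };
    int i;

    /* Build the VdpReferenceFrameH264 list once per picture; this used to
     * happen implicitly inside ff_vdpau_add_data_chunk(). */
    ff_vdpau_h264_set_reference_frames(s);

    /* Queue each slice NAL as two VdpBitstreamBuffer entries:
     * a start code followed by the slice payload. */
    for (i = 0; i < nb_nals; i++) {
        ff_vdpau_add_data_chunk(s, start_code, sizeof(start_code));
        ff_vdpau_add_data_chunk(s, nals[i], nal_sizes[i]);
    }

    /* Fill VdpPictureInfoH264, hand the picture to the application via
     * draw_horiz_band() and reset the bitstream buffer list. */
    ff_vdpau_h264_picture_complete(s);
}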
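
On the application side, each picture-complete helper ends by calling ff_draw_horiz_band(), at which point the struct vdpau_render_state reachable through the frame's data[0] holds everything VdpDecoderRender() needs; bitstream_buffers_used is reset as soon as that callback returns, so the submission has to happen inside it. A minimal consumer sketch, assuming the application has already created a VdpDecoder and fetched the VdpDecoderRender function pointer through VdpGetProcAddress(); the decoder, frame and error handling here are illustrative, not part of this change.

#include <stdio.h>
#include <vdpau/vdpau.h>
#include "libavcodec/avcodec.h"
#include "libavcodec/vdpau.h"

/* Called from the application's draw_horiz_band()/draw_slice callback. */
static void submit_to_vdpau(VdpDecoderRender *vdp_decoder_render,
                            VdpDecoder decoder, const AVFrame *frame)
{
    const struct vdpau_render_state *state =
        (const struct vdpau_render_state *)frame->data[0];
    VdpStatus st;

    /* info, bitstream_buffers and bitstream_buffers_used were filled by the
     * ff_vdpau_* helpers above; surface is the target VdpVideoSurface. */
    st = vdp_decoder_render(decoder, state->surface,
                            (const VdpPictureInfo *)&state->info,
                            state->bitstream_buffers_used,
                            state->bitstream_buffers);
    if (st != VDP_STATUS_OK)
        fprintf(stderr, "VdpDecoderRender failed: %d\n", st);
}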