Use vaCreateImage + vaPutImage instead of vaDeriveImage.
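As context for the change, here is a minimal, self-contained sketch of the vaCreateImage() + vaPutImage() upload pattern this commit switches to. It is not taken from the nageru source; the helper names (upload_uyvy, copy_with_pitch) and the simplified error handling are illustrative only, and the caller is assumed to already own a VADisplay and a VASurfaceID.

#include <va/va.h>

#include <cstdint>
#include <cstring>
#include <vector>

// Copy 'height' rows of 'src_width' bytes each into a destination whose rows are
// 'dst_pitch' bytes apart (the VAImage pitch is chosen by the driver and may be
// larger than the packed row size).
static void copy_with_pitch(uint8_t *dst, const uint8_t *src, size_t src_width,
                            size_t dst_pitch, size_t height)
{
	for (size_t y = 0; y < height; ++y) {
		memcpy(dst + y * dst_pitch, src + y * src_width, src_width);
	}
}

// Upload packed UYVY pixels (width * 2 bytes per row) to 'surface' by way of a
// separate VAImage. Returns false on any VA-API error.
static bool upload_uyvy(VADisplay va_dpy, VASurfaceID surface,
                        const uint8_t *uyvy, unsigned width, unsigned height)
{
	// Find the driver's UYVY image format.
	int num_formats = vaMaxNumImageFormats(va_dpy);
	std::vector<VAImageFormat> formats(num_formats);
	if (vaQueryImageFormats(va_dpy, formats.data(), &num_formats) != VA_STATUS_SUCCESS) {
		return false;
	}
	VAImageFormat uyvy_format;
	bool found = false;
	for (int i = 0; i < num_formats; ++i) {
		if (formats[i].fourcc == VA_FOURCC_UYVY) {
			uyvy_format = formats[i];
			found = true;
			break;
		}
	}
	if (!found) {
		return false;
	}

	// Create a standalone VAImage instead of deriving one from the surface.
	VAImage image;
	if (vaCreateImage(va_dpy, &uyvy_format, width, height, &image) != VA_STATUS_SUCCESS) {
		return false;
	}

	// Map the image's backing buffer and copy the pixels in, honoring the pitch.
	uint8_t *image_ptr = nullptr;
	if (vaMapBuffer(va_dpy, image.buf, (void **)&image_ptr) != VA_STATUS_SUCCESS) {
		vaDestroyImage(va_dpy, image.image_id);
		return false;
	}
	copy_with_pitch(image_ptr + image.offsets[0], uyvy, width * 2,
	                image.pitches[0], height);
	vaUnmapBuffer(va_dpy, image.buf);

	// vaPutImage() asks the driver to copy the image into the surface (typically
	// a GPU-side blit), as opposed to writing straight into the surface's own
	// storage through vaDeriveImage().
	VAStatus va_status = vaPutImage(va_dpy, surface, image.image_id,
	                                0, 0, width, height,
	                                0, 0, width, height);
	vaDestroyImage(va_dpy, image.image_id);
	return va_status == VA_STATUS_SUCCESS;
}

The diff below applies the same idea inside MJPEGEncoder, except that the VAImage is created once per VAResources object in get_va_resources() and reused across frames rather than created and destroyed per upload.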
diff --git a/nageru/mjpeg_encoder.cpp b/nageru/mjpeg_encoder.cpp
index 9ae018f81059584621bb383b3f4d6b30c01df0f4..01173e0ac1ed723b6740ce4e5340f3447ca3742e 100644
--- a/nageru/mjpeg_encoder.cpp
+++ b/nageru/mjpeg_encoder.cpp
@@ -28,6 +28,8 @@ extern "C" {
 using namespace bmusb;
 using namespace std;
 
+static VAImageFormat uyvy_format;
+
 extern void memcpy_with_pitch(uint8_t *dst, const uint8_t *src, size_t src_width, size_t dst_pitch, size_t height);
 
 // From libjpeg (although it's of course identical between implementations).
@@ -238,6 +240,7 @@ unique_ptr<VADisplayWithCleanup> MJPEGEncoder::try_open_va(const string &va_disp
                return nullptr;
        }
 
+       // TODO: Unify with the code in Futatabi.
        int num_formats = vaMaxNumImageFormats(va_dpy->va_dpy);
        assert(num_formats > 0);
 
@@ -250,6 +253,19 @@ unique_ptr<VADisplayWithCleanup> MJPEGEncoder::try_open_va(const string &va_disp
                return nullptr;
        }
 
+       bool found = false;
+       for (int i = 0; i < num_formats; ++i) {
+               if (formats[i].fourcc == VA_FOURCC_UYVY) {
+                       memcpy(&uyvy_format, &formats[i], sizeof(VAImageFormat));
+                       found = true;
+                       break;
+               }
+       }
+       if (!found) {
+               if (error != nullptr) *error = "UYVY format not found";
+               return nullptr;
+       }
+
        return va_dpy;
 }
 
@@ -295,12 +311,9 @@ void MJPEGEncoder::finish_frame(RefCountedFrame frame)
        if (userdata->data_copy_current_src == PBOFrameAllocator::Userdata::FROM_VA_API) {
                VAResources resources __attribute__((unused)) = move(userdata->va_resources);
                ReleaseVAResources release = move(userdata->va_resources_release);
-               VAImage image = move(userdata->va_image);
 
-               VAStatus va_status = vaUnmapBuffer(va_dpy->va_dpy, image.buf);
+               VAStatus va_status = vaUnmapBuffer(va_dpy->va_dpy, resources.image.buf);
                CHECK_VASTATUS(va_status, "vaUnmapBuffer");
-               va_status = vaDestroyImage(va_dpy->va_dpy, image.image_id);
-               CHECK_VASTATUS(va_status, "vaDestroyImage");
        }
 }
 
@@ -421,6 +434,9 @@ MJPEGEncoder::VAResources MJPEGEncoder::get_va_resources(unsigned width, unsigne
        va_status = vaCreateBuffer(va_dpy->va_dpy, config_id, VAEncCodedBufferType, width * height * 3 + 8192, 1, nullptr, &ret.data_buffer);
        CHECK_VASTATUS(va_status, "vaCreateBuffer");
 
+       va_status = vaCreateImage(va_dpy->va_dpy, &uyvy_format, width, height, &ret.image);
+       CHECK_VASTATUS(va_status, "vaCreateImage");
+
        return ret;
 }
 
@@ -660,34 +676,36 @@ void MJPEGEncoder::encode_jpeg_va(QueuedFrame &&qf)
        CHECK_VASTATUS(va_status, "vaCreateBuffer");
        VABufferDestroyer destroy_slice_param(va_dpy->va_dpy, slice_param_buffer);
 
-       VAImage image;
        if (userdata->data_copy_current_src == PBOFrameAllocator::Userdata::FROM_VA_API) {
-               // The pixel data is already uploaded by the caller.
-               image = move(userdata->va_image);
+               va_status = vaUnmapBuffer(va_dpy->va_dpy, resources.image.buf);
+               CHECK_VASTATUS(va_status, "vaUnmapBuffer");
+               // The pixel data is already put into the image by the caller.
        } else {
                assert(userdata->data_copy_current_src == PBOFrameAllocator::Userdata::FROM_MALLOC);
 
                // Upload the pixel data.
-               va_status = vaDeriveImage(va_dpy->va_dpy, resources.surface, &image);
-               CHECK_VASTATUS(va_status, "vaDeriveImage");
-
                uint8_t *surface_p = nullptr;
-               vaMapBuffer(va_dpy->va_dpy, image.buf, (void **)&surface_p);
+               vaMapBuffer(va_dpy->va_dpy, resources.image.buf, (void **)&surface_p);
 
                size_t field_start_line = qf.video_format.extra_lines_top;  // No interlacing support.
                size_t field_start = qf.cbcr_offset * 2 + qf.video_format.width * field_start_line * 2;
 
                {
                        const uint8_t *src = qf.frame->data_copy + field_start;
-                       uint8_t *dst = (unsigned char *)surface_p + image.offsets[0];
-                       memcpy_with_pitch(dst, src, qf.video_format.width * 2, image.pitches[0], qf.video_format.height);
+                       uint8_t *dst = (unsigned char *)surface_p + resources.image.offsets[0];
+                       memcpy_with_pitch(dst, src, qf.video_format.width * 2, resources.image.pitches[0], qf.video_format.height);
                }
+
+               va_status = vaUnmapBuffer(va_dpy->va_dpy, resources.image.buf);
+               CHECK_VASTATUS(va_status, "vaUnmapBuffer");
        }
 
-       va_status = vaUnmapBuffer(va_dpy->va_dpy, image.buf);
-       CHECK_VASTATUS(va_status, "vaUnmapBuffer");
-       va_status = vaDestroyImage(va_dpy->va_dpy, image.image_id);
-       CHECK_VASTATUS(va_status, "vaDestroyImage");
+       // Seemingly vaPutImage() (which triggers a GPU copy) is much nicer to the
+       // CPU than vaDeriveImage() and copying directly into the GPU's buffers.
+       // Exactly why is unclear, but it seems to involve L3 cache usage when there
+       // are many high-res (1080p+) images in play.
+       va_status = vaPutImage(va_dpy->va_dpy, resources.surface, resources.image.image_id, 0, 0, width, height, 0, 0, width, height);
+       CHECK_VASTATUS(va_status, "vaPutImage");
 
        // Finally, stick in the JPEG header.
        VAEncPackedHeaderParameterBuffer header_parm;