In the MJPEG export, include white balance information.
author    Steinar H. Gunderson <sgunderson@bigfoot.com>
Mon, 10 Feb 2020 17:52:49 +0000 (18:52 +0100)
committer Steinar H. Gunderson <sgunderson@bigfoot.com>
Tue, 11 Feb 2020 17:16:24 +0000 (18:16 +0100)
We do this by converting it to an x-y white point and storing it in Exif,
which is the closest thing I could find to a standard in this space.
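
For reference, the conversion the patch performs is: multiply the per-channel (linear) gains by an RGB-to-XYZ matrix, normalize so that x + y + z = 1, and store x and y as RATIONALs with denominator 10000 under the Exif WhitePoint tag (0x013E). Below is a minimal standalone sketch of the same math, not part of the patch, assuming standard sRGB primaries with a D65 white point in place of movit's ColorspaceConversionEffect::get_xyz_matrix():

    // Illustrative sketch only; not part of the patch. Assumes sRGB primaries (D65).
    #include <cmath>
    #include <cstdint>
    #include <cstdio>
    #include <utility>

    std::pair<uint32_t, uint32_t> white_point_rationals(float r, float g, float b)
    {
            // Linear RGB -> XYZ, standard sRGB/D65 matrix.
            double X = 0.4124564 * r + 0.3575761 * g + 0.1804375 * b;
            double Y = 0.2126729 * r + 0.7151522 * g + 0.0721750 * b;
            double Z = 0.0193339 * r + 0.1191920 * g + 0.9503041 * b;

            // XYZ -> xy chromaticity; z is implicit, since x + y + z = 1.
            double sum = X + Y + Z;
            double x = X / sum, y = Y / sum;

            // The patch stores each coordinate as a RATIONAL with denominator 10000.
            return { uint32_t(std::lround(x * 10000.0)), uint32_t(std::lround(y * 10000.0)) };
    }

    int main()
    {
            std::pair<uint32_t, uint32_t> wp = white_point_rationals(1.0f, 1.0f, 1.0f);
            std::printf("WhitePoint = %u/10000, %u/10000\n", (unsigned)wp.first, (unsigned)wp.second);
    }

Neutral gains (1, 1, 1) come out as roughly x = 0.3127, y = 0.3290, i.e., the D65 white point.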

nageru/mixer.cpp
nageru/mjpeg_encoder.cpp
nageru/mjpeg_encoder.h

diff --git a/nageru/mixer.cpp b/nageru/mixer.cpp
index b0bb58494165d0920a0d0f11bcf4ebc6f65a706c..1457aa92316e56400563feeea8a0185b5c76fb4e 100644
--- a/nageru/mixer.cpp
+++ b/nageru/mixer.cpp
@@ -1118,7 +1118,8 @@ void Mixer::thread_func()
                        if (new_frame->frame->data_copy != nullptr) {
                                int mjpeg_card_index = mjpeg_encoder->get_mjpeg_stream_for_card(card_index);
                                if (mjpeg_card_index != -1) {
-                                       mjpeg_encoder->upload_frame(pts_int, mjpeg_card_index, new_frame->frame, new_frame->video_format, new_frame->y_offset, new_frame->cbcr_offset, move(raw_audio[card_index]));
+                                       RGBTriplet white_balance = theme->get_white_balance_for_signal(card_index);
+                                       mjpeg_encoder->upload_frame(pts_int, mjpeg_card_index, new_frame->frame, new_frame->video_format, new_frame->y_offset, new_frame->cbcr_offset, move(raw_audio[card_index]), white_balance);
                                }
                        }
                }
diff --git a/nageru/mjpeg_encoder.cpp b/nageru/mjpeg_encoder.cpp
index e39e802c843cbef0bcd6f130db17702d722c2ce3..39a5959ed3bfeeedbd6b5715ba93b180239f1457 100644
--- a/nageru/mjpeg_encoder.cpp
+++ b/nageru/mjpeg_encoder.cpp
@@ -21,11 +21,15 @@ extern "C" {
 #include "shared/timebase.h"
 #include "va_display_with_cleanup.h"
 
+#include <movit/colorspace_conversion_effect.h>
+
 #include <va/va.h>
 #include <va/va_drm.h>
 #include <va/va_x11.h>
 
+using namespace Eigen;
 using namespace bmusb;
+using namespace movit;
 using namespace std;
 
 static VAImageFormat uyvy_format;
@@ -286,7 +290,7 @@ unique_ptr<VADisplayWithCleanup> MJPEGEncoder::try_open_va(const string &va_disp
        return va_dpy;
 }
 
-void MJPEGEncoder::upload_frame(int64_t pts, unsigned card_index, RefCountedFrame frame, const bmusb::VideoFormat &video_format, size_t y_offset, size_t cbcr_offset, vector<int32_t> audio)
+void MJPEGEncoder::upload_frame(int64_t pts, unsigned card_index, RefCountedFrame frame, const bmusb::VideoFormat &video_format, size_t y_offset, size_t cbcr_offset, vector<int32_t> audio, const RGBTriplet &white_balance)
 {
        PBOFrameAllocator::Userdata *userdata = (PBOFrameAllocator::Userdata *)frame->userdata;
        if (video_format.width == 0 || video_format.height == 0) {
@@ -317,7 +321,7 @@ void MJPEGEncoder::upload_frame(int64_t pts, unsigned card_index, RefCountedFram
                return;
        }
        ++metric_mjpeg_overrun_submitted;
-       frames_to_be_encoded.push(QueuedFrame{ pts, card_index, frame, video_format, y_offset, cbcr_offset, move(audio) });
+       frames_to_be_encoded.push(QueuedFrame{ pts, card_index, frame, video_format, y_offset, cbcr_offset, move(audio), white_balance });
        any_frames_to_be_encoded.notify_all();
 }
 
@@ -495,7 +499,25 @@ void MJPEGEncoder::release_va_resources(MJPEGEncoder::VAResources resources)
        va_resources_freelist.push_front(resources);
 }
 
-void MJPEGEncoder::init_jpeg_422(unsigned width, unsigned height, VectorDestinationManager *dest, jpeg_compress_struct *cinfo)
+namespace {
+
+void push16(uint16_t val, string *str)
+{
+       str->push_back(val >> 8);
+       str->push_back(val & 0xff);
+}
+
+void push32(uint32_t val, string *str)
+{
+       str->push_back(val >> 24);
+       str->push_back((val >> 16) & 0xff);
+       str->push_back((val >> 8) & 0xff);
+       str->push_back(val & 0xff);
+}
+
+}  // namespace
+
+void MJPEGEncoder::init_jpeg_422(unsigned width, unsigned height, const RGBTriplet &white_balance, VectorDestinationManager *dest, jpeg_compress_struct *cinfo)
 {
        jpeg_error_mgr jerr;
        cinfo->err = jpeg_std_error(&jerr);
@@ -520,15 +542,70 @@ void MJPEGEncoder::init_jpeg_422(unsigned width, unsigned height, VectorDestinat
        cinfo->CCIR601_sampling = true;  // Seems to be mostly ignored by libjpeg, though.
        jpeg_start_compress(cinfo, true);
 
+       if (fabs(white_balance.r - 1.0f) > 1e-3 ||
+           fabs(white_balance.g - 1.0f) > 1e-3 ||
+           fabs(white_balance.b - 1.0f) > 1e-3) {
+               // Convert from (linear) RGB to XYZ.
+               Matrix3d rgb_to_xyz_matrix = movit::ColorspaceConversionEffect::get_xyz_matrix(COLORSPACE_sRGB);
+               Vector3d xyz = rgb_to_xyz_matrix * Vector3d(white_balance.r, white_balance.g, white_balance.b);
+
+               // Convert from XYZ to xyz by normalizing.
+               xyz /= (xyz[0] + xyz[1] + xyz[2]);
+
+               // Create a very rudimentary EXIF header to hold our white point.
+               string exif;
+
+               // Exif header, followed by some padding.
+               exif = "Exif";
+               push16(0, &exif);
+
+               // TIFF header first:
+               exif += "MM";  // Big endian.
+
+               // Magic number.
+               push16(42, &exif);
+
+               // Offset of first IFD (relative to the MM, immediately after the header).
+               push32(exif.size() - 6 + 4, &exif);
+
+               // Now the actual IFD.
+
+               // One entry.
+               push16(1, &exif);
+
+               // WhitePoint tag ID.
+               push16(0x13e, &exif);
+
+               // Rational type.
+               push16(5, &exif);
+
+               // Two values (x and y; z is implicit due to normalization).
+               push32(2, &exif);
+
+               // Offset (relative to the MM, immediately after the last IFD).
+               push32(exif.size() - 6 + 8, &exif);
+
+               // No more IFDs.
+               push32(0, &exif);
+
+               // The actual values.
+               push32(lrintf(xyz[0] * 10000.0f), &exif);
+               push32(10000, &exif);
+               push32(lrintf(xyz[1] * 10000.0f), &exif);
+               push32(10000, &exif);
+
+               jpeg_write_marker(cinfo, JPEG_APP0 + 1, (const JOCTET *)exif.data(), exif.size());
+       }
+
        // This comment marker is private to FFmpeg. It signals limited Y'CbCr range
        // (and nothing else).
        jpeg_write_marker(cinfo, JPEG_COM, (const JOCTET *)"CS=ITU601", strlen("CS=ITU601"));
 }
 
-vector<uint8_t> MJPEGEncoder::get_jpeg_header(unsigned width, unsigned height, jpeg_compress_struct *cinfo)
+vector<uint8_t> MJPEGEncoder::get_jpeg_header(unsigned width, unsigned height, const RGBTriplet &white_balance, jpeg_compress_struct *cinfo)
 {
        VectorDestinationManager dest;
-       init_jpeg_422(width, height, &dest, cinfo);
+       init_jpeg_422(width, height, white_balance, &dest, cinfo);
 
        // Make a dummy black image; there's seemingly no other easy way of
        // making libjpeg outputting all of its headers.
@@ -560,7 +637,7 @@ vector<uint8_t> MJPEGEncoder::get_jpeg_header(unsigned width, unsigned height, j
        return dest.dest;
 }
 
-MJPEGEncoder::VAData MJPEGEncoder::get_va_data_for_resolution(unsigned width, unsigned height)
+MJPEGEncoder::VAData MJPEGEncoder::get_va_data_for_resolution(unsigned width, unsigned height, const RGBTriplet &white_balance)
 {
        pair<unsigned, unsigned> key(width, height);
        if (va_data_for_resolution.count(key)) {
@@ -570,7 +647,7 @@ MJPEGEncoder::VAData MJPEGEncoder::get_va_data_for_resolution(unsigned width, un
        // Use libjpeg to generate a header and set sane defaults for e.g.
        // quantization tables. Then do the actual encode with VA-API.
        jpeg_compress_struct cinfo;
-       vector<uint8_t> jpeg_header = get_jpeg_header(width, height, &cinfo);
+       vector<uint8_t> jpeg_header = get_jpeg_header(width, height, white_balance, &cinfo);
 
        // Picture parameters.
        VAEncPictureParameterBufferJPEG pic_param;
@@ -686,7 +763,7 @@ void MJPEGEncoder::encode_jpeg_va(QueuedFrame &&qf)
                release = ReleaseVAResources(this, resources);
        }
 
-       VAData va_data = get_va_data_for_resolution(width, height);
+       VAData va_data = get_va_data_for_resolution(width, height, qf.white_balance);
        va_data.pic_param.coded_buf = resources.data_buffer;
 
        VABufferID pic_param_buffer;
@@ -822,7 +899,7 @@ vector<uint8_t> MJPEGEncoder::encode_jpeg_libjpeg(const QueuedFrame &qf)
 
        VectorDestinationManager dest;
        jpeg_compress_struct cinfo;
-       init_jpeg_422(width, height, &dest, &cinfo);
+       init_jpeg_422(width, height, qf.white_balance, &dest, &cinfo);
 
        size_t field_start_line = qf.video_format.extra_lines_top;  // No interlacing support.
        size_t field_start = qf.cbcr_offset * 2 + qf.video_format.width * field_start_line * 2;
diff --git a/nageru/mjpeg_encoder.h b/nageru/mjpeg_encoder.h
index fe41929b0b128c13833b9761ab98766103e48e3e..8a0d8fadb6b198fc8e6e2104eb8da41da2e17ce1 100644
--- a/nageru/mjpeg_encoder.h
+++ b/nageru/mjpeg_encoder.h
@@ -39,7 +39,7 @@ public:
        MJPEGEncoder(HTTPD *httpd, const std::string &va_display);
        ~MJPEGEncoder();
        void stop();
-       void upload_frame(int64_t pts, unsigned card_index, RefCountedFrame frame, const bmusb::VideoFormat &video_format, size_t y_offset, size_t cbcr_offset, std::vector<int32_t> audio);
+       void upload_frame(int64_t pts, unsigned card_index, RefCountedFrame frame, const bmusb::VideoFormat &video_format, size_t y_offset, size_t cbcr_offset, std::vector<int32_t> audio, const movit::RGBTriplet &white_balance);
        bool using_vaapi() const { return va_dpy != nullptr; }
 
        // Returns -1 for inactive (ie., don't encode frames for this card right now).
@@ -106,6 +106,7 @@ private:
                bmusb::VideoFormat video_format;
                size_t y_offset, cbcr_offset;
                std::vector<int32_t> audio;
+               movit::RGBTriplet white_balance;
 
                // Only for frames in the process of being encoded by VA-API.
                VAResources resources;
@@ -118,8 +119,8 @@ private:
        std::vector<uint8_t> encode_jpeg_libjpeg(const QueuedFrame &qf);
        void write_mjpeg_packet(int64_t pts, unsigned card_index, const uint8_t *jpeg, size_t jpeg_size);
        void write_audio_packet(int64_t pts, unsigned card_index, const std::vector<int32_t> &audio);
-       void init_jpeg_422(unsigned width, unsigned height, VectorDestinationManager *dest, jpeg_compress_struct *cinfo);
-       std::vector<uint8_t> get_jpeg_header(unsigned width, unsigned height, jpeg_compress_struct *cinfo);
+       void init_jpeg_422(unsigned width, unsigned height, const movit::RGBTriplet &white_balance, VectorDestinationManager *dest, jpeg_compress_struct *cinfo);
+       std::vector<uint8_t> get_jpeg_header(unsigned width, unsigned height, const movit::RGBTriplet &white_balance, jpeg_compress_struct *cinfo);
 
        static int write_packet2_thunk(void *opaque, uint8_t *buf, int buf_size, AVIODataMarkerType type, int64_t time);
        int write_packet2(uint8_t *buf, int buf_size, AVIODataMarkerType type, int64_t time);
@@ -150,7 +151,7 @@ private:
                VAEncSliceParameterBufferJPEG parms;
        };
        std::map<std::pair<unsigned, unsigned>, VAData> va_data_for_resolution;
-       VAData get_va_data_for_resolution(unsigned width, unsigned height);
+       VAData get_va_data_for_resolution(unsigned width, unsigned height, const movit::RGBTriplet &white_balance);
 
        std::list<VAResources> va_resources_freelist;
        std::mutex va_resources_mutex;