#include "mjpeg_encoder.h"
+#include <assert.h>
#include <jpeglib.h>
#include <unistd.h>
#if __SSE2__
#include <immintrin.h>
#endif
#include "shared/timebase.h"
#include "va_display_with_cleanup.h"
+#include <movit/colorspace_conversion_effect.h>
+
#include <va/va.h>
#include <va/va_drm.h>
#include <va/va_x11.h>
+using namespace Eigen;
using namespace bmusb;
+using namespace movit;
using namespace std;
+static VAImageFormat uyvy_format;
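+// Filled in by try_open_va(); used when creating VAImages for upload.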
+
extern void memcpy_with_pitch(uint8_t *dst, const uint8_t *src, size_t src_width, size_t dst_pitch, size_t height);
int MJPEGEncoder::write_packet2_thunk(void *opaque, uint8_t *buf, int buf_size, AVIODataMarkerType type, int64_t time)
{
- MJPEGEncoder *engine = (MJPEGEncoder *)opaque;
- return engine->write_packet2(buf, buf_size, type, time);
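+	// The opaque pointer now carries a per-stream context instead of just the
+	// encoder, so output can be routed to the right HTTP stream.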
+ WritePacket2Context *ctx = (WritePacket2Context *)opaque;
+ return ctx->mjpeg_encoder->write_packet2(ctx->stream_id, buf, buf_size, type, time);
}
-int MJPEGEncoder::write_packet2(uint8_t *buf, int buf_size, AVIODataMarkerType type, int64_t time)
+int MJPEGEncoder::write_packet2(HTTPD::StreamID stream_id, uint8_t *buf, int buf_size, AVIODataMarkerType type, int64_t time)
{
+ string *mux_header = &streams[stream_id].mux_header;
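+	// Each stream accumulates its own mux header, so that clients connecting
+	// late can still be sent a complete one.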
if (type == AVIO_DATA_MARKER_HEADER) {
- mux_header.append((char *)buf, buf_size);
- httpd->set_header(HTTPD::MULTICAM_STREAM, mux_header);
+ mux_header->append((char *)buf, buf_size);
+ httpd->set_header(stream_id, *mux_header);
} else {
- httpd->add_data(HTTPD::MULTICAM_STREAM, (char *)buf, buf_size, /*keyframe=*/true, AV_NOPTS_VALUE, AVRational{ AV_TIME_BASE, 1 });
+ httpd->add_data(stream_id, (char *)buf, buf_size, /*keyframe=*/true, AV_NOPTS_VALUE, AVRational{ AV_TIME_BASE, 1 });
}
return buf_size;
}
-MJPEGEncoder::MJPEGEncoder(HTTPD *httpd, const string &va_display)
- : httpd(httpd)
-{
- // Set up the mux. We don't use the Mux wrapper, because it's geared towards
- // a situation with only one video stream (and possibly one audio stream)
- // with known width/height, and we don't need the extra functionality it provides.
- avctx.reset(avformat_alloc_context());
- avctx->oformat = av_guess_format("mp4", nullptr, nullptr);
+namespace {
- uint8_t *buf = (uint8_t *)av_malloc(MUX_BUFFER_SIZE);
- avctx->pb = avio_alloc_context(buf, MUX_BUFFER_SIZE, 1, this, nullptr, nullptr, nullptr);
- avctx->pb->write_data_type = &MJPEGEncoder::write_packet2_thunk;
- avctx->flags = AVFMT_FLAG_CUSTOM_IO;
-
- for (unsigned card_idx = 0; card_idx < global_flags.card_to_mjpeg_stream_export.size(); ++card_idx) {
- AVStream *stream = avformat_new_stream(avctx.get(), nullptr);
- if (stream == nullptr) {
- fprintf(stderr, "avformat_new_stream() failed\n");
- exit(1);
- }
- stream->time_base = AVRational{ 1, TIMEBASE };
- stream->codecpar->codec_type = AVMEDIA_TYPE_VIDEO;
- stream->codecpar->codec_id = AV_CODEC_ID_MJPEG;
+void add_video_stream(AVFormatContext *avctx)
+{
+ AVStream *stream = avformat_new_stream(avctx, nullptr);
+ if (stream == nullptr) {
+ fprintf(stderr, "avformat_new_stream() failed\n");
+ abort();
+ }
- // Used for aspect ratio only. Can change without notice (the mux won't care).
- stream->codecpar->width = global_flags.width;
- stream->codecpar->height = global_flags.height;
+ // FFmpeg is very picky about having audio at 1/48000 timebase,
+ // no matter what we write. Even though we'd prefer our usual 1/120000,
+ // put the video on the same one, so that we can have locked audio.
+ stream->time_base = AVRational{ 1, OUTPUT_FREQUENCY };
+ stream->codecpar->codec_type = AVMEDIA_TYPE_VIDEO;
+ stream->codecpar->codec_id = AV_CODEC_ID_MJPEG;
+
+ // Used for aspect ratio only. Can change without notice (the mux won't care).
+ stream->codecpar->width = global_flags.width;
+ stream->codecpar->height = global_flags.height;
+
+ // TODO: We could perhaps use the interpretation for each card here
+ // (or at least the command-line flags) instead of the defaults,
+ // but what would we do when they change?
+ stream->codecpar->color_primaries = AVCOL_PRI_BT709;
+ stream->codecpar->color_trc = AVCOL_TRC_IEC61966_2_1;
+ stream->codecpar->color_space = AVCOL_SPC_BT709;
+ stream->codecpar->color_range = AVCOL_RANGE_MPEG;
+ stream->codecpar->chroma_location = AVCHROMA_LOC_LEFT;
+ stream->codecpar->field_order = AV_FIELD_PROGRESSIVE;
+}
- // TODO: We could perhaps use the interpretation for each card here
- // (or at least the command-line flags) instead of the defaults,
- // but what would we do when they change?
- stream->codecpar->color_primaries = AVCOL_PRI_BT709;
- stream->codecpar->color_trc = AVCOL_TRC_IEC61966_2_1;
- stream->codecpar->color_space = AVCOL_SPC_BT709;
- stream->codecpar->color_range = AVCOL_RANGE_MPEG;
- stream->codecpar->chroma_location = AVCHROMA_LOC_LEFT;
- stream->codecpar->field_order = AV_FIELD_PROGRESSIVE;
+void add_audio_stream(AVFormatContext *avctx)
+{
+ AVStream *stream = avformat_new_stream(avctx, nullptr);
+ if (stream == nullptr) {
+ fprintf(stderr, "avformat_new_stream() failed\n");
+ abort();
}
+ stream->time_base = AVRational{ 1, OUTPUT_FREQUENCY };
+ stream->codecpar->codec_type = AVMEDIA_TYPE_AUDIO;
+ stream->codecpar->codec_id = AV_CODEC_ID_PCM_S32LE;
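+	// Raw PCM; the samples are written as-is in write_audio_packet(),
+	// with no audio encoder involved.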
+ stream->codecpar->channel_layout = AV_CH_LAYOUT_STEREO;
+ stream->codecpar->channels = 2;
+ stream->codecpar->sample_rate = OUTPUT_FREQUENCY;
+}
+void finalize_mux(AVFormatContext *avctx)
+{
AVDictionary *options = NULL;
vector<pair<string, string>> opts = MUX_OPTS;
for (pair<string, string> opt : opts) {
av_dict_set(&options, opt.first.c_str(), opt.second.c_str(), 0);
}
- if (avformat_write_header(avctx.get(), &options) < 0) {
+ if (avformat_write_header(avctx, &options) < 0) {
fprintf(stderr, "avformat_write_header() failed\n");
- exit(1);
+ abort();
+ }
+}
+
+} // namespace
+
+MJPEGEncoder::MJPEGEncoder(HTTPD *httpd, const string &va_display)
+ : httpd(httpd)
+{
+ create_ffmpeg_context(HTTPD::StreamID{ HTTPD::MULTICAM_STREAM, 0 });
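+	// Write contexts for every possible stream are created up front; the
+	// AVIOContext made in add_stream() keeps a pointer into ffmpeg_contexts,
+	// so the entries must outlive the muxes. The siphon muxes themselves are
+	// created on demand in update_siphon_streams().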
+ for (unsigned stream_idx = 0; stream_idx < MAX_VIDEO_CARDS; ++stream_idx) {
+ create_ffmpeg_context(HTTPD::StreamID{ HTTPD::SIPHON_STREAM, stream_idx });
}
+ add_stream(HTTPD::StreamID{ HTTPD::MULTICAM_STREAM, 0 });
+
// Initialize VA-API.
string error;
va_dpy = try_open_va(va_display, &error, &config_id);
MJPEGEncoder::~MJPEGEncoder()
{
- av_free(avctx->pb->buffer);
+ for (auto &id_and_stream : streams) {
+ av_free(id_and_stream.second.avctx->pb->buffer);
+ }
global_metrics.remove("mjpeg_frames", {{ "status", "dropped" }, { "reason", "zero_size" }});
global_metrics.remove("mjpeg_frames", {{ "status", "dropped" }, { "reason", "interlaced" }});
return nullptr;
}
+ // TODO: Unify with the code in Futatabi.
int num_formats = vaMaxNumImageFormats(va_dpy->va_dpy);
assert(num_formats > 0);
return nullptr;
}
+ bool found = false;
+ for (int i = 0; i < num_formats; ++i) {
+ if (formats[i].fourcc == VA_FOURCC_UYVY) {
+ memcpy(&uyvy_format, &formats[i], sizeof(VAImageFormat));
+ found = true;
+ break;
+ }
+ }
+ if (!found) {
+ if (error != nullptr) *error = "UYVY format not found";
+ return nullptr;
+ }
+
return va_dpy;
}
-void MJPEGEncoder::upload_frame(int64_t pts, unsigned card_index, RefCountedFrame frame, const bmusb::VideoFormat &video_format, size_t y_offset, size_t cbcr_offset)
+void MJPEGEncoder::upload_frame(int64_t pts, unsigned card_index, RefCountedFrame frame, const bmusb::VideoFormat &video_format, size_t y_offset, size_t cbcr_offset, vector<int32_t> audio, const RGBTriplet &white_balance)
{
PBOFrameAllocator::Userdata *userdata = (PBOFrameAllocator::Userdata *)frame->userdata;
if (video_format.width == 0 || video_format.height == 0) {
return;
}
++metric_mjpeg_overrun_submitted;
- frames_to_be_encoded.push(QueuedFrame{ pts, card_index, frame, video_format, y_offset, cbcr_offset });
+ frames_to_be_encoded.push(QueuedFrame{ pts, card_index, frame, video_format, y_offset, cbcr_offset, move(audio), white_balance });
any_frames_to_be_encoded.notify_all();
}
-void MJPEGEncoder::finish_frame(RefCountedFrame frame)
-{
- PBOFrameAllocator::Userdata *userdata = (PBOFrameAllocator::Userdata *)frame->userdata;
-
- if (userdata->data_copy_current_src == PBOFrameAllocator::Userdata::FROM_VA_API) {
- VAResources resources __attribute__((unused)) = move(userdata->va_resources);
- ReleaseVAResources release = move(userdata->va_resources_release);
- VAImage image = move(userdata->va_image);
-
- VAStatus va_status = vaUnmapBuffer(va_dpy->va_dpy, image.buf);
- CHECK_VASTATUS(va_status, "vaUnmapBuffer");
- va_status = vaDestroyImage(va_dpy->va_dpy, image.image_id);
- CHECK_VASTATUS(va_status, "vaDestroyImage");
- }
-}
-
-int MJPEGEncoder::get_mjpeg_stream_for_card(unsigned card_index)
+bool MJPEGEncoder::should_encode_mjpeg_for_card(unsigned card_index)
{
// Only bother doing MJPEG encoding if there are any connected clients
// that want the stream.
- if (httpd->get_num_connected_multicam_clients() == 0) {
- return -1;
+ if (httpd->get_num_connected_multicam_clients() == 0 &&
+ httpd->get_num_connected_siphon_clients(card_index) == 0) {
+ return false;
}
auto it = global_flags.card_to_mjpeg_stream_export.find(card_index);
- if (it == global_flags.card_to_mjpeg_stream_export.end()) {
- return -1;
- }
- return it->second;
+ return (it != global_flags.card_to_mjpeg_stream_export.end());
}
void MJPEGEncoder::encoder_thread_func()
frames_to_be_encoded.pop();
}
+ assert(global_flags.card_to_mjpeg_stream_export.count(qf.card_index)); // Or should_encode_mjpeg_for_card() would have returned false.
+ int stream_index = global_flags.card_to_mjpeg_stream_export[qf.card_index];
+
if (va_dpy != nullptr) {
// Will call back in the receiver thread.
encode_jpeg_va(move(qf));
} else {
+ update_siphon_streams();
+
+ HTTPD::StreamID multicam_id{ HTTPD::MULTICAM_STREAM, 0 };
+ HTTPD::StreamID siphon_id{ HTTPD::SIPHON_STREAM, qf.card_index };
+ assert(streams.count(multicam_id));
+
+ // Write audio before video, since Futatabi expects it.
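+		// (In the multicam mux, add_stream() lays out all the video streams
+		// first and then one audio stream per card, hence the stream_index
+		// offset below.)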
+ if (qf.audio.size() > 0) {
+ write_audio_packet(streams[multicam_id].avctx.get(), qf.pts, stream_index + global_flags.card_to_mjpeg_stream_export.size(), qf.audio);
+ if (streams.count(siphon_id)) {
+ write_audio_packet(streams[siphon_id].avctx.get(), qf.pts, /*stream_index=*/1, qf.audio);
+ }
+ }
+
// Encode synchronously, in the same thread.
vector<uint8_t> jpeg = encode_jpeg_libjpeg(qf);
- write_mjpeg_packet(qf.pts, qf.card_index, jpeg.data(), jpeg.size());
+ write_mjpeg_packet(streams[multicam_id].avctx.get(), qf.pts, stream_index, jpeg.data(), jpeg.size());
+ if (streams.count(siphon_id)) {
+ write_mjpeg_packet(streams[siphon_id].avctx.get(), qf.pts, /*stream_index=*/0, jpeg.data(), jpeg.size());
+ }
}
}
free(tmp_cr);
}
-void MJPEGEncoder::write_mjpeg_packet(int64_t pts, unsigned card_index, const uint8_t *jpeg, size_t jpeg_size)
+void MJPEGEncoder::write_mjpeg_packet(AVFormatContext *avctx, int64_t pts, unsigned stream_index, const uint8_t *jpeg, size_t jpeg_size)
{
AVPacket pkt;
memset(&pkt, 0, sizeof(pkt));
pkt.buf = nullptr;
pkt.data = const_cast<uint8_t *>(jpeg);
pkt.size = jpeg_size;
- pkt.stream_index = card_index;
+ pkt.stream_index = stream_index;
pkt.flags = AV_PKT_FLAG_KEY;
AVRational time_base = avctx->streams[pkt.stream_index]->time_base;
pkt.pts = pkt.dts = av_rescale_q(pts, AVRational{ 1, TIMEBASE }, time_base);
+ pkt.duration = 0;
- if (av_write_frame(avctx.get(), &pkt) < 0) {
+ if (av_write_frame(avctx, &pkt) < 0) {
fprintf(stderr, "av_write_frame() failed\n");
- exit(1);
+ abort();
+ }
+}
+
+void MJPEGEncoder::write_audio_packet(AVFormatContext *avctx, int64_t pts, unsigned stream_index, const vector<int32_t> &audio)
+{
+ AVPacket pkt;
+ memset(&pkt, 0, sizeof(pkt));
+ pkt.buf = nullptr;
+ pkt.data = reinterpret_cast<uint8_t *>(const_cast<int32_t *>(&audio[0]));
+ pkt.size = audio.size() * sizeof(audio[0]);
+ pkt.stream_index = stream_index;
+ pkt.flags = AV_PKT_FLAG_KEY;
+ AVRational time_base = avctx->streams[pkt.stream_index]->time_base;
+ pkt.pts = pkt.dts = av_rescale_q(pts, AVRational{ 1, TIMEBASE }, time_base);
+ size_t num_stereo_samples = audio.size() / 2;
+ pkt.duration = av_rescale_q(num_stereo_samples, AVRational{ 1, OUTPUT_FREQUENCY }, time_base);
+
+ if (av_write_frame(avctx, &pkt) < 0) {
+ fprintf(stderr, "av_write_frame() failed\n");
+ abort();
}
}
va_status = vaCreateBuffer(va_dpy->va_dpy, config_id, VAEncCodedBufferType, width * height * 3 + 8192, 1, nullptr, &ret.data_buffer);
CHECK_VASTATUS(va_status, "vaCreateBuffer");
+ va_status = vaCreateImage(va_dpy->va_dpy, &uyvy_format, width, height, &ret.image);
+ CHECK_VASTATUS(va_status, "vaCreateImage");
+
return ret;
}
void MJPEGEncoder::release_va_resources(MJPEGEncoder::VAResources resources)
{
lock_guard<mutex> lock(va_resources_mutex);
- if (va_resources_freelist.size() > 10) {
+ if (va_resources_freelist.size() > 50) {
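+		// Drop the oldest entry; each one pins a surface, a coded buffer
+		// and now also a VAImage, so the freelist must stay bounded.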
auto it = va_resources_freelist.end();
--it;
va_status = vaDestroySurfaces(va_dpy->va_dpy, &it->surface, 1);
CHECK_VASTATUS(va_status, "vaDestroySurfaces");
+ va_status = vaDestroyImage(va_dpy->va_dpy, it->image.image_id);
+ CHECK_VASTATUS(va_status, "vaDestroyImage");
+
va_resources_freelist.erase(it);
}
va_resources_freelist.push_front(resources);
}
-void MJPEGEncoder::init_jpeg_422(unsigned width, unsigned height, VectorDestinationManager *dest, jpeg_compress_struct *cinfo)
+namespace {
+
+void push16(uint16_t val, string *str)
+{
+ str->push_back(val >> 8);
+ str->push_back(val & 0xff);
+}
+
+void push32(uint32_t val, string *str)
+{
+ str->push_back(val >> 24);
+ str->push_back((val >> 16) & 0xff);
+ str->push_back((val >> 8) & 0xff);
+ str->push_back(val & 0xff);
+}
+
+} // namespace
+
+void MJPEGEncoder::init_jpeg_422(unsigned width, unsigned height, const RGBTriplet &white_balance, VectorDestinationManager *dest, jpeg_compress_struct *cinfo)
{
jpeg_error_mgr jerr;
cinfo->err = jpeg_std_error(&jerr);
cinfo->CCIR601_sampling = true; // Seems to be mostly ignored by libjpeg, though.
jpeg_start_compress(cinfo, true);
+ if (fabs(white_balance.r - 1.0f) > 1e-3 ||
+ fabs(white_balance.g - 1.0f) > 1e-3 ||
+ fabs(white_balance.b - 1.0f) > 1e-3) {
+ // Convert from (linear) RGB to XYZ.
+ Matrix3d rgb_to_xyz_matrix = movit::ColorspaceConversionEffect::get_xyz_matrix(COLORSPACE_sRGB);
+ Vector3d xyz = rgb_to_xyz_matrix * Vector3d(white_balance.r, white_balance.g, white_balance.b);
+
+ // Convert from XYZ to xyz by normalizing.
+ xyz /= (xyz[0] + xyz[1] + xyz[2]);
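+		// (For reference: a neutral (1,1,1) input would map to the sRGB white
+		// point, D65, i.e., x ≈ 0.3127, y ≈ 0.3290; neutral inputs are
+		// filtered out by the check above, though.)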
+
+ // Create a very rudimentary EXIF header to hold our white point.
+ string exif;
+
+ // Exif header, followed by some padding.
+ exif = "Exif";
+ push16(0, &exif);
+
+ // TIFF header first:
+ exif += "MM"; // Big endian.
+
+ // Magic number.
+ push16(42, &exif);
+
+ // Offset of first IFD (relative to the MM, immediately after the header).
+ push32(exif.size() - 6 + 4, &exif);
+
+ // Now the actual IFD.
+
+ // One entry.
+ push16(1, &exif);
+
+ // WhitePoint tag ID.
+ push16(0x13e, &exif);
+
+ // Rational type.
+ push16(5, &exif);
+
+ // Two values (x and y; z is implicit due to normalization).
+ push32(2, &exif);
+
+ // Offset (relative to the MM, immediately after the last IFD).
+ push32(exif.size() - 6 + 8, &exif);
+
+ // No more IFDs.
+ push32(0, &exif);
+
+ // The actual values.
+ push32(lrintf(xyz[0] * 10000.0f), &exif);
+ push32(10000, &exif);
+ push32(lrintf(xyz[1] * 10000.0f), &exif);
+ push32(10000, &exif);
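+		// (The finished marker is 48 bytes in all: the 6-byte "Exif\0\0"
+		// signature, the 8-byte TIFF header, a one-entry IFD, and the two
+		// rationals at offset 26 from the start of the TIFF header.)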
+
+ jpeg_write_marker(cinfo, JPEG_APP0 + 1, (const JOCTET *)exif.data(), exif.size());
+ }
+
// This comment marker is private to FFmpeg. It signals limited Y'CbCr range
// (and nothing else).
jpeg_write_marker(cinfo, JPEG_COM, (const JOCTET *)"CS=ITU601", strlen("CS=ITU601"));
}
-vector<uint8_t> MJPEGEncoder::get_jpeg_header(unsigned width, unsigned height, jpeg_compress_struct *cinfo)
+vector<uint8_t> MJPEGEncoder::get_jpeg_header(unsigned width, unsigned height, const RGBTriplet &white_balance, jpeg_compress_struct *cinfo)
{
VectorDestinationManager dest;
- init_jpeg_422(width, height, &dest, cinfo);
+ init_jpeg_422(width, height, white_balance, &dest, cinfo);
// Make a dummy black image; there's seemingly no other easy way of
	// making libjpeg output all of its headers.
return dest.dest;
}
-MJPEGEncoder::VAData MJPEGEncoder::get_va_data_for_resolution(unsigned width, unsigned height)
+MJPEGEncoder::VAData MJPEGEncoder::get_va_data_for_parameters(unsigned width, unsigned height, const RGBTriplet &white_balance)
{
- pair<unsigned, unsigned> key(width, height);
- if (va_data_for_resolution.count(key)) {
- return va_data_for_resolution[key];
+ VAKey key{width, height, white_balance};
+ if (va_data_for_parameters.count(key)) {
+ return va_data_for_parameters[key];
}
// Use libjpeg to generate a header and set sane defaults for e.g.
// quantization tables. Then do the actual encode with VA-API.
jpeg_compress_struct cinfo;
- vector<uint8_t> jpeg_header = get_jpeg_header(width, height, &cinfo);
+ vector<uint8_t> jpeg_header = get_jpeg_header(width, height, white_balance, &cinfo);
// Picture parameters.
VAEncPictureParameterBufferJPEG pic_param;
ret.q = q;
ret.huff = huff;
ret.parms = parms;
- va_data_for_resolution[key] = ret;
+ va_data_for_parameters[key] = ret;
return ret;
}
release = ReleaseVAResources(this, resources);
}
- VAData va_data = get_va_data_for_resolution(width, height);
+ VAData va_data = get_va_data_for_parameters(width, height, qf.white_balance);
va_data.pic_param.coded_buf = resources.data_buffer;
VABufferID pic_param_buffer;
CHECK_VASTATUS(va_status, "vaCreateBuffer");
VABufferDestroyer destroy_slice_param(va_dpy->va_dpy, slice_param_buffer);
- VAImage image;
if (userdata->data_copy_current_src == PBOFrameAllocator::Userdata::FROM_VA_API) {
- // The pixel data is already uploaded by the caller.
- image = move(userdata->va_image);
+ // The pixel data is already put into the image by the caller.
+ va_status = vaUnmapBuffer(va_dpy->va_dpy, resources.image.buf);
+ CHECK_VASTATUS(va_status, "vaUnmapBuffer");
} else {
assert(userdata->data_copy_current_src == PBOFrameAllocator::Userdata::FROM_MALLOC);
// Upload the pixel data.
- va_status = vaDeriveImage(va_dpy->va_dpy, resources.surface, &image);
- CHECK_VASTATUS(va_status, "vaDeriveImage");
-
uint8_t *surface_p = nullptr;
- vaMapBuffer(va_dpy->va_dpy, image.buf, (void **)&surface_p);
+ vaMapBuffer(va_dpy->va_dpy, resources.image.buf, (void **)&surface_p);
size_t field_start_line = qf.video_format.extra_lines_top; // No interlacing support.
size_t field_start = qf.cbcr_offset * 2 + qf.video_format.width * field_start_line * 2;
{
const uint8_t *src = qf.frame->data_copy + field_start;
- uint8_t *dst = (unsigned char *)surface_p + image.offsets[0];
- memcpy_with_pitch(dst, src, qf.video_format.width * 2, image.pitches[0], qf.video_format.height);
+ uint8_t *dst = (unsigned char *)surface_p + resources.image.offsets[0];
+ memcpy_with_pitch(dst, src, qf.video_format.width * 2, resources.image.pitches[0], qf.video_format.height);
}
+
+ va_status = vaUnmapBuffer(va_dpy->va_dpy, resources.image.buf);
+ CHECK_VASTATUS(va_status, "vaUnmapBuffer");
}
- va_status = vaUnmapBuffer(va_dpy->va_dpy, image.buf);
- CHECK_VASTATUS(va_status, "vaUnmapBuffer");
- va_status = vaDestroyImage(va_dpy->va_dpy, image.image_id);
- CHECK_VASTATUS(va_status, "vaDestroyImage");
+ qf.frame->data_copy = nullptr;
+
+ // Seemingly vaPutImage() (which triggers a GPU copy) is much nicer to the
+ // CPU than vaDeriveImage() and copying directly into the GPU's buffers.
+ // Exactly why is unclear, but it seems to involve L3 cache usage when there
+ // are many high-res (1080p+) images in play.
+ va_status = vaPutImage(va_dpy->va_dpy, resources.surface, resources.image.image_id, 0, 0, width, height, 0, 0, width, height);
+ CHECK_VASTATUS(va_status, "vaPutImage");
// Finally, stick in the JPEG header.
VAEncPackedHeaderParameterBuffer header_parm;
frames_encoding.pop();
}
+ update_siphon_streams();
+
+ assert(global_flags.card_to_mjpeg_stream_export.count(qf.card_index)); // Or should_encode_mjpeg_for_card() would have returned false.
+ int stream_index = global_flags.card_to_mjpeg_stream_export[qf.card_index];
+
+ HTTPD::StreamID multicam_id{ HTTPD::MULTICAM_STREAM, 0 };
+ HTTPD::StreamID siphon_id{ HTTPD::SIPHON_STREAM, qf.card_index };
+ assert(streams.count(multicam_id));
+ assert(streams[multicam_id].avctx != nullptr);
+
+ // Write audio before video, since Futatabi expects it.
+ if (qf.audio.size() > 0) {
+ write_audio_packet(streams[multicam_id].avctx.get(), qf.pts, stream_index + global_flags.card_to_mjpeg_stream_export.size(), qf.audio);
+ if (streams.count(siphon_id)) {
+ write_audio_packet(streams[siphon_id].avctx.get(), qf.pts, /*stream_index=*/1, qf.audio);
+ }
+ }
+
VAStatus va_status = vaSyncSurface(va_dpy->va_dpy, qf.resources.surface);
CHECK_VASTATUS(va_status, "vaSyncSurface");
CHECK_VASTATUS(va_status, "vaMapBuffer");
const uint8_t *coded_buf = reinterpret_cast<uint8_t *>(segment->buf);
- write_mjpeg_packet(qf.pts, qf.card_index, coded_buf, segment->size);
+ write_mjpeg_packet(streams[multicam_id].avctx.get(), qf.pts, stream_index, coded_buf, segment->size);
+ if (streams.count(siphon_id)) {
+ write_mjpeg_packet(streams[siphon_id].avctx.get(), qf.pts, /*stream_index=*/0, coded_buf, segment->size);
+ }
va_status = vaUnmapBuffer(va_dpy->va_dpy, qf.resources.data_buffer);
CHECK_VASTATUS(va_status, "vaUnmapBuffer");
VectorDestinationManager dest;
jpeg_compress_struct cinfo;
- init_jpeg_422(width, height, &dest, &cinfo);
+ init_jpeg_422(width, height, qf.white_balance, &dest, &cinfo);
size_t field_start_line = qf.video_format.extra_lines_top; // No interlacing support.
size_t field_start = qf.cbcr_offset * 2 + qf.video_format.width * field_start_line * 2;
for (unsigned y = 0; y < qf.video_format.height; y += 8) {
const uint8_t *src = qf.frame->data_copy + field_start + y * qf.video_format.width * 2;
- memcpy_interleaved(tmp_y, tmp_cbcr, src, qf.video_format.width * 8 * 2);
+ memcpy_interleaved(tmp_cbcr, tmp_y, src, qf.video_format.width * 8 * 2);
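+		// (UYVY stores chroma in the even bytes and luma in the odd ones, so
+		// the Cb/Cr bytes must come out first in the deinterleave.)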
memcpy_interleaved(tmp_cb, tmp_cr, tmp_cbcr, qf.video_format.width * 8);
for (unsigned yy = 0; yy < 8; ++yy) {
yptr[yy] = tmp_y + yy * width;
return dest.dest;
}
+
+void MJPEGEncoder::add_stream(HTTPD::StreamID stream_id)
+{
+ AVFormatContextWithCloser avctx;
+
+ // Set up the mux. We don't use the Mux wrapper, because it's geared towards
+ // a situation with only one video stream (and possibly one audio stream)
+ // with known width/height, and we don't need the extra functionality it provides.
+ avctx.reset(avformat_alloc_context());
+ avctx->oformat = av_guess_format("nut", nullptr, nullptr);
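+	// (Presumably NUT rather than MP4 because FFmpeg's MP4 muxer cannot carry
+	// raw PCM such as pcm_s32le, and NUT also streams fine without an index.)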
+
+ uint8_t *buf = (uint8_t *)av_malloc(MUX_BUFFER_SIZE);
+ avctx->pb = avio_alloc_context(buf, MUX_BUFFER_SIZE, 1, &ffmpeg_contexts[stream_id], nullptr, nullptr, nullptr);
+ avctx->pb->write_data_type = &MJPEGEncoder::write_packet2_thunk;
+ avctx->flags = AVFMT_FLAG_CUSTOM_IO;
+
+ if (stream_id.type == HTTPD::MULTICAM_STREAM) {
+ for (unsigned card_idx = 0; card_idx < global_flags.card_to_mjpeg_stream_export.size(); ++card_idx) {
+ add_video_stream(avctx.get());
+ }
+ for (unsigned card_idx = 0; card_idx < global_flags.card_to_mjpeg_stream_export.size(); ++card_idx) {
+ add_audio_stream(avctx.get());
+ }
+ } else {
+ assert(stream_id.type == HTTPD::SIPHON_STREAM);
+ add_video_stream(avctx.get());
+ add_audio_stream(avctx.get());
+ }
+ finalize_mux(avctx.get());
+
+ Stream s;
+ s.avctx = move(avctx);
+ streams[stream_id] = move(s);
+}
+
+void MJPEGEncoder::update_siphon_streams()
+{
+ // Bring the list of streams into sync with what the clients need.
+ for (auto it = streams.begin(); it != streams.end(); ) {
+ if (it->first.type != HTTPD::SIPHON_STREAM) {
+ ++it;
+ continue;
+ }
+ if (httpd->get_num_connected_siphon_clients(it->first.index) == 0) {
+ av_free(it->second.avctx->pb->buffer);
+ streams.erase(it++);
+ } else {
+ ++it;
+ }
+ }
+ for (unsigned stream_idx = 0; stream_idx < MAX_VIDEO_CARDS; ++stream_idx) {
+ HTTPD::StreamID stream_id{ HTTPD::SIPHON_STREAM, stream_idx };
+ if (streams.count(stream_id) == 0 && httpd->get_num_connected_siphon_clients(stream_idx) > 0) {
+ add_stream(stream_id);
+ }
+ }
+}
+
+void MJPEGEncoder::create_ffmpeg_context(HTTPD::StreamID stream_id)
+{
+ ffmpeg_contexts.emplace(stream_id, WritePacket2Context{ this, stream_id });
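+	// (Both std::map and std::unordered_map keep references to their elements
+	// stable across insertions, so the AVIOContext created in add_stream()
+	// can safely hold a pointer to this context.)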
+}