#include "mjpeg_encoder.h"

#include <assert.h>
#include <jpeglib.h>
#include <unistd.h>
#if __SSE2__
#include <immintrin.h>
#endif
#include <list>

extern "C" {
#include <libavformat/avformat.h>
#include <libavutil/channel_layout.h>
}

#include "defs.h"
#include "shared/ffmpeg_raii.h"
#include "flags.h"
#include "shared/httpd.h"
#include "shared/memcpy_interleaved.h"
#include "shared/metrics.h"
#include "pbo_frame_allocator.h"
#include "shared/timebase.h"
#include "shared/va_display.h"

#include <movit/colorspace_conversion_effect.h>

#include <va/va.h>
#include <va/va_drm.h>
#include <va/va_x11.h>

using namespace Eigen;
using namespace bmusb;
using namespace movit;
using namespace std;

static VAImageFormat uyvy_format, nv12_format;

extern void memcpy_with_pitch(uint8_t *dst, const uint8_t *src, size_t src_width, size_t dst_pitch, size_t height);

// The inverse of memcpy_interleaved(), with (slow) support for pitch.
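// For example, encode_jpeg_va() below uses it to build the interleaved CbCr
// plane of an NV12 surface from the separate Cb and Cr planes of an I420 frame:
//
//   interleave_with_pitch(cbcr_dst, cb_src, cr_src, width / 2, pitch, height / 2);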
void interleave_with_pitch(uint8_t *dst, const uint8_t *src1, const uint8_t *src2, size_t src_width, size_t dst_pitch, size_t height)
{
#if __SSE2__
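        // Fast path: with a tightly packed destination (pitch == 2 * width),
        // interleave 16 bytes from each source per iteration;
        // _mm_unpacklo_epi8/_mm_unpackhi_epi8 together emit exactly the
        // byte-interleaved pattern s1[0] s2[0] s1[1] s2[1] ... of the scalar loop below.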
        if (dst_pitch == src_width * 2 && (src_width * height) % 16 == 0) {
                __m128i *dptr = reinterpret_cast<__m128i *>(dst);
                const __m128i *sptr1 = reinterpret_cast<const __m128i *>(src1);
                const __m128i *sptr2 = reinterpret_cast<const __m128i *>(src2);
                for (size_t i = 0; i < src_width * height / 16; ++i) {
                        __m128i data1 = _mm_loadu_si128(sptr1++);
                        __m128i data2 = _mm_loadu_si128(sptr2++);
                        _mm_storeu_si128(dptr++, _mm_unpacklo_epi8(data1, data2));
                        _mm_storeu_si128(dptr++, _mm_unpackhi_epi8(data1, data2));
                }
                return;
        }
#endif

        for (size_t y = 0; y < height; ++y) {
                uint8_t *dptr = dst + y * dst_pitch;
                const uint8_t *sptr1 = src1 + y * src_width;
                const uint8_t *sptr2 = src2 + y * src_width;
                for (size_t x = 0; x < src_width; ++x) {
                        *dptr++ = *sptr1++;
                        *dptr++ = *sptr2++;
                }
        }
}

// From libjpeg (although it's of course identical between implementations).
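// Maps zigzag index to natural (row-major) coefficient order; used in
// get_va_data_for_parameters() below to convert libjpeg's natural-order
// quantization tables into the zigzag order that VA-API seemingly expects.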
static const int jpeg_natural_order[DCTSIZE2] = {
         0,  1,  8, 16,  9,  2,  3, 10,
        17, 24, 32, 25, 18, 11,  4,  5,
        12, 19, 26, 33, 40, 48, 41, 34,
        27, 20, 13,  6,  7, 14, 21, 28,
        35, 42, 49, 56, 57, 50, 43, 36,
        29, 22, 15, 23, 30, 37, 44, 51,
        58, 59, 52, 45, 38, 31, 39, 46,
        53, 60, 61, 54, 47, 55, 62, 63,
};

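// A libjpeg destination manager that writes the compressed bitstream into a
// growable std::vector<uint8_t>. libjpeg only sees the embedded
// jpeg_destination_mgr (pub); since this struct is standard-layout (checked by
// the static_assert below), a pointer to pub is also a pointer to the whole
// object, which is what the *_thunk functions rely on when casting back.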
struct VectorDestinationManager {
        jpeg_destination_mgr pub;
        std::vector<uint8_t> dest;

        VectorDestinationManager()
        {
                pub.init_destination = init_destination_thunk;
                pub.empty_output_buffer = empty_output_buffer_thunk;
                pub.term_destination = term_destination_thunk;
        }

        static void init_destination_thunk(j_compress_ptr ptr)
        {
                ((VectorDestinationManager *)(ptr->dest))->init_destination();
        }

        inline void init_destination()
        {
                make_room(0);
        }

        static boolean empty_output_buffer_thunk(j_compress_ptr ptr)
        {
                return ((VectorDestinationManager *)(ptr->dest))->empty_output_buffer();
        }

        inline bool empty_output_buffer()
        {
                make_room(dest.size());  // Should ignore pub.free_in_buffer!
                return true;
        }

        inline void make_room(size_t bytes_used)
        {
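                // Grow by at least 4 kB, then expand to whatever capacity the
                // allocator actually gave us, so that no usable space is wasted.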
                dest.resize(bytes_used + 4096);
                dest.resize(dest.capacity());
                pub.next_output_byte = dest.data() + bytes_used;
                pub.free_in_buffer = dest.size() - bytes_used;
        }

        static void term_destination_thunk(j_compress_ptr ptr)
        {
                ((VectorDestinationManager *)(ptr->dest))->term_destination();
        }

        inline void term_destination()
        {
                dest.resize(dest.size() - pub.free_in_buffer);
        }
};
static_assert(std::is_standard_layout<VectorDestinationManager>::value, "");

int MJPEGEncoder::write_packet2_thunk(void *opaque, uint8_t *buf, int buf_size, AVIODataMarkerType type, int64_t time)
{
        WritePacket2Context *ctx = (WritePacket2Context *)opaque;
        return ctx->mjpeg_encoder->write_packet2(ctx->stream_id, buf, buf_size, type, time);
}

int MJPEGEncoder::write_packet2(HTTPD::StreamID stream_id, uint8_t *buf, int buf_size, AVIODataMarkerType type, int64_t time)
{
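        // The mux header is accumulated and handed to the HTTPD separately,
        // presumably so that it can be sent first to clients that connect
        // mid-stream; everything else is broadcast as regular stream data.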
        string *mux_header = &streams[stream_id].mux_header;
        if (type == AVIO_DATA_MARKER_HEADER) {
                mux_header->append((char *)buf, buf_size);
                httpd->set_header(stream_id, *mux_header);
        } else {
                httpd->add_data(stream_id, (char *)buf, buf_size, /*keyframe=*/true, AV_NOPTS_VALUE, AVRational{ AV_TIME_BASE, 1 });
        }
        return buf_size;
}

namespace {

void add_video_stream(AVFormatContext *avctx)
{
        AVStream *stream = avformat_new_stream(avctx, nullptr);
        if (stream == nullptr) {
                fprintf(stderr, "avformat_new_stream() failed\n");
                abort();
        }

        // FFmpeg is very picky about having audio at 1/48000 timebase,
        // no matter what we write. Even though we'd prefer our usual 1/120000,
        // put the video on the same one, so that we can have locked audio.
        stream->time_base = AVRational{ 1, OUTPUT_FREQUENCY };
        stream->codecpar->codec_type = AVMEDIA_TYPE_VIDEO;
        stream->codecpar->codec_id = AV_CODEC_ID_MJPEG;

        // Used for aspect ratio only. Can change without notice (the mux won't care).
        stream->codecpar->width = global_flags.width;
        stream->codecpar->height = global_flags.height;

        // TODO: We could perhaps use the interpretation for each card here
        // (or at least the command-line flags) instead of the defaults,
        // but what would we do when they change?
        stream->codecpar->color_primaries = AVCOL_PRI_BT709;
        stream->codecpar->color_trc = AVCOL_TRC_IEC61966_2_1;
        stream->codecpar->color_space = AVCOL_SPC_BT709;
        stream->codecpar->color_range = AVCOL_RANGE_MPEG;
        stream->codecpar->chroma_location = AVCHROMA_LOC_LEFT;
        stream->codecpar->field_order = AV_FIELD_PROGRESSIVE;
}

void add_audio_stream(AVFormatContext *avctx)
{
        AVStream *stream = avformat_new_stream(avctx, nullptr);
        if (stream == nullptr) {
                fprintf(stderr, "avformat_new_stream() failed\n");
                abort();
        }
        stream->time_base = AVRational{ 1, OUTPUT_FREQUENCY };
        stream->codecpar->codec_type = AVMEDIA_TYPE_AUDIO;
        stream->codecpar->codec_id = AV_CODEC_ID_PCM_S32LE;
        stream->codecpar->ch_layout.order = AV_CHANNEL_ORDER_NATIVE;
        stream->codecpar->ch_layout.nb_channels = 2;
        stream->codecpar->ch_layout.u.mask = AV_CH_LAYOUT_STEREO;
        stream->codecpar->sample_rate = OUTPUT_FREQUENCY;
}

void finalize_mux(AVFormatContext *avctx)
{
        AVDictionary *options = NULL;
        vector<pair<string, string>> opts = MUX_OPTS;
        for (pair<string, string> opt : opts) {
                av_dict_set(&options, opt.first.c_str(), opt.second.c_str(), 0);
        }
        if (avformat_write_header(avctx, &options) < 0) {
                fprintf(stderr, "avformat_write_header() failed\n");
                abort();
        }
}

}  // namespace

MJPEGEncoder::MJPEGEncoder(HTTPD *httpd, const string &va_display)
        : httpd(httpd)
{
        create_ffmpeg_context(HTTPD::StreamID{ HTTPD::MULTICAM_STREAM, 0 });
        for (unsigned stream_idx = 0; stream_idx < MAX_VIDEO_CARDS; ++stream_idx) {
                create_ffmpeg_context(HTTPD::StreamID{ HTTPD::SIPHON_STREAM, stream_idx });
        }

        add_stream(HTTPD::StreamID{ HTTPD::MULTICAM_STREAM, 0 });

        // Initialize VA-API.
        VAConfigID config_id_422, config_id_420;
        string error;
        va_dpy = try_open_va(va_display, { VAProfileJPEGBaseline }, VAEntrypointEncPicture,
                {
                        { "4:2:2", VA_RT_FORMAT_YUV422, VA_FOURCC_UYVY, &config_id_422, &uyvy_format },
                        // We'd prefer VA_FOURCC_I420, but it's not supported by Intel's driver.
                        { "4:2:0", VA_RT_FORMAT_YUV420, VA_FOURCC_NV12, &config_id_420, &nv12_format }
                },
                /*chosen_profile=*/nullptr, &error);
        if (va_dpy == nullptr) {
                fprintf(stderr, "Could not initialize VA-API for MJPEG encoding: %s. JPEGs will be encoded in software if needed.\n", error.c_str());
        }

        encoder_thread = thread(&MJPEGEncoder::encoder_thread_func, this);
        if (va_dpy != nullptr) {
                va_pool.reset(new VAResourcePool(va_dpy->va_dpy, uyvy_format, nv12_format, config_id_422, config_id_420, /*with_data_buffer=*/true));
                va_receiver_thread = thread(&MJPEGEncoder::va_receiver_thread_func, this);
        }

        global_metrics.add("mjpeg_frames", {{ "status", "dropped" }, { "reason", "zero_size" }}, &metric_mjpeg_frames_zero_size_dropped);
        global_metrics.add("mjpeg_frames", {{ "status", "dropped" }, { "reason", "interlaced" }}, &metric_mjpeg_frames_interlaced_dropped);
        global_metrics.add("mjpeg_frames", {{ "status", "dropped" }, { "reason", "unsupported_pixel_format" }}, &metric_mjpeg_frames_unsupported_pixel_format_dropped);
        global_metrics.add("mjpeg_frames", {{ "status", "dropped" }, { "reason", "oversized" }}, &metric_mjpeg_frames_oversized_dropped);
        global_metrics.add("mjpeg_frames", {{ "status", "dropped" }, { "reason", "overrun" }}, &metric_mjpeg_overrun_dropped);
        global_metrics.add("mjpeg_frames", {{ "status", "submitted" }}, &metric_mjpeg_overrun_submitted);

        running = true;
}

MJPEGEncoder::~MJPEGEncoder()
{
        for (auto &id_and_stream : streams) {
                av_free(id_and_stream.second.avctx->pb->buffer);
        }

        global_metrics.remove("mjpeg_frames", {{ "status", "dropped" }, { "reason", "zero_size" }});
        global_metrics.remove("mjpeg_frames", {{ "status", "dropped" }, { "reason", "interlaced" }});
        global_metrics.remove("mjpeg_frames", {{ "status", "dropped" }, { "reason", "unsupported_pixel_format" }});
        global_metrics.remove("mjpeg_frames", {{ "status", "dropped" }, { "reason", "oversized" }});
        global_metrics.remove("mjpeg_frames", {{ "status", "dropped" }, { "reason", "overrun" }});
        global_metrics.remove("mjpeg_frames", {{ "status", "submitted" }});
}

void MJPEGEncoder::stop()
{
        if (!running) {
                return;
        }
        running = false;
        should_quit = true;
        any_frames_to_be_encoded.notify_all();
        any_frames_encoding.notify_all();
        encoder_thread.join();
        if (va_dpy != nullptr) {
                va_receiver_thread.join();
        }
}

namespace {

bool is_uyvy(RefCountedFrame frame)
{
        PBOFrameAllocator::Userdata *userdata = (PBOFrameAllocator::Userdata *)frame->userdata;
        return userdata->pixel_format == PixelFormat_8BitYCbCr && frame->interleaved;
}

bool is_i420(RefCountedFrame frame)
{
        PBOFrameAllocator::Userdata *userdata = (PBOFrameAllocator::Userdata *)frame->userdata;
        return userdata->pixel_format == PixelFormat_8BitYCbCrPlanar &&
                userdata->ycbcr_format.chroma_subsampling_x == 2 &&
                userdata->ycbcr_format.chroma_subsampling_y == 2;
}

}  // namespace

void MJPEGEncoder::upload_frame(int64_t pts, unsigned card_index, RefCountedFrame frame, const bmusb::VideoFormat &video_format, size_t y_offset, size_t cbcr_offset, vector<int32_t> audio, const RGBTriplet &white_balance)
{
        if (video_format.width == 0 || video_format.height == 0) {
                ++metric_mjpeg_frames_zero_size_dropped;
                return;
        }
        if (video_format.interlaced) {
                fprintf(stderr, "Card %u: Ignoring JPEG encoding for interlaced frame\n", card_index);
                ++metric_mjpeg_frames_interlaced_dropped;
                return;
        }
        if (!is_uyvy(frame) && !is_i420(frame)) {
                fprintf(stderr, "Card %u: Ignoring JPEG encoding for unsupported pixel format\n", card_index);
                ++metric_mjpeg_frames_unsupported_pixel_format_dropped;
                return;
        }
        if (video_format.width > 4096 || video_format.height > 4096) {
                fprintf(stderr, "Card %u: Ignoring JPEG encoding for oversized frame\n", card_index);
                ++metric_mjpeg_frames_oversized_dropped;
                return;
        }

        lock_guard<mutex> lock(mu);
        if (frames_to_be_encoded.size() + frames_encoding.size() > 50) {
                fprintf(stderr, "WARNING: MJPEG encoding doesn't keep up, discarding frame.\n");
                ++metric_mjpeg_overrun_dropped;
                return;
        }
        ++metric_mjpeg_overrun_submitted;
        frames_to_be_encoded.push(QueuedFrame{ pts, card_index, frame, video_format, y_offset, cbcr_offset, move(audio), white_balance });
        any_frames_to_be_encoded.notify_all();
}

bool MJPEGEncoder::should_encode_mjpeg_for_card(unsigned card_index)
{
        // Only bother doing MJPEG encoding if there are any connected clients
        // that want the stream.
        if (httpd->get_num_connected_multicam_clients() == 0 &&
            httpd->get_num_connected_siphon_clients(card_index) == 0) {
                return false;
        }

        auto it = global_flags.card_to_mjpeg_stream_export.find(card_index);
        return (it != global_flags.card_to_mjpeg_stream_export.end());
}

void MJPEGEncoder::encoder_thread_func()
{
        pthread_setname_np(pthread_self(), "MJPEG_Encode");
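        // Scratch buffers for deinterleaving rows in the libjpeg path (and for
        // header generation): room for eight rows at the maximum supported
        // width of 4096 pixels, aligned to a page boundary.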
        posix_memalign((void **)&tmp_y, 4096, 4096 * 8);
        posix_memalign((void **)&tmp_cbcr, 4096, 4096 * 8);
        posix_memalign((void **)&tmp_cb, 4096, 4096 * 8);
        posix_memalign((void **)&tmp_cr, 4096, 4096 * 8);

        for (;;) {
                QueuedFrame qf;
                {
                        unique_lock<mutex> lock(mu);
                        any_frames_to_be_encoded.wait(lock, [this] { return !frames_to_be_encoded.empty() || should_quit; });
                        if (should_quit) break;
                        qf = move(frames_to_be_encoded.front());
                        frames_to_be_encoded.pop();
                }

                assert(global_flags.card_to_mjpeg_stream_export.count(qf.card_index));  // Or should_encode_mjpeg_for_card() would have returned false.
                int stream_index = global_flags.card_to_mjpeg_stream_export[qf.card_index];

                if (va_dpy != nullptr) {
                        // Will call back in the receiver thread.
                        encode_jpeg_va(move(qf));
                } else {
                        update_siphon_streams();

                        HTTPD::StreamID multicam_id{ HTTPD::MULTICAM_STREAM, 0 };
                        HTTPD::StreamID siphon_id{ HTTPD::SIPHON_STREAM, qf.card_index };
                        assert(streams.count(multicam_id));

                        // Write audio before video, since Futatabi expects it.
                        if (qf.audio.size() > 0) {
                                write_audio_packet(streams[multicam_id].avctx.get(), qf.pts, stream_index + global_flags.card_to_mjpeg_stream_export.size(), qf.audio);
                                if (streams.count(siphon_id)) {
                                        write_audio_packet(streams[siphon_id].avctx.get(), qf.pts, /*stream_index=*/1, qf.audio);
                                }
                        }

                        // Encode synchronously, in the same thread.
                        vector<uint8_t> jpeg = encode_jpeg_libjpeg(qf);
                        write_mjpeg_packet(streams[multicam_id].avctx.get(), qf.pts, stream_index, jpeg.data(), jpeg.size());
                        if (streams.count(siphon_id)) {
                                write_mjpeg_packet(streams[siphon_id].avctx.get(), qf.pts, /*stream_index=*/0, jpeg.data(), jpeg.size());
                        }
                }
        }

        free(tmp_y);
        free(tmp_cbcr);
        free(tmp_cb);
        free(tmp_cr);
}

void MJPEGEncoder::write_mjpeg_packet(AVFormatContext *avctx, int64_t pts, unsigned stream_index, const uint8_t *jpeg, size_t jpeg_size)
{
        AVPacket pkt;
        memset(&pkt, 0, sizeof(pkt));
        pkt.buf = nullptr;
        pkt.data = const_cast<uint8_t *>(jpeg);
        pkt.size = jpeg_size;
        pkt.stream_index = stream_index;
        pkt.flags = AV_PKT_FLAG_KEY;
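        // Rescale from the global 1/TIMEBASE clock to this stream's own timebase.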
        AVRational time_base = avctx->streams[pkt.stream_index]->time_base;
        pkt.pts = pkt.dts = av_rescale_q(pts, AVRational{ 1, TIMEBASE }, time_base);
        pkt.duration = 0;

        if (av_write_frame(avctx, &pkt) < 0) {
                fprintf(stderr, "av_write_frame() failed\n");
                abort();
        }
}

void MJPEGEncoder::write_audio_packet(AVFormatContext *avctx, int64_t pts, unsigned stream_index, const vector<int32_t> &audio)
{
        AVPacket pkt;
        memset(&pkt, 0, sizeof(pkt));
        pkt.buf = nullptr;
        pkt.data = reinterpret_cast<uint8_t *>(const_cast<int32_t *>(&audio[0]));
        pkt.size = audio.size() * sizeof(audio[0]);
        pkt.stream_index = stream_index;
        pkt.flags = AV_PKT_FLAG_KEY;
        AVRational time_base = avctx->streams[pkt.stream_index]->time_base;
        pkt.pts = pkt.dts = av_rescale_q(pts, AVRational{ 1, TIMEBASE }, time_base);
        size_t num_stereo_samples = audio.size() / 2;
        pkt.duration = av_rescale_q(num_stereo_samples, AVRational{ 1, OUTPUT_FREQUENCY }, time_base);

        if (av_write_frame(avctx, &pkt) < 0) {
                fprintf(stderr, "av_write_frame() failed\n");
                abort();
        }
}

class VABufferDestroyer {
public:
        VABufferDestroyer(VADisplay dpy, VABufferID buf)
                : dpy(dpy), buf(buf) {}

        ~VABufferDestroyer() {
                VAStatus va_status = vaDestroyBuffer(dpy, buf);
                CHECK_VASTATUS(va_status, "vaDestroyBuffer");
        }

private:
        VADisplay dpy;
        VABufferID buf;
};

namespace {

void push16(uint16_t val, string *str)
{
        str->push_back(val >> 8);
        str->push_back(val & 0xff);
}

void push32(uint32_t val, string *str)
{
        str->push_back(val >> 24);
        str->push_back((val >> 16) & 0xff);
        str->push_back((val >> 8) & 0xff);
        str->push_back(val & 0xff);
}

}  // namespace

void MJPEGEncoder::init_jpeg(unsigned width, unsigned height, const RGBTriplet &white_balance, VectorDestinationManager *dest, jpeg_compress_struct *cinfo, int y_h_samp_factor, int y_v_samp_factor)
{
        jpeg_error_mgr jerr;
        cinfo->err = jpeg_std_error(&jerr);
        jpeg_create_compress(cinfo);

        cinfo->dest = (jpeg_destination_mgr *)dest;

        cinfo->input_components = 3;
        jpeg_set_defaults(cinfo);
        jpeg_set_quality(cinfo, quality, /*force_baseline=*/false);

        cinfo->image_width = width;
        cinfo->image_height = height;
        cinfo->raw_data_in = true;
        jpeg_set_colorspace(cinfo, JCS_YCbCr);
        cinfo->comp_info[0].h_samp_factor = y_h_samp_factor;
        cinfo->comp_info[0].v_samp_factor = y_v_samp_factor;
        cinfo->comp_info[1].h_samp_factor = 1;
        cinfo->comp_info[1].v_samp_factor = 1;
        cinfo->comp_info[2].h_samp_factor = 1;
        cinfo->comp_info[2].v_samp_factor = 1;
        cinfo->CCIR601_sampling = true;  // Seems to be mostly ignored by libjpeg, though.
        jpeg_start_compress(cinfo, true);

        if (fabs(white_balance.r - 1.0f) > 1e-3 ||
            fabs(white_balance.g - 1.0f) > 1e-3 ||
            fabs(white_balance.b - 1.0f) > 1e-3) {
                // Convert from (linear) RGB to XYZ.
                Matrix3d rgb_to_xyz_matrix = movit::ColorspaceConversionEffect::get_xyz_matrix(COLORSPACE_sRGB);
                Vector3d xyz = rgb_to_xyz_matrix * Vector3d(white_balance.r, white_balance.g, white_balance.b);

                // Convert from XYZ to xyz by normalizing.
                xyz /= (xyz[0] + xyz[1] + xyz[2]);

                // Create a very rudimentary EXIF header to hold our white point.
                string exif;

                // Exif header, followed by some padding.
                exif = "Exif";
                push16(0, &exif);

                // TIFF header first:
                exif += "MM";  // Big endian.

                // Magic number.
                push16(42, &exif);

                // Offset of first IFD (relative to the MM, immediately after the header).
                push32(exif.size() - 6 + 4, &exif);

                // Now the actual IFD.

                // One entry.
                push16(1, &exif);

                // WhitePoint tag ID.
                push16(0x13e, &exif);

                // Rational type.
                push16(5, &exif);

                // Two values (x and y; z is implicit due to normalization).
                push32(2, &exif);

                // Offset (relative to the MM, immediately after the last IFD).
                push32(exif.size() - 6 + 8, &exif);

                // No more IFDs.
                push32(0, &exif);

                // The actual values.
                push32(lrintf(xyz[0] * 10000.0f), &exif);
                push32(10000, &exif);
                push32(lrintf(xyz[1] * 10000.0f), &exif);
                push32(10000, &exif);

                jpeg_write_marker(cinfo, JPEG_APP0 + 1, (const JOCTET *)exif.data(), exif.size());
        }

        // This comment marker is private to FFmpeg. It signals limited Y'CbCr range
        // (and nothing else).
        jpeg_write_marker(cinfo, JPEG_COM, (const JOCTET *)"CS=ITU601", strlen("CS=ITU601"));
}

vector<uint8_t> MJPEGEncoder::get_jpeg_header(unsigned width, unsigned height, const RGBTriplet &white_balance, int y_h_samp_factor, int y_v_samp_factor, jpeg_compress_struct *cinfo)
{
        VectorDestinationManager dest;
        init_jpeg(width, height, white_balance, &dest, cinfo, y_h_samp_factor, y_v_samp_factor);

        // Make a dummy black image; there's seemingly no other easy way of
        // making libjpeg output all of its headers.
        assert(y_v_samp_factor <= 2);  // Or we'd need larger JSAMPROW arrays below.
        size_t block_height_y = 8 * y_v_samp_factor;
        size_t block_height_cbcr = 8;

        JSAMPROW yptr[16], cbptr[16], crptr[16];
        JSAMPARRAY data[3] = { yptr, cbptr, crptr };
        memset(tmp_y, 0, 4096);
        memset(tmp_cb, 0, 4096);
        memset(tmp_cr, 0, 4096);
        for (unsigned yy = 0; yy < block_height_y; ++yy) {
                yptr[yy] = tmp_y;
        }
        for (unsigned yy = 0; yy < block_height_cbcr; ++yy) {
                cbptr[yy] = tmp_cb;
                crptr[yy] = tmp_cr;
        }
        for (unsigned y = 0; y < height; y += block_height_y) {
                jpeg_write_raw_data(cinfo, data, block_height_y);
        }
        jpeg_finish_compress(cinfo);

        // We're only interested in the header, not the data after it.
        dest.term_destination();
        for (size_t i = 0; i < dest.dest.size() - 1; ++i) {
                if (dest.dest[i] == 0xff && dest.dest[i + 1] == 0xda) {  // Start of scan (SOS).
                        unsigned len = dest.dest[i + 2] * 256 + dest.dest[i + 3];
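                        // The segment length includes its own two length bytes
                        // but not the two marker bytes, so the header ends at
                        // i + 2 + len.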
                        dest.dest.resize(i + len + 2);
                        break;
                }
        }

        return dest.dest;
}

MJPEGEncoder::VAData MJPEGEncoder::get_va_data_for_parameters(unsigned width, unsigned height, unsigned y_h_samp_factor, unsigned y_v_samp_factor, const RGBTriplet &white_balance)
{
        VAKey key{width, height, y_h_samp_factor, y_v_samp_factor, white_balance};
        if (va_data_for_parameters.count(key)) {
                return va_data_for_parameters[key];
        }

        // Use libjpeg to generate a header and set sane defaults for e.g.
        // quantization tables. Then do the actual encode with VA-API.
        jpeg_compress_struct cinfo;
        vector<uint8_t> jpeg_header = get_jpeg_header(width, height, white_balance, y_h_samp_factor, y_v_samp_factor, &cinfo);

        // Picture parameters.
        VAEncPictureParameterBufferJPEG pic_param;
        memset(&pic_param, 0, sizeof(pic_param));
        pic_param.reconstructed_picture = VA_INVALID_ID;
        pic_param.picture_width = cinfo.image_width;
        pic_param.picture_height = cinfo.image_height;
        for (int component_idx = 0; component_idx < cinfo.num_components; ++component_idx) {
                const jpeg_component_info *comp = &cinfo.comp_info[component_idx];
                pic_param.component_id[component_idx] = comp->component_id;
                pic_param.quantiser_table_selector[component_idx] = comp->quant_tbl_no;
        }
        pic_param.num_components = cinfo.num_components;
        pic_param.num_scan = 1;
        pic_param.sample_bit_depth = 8;
        pic_param.coded_buf = VA_INVALID_ID;  // To be filled out by caller.
        pic_param.pic_flags.bits.huffman = 1;
        pic_param.quality = 50;  // Don't scale the given quantization matrices. (See gen8_mfc_jpeg_fqm_state)

        // Quantization matrices.
        VAQMatrixBufferJPEG q;
        memset(&q, 0, sizeof(q));

        q.load_lum_quantiser_matrix = true;
        q.load_chroma_quantiser_matrix = true;
        for (int quant_tbl_idx = 0; quant_tbl_idx < min(4, NUM_QUANT_TBLS); ++quant_tbl_idx) {
                const JQUANT_TBL *qtbl = cinfo.quant_tbl_ptrs[quant_tbl_idx];
                assert((qtbl == nullptr) == (quant_tbl_idx >= 2));
                if (qtbl == nullptr) continue;

                uint8_t *qmatrix = (quant_tbl_idx == 0) ? q.lum_quantiser_matrix : q.chroma_quantiser_matrix;
                for (int i = 0; i < 64; ++i) {
                        if (qtbl->quantval[i] > 255) {
                                fprintf(stderr, "Baseline JPEG only!\n");
                                abort();
                        }
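                        // quantval[] is in natural order; convert to zigzag.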
                        qmatrix[i] = qtbl->quantval[jpeg_natural_order[i]];
                }
        }

        // Huffman tables (arithmetic is not supported).
        VAHuffmanTableBufferJPEGBaseline huff;
        memset(&huff, 0, sizeof(huff));

        for (int huff_tbl_idx = 0; huff_tbl_idx < min(2, NUM_HUFF_TBLS); ++huff_tbl_idx) {
                const JHUFF_TBL *ac_hufftbl = cinfo.ac_huff_tbl_ptrs[huff_tbl_idx];
                const JHUFF_TBL *dc_hufftbl = cinfo.dc_huff_tbl_ptrs[huff_tbl_idx];
                if (ac_hufftbl == nullptr) {
                        assert(dc_hufftbl == nullptr);
                        huff.load_huffman_table[huff_tbl_idx] = 0;
                } else {
                        assert(dc_hufftbl != nullptr);
                        huff.load_huffman_table[huff_tbl_idx] = 1;

                        for (int i = 0; i < 16; ++i) {
                                huff.huffman_table[huff_tbl_idx].num_dc_codes[i] = dc_hufftbl->bits[i + 1];
                        }
                        for (int i = 0; i < 12; ++i) {
                                huff.huffman_table[huff_tbl_idx].dc_values[i] = dc_hufftbl->huffval[i];
                        }
                        for (int i = 0; i < 16; ++i) {
                                huff.huffman_table[huff_tbl_idx].num_ac_codes[i] = ac_hufftbl->bits[i + 1];
                        }
                        for (int i = 0; i < 162; ++i) {
                                huff.huffman_table[huff_tbl_idx].ac_values[i] = ac_hufftbl->huffval[i];
                        }
                }
        }

        // Slice parameters (metadata about the slice).
        VAEncSliceParameterBufferJPEG parms;
        memset(&parms, 0, sizeof(parms));
        for (int component_idx = 0; component_idx < cinfo.num_components; ++component_idx) {
                const jpeg_component_info *comp = &cinfo.comp_info[component_idx];
                parms.components[component_idx].component_selector = comp->component_id;
                parms.components[component_idx].dc_table_selector = comp->dc_tbl_no;
                parms.components[component_idx].ac_table_selector = comp->ac_tbl_no;
                if (parms.components[component_idx].dc_table_selector > 1 ||
                    parms.components[component_idx].ac_table_selector > 1) {
                        fprintf(stderr, "Uses too many Huffman tables\n");
                        abort();
                }
        }
        parms.num_components = cinfo.num_components;
        parms.restart_interval = cinfo.restart_interval;

        jpeg_destroy_compress(&cinfo);

        VAData ret;
        ret.jpeg_header = move(jpeg_header);
        ret.pic_param = pic_param;
        ret.q = q;
        ret.huff = huff;
        ret.parms = parms;
        va_data_for_parameters[key] = ret;
        return ret;
}

void MJPEGEncoder::encode_jpeg_va(QueuedFrame &&qf)
{
        PBOFrameAllocator::Userdata *userdata = (PBOFrameAllocator::Userdata *)qf.frame->userdata;
        unsigned width = qf.video_format.width;
        unsigned height = qf.video_format.height;

        VAResourcePool::VAResources resources;
        ReleaseVAResources release;
        if (userdata->data_copy_current_src == PBOFrameAllocator::Userdata::FROM_VA_API) {
                assert(is_uyvy(qf.frame));
                resources = move(userdata->va_resources);
                release = move(userdata->va_resources_release);
        } else {
                assert(userdata->data_copy_current_src == PBOFrameAllocator::Userdata::FROM_MALLOC);
                if (is_uyvy(qf.frame)) {
                        resources = va_pool->get_va_resources(width, height, VA_FOURCC_UYVY);
                } else {
                        assert(is_i420(qf.frame));
                        resources = va_pool->get_va_resources(width, height, VA_FOURCC_NV12);
                }
                release = ReleaseVAResources(va_pool.get(), resources);
        }

        int y_h_samp_factor, y_v_samp_factor;
        if (is_uyvy(qf.frame)) {
                // 4:2:2 (sample Y' twice as often horizontally as Cb or Cr, vertical is left alone).
                y_h_samp_factor = 2;
                y_v_samp_factor = 1;
        } else {
                // 4:2:0 (sample Y' twice as often as Cb or Cr, in both directions)
                assert(is_i420(qf.frame));
                y_h_samp_factor = 2;
                y_v_samp_factor = 2;
        }

        VAData va_data = get_va_data_for_parameters(width, height, y_h_samp_factor, y_v_samp_factor, qf.white_balance);
        va_data.pic_param.coded_buf = resources.data_buffer;

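        // Wrap each parameter block in a VA buffer; the VABufferDestroyer RAII
        // helpers free them again when this function returns.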
        VABufferID pic_param_buffer;
        VAStatus va_status = vaCreateBuffer(va_dpy->va_dpy, resources.context, VAEncPictureParameterBufferType, sizeof(va_data.pic_param), 1, &va_data.pic_param, &pic_param_buffer);
        CHECK_VASTATUS(va_status, "vaCreateBuffer");
        VABufferDestroyer destroy_pic_param(va_dpy->va_dpy, pic_param_buffer);

        VABufferID q_buffer;
        va_status = vaCreateBuffer(va_dpy->va_dpy, resources.context, VAQMatrixBufferType, sizeof(va_data.q), 1, &va_data.q, &q_buffer);
        CHECK_VASTATUS(va_status, "vaCreateBuffer");
        VABufferDestroyer destroy_iq(va_dpy->va_dpy, q_buffer);

        VABufferID huff_buffer;
        va_status = vaCreateBuffer(va_dpy->va_dpy, resources.context, VAHuffmanTableBufferType, sizeof(va_data.huff), 1, &va_data.huff, &huff_buffer);
        CHECK_VASTATUS(va_status, "vaCreateBuffer");
        VABufferDestroyer destroy_huff(va_dpy->va_dpy, huff_buffer);

        VABufferID slice_param_buffer;
        va_status = vaCreateBuffer(va_dpy->va_dpy, resources.context, VAEncSliceParameterBufferType, sizeof(va_data.parms), 1, &va_data.parms, &slice_param_buffer);
        CHECK_VASTATUS(va_status, "vaCreateBuffer");
        VABufferDestroyer destroy_slice_param(va_dpy->va_dpy, slice_param_buffer);

        if (userdata->data_copy_current_src == PBOFrameAllocator::Userdata::FROM_VA_API) {
                // The pixel data is already put into the image by the caller.
                va_status = vaUnmapBuffer(va_dpy->va_dpy, resources.image.buf);
                CHECK_VASTATUS(va_status, "vaUnmapBuffer");
        } else {
                assert(userdata->data_copy_current_src == PBOFrameAllocator::Userdata::FROM_MALLOC);

                // Upload the pixel data.
                uint8_t *surface_p = nullptr;
                vaMapBuffer(va_dpy->va_dpy, resources.image.buf, (void **)&surface_p);

                if (is_uyvy(qf.frame)) {
                        size_t field_start_line = qf.video_format.extra_lines_top;  // No interlacing support.
                        size_t field_start = qf.cbcr_offset * 2 + qf.video_format.width * field_start_line * 2;

                        const uint8_t *src = qf.frame->data_copy + field_start;
                        uint8_t *dst = (unsigned char *)surface_p + resources.image.offsets[0];
                        memcpy_with_pitch(dst, src, qf.video_format.width * 2, resources.image.pitches[0], qf.video_format.height);
                } else {
                        assert(is_i420(qf.frame));
                        assert(!qf.frame->interleaved);  // Makes no sense for I420.

                        size_t field_start_line = qf.video_format.extra_lines_top;  // No interlacing support.
                        const uint8_t *y_src = qf.frame->data + qf.video_format.width * field_start_line;
                        const uint8_t *cb_src = y_src + width * height;
                        const uint8_t *cr_src = cb_src + (width / 2) * (height / 2);

                        uint8_t *y_dst = (unsigned char *)surface_p + resources.image.offsets[0];
                        uint8_t *cbcr_dst = (unsigned char *)surface_p + resources.image.offsets[1];

                        memcpy_with_pitch(y_dst, y_src, qf.video_format.width, resources.image.pitches[0], qf.video_format.height);
                        interleave_with_pitch(cbcr_dst, cb_src, cr_src, qf.video_format.width / 2, resources.image.pitches[1], qf.video_format.height / 2);
                }

                va_status = vaUnmapBuffer(va_dpy->va_dpy, resources.image.buf);
                CHECK_VASTATUS(va_status, "vaUnmapBuffer");
        }

        qf.frame->data_copy = nullptr;

        // Seemingly vaPutImage() (which triggers a GPU copy) is much nicer to the
        // CPU than vaDeriveImage() and copying directly into the GPU's buffers.
        // Exactly why is unclear, but it seems to involve L3 cache usage when there
        // are many high-res (1080p+) images in play.
        va_status = vaPutImage(va_dpy->va_dpy, resources.surface, resources.image.image_id, 0, 0, width, height, 0, 0, width, height);
        CHECK_VASTATUS(va_status, "vaPutImage");

        // Finally, stick in the JPEG header.
        VAEncPackedHeaderParameterBuffer header_parm;
        header_parm.type = VAEncPackedHeaderRawData;
        header_parm.bit_length = 8 * va_data.jpeg_header.size();

        VABufferID header_parm_buffer;
        va_status = vaCreateBuffer(va_dpy->va_dpy, resources.context, VAEncPackedHeaderParameterBufferType, sizeof(header_parm), 1, &header_parm, &header_parm_buffer);
        CHECK_VASTATUS(va_status, "vaCreateBuffer");
        VABufferDestroyer destroy_header(va_dpy->va_dpy, header_parm_buffer);

        VABufferID header_data_buffer;
        va_status = vaCreateBuffer(va_dpy->va_dpy, resources.context, VAEncPackedHeaderDataBufferType, va_data.jpeg_header.size(), 1, va_data.jpeg_header.data(), &header_data_buffer);
        CHECK_VASTATUS(va_status, "vaCreateBuffer");
        VABufferDestroyer destroy_header_data(va_dpy->va_dpy, header_data_buffer);

        va_status = vaBeginPicture(va_dpy->va_dpy, resources.context, resources.surface);
        CHECK_VASTATUS(va_status, "vaBeginPicture");
        va_status = vaRenderPicture(va_dpy->va_dpy, resources.context, &pic_param_buffer, 1);
        CHECK_VASTATUS(va_status, "vaRenderPicture(pic_param)");
        va_status = vaRenderPicture(va_dpy->va_dpy, resources.context, &q_buffer, 1);
        CHECK_VASTATUS(va_status, "vaRenderPicture(q)");
        va_status = vaRenderPicture(va_dpy->va_dpy, resources.context, &huff_buffer, 1);
        CHECK_VASTATUS(va_status, "vaRenderPicture(huff)");
        va_status = vaRenderPicture(va_dpy->va_dpy, resources.context, &slice_param_buffer, 1);
        CHECK_VASTATUS(va_status, "vaRenderPicture(slice_param)");
        va_status = vaRenderPicture(va_dpy->va_dpy, resources.context, &header_parm_buffer, 1);
        CHECK_VASTATUS(va_status, "vaRenderPicture(header_parm)");
        va_status = vaRenderPicture(va_dpy->va_dpy, resources.context, &header_data_buffer, 1);
        CHECK_VASTATUS(va_status, "vaRenderPicture(header_data)");
        va_status = vaEndPicture(va_dpy->va_dpy, resources.context);
        CHECK_VASTATUS(va_status, "vaEndPicture");

        qf.resources = move(resources);
        qf.resource_releaser = move(release);

        lock_guard<mutex> lock(mu);
        frames_encoding.push(move(qf));
        any_frames_encoding.notify_all();
}

void MJPEGEncoder::va_receiver_thread_func()
{
        pthread_setname_np(pthread_self(), "MJPEG_Receive");
        for (;;) {
                QueuedFrame qf;
                {
                        unique_lock<mutex> lock(mu);
                        any_frames_encoding.wait(lock, [this] { return !frames_encoding.empty() || should_quit; });
                        if (should_quit) return;
                        qf = move(frames_encoding.front());
                        frames_encoding.pop();
                }

                update_siphon_streams();

                assert(global_flags.card_to_mjpeg_stream_export.count(qf.card_index));  // Or should_encode_mjpeg_for_card() would have returned false.
                int stream_index = global_flags.card_to_mjpeg_stream_export[qf.card_index];

                HTTPD::StreamID multicam_id{ HTTPD::MULTICAM_STREAM, 0 };
                HTTPD::StreamID siphon_id{ HTTPD::SIPHON_STREAM, qf.card_index };
                assert(streams.count(multicam_id));
                assert(streams[multicam_id].avctx != nullptr);

                // Write audio before video, since Futatabi expects it.
                if (qf.audio.size() > 0) {
                        write_audio_packet(streams[multicam_id].avctx.get(), qf.pts, stream_index + global_flags.card_to_mjpeg_stream_export.size(), qf.audio);
                        if (streams.count(siphon_id)) {
                                write_audio_packet(streams[siphon_id].avctx.get(), qf.pts, /*stream_index=*/1, qf.audio);
                        }
                }

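                // vaSyncSurface() blocks until the GPU is done encoding this
                // surface, so the coded buffer is complete before we map it.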
                VAStatus va_status = vaSyncSurface(va_dpy->va_dpy, qf.resources.surface);
                CHECK_VASTATUS(va_status, "vaSyncSurface");

                VACodedBufferSegment *segment;
                va_status = vaMapBuffer(va_dpy->va_dpy, qf.resources.data_buffer, (void **)&segment);
                CHECK_VASTATUS(va_status, "vaMapBuffer");

                const uint8_t *coded_buf = reinterpret_cast<uint8_t *>(segment->buf);
                write_mjpeg_packet(streams[multicam_id].avctx.get(), qf.pts, stream_index, coded_buf, segment->size);
                if (streams.count(siphon_id)) {
                        write_mjpeg_packet(streams[siphon_id].avctx.get(), qf.pts, /*stream_index=*/0, coded_buf, segment->size);
                }

                va_status = vaUnmapBuffer(va_dpy->va_dpy, qf.resources.data_buffer);
                CHECK_VASTATUS(va_status, "vaUnmapBuffer");
        }
}

vector<uint8_t> MJPEGEncoder::encode_jpeg_libjpeg(const QueuedFrame &qf)
{
        unsigned width = qf.video_format.width;
        unsigned height = qf.video_format.height;

        VectorDestinationManager dest;
        jpeg_compress_struct cinfo;

        size_t field_start_line = qf.video_format.extra_lines_top;  // No interlacing support.

        PBOFrameAllocator::Userdata *userdata = (PBOFrameAllocator::Userdata *)qf.frame->userdata;
        if (userdata->pixel_format == PixelFormat_8BitYCbCr) {
                init_jpeg(width, height, qf.white_balance, &dest, &cinfo, /*y_h_samp_factor=*/2, /*y_v_samp_factor=*/1);

                assert(qf.frame->interleaved);
                size_t field_start = qf.cbcr_offset * 2 + qf.video_format.width * field_start_line * 2;

                JSAMPROW yptr[8], cbptr[8], crptr[8];
                JSAMPARRAY data[3] = { yptr, cbptr, crptr };
                for (unsigned y = 0; y < qf.video_format.height; y += 8) {
                        const uint8_t *src;
                        src = qf.frame->data_copy + field_start + y * qf.video_format.width * 2;

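                        // UYVY is U0 Y0 V0 Y1 ...; the first pass splits it into
                        // a Y plane and an interleaved CbCr plane, the second
                        // splits CbCr into separate Cb and Cr planes, eight rows
                        // at a time.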
                        memcpy_interleaved(tmp_cbcr, tmp_y, src, qf.video_format.width * 8 * 2);
                        memcpy_interleaved(tmp_cb, tmp_cr, tmp_cbcr, qf.video_format.width * 8);
                        for (unsigned yy = 0; yy < 8; ++yy) {
                                yptr[yy] = tmp_y + yy * width;
                                cbptr[yy] = tmp_cb + yy * width / 2;
                                crptr[yy] = tmp_cr + yy * width / 2;
                        }
                        jpeg_write_raw_data(&cinfo, data, /*num_lines=*/8);
                }
        } else {
                assert(userdata->pixel_format == PixelFormat_8BitYCbCrPlanar);

                const movit::YCbCrFormat &ycbcr = userdata->ycbcr_format;
                init_jpeg(width, height, qf.white_balance, &dest, &cinfo, ycbcr.chroma_subsampling_x, ycbcr.chroma_subsampling_y);
                assert(ycbcr.chroma_subsampling_y <= 2);  // Or we'd need larger JSAMPROW arrays below.

                size_t field_start_line = qf.video_format.extra_lines_top;  // No interlacing support.
                const uint8_t *y_start = qf.frame->data + qf.video_format.width * field_start_line;
                const uint8_t *cb_start = y_start + width * height;
                const uint8_t *cr_start = cb_start + (width / ycbcr.chroma_subsampling_x) * (height / ycbcr.chroma_subsampling_y);

                size_t block_height_y = 8 * ycbcr.chroma_subsampling_y;
                size_t block_height_cbcr = 8;

                JSAMPROW yptr[16], cbptr[16], crptr[16];
                JSAMPARRAY data[3] = { yptr, cbptr, crptr };
                for (unsigned y = 0; y < qf.video_format.height; y += block_height_y) {
                        for (unsigned yy = 0; yy < block_height_y; ++yy) {
                                yptr[yy] = const_cast<JSAMPROW>(y_start) + (y + yy) * width;
                        }
                        unsigned cbcr_y = y / ycbcr.chroma_subsampling_y;
                        for (unsigned yy = 0; yy < block_height_cbcr; ++yy) {
                                cbptr[yy] = const_cast<JSAMPROW>(cb_start) + (cbcr_y + yy) * width / ycbcr.chroma_subsampling_x;
                                crptr[yy] = const_cast<JSAMPROW>(cr_start) + (cbcr_y + yy) * width / ycbcr.chroma_subsampling_x;
                        }
                        jpeg_write_raw_data(&cinfo, data, block_height_y);
                }
        }
        jpeg_finish_compress(&cinfo);

        return dest.dest;
}

void MJPEGEncoder::add_stream(HTTPD::StreamID stream_id)
{
        AVFormatContextWithCloser avctx;

        // Set up the mux. We don't use the Mux wrapper, because it's geared towards
        // a situation with only one video stream (and possibly one audio stream)
        // with known width/height, and we don't need the extra functionality it provides.
        avctx.reset(avformat_alloc_context());
        avctx->oformat = av_guess_format("nut", nullptr, nullptr);

        uint8_t *buf = (uint8_t *)av_malloc(MUX_BUFFER_SIZE);
        avctx->pb = avio_alloc_context(buf, MUX_BUFFER_SIZE, 1, &ffmpeg_contexts[stream_id], nullptr, nullptr, nullptr);
        avctx->pb->write_data_type = &MJPEGEncoder::write_packet2_thunk;
        avctx->flags = AVFMT_FLAG_CUSTOM_IO;

        if (stream_id.type == HTTPD::MULTICAM_STREAM) {
                for (unsigned card_idx = 0; card_idx < global_flags.card_to_mjpeg_stream_export.size(); ++card_idx) {
                        add_video_stream(avctx.get());
                }
                for (unsigned card_idx = 0; card_idx < global_flags.card_to_mjpeg_stream_export.size(); ++card_idx) {
                        add_audio_stream(avctx.get());
                }
        } else {
                assert(stream_id.type == HTTPD::SIPHON_STREAM);
                add_video_stream(avctx.get());
                add_audio_stream(avctx.get());
        }
        finalize_mux(avctx.get());

        Stream s;
        s.avctx = move(avctx);
        streams[stream_id] = move(s);
}

void MJPEGEncoder::update_siphon_streams()
{
        // Bring the list of streams into sync with what the clients need.
        for (auto it = streams.begin(); it != streams.end(); ) {
                if (it->first.type != HTTPD::SIPHON_STREAM) {
                        ++it;
                        continue;
                }
                if (httpd->get_num_connected_siphon_clients(it->first.index) == 0) {
                        av_free(it->second.avctx->pb->buffer);
                        streams.erase(it++);
                } else {
                        ++it;
                }
        }
        for (unsigned stream_idx = 0; stream_idx < MAX_VIDEO_CARDS; ++stream_idx) {
                HTTPD::StreamID stream_id{ HTTPD::SIPHON_STREAM, stream_idx };
                if (streams.count(stream_id) == 0 && httpd->get_num_connected_siphon_clients(stream_idx) > 0) {
                        add_stream(stream_id);
                }
        }
}

void MJPEGEncoder::create_ffmpeg_context(HTTPD::StreamID stream_id)
{
        ffmpeg_contexts.emplace(stream_id, WritePacket2Context{ this, stream_id });
}