#include "mjpeg_encoder.h"

#include <assert.h>
#include <jpeglib.h>
#include <unistd.h>
#if __SSE2__
#include <immintrin.h>
#endif
#include <list>

extern "C" {
#include <libavformat/avformat.h>
}

#include "defs.h"
#include "shared/ffmpeg_raii.h"
#include "flags.h"
#include "shared/httpd.h"
#include "shared/memcpy_interleaved.h"
#include "shared/metrics.h"
#include "pbo_frame_allocator.h"
#include "shared/timebase.h"
#include "va_display_with_cleanup.h"

#include <movit/colorspace_conversion_effect.h>

#include <va/va.h>
#include <va/va_drm.h>
#include <va/va_x11.h>

using namespace Eigen;
using namespace bmusb;
using namespace movit;
using namespace std;

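// Image formats used when uploading pixel data to VA-API; filled in by try_open_va().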
static VAImageFormat uyvy_format, nv12_format;

extern void memcpy_with_pitch(uint8_t *dst, const uint8_t *src, size_t src_width, size_t dst_pitch, size_t height);

// The inverse of memcpy_interleaved(), with (slow) support for pitch.
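// For example, interleaving the two chroma planes
//
//   src1 = Cb0 Cb1 Cb2 ...      src2 = Cr0 Cr1 Cr2 ...
//
// yields dst = Cb0 Cr0 Cb1 Cr1 Cb2 Cr2 ..., which is the NV12 chroma layout
// that encode_jpeg_va() uploads for planar inputs.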
void interleave_with_pitch(uint8_t *dst, const uint8_t *src1, const uint8_t *src2, size_t src_width, size_t dst_pitch, size_t height)
{
#if __SSE2__
        if (dst_pitch == src_width * 2 && (src_width * height) % 16 == 0) {
                __m128i *dptr = reinterpret_cast<__m128i *>(dst);
                const __m128i *sptr1 = reinterpret_cast<const __m128i *>(src1);
                const __m128i *sptr2 = reinterpret_cast<const __m128i *>(src2);
                for (size_t i = 0; i < src_width * height / 16; ++i) {
                        __m128i data1 = _mm_loadu_si128(sptr1++);
                        __m128i data2 = _mm_loadu_si128(sptr2++);
                        _mm_storeu_si128(dptr++, _mm_unpacklo_epi8(data1, data2));
                        _mm_storeu_si128(dptr++, _mm_unpackhi_epi8(data1, data2));
                }
                return;
        }
#endif

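        // Plain scalar path, used when SSE2 is unavailable or when the fast
        // path's pitch/size requirements do not hold.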
        for (size_t y = 0; y < height; ++y) {
                uint8_t *dptr = dst + y * dst_pitch;
                const uint8_t *sptr1 = src1 + y * src_width;
                const uint8_t *sptr2 = src2 + y * src_width;
                for (size_t x = 0; x < src_width; ++x) {
                        *dptr++ = *sptr1++;
                        *dptr++ = *sptr2++;
                }
        }
}

// From libjpeg (although it's of course identical between implementations).
static const int jpeg_natural_order[DCTSIZE2] = {
         0,  1,  8, 16,  9,  2,  3, 10,
        17, 24, 32, 25, 18, 11,  4,  5,
        12, 19, 26, 33, 40, 48, 41, 34,
        27, 20, 13,  6,  7, 14, 21, 28,
        35, 42, 49, 56, 57, 50, 43, 36,
        29, 22, 15, 23, 30, 37, 44, 51,
        58, 59, 52, 45, 38, 31, 39, 46,
        53, 60, 61, 54, 47, 55, 62, 63,
};

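// A libjpeg destination manager that appends the compressed output to a
// std::vector<uint8_t>. libjpeg only knows about the embedded
// jpeg_destination_mgr, so the thunks below cast ptr->dest back to the full
// object; the static_assert after the struct checks the layout assumption
// behind that cast.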
struct VectorDestinationManager {
        jpeg_destination_mgr pub;
        std::vector<uint8_t> dest;

        VectorDestinationManager()
        {
                pub.init_destination = init_destination_thunk;
                pub.empty_output_buffer = empty_output_buffer_thunk;
                pub.term_destination = term_destination_thunk;
        }

        static void init_destination_thunk(j_compress_ptr ptr)
        {
                ((VectorDestinationManager *)(ptr->dest))->init_destination();
        }

        inline void init_destination()
        {
                make_room(0);
        }

        static boolean empty_output_buffer_thunk(j_compress_ptr ptr)
        {
                return ((VectorDestinationManager *)(ptr->dest))->empty_output_buffer();
        }

        inline bool empty_output_buffer()
        {
                make_room(dest.size());  // Should ignore pub.free_in_buffer!
                return true;
        }

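        // Grow the buffer so that libjpeg always has free space to write into.
        // The second resize() hands the vector's full allocated capacity to
        // libjpeg instead of leaving it unused.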
        inline void make_room(size_t bytes_used)
        {
                dest.resize(bytes_used + 4096);
                dest.resize(dest.capacity());
                pub.next_output_byte = dest.data() + bytes_used;
                pub.free_in_buffer = dest.size() - bytes_used;
        }

        static void term_destination_thunk(j_compress_ptr ptr)
        {
                ((VectorDestinationManager *)(ptr->dest))->term_destination();
        }

        inline void term_destination()
        {
                dest.resize(dest.size() - pub.free_in_buffer);
        }
};
static_assert(std::is_standard_layout<VectorDestinationManager>::value, "");

int MJPEGEncoder::write_packet2_thunk(void *opaque, uint8_t *buf, int buf_size, AVIODataMarkerType type, int64_t time)
{
        WritePacket2Context *ctx = (WritePacket2Context *)opaque;
        return ctx->mjpeg_encoder->write_packet2(ctx->stream_id, buf, buf_size, type, time);
}

int MJPEGEncoder::write_packet2(HTTPD::StreamID stream_id, uint8_t *buf, int buf_size, AVIODataMarkerType type, int64_t time)
{
        string *mux_header = &streams[stream_id].mux_header;
        if (type == AVIO_DATA_MARKER_HEADER) {
                mux_header->append((char *)buf, buf_size);
                httpd->set_header(stream_id, *mux_header);
        } else {
                httpd->add_data(stream_id, (char *)buf, buf_size, /*keyframe=*/true, AV_NOPTS_VALUE, AVRational{ AV_TIME_BASE, 1 });
        }
        return buf_size;
}

namespace {

void add_video_stream(AVFormatContext *avctx)
{
        AVStream *stream = avformat_new_stream(avctx, nullptr);
        if (stream == nullptr) {
                fprintf(stderr, "avformat_new_stream() failed\n");
                abort();
        }

        // FFmpeg is very picky about having audio at 1/48000 timebase,
        // no matter what we write. Even though we'd prefer our usual 1/120000,
        // put the video on the same one, so that we can have locked audio.
        stream->time_base = AVRational{ 1, OUTPUT_FREQUENCY };
        stream->codecpar->codec_type = AVMEDIA_TYPE_VIDEO;
        stream->codecpar->codec_id = AV_CODEC_ID_MJPEG;

        // Used for aspect ratio only. Can change without notice (the mux won't care).
        stream->codecpar->width = global_flags.width;
        stream->codecpar->height = global_flags.height;

        // TODO: We could perhaps use the interpretation for each card here
        // (or at least the command-line flags) instead of the defaults,
        // but what would we do when they change?
        stream->codecpar->color_primaries = AVCOL_PRI_BT709;
        stream->codecpar->color_trc = AVCOL_TRC_IEC61966_2_1;
        stream->codecpar->color_space = AVCOL_SPC_BT709;
        stream->codecpar->color_range = AVCOL_RANGE_MPEG;
        stream->codecpar->chroma_location = AVCHROMA_LOC_LEFT;
        stream->codecpar->field_order = AV_FIELD_PROGRESSIVE;
}

void add_audio_stream(AVFormatContext *avctx)
{
        AVStream *stream = avformat_new_stream(avctx, nullptr);
        if (stream == nullptr) {
                fprintf(stderr, "avformat_new_stream() failed\n");
                abort();
        }
        stream->time_base = AVRational{ 1, OUTPUT_FREQUENCY };
        stream->codecpar->codec_type = AVMEDIA_TYPE_AUDIO;
        stream->codecpar->codec_id = AV_CODEC_ID_PCM_S32LE;
        stream->codecpar->channel_layout = AV_CH_LAYOUT_STEREO;
        stream->codecpar->channels = 2;
        stream->codecpar->sample_rate = OUTPUT_FREQUENCY;
}

void finalize_mux(AVFormatContext *avctx)
{
        AVDictionary *options = nullptr;
        vector<pair<string, string>> opts = MUX_OPTS;
        for (pair<string, string> opt : opts) {
                av_dict_set(&options, opt.first.c_str(), opt.second.c_str(), 0);
        }
        if (avformat_write_header(avctx, &options) < 0) {
                fprintf(stderr, "avformat_write_header() failed\n");
                abort();
        }
}

}  // namespace

MJPEGEncoder::MJPEGEncoder(HTTPD *httpd, const string &va_display)
        : httpd(httpd)
{
        create_ffmpeg_context(HTTPD::StreamID{ HTTPD::MULTICAM_STREAM, 0 });
        for (unsigned stream_idx = 0; stream_idx < MAX_VIDEO_CARDS; ++stream_idx) {
                create_ffmpeg_context(HTTPD::StreamID{ HTTPD::SIPHON_STREAM, stream_idx });
        }

        add_stream(HTTPD::StreamID{ HTTPD::MULTICAM_STREAM, 0 });

        // Initialize VA-API.
        string error;
        va_dpy = try_open_va(va_display, &error, &config_id_422, &config_id_420);
        if (va_dpy == nullptr) {
                fprintf(stderr, "Could not initialize VA-API for MJPEG encoding: %s. JPEGs will be encoded in software if needed.\n", error.c_str());
        }

        encoder_thread = thread(&MJPEGEncoder::encoder_thread_func, this);
        if (va_dpy != nullptr) {
                va_receiver_thread = thread(&MJPEGEncoder::va_receiver_thread_func, this);
        }

        global_metrics.add("mjpeg_frames", {{ "status", "dropped" }, { "reason", "zero_size" }}, &metric_mjpeg_frames_zero_size_dropped);
        global_metrics.add("mjpeg_frames", {{ "status", "dropped" }, { "reason", "interlaced" }}, &metric_mjpeg_frames_interlaced_dropped);
        global_metrics.add("mjpeg_frames", {{ "status", "dropped" }, { "reason", "unsupported_pixel_format" }}, &metric_mjpeg_frames_unsupported_pixel_format_dropped);
        global_metrics.add("mjpeg_frames", {{ "status", "dropped" }, { "reason", "oversized" }}, &metric_mjpeg_frames_oversized_dropped);
        global_metrics.add("mjpeg_frames", {{ "status", "dropped" }, { "reason", "overrun" }}, &metric_mjpeg_overrun_dropped);
        global_metrics.add("mjpeg_frames", {{ "status", "submitted" }}, &metric_mjpeg_overrun_submitted);

        running = true;
}

MJPEGEncoder::~MJPEGEncoder()
{
        for (auto &id_and_stream : streams) {
                av_free(id_and_stream.second.avctx->pb->buffer);
        }

        global_metrics.remove("mjpeg_frames", {{ "status", "dropped" }, { "reason", "zero_size" }});
        global_metrics.remove("mjpeg_frames", {{ "status", "dropped" }, { "reason", "interlaced" }});
        global_metrics.remove("mjpeg_frames", {{ "status", "dropped" }, { "reason", "unsupported_pixel_format" }});
        global_metrics.remove("mjpeg_frames", {{ "status", "dropped" }, { "reason", "oversized" }});
        global_metrics.remove("mjpeg_frames", {{ "status", "dropped" }, { "reason", "overrun" }});
        global_metrics.remove("mjpeg_frames", {{ "status", "submitted" }});
}

void MJPEGEncoder::stop()
{
        if (!running) {
                return;
        }
        running = false;
        should_quit = true;
        any_frames_to_be_encoded.notify_all();
        any_frames_encoding.notify_all();
        encoder_thread.join();
        if (va_dpy != nullptr) {
                va_receiver_thread.join();
        }
}

unique_ptr<VADisplayWithCleanup> MJPEGEncoder::try_open_va(const string &va_display, string *error, VAConfigID *config_id_422, VAConfigID *config_id_420)
{
        unique_ptr<VADisplayWithCleanup> va_dpy = va_open_display(va_display);
        if (va_dpy == nullptr) {
                if (error) *error = "Opening VA display failed";
                return nullptr;
        }
        int major_ver, minor_ver;
        VAStatus va_status = vaInitialize(va_dpy->va_dpy, &major_ver, &minor_ver);
        if (va_status != VA_STATUS_SUCCESS) {
                char buf[256];
                snprintf(buf, sizeof(buf), "vaInitialize() failed with status %d\n", va_status);
                if (error != nullptr) *error = buf;
                return nullptr;
        }

        {
                VAConfigAttrib attr = { VAConfigAttribRTFormat, VA_RT_FORMAT_YUV422 };
                va_status = vaCreateConfig(va_dpy->va_dpy, VAProfileJPEGBaseline, VAEntrypointEncPicture,
                        &attr, 1, config_id_422);
                if (va_status == VA_STATUS_ERROR_UNSUPPORTED_ENTRYPOINT) {
                        if (error != nullptr) *error = "No 4:2:2 hardware support";
                        return nullptr;
                } else if (va_status != VA_STATUS_SUCCESS) {
                        char buf[256];
                        snprintf(buf, sizeof(buf), "vaCreateConfig() for 4:2:2 failed with status %d\n", va_status);
                        if (error != nullptr) *error = buf;
                        return nullptr;
                }
        }
        {
                VAConfigAttrib attr = { VAConfigAttribRTFormat, VA_RT_FORMAT_YUV420 };
                va_status = vaCreateConfig(va_dpy->va_dpy, VAProfileJPEGBaseline, VAEntrypointEncPicture,
                        &attr, 1, config_id_420);
                if (va_status == VA_STATUS_ERROR_UNSUPPORTED_ENTRYPOINT) {
                        if (error != nullptr) *error = "No 4:2:0 hardware support";
                        return nullptr;
                } else if (va_status != VA_STATUS_SUCCESS) {
                        char buf[256];
                        snprintf(buf, sizeof(buf), "vaCreateConfig() for 4:2:0 failed with status %d\n", va_status);
                        if (error != nullptr) *error = buf;
                        return nullptr;
                }
        }

        // TODO: Unify with the code in Futatabi.
        int num_formats = vaMaxNumImageFormats(va_dpy->va_dpy);
        assert(num_formats > 0);

        unique_ptr<VAImageFormat[]> formats(new VAImageFormat[num_formats]);
        va_status = vaQueryImageFormats(va_dpy->va_dpy, formats.get(), &num_formats);
        if (va_status != VA_STATUS_SUCCESS) {
                char buf[256];
                snprintf(buf, sizeof(buf), "vaQueryImageFormats() failed with status %d\n", va_status);
                if (error != nullptr) *error = buf;
                return nullptr;
        }

        bool uyvy_found = false, nv12_found = false;
        for (int i = 0; i < num_formats; ++i) {
                if (formats[i].fourcc == VA_FOURCC_UYVY) {
                        memcpy(&uyvy_format, &formats[i], sizeof(VAImageFormat));
                        uyvy_found = true;
                }
                if (formats[i].fourcc == VA_FOURCC_NV12) {
                        memcpy(&nv12_format, &formats[i], sizeof(VAImageFormat));
                        nv12_found = true;
                }
        }
        if (!uyvy_found) {
                if (error != nullptr) *error = "UYVY format not found";
                return nullptr;
        }
        if (!nv12_found) {
                if (error != nullptr) *error = "NV12 format not found";
                return nullptr;
        }

        return va_dpy;
}

namespace {

bool is_uyvy(RefCountedFrame frame)
{
        PBOFrameAllocator::Userdata *userdata = (PBOFrameAllocator::Userdata *)frame->userdata;
        return userdata->pixel_format == PixelFormat_8BitYCbCr && frame->interleaved;
}

bool is_i420(RefCountedFrame frame)
{
        PBOFrameAllocator::Userdata *userdata = (PBOFrameAllocator::Userdata *)frame->userdata;
        return userdata->pixel_format == PixelFormat_8BitYCbCrPlanar &&
                userdata->ycbcr_format.chroma_subsampling_x == 2 &&
                userdata->ycbcr_format.chroma_subsampling_y == 2;
}

}  // namespace

void MJPEGEncoder::upload_frame(int64_t pts, unsigned card_index, RefCountedFrame frame, const bmusb::VideoFormat &video_format, size_t y_offset, size_t cbcr_offset, vector<int32_t> audio, const RGBTriplet &white_balance)
{
        if (video_format.width == 0 || video_format.height == 0) {
                ++metric_mjpeg_frames_zero_size_dropped;
                return;
        }
        if (video_format.interlaced) {
                fprintf(stderr, "Card %u: Ignoring JPEG encoding for interlaced frame\n", card_index);
                ++metric_mjpeg_frames_interlaced_dropped;
                return;
        }
        if (!is_uyvy(frame) && !is_i420(frame)) {
                fprintf(stderr, "Card %u: Ignoring JPEG encoding for unsupported pixel format\n", card_index);
                ++metric_mjpeg_frames_unsupported_pixel_format_dropped;
                return;
        }
        if (video_format.width > 4096 || video_format.height > 4096) {
                fprintf(stderr, "Card %u: Ignoring JPEG encoding for oversized frame\n", card_index);
                ++metric_mjpeg_frames_oversized_dropped;
                return;
        }

        lock_guard<mutex> lock(mu);
        if (frames_to_be_encoded.size() + frames_encoding.size() > 50) {
                fprintf(stderr, "WARNING: MJPEG encoding doesn't keep up, discarding frame.\n");
                ++metric_mjpeg_overrun_dropped;
                return;
        }
        ++metric_mjpeg_overrun_submitted;
        frames_to_be_encoded.push(QueuedFrame{ pts, card_index, frame, video_format, y_offset, cbcr_offset, move(audio), white_balance });
        any_frames_to_be_encoded.notify_all();
}

bool MJPEGEncoder::should_encode_mjpeg_for_card(unsigned card_index)
{
        // Only bother doing MJPEG encoding if there are any connected clients
        // that want the stream.
        if (httpd->get_num_connected_multicam_clients() == 0 &&
            httpd->get_num_connected_siphon_clients(card_index) == 0) {
                return false;
        }

        auto it = global_flags.card_to_mjpeg_stream_export.find(card_index);
        return (it != global_flags.card_to_mjpeg_stream_export.end());
}

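// Dequeues frames queued by upload_frame() and either submits them to VA-API
// (asynchronously; the result is picked up in va_receiver_thread_func())
// or, lacking hardware support, encodes them synchronously with libjpeg.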
void MJPEGEncoder::encoder_thread_func()
{
        pthread_setname_np(pthread_self(), "MJPEG_Encode");
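        // Scratch buffers for deinterleaving UYVY rows in encode_jpeg_libjpeg()
        // (and for the dummy rows in get_jpeg_header()); sized for 8-row strips
        // at up to 4096 pixels wide, the cap enforced in upload_frame().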
        posix_memalign((void **)&tmp_y, 4096, 4096 * 8);
        posix_memalign((void **)&tmp_cbcr, 4096, 4096 * 8);
        posix_memalign((void **)&tmp_cb, 4096, 4096 * 8);
        posix_memalign((void **)&tmp_cr, 4096, 4096 * 8);

        for (;;) {
                QueuedFrame qf;
                {
                        unique_lock<mutex> lock(mu);
                        any_frames_to_be_encoded.wait(lock, [this] { return !frames_to_be_encoded.empty() || should_quit; });
                        if (should_quit) break;
                        qf = move(frames_to_be_encoded.front());
                        frames_to_be_encoded.pop();
                }

                assert(global_flags.card_to_mjpeg_stream_export.count(qf.card_index));  // Or should_encode_mjpeg_for_card() would have returned false.
                int stream_index = global_flags.card_to_mjpeg_stream_export[qf.card_index];

                if (va_dpy != nullptr) {
                        // Will call back in the receiver thread.
                        encode_jpeg_va(move(qf));
                } else {
                        update_siphon_streams();

                        HTTPD::StreamID multicam_id{ HTTPD::MULTICAM_STREAM, 0 };
                        HTTPD::StreamID siphon_id{ HTTPD::SIPHON_STREAM, qf.card_index };
                        assert(streams.count(multicam_id));

                        // Write audio before video, since Futatabi expects it.
                        if (qf.audio.size() > 0) {
                                write_audio_packet(streams[multicam_id].avctx.get(), qf.pts, stream_index + global_flags.card_to_mjpeg_stream_export.size(), qf.audio);
                                if (streams.count(siphon_id)) {
                                        write_audio_packet(streams[siphon_id].avctx.get(), qf.pts, /*stream_index=*/1, qf.audio);
                                }
                        }

                        // Encode synchronously, in the same thread.
                        vector<uint8_t> jpeg = encode_jpeg_libjpeg(qf);
                        write_mjpeg_packet(streams[multicam_id].avctx.get(), qf.pts, stream_index, jpeg.data(), jpeg.size());
                        if (streams.count(siphon_id)) {
                                write_mjpeg_packet(streams[siphon_id].avctx.get(), qf.pts, /*stream_index=*/0, jpeg.data(), jpeg.size());
                        }
                }
        }

        free(tmp_y);
        free(tmp_cbcr);
        free(tmp_cb);
        free(tmp_cr);
}

void MJPEGEncoder::write_mjpeg_packet(AVFormatContext *avctx, int64_t pts, unsigned stream_index, const uint8_t *jpeg, size_t jpeg_size)
{
        AVPacket pkt;
        memset(&pkt, 0, sizeof(pkt));
        pkt.buf = nullptr;
        pkt.data = const_cast<uint8_t *>(jpeg);
        pkt.size = jpeg_size;
        pkt.stream_index = stream_index;
        pkt.flags = AV_PKT_FLAG_KEY;
        AVRational time_base = avctx->streams[pkt.stream_index]->time_base;
        pkt.pts = pkt.dts = av_rescale_q(pts, AVRational{ 1, TIMEBASE }, time_base);
        pkt.duration = 0;

        if (av_write_frame(avctx, &pkt) < 0) {
                fprintf(stderr, "av_write_frame() failed\n");
                abort();
        }
}

void MJPEGEncoder::write_audio_packet(AVFormatContext *avctx, int64_t pts, unsigned stream_index, const vector<int32_t> &audio)
{
        AVPacket pkt;
        memset(&pkt, 0, sizeof(pkt));
        pkt.buf = nullptr;
        pkt.data = reinterpret_cast<uint8_t *>(const_cast<int32_t *>(&audio[0]));
        pkt.size = audio.size() * sizeof(audio[0]);
        pkt.stream_index = stream_index;
        pkt.flags = AV_PKT_FLAG_KEY;
        AVRational time_base = avctx->streams[pkt.stream_index]->time_base;
        pkt.pts = pkt.dts = av_rescale_q(pts, AVRational{ 1, TIMEBASE }, time_base);
        size_t num_stereo_samples = audio.size() / 2;
        pkt.duration = av_rescale_q(num_stereo_samples, AVRational{ 1, OUTPUT_FREQUENCY }, time_base);

        if (av_write_frame(avctx, &pkt) < 0) {
                fprintf(stderr, "av_write_frame() failed\n");
                abort();
        }
}

class VABufferDestroyer {
public:
        VABufferDestroyer(VADisplay dpy, VABufferID buf)
                : dpy(dpy), buf(buf) {}

        ~VABufferDestroyer() {
                VAStatus va_status = vaDestroyBuffer(dpy, buf);
                CHECK_VASTATUS(va_status, "vaDestroyBuffer");
        }

private:
        VADisplay dpy;
        VABufferID buf;
};

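// Returns a surface/context/coded-buffer/image set for the given dimensions
// and pixel format, preferring a matching entry from the freelist. VA-API
// resource creation is not cheap, so released sets are recycled through
// release_va_resources() below (the freelist is capped at 50 entries).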
MJPEGEncoder::VAResources MJPEGEncoder::get_va_resources(unsigned width, unsigned height, uint32_t fourcc)
{
        {
                lock_guard<mutex> lock(va_resources_mutex);
                for (auto it = va_resources_freelist.begin(); it != va_resources_freelist.end(); ++it) {
                        if (it->width == width && it->height == height && it->fourcc == fourcc) {
                                VAResources ret = *it;
                                va_resources_freelist.erase(it);
                                return ret;
                        }
                }
        }

        VAResources ret;

        ret.width = width;
        ret.height = height;
        ret.fourcc = fourcc;

        VASurfaceAttrib attrib;
        attrib.flags = VA_SURFACE_ATTRIB_SETTABLE;
        attrib.type = VASurfaceAttribPixelFormat;
        attrib.value.type = VAGenericValueTypeInteger;
        attrib.value.value.i = fourcc;

        VAStatus va_status;
        VAConfigID config_id;
        if (fourcc == VA_FOURCC_UYVY) {
                va_status = vaCreateSurfaces(va_dpy->va_dpy, VA_RT_FORMAT_YUV422, width, height, &ret.surface, 1, &attrib, 1);
                config_id = config_id_422;
        } else {
                assert(fourcc == VA_FOURCC_NV12);
                va_status = vaCreateSurfaces(va_dpy->va_dpy, VA_RT_FORMAT_YUV420, width, height, &ret.surface, 1, &attrib, 1);
                config_id = config_id_420;
        }
        }
        CHECK_VASTATUS(va_status, "vaCreateSurfaces");

        va_status = vaCreateContext(va_dpy->va_dpy, config_id, width, height, 0, &ret.surface, 1, &ret.context);
        CHECK_VASTATUS(va_status, "vaCreateContext");

        va_status = vaCreateBuffer(va_dpy->va_dpy, ret.context, VAEncCodedBufferType, width * height * 3 + 8192, 1, nullptr, &ret.data_buffer);
        CHECK_VASTATUS(va_status, "vaCreateBuffer");

        if (fourcc == VA_FOURCC_UYVY) {
                va_status = vaCreateImage(va_dpy->va_dpy, &uyvy_format, width, height, &ret.image);
                CHECK_VASTATUS(va_status, "vaCreateImage");
        } else {
                assert(fourcc == VA_FOURCC_NV12);
                va_status = vaCreateImage(va_dpy->va_dpy, &nv12_format, width, height, &ret.image);
                CHECK_VASTATUS(va_status, "vaCreateImage");
        }

        return ret;
}

void MJPEGEncoder::release_va_resources(MJPEGEncoder::VAResources resources)
{
        lock_guard<mutex> lock(va_resources_mutex);
        if (va_resources_freelist.size() > 50) {
                auto it = va_resources_freelist.end();
                --it;

                VAStatus va_status = vaDestroyBuffer(va_dpy->va_dpy, it->data_buffer);
                CHECK_VASTATUS(va_status, "vaDestroyBuffer");

                va_status = vaDestroyContext(va_dpy->va_dpy, it->context);
                CHECK_VASTATUS(va_status, "vaDestroyContext");

                va_status = vaDestroySurfaces(va_dpy->va_dpy, &it->surface, 1);
                CHECK_VASTATUS(va_status, "vaDestroySurfaces");

                va_status = vaDestroyImage(va_dpy->va_dpy, it->image.image_id);
                CHECK_VASTATUS(va_status, "vaDestroyImage");

                va_resources_freelist.erase(it);
        }

        va_resources_freelist.push_front(resources);
}

namespace {

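// Big-endian byte pushers for the EXIF/TIFF structures built in init_jpeg().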
void push16(uint16_t val, string *str)
{
        str->push_back(val >> 8);
        str->push_back(val & 0xff);
}

void push32(uint32_t val, string *str)
{
        str->push_back(val >> 24);
        str->push_back((val >> 16) & 0xff);
        str->push_back((val >> 8) & 0xff);
        str->push_back(val & 0xff);
}

}  // namespace

void MJPEGEncoder::init_jpeg(unsigned width, unsigned height, const RGBTriplet &white_balance, VectorDestinationManager *dest, jpeg_compress_struct *cinfo, int y_h_samp_factor, int y_v_samp_factor)
{
        jpeg_error_mgr jerr;
        cinfo->err = jpeg_std_error(&jerr);
        jpeg_create_compress(cinfo);

        cinfo->dest = (jpeg_destination_mgr *)dest;

        cinfo->input_components = 3;
        jpeg_set_defaults(cinfo);
        jpeg_set_quality(cinfo, quality, /*force_baseline=*/false);

        cinfo->image_width = width;
        cinfo->image_height = height;
        cinfo->raw_data_in = true;
        jpeg_set_colorspace(cinfo, JCS_YCbCr);
        cinfo->comp_info[0].h_samp_factor = y_h_samp_factor;
        cinfo->comp_info[0].v_samp_factor = y_v_samp_factor;
        cinfo->comp_info[1].h_samp_factor = 1;
        cinfo->comp_info[1].v_samp_factor = 1;
        cinfo->comp_info[2].h_samp_factor = 1;
        cinfo->comp_info[2].v_samp_factor = 1;
        cinfo->CCIR601_sampling = true;  // Seems to be mostly ignored by libjpeg, though.
        jpeg_start_compress(cinfo, true);

        if (fabs(white_balance.r - 1.0f) > 1e-3 ||
            fabs(white_balance.g - 1.0f) > 1e-3 ||
            fabs(white_balance.b - 1.0f) > 1e-3) {
                // Convert from (linear) RGB to XYZ.
                Matrix3d rgb_to_xyz_matrix = movit::ColorspaceConversionEffect::get_xyz_matrix(COLORSPACE_sRGB);
                Vector3d xyz = rgb_to_xyz_matrix * Vector3d(white_balance.r, white_balance.g, white_balance.b);

                // Convert from XYZ to xyz by normalizing.
                xyz /= (xyz[0] + xyz[1] + xyz[2]);

                // Create a very rudimentary EXIF header to hold our white point.
                string exif;

                // Exif header, followed by some padding.
                exif = "Exif";
                push16(0, &exif);

                // TIFF header first:
                exif += "MM";  // Big endian.

                // Magic number.
                push16(42, &exif);

                // Offset of first IFD (relative to the MM, immediately after the header).
                push32(exif.size() - 6 + 4, &exif);

                // Now the actual IFD.

                // One entry.
                push16(1, &exif);

                // WhitePoint tag ID.
                push16(0x13e, &exif);

                // Rational type.
                push16(5, &exif);

                // Two values (x and y; z is implicit due to normalization).
                push32(2, &exif);

                // Offset (relative to the MM, immediately after the last IFD).
                push32(exif.size() - 6 + 8, &exif);

                // No more IFDs.
                push32(0, &exif);

                // The actual values.
                push32(lrintf(xyz[0] * 10000.0f), &exif);
                push32(10000, &exif);
                push32(lrintf(xyz[1] * 10000.0f), &exif);
                push32(10000, &exif);

                jpeg_write_marker(cinfo, JPEG_APP0 + 1, (const JOCTET *)exif.data(), exif.size());
        }

        // This comment marker is private to FFmpeg. It signals limited Y'CbCr range
        // (and nothing else).
        jpeg_write_marker(cinfo, JPEG_COM, (const JOCTET *)"CS=ITU601", strlen("CS=ITU601"));
}

vector<uint8_t> MJPEGEncoder::get_jpeg_header(unsigned width, unsigned height, const RGBTriplet &white_balance, int y_h_samp_factor, int y_v_samp_factor, jpeg_compress_struct *cinfo)
{
        VectorDestinationManager dest;
        init_jpeg(width, height, white_balance, &dest, cinfo, y_h_samp_factor, y_v_samp_factor);

        // Make a dummy black image; there's seemingly no other easy way of
        // making libjpeg output all of its headers.
        assert(y_v_samp_factor <= 2);  // Or we'd need larger JSAMPROW arrays below.
        size_t block_height_y = 8 * y_v_samp_factor;
        size_t block_height_cbcr = 8;

        JSAMPROW yptr[16], cbptr[16], crptr[16];
        JSAMPARRAY data[3] = { yptr, cbptr, crptr };
        memset(tmp_y, 0, 4096);
        memset(tmp_cb, 0, 4096);
        memset(tmp_cr, 0, 4096);
        for (unsigned yy = 0; yy < block_height_y; ++yy) {
                yptr[yy] = tmp_y;
        }
        for (unsigned yy = 0; yy < block_height_cbcr; ++yy) {
                cbptr[yy] = tmp_cb;
                crptr[yy] = tmp_cr;
        }
        for (unsigned y = 0; y < height; y += block_height_y) {
                jpeg_write_raw_data(cinfo, data, block_height_y);
        }
        jpeg_finish_compress(cinfo);

        // We're only interested in the header, not the data after it.
        dest.term_destination();
        for (size_t i = 0; i < dest.dest.size() - 1; ++i) {
                if (dest.dest[i] == 0xff && dest.dest[i + 1] == 0xda) {  // Start of scan (SOS).
                        unsigned len = dest.dest[i + 2] * 256 + dest.dest[i + 3];
                        dest.dest.resize(i + len + 2);
                        break;
                }
        }

        return dest.dest;
}

MJPEGEncoder::VAData MJPEGEncoder::get_va_data_for_parameters(unsigned width, unsigned height, unsigned y_h_samp_factor, unsigned y_v_samp_factor, const RGBTriplet &white_balance)
{
        VAKey key{width, height, y_h_samp_factor, y_v_samp_factor, white_balance};
        if (va_data_for_parameters.count(key)) {
                return va_data_for_parameters[key];
        }

        // Use libjpeg to generate a header and set sane defaults for e.g.
        // quantization tables. Then do the actual encode with VA-API.
        jpeg_compress_struct cinfo;
        vector<uint8_t> jpeg_header = get_jpeg_header(width, height, white_balance, y_h_samp_factor, y_v_samp_factor, &cinfo);

        // Picture parameters.
        VAEncPictureParameterBufferJPEG pic_param;
        memset(&pic_param, 0, sizeof(pic_param));
        pic_param.reconstructed_picture = VA_INVALID_ID;
        pic_param.picture_width = cinfo.image_width;
        pic_param.picture_height = cinfo.image_height;
        for (int component_idx = 0; component_idx < cinfo.num_components; ++component_idx) {
                const jpeg_component_info *comp = &cinfo.comp_info[component_idx];
                pic_param.component_id[component_idx] = comp->component_id;
                pic_param.quantiser_table_selector[component_idx] = comp->quant_tbl_no;
        }
        pic_param.num_components = cinfo.num_components;
        pic_param.num_scan = 1;
        pic_param.sample_bit_depth = 8;
        pic_param.coded_buf = VA_INVALID_ID;  // To be filled out by caller.
        pic_param.pic_flags.bits.huffman = 1;
        pic_param.quality = 50;  // Don't scale the given quantization matrices. (See gen8_mfc_jpeg_fqm_state)

        // Quantization matrices.
        VAQMatrixBufferJPEG q;
        memset(&q, 0, sizeof(q));

        q.load_lum_quantiser_matrix = true;
        q.load_chroma_quantiser_matrix = true;
        for (int quant_tbl_idx = 0; quant_tbl_idx < min(4, NUM_QUANT_TBLS); ++quant_tbl_idx) {
                const JQUANT_TBL *qtbl = cinfo.quant_tbl_ptrs[quant_tbl_idx];
                assert((qtbl == nullptr) == (quant_tbl_idx >= 2));
                if (qtbl == nullptr) continue;

                uint8_t *qmatrix = (quant_tbl_idx == 0) ? q.lum_quantiser_matrix : q.chroma_quantiser_matrix;
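                // libjpeg keeps its quantization tables in natural (raster)
                // order, while VA-API expects them in zigzag order, so remap
                // through jpeg_natural_order.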
                for (int i = 0; i < 64; ++i) {
                        if (qtbl->quantval[i] > 255) {
                                fprintf(stderr, "Baseline JPEG only!\n");
                                abort();
                        }
                        qmatrix[i] = qtbl->quantval[jpeg_natural_order[i]];
                }
        }

        // Huffman tables (arithmetic is not supported).
        VAHuffmanTableBufferJPEGBaseline huff;
        memset(&huff, 0, sizeof(huff));

        for (int huff_tbl_idx = 0; huff_tbl_idx < min(2, NUM_HUFF_TBLS); ++huff_tbl_idx) {
                const JHUFF_TBL *ac_hufftbl = cinfo.ac_huff_tbl_ptrs[huff_tbl_idx];
                const JHUFF_TBL *dc_hufftbl = cinfo.dc_huff_tbl_ptrs[huff_tbl_idx];
                if (ac_hufftbl == nullptr) {
                        assert(dc_hufftbl == nullptr);
                        huff.load_huffman_table[huff_tbl_idx] = 0;
                } else {
                        assert(dc_hufftbl != nullptr);
                        huff.load_huffman_table[huff_tbl_idx] = 1;

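                        // Note that libjpeg's JHUFF_TBL::bits[] is 1-indexed
                        // (bits[0] is unused), hence the i + 1 below.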
                        for (int i = 0; i < 16; ++i) {
                                huff.huffman_table[huff_tbl_idx].num_dc_codes[i] = dc_hufftbl->bits[i + 1];
                        }
                        for (int i = 0; i < 12; ++i) {
                                huff.huffman_table[huff_tbl_idx].dc_values[i] = dc_hufftbl->huffval[i];
                        }
                        for (int i = 0; i < 16; ++i) {
                                huff.huffman_table[huff_tbl_idx].num_ac_codes[i] = ac_hufftbl->bits[i + 1];
                        }
                        for (int i = 0; i < 162; ++i) {
                                huff.huffman_table[huff_tbl_idx].ac_values[i] = ac_hufftbl->huffval[i];
                        }
                }
        }

        // Slice parameters (metadata about the slice).
        VAEncSliceParameterBufferJPEG parms;
        memset(&parms, 0, sizeof(parms));
        for (int component_idx = 0; component_idx < cinfo.num_components; ++component_idx) {
                const jpeg_component_info *comp = &cinfo.comp_info[component_idx];
                parms.components[component_idx].component_selector = comp->component_id;
                parms.components[component_idx].dc_table_selector = comp->dc_tbl_no;
                parms.components[component_idx].ac_table_selector = comp->ac_tbl_no;
                if (parms.components[component_idx].dc_table_selector > 1 ||
                    parms.components[component_idx].ac_table_selector > 1) {
                        fprintf(stderr, "Uses too many Huffman tables\n");
                        abort();
                }
        }
        parms.num_components = cinfo.num_components;
        parms.restart_interval = cinfo.restart_interval;

        jpeg_destroy_compress(&cinfo);

        VAData ret;
        ret.jpeg_header = move(jpeg_header);
        ret.pic_param = pic_param;
        ret.q = q;
        ret.huff = huff;
        ret.parms = parms;
        va_data_for_parameters[key] = ret;
        return ret;
}

void MJPEGEncoder::encode_jpeg_va(QueuedFrame &&qf)
{
        PBOFrameAllocator::Userdata *userdata = (PBOFrameAllocator::Userdata *)qf.frame->userdata;
        unsigned width = qf.video_format.width;
        unsigned height = qf.video_format.height;

        VAResources resources;
        ReleaseVAResources release;
        if (userdata->data_copy_current_src == PBOFrameAllocator::Userdata::FROM_VA_API) {
                assert(is_uyvy(qf.frame));
                resources = move(userdata->va_resources);
                release = move(userdata->va_resources_release);
        } else {
                assert(userdata->data_copy_current_src == PBOFrameAllocator::Userdata::FROM_MALLOC);
                if (is_uyvy(qf.frame)) {
                        resources = get_va_resources(width, height, VA_FOURCC_UYVY);
                } else {
                        assert(is_i420(qf.frame));
                        // We'd prefer VA_FOURCC_I420, but it's not supported by Intel's driver.
                        resources = get_va_resources(width, height, VA_FOURCC_NV12);
                }
                release = ReleaseVAResources(this, resources);
        }

        int y_h_samp_factor, y_v_samp_factor;
        if (is_uyvy(qf.frame)) {
                // 4:2:2 (sample Y' twice as often horizontally as Cb or Cr; vertical is the same).
                y_h_samp_factor = 2;
                y_v_samp_factor = 1;
        } else {
                // 4:2:0 (sample Y' twice as often as Cb or Cr, in both directions)
                assert(is_i420(qf.frame));
                y_h_samp_factor = 2;
                y_v_samp_factor = 2;
        }

        VAData va_data = get_va_data_for_parameters(width, height, y_h_samp_factor, y_v_samp_factor, qf.white_balance);
        va_data.pic_param.coded_buf = resources.data_buffer;

        VABufferID pic_param_buffer;
        VAStatus va_status = vaCreateBuffer(va_dpy->va_dpy, resources.context, VAEncPictureParameterBufferType, sizeof(va_data.pic_param), 1, &va_data.pic_param, &pic_param_buffer);
        CHECK_VASTATUS(va_status, "vaCreateBuffer");
        VABufferDestroyer destroy_pic_param(va_dpy->va_dpy, pic_param_buffer);

        VABufferID q_buffer;
        va_status = vaCreateBuffer(va_dpy->va_dpy, resources.context, VAQMatrixBufferType, sizeof(va_data.q), 1, &va_data.q, &q_buffer);
        CHECK_VASTATUS(va_status, "vaCreateBuffer");
        VABufferDestroyer destroy_iq(va_dpy->va_dpy, q_buffer);

        VABufferID huff_buffer;
        va_status = vaCreateBuffer(va_dpy->va_dpy, resources.context, VAHuffmanTableBufferType, sizeof(va_data.huff), 1, &va_data.huff, &huff_buffer);
        CHECK_VASTATUS(va_status, "vaCreateBuffer");
        VABufferDestroyer destroy_huff(va_dpy->va_dpy, huff_buffer);

        VABufferID slice_param_buffer;
        va_status = vaCreateBuffer(va_dpy->va_dpy, resources.context, VAEncSliceParameterBufferType, sizeof(va_data.parms), 1, &va_data.parms, &slice_param_buffer);
        CHECK_VASTATUS(va_status, "vaCreateBuffer");
        VABufferDestroyer destroy_slice_param(va_dpy->va_dpy, slice_param_buffer);

        if (userdata->data_copy_current_src == PBOFrameAllocator::Userdata::FROM_VA_API) {
                // The pixel data is already put into the image by the caller.
                va_status = vaUnmapBuffer(va_dpy->va_dpy, resources.image.buf);
                CHECK_VASTATUS(va_status, "vaUnmapBuffer");
        } else {
                assert(userdata->data_copy_current_src == PBOFrameAllocator::Userdata::FROM_MALLOC);

                // Upload the pixel data.
                uint8_t *surface_p = nullptr;
                va_status = vaMapBuffer(va_dpy->va_dpy, resources.image.buf, (void **)&surface_p);
                CHECK_VASTATUS(va_status, "vaMapBuffer");

                if (is_uyvy(qf.frame)) {
                        size_t field_start_line = qf.video_format.extra_lines_top;  // No interlacing support.
                        size_t field_start = qf.cbcr_offset * 2 + qf.video_format.width * field_start_line * 2;

                        const uint8_t *src = qf.frame->data_copy + field_start;
                        uint8_t *dst = (unsigned char *)surface_p + resources.image.offsets[0];
                        memcpy_with_pitch(dst, src, qf.video_format.width * 2, resources.image.pitches[0], qf.video_format.height);
                } else {
                        assert(is_i420(qf.frame));
                        assert(!qf.frame->interleaved);  // Makes no sense for I420.

                        size_t field_start_line = qf.video_format.extra_lines_top;  // No interlacing support.
                        const uint8_t *y_src = qf.frame->data + qf.video_format.width * field_start_line;
                        const uint8_t *cb_src = y_src + width * height;
                        const uint8_t *cr_src = cb_src + (width / 2) * (height / 2);

                        uint8_t *y_dst = (unsigned char *)surface_p + resources.image.offsets[0];
                        uint8_t *cbcr_dst = (unsigned char *)surface_p + resources.image.offsets[1];

                        memcpy_with_pitch(y_dst, y_src, qf.video_format.width, resources.image.pitches[0], qf.video_format.height);
                        interleave_with_pitch(cbcr_dst, cb_src, cr_src, qf.video_format.width / 2, resources.image.pitches[1], qf.video_format.height / 2);
                }

                va_status = vaUnmapBuffer(va_dpy->va_dpy, resources.image.buf);
                CHECK_VASTATUS(va_status, "vaUnmapBuffer");
        }

        qf.frame->data_copy = nullptr;

        // Seemingly vaPutImage() (which triggers a GPU copy) is much nicer to the
        // CPU than vaDeriveImage() and copying directly into the GPU's buffers.
        // Exactly why is unclear, but it seems to involve L3 cache usage when there
        // are many high-res (1080p+) images in play.
        va_status = vaPutImage(va_dpy->va_dpy, resources.surface, resources.image.image_id, 0, 0, width, height, 0, 0, width, height);
        CHECK_VASTATUS(va_status, "vaPutImage");

        // Finally, stick in the JPEG header.
        VAEncPackedHeaderParameterBuffer header_parm;
        header_parm.type = VAEncPackedHeaderRawData;
        header_parm.bit_length = 8 * va_data.jpeg_header.size();

        VABufferID header_parm_buffer;
        va_status = vaCreateBuffer(va_dpy->va_dpy, resources.context, VAEncPackedHeaderParameterBufferType, sizeof(header_parm), 1, &header_parm, &header_parm_buffer);
        CHECK_VASTATUS(va_status, "vaCreateBuffer");
        VABufferDestroyer destroy_header(va_dpy->va_dpy, header_parm_buffer);

        VABufferID header_data_buffer;
        va_status = vaCreateBuffer(va_dpy->va_dpy, resources.context, VAEncPackedHeaderDataBufferType, va_data.jpeg_header.size(), 1, va_data.jpeg_header.data(), &header_data_buffer);
        CHECK_VASTATUS(va_status, "vaCreateBuffer");
        VABufferDestroyer destroy_header_data(va_dpy->va_dpy, header_data_buffer);

        va_status = vaBeginPicture(va_dpy->va_dpy, resources.context, resources.surface);
        CHECK_VASTATUS(va_status, "vaBeginPicture");
        va_status = vaRenderPicture(va_dpy->va_dpy, resources.context, &pic_param_buffer, 1);
        CHECK_VASTATUS(va_status, "vaRenderPicture(pic_param)");
        va_status = vaRenderPicture(va_dpy->va_dpy, resources.context, &q_buffer, 1);
        CHECK_VASTATUS(va_status, "vaRenderPicture(q)");
        va_status = vaRenderPicture(va_dpy->va_dpy, resources.context, &huff_buffer, 1);
        CHECK_VASTATUS(va_status, "vaRenderPicture(huff)");
        va_status = vaRenderPicture(va_dpy->va_dpy, resources.context, &slice_param_buffer, 1);
        CHECK_VASTATUS(va_status, "vaRenderPicture(slice_param)");
        va_status = vaRenderPicture(va_dpy->va_dpy, resources.context, &header_parm_buffer, 1);
        CHECK_VASTATUS(va_status, "vaRenderPicture(header_parm)");
        va_status = vaRenderPicture(va_dpy->va_dpy, resources.context, &header_data_buffer, 1);
        CHECK_VASTATUS(va_status, "vaRenderPicture(header_data)");
        va_status = vaEndPicture(va_dpy->va_dpy, resources.context);
        CHECK_VASTATUS(va_status, "vaEndPicture");

        qf.resources = move(resources);
        qf.resource_releaser = move(release);

        lock_guard<mutex> lock(mu);
        frames_encoding.push(move(qf));
        any_frames_encoding.notify_all();
}

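// Picks up frames submitted to VA-API by encoder_thread_func() (in submission
// order), waits for each encode to finish, and muxes the result. Runs in its
// own thread so that new frames can be submitted to the GPU in the meantime.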
void MJPEGEncoder::va_receiver_thread_func()
{
        pthread_setname_np(pthread_self(), "MJPEG_Receive");
        for (;;) {
                QueuedFrame qf;
                {
                        unique_lock<mutex> lock(mu);
                        any_frames_encoding.wait(lock, [this] { return !frames_encoding.empty() || should_quit; });
                        if (should_quit) return;
                        qf = move(frames_encoding.front());
                        frames_encoding.pop();
                }

                update_siphon_streams();

                assert(global_flags.card_to_mjpeg_stream_export.count(qf.card_index));  // Or should_encode_mjpeg_for_card() would have returned false.
                int stream_index = global_flags.card_to_mjpeg_stream_export[qf.card_index];

                HTTPD::StreamID multicam_id{ HTTPD::MULTICAM_STREAM, 0 };
                HTTPD::StreamID siphon_id{ HTTPD::SIPHON_STREAM, qf.card_index };
                assert(streams.count(multicam_id));
                assert(streams[multicam_id].avctx != nullptr);

                // Write audio before video, since Futatabi expects it.
                if (qf.audio.size() > 0) {
                        write_audio_packet(streams[multicam_id].avctx.get(), qf.pts, stream_index + global_flags.card_to_mjpeg_stream_export.size(), qf.audio);
                        if (streams.count(siphon_id)) {
                                write_audio_packet(streams[siphon_id].avctx.get(), qf.pts, /*stream_index=*/1, qf.audio);
                        }
                }

                VAStatus va_status = vaSyncSurface(va_dpy->va_dpy, qf.resources.surface);
                CHECK_VASTATUS(va_status, "vaSyncSurface");

                VACodedBufferSegment *segment;
                va_status = vaMapBuffer(va_dpy->va_dpy, qf.resources.data_buffer, (void **)&segment);
                CHECK_VASTATUS(va_status, "vaMapBuffer");

                const uint8_t *coded_buf = reinterpret_cast<uint8_t *>(segment->buf);
                write_mjpeg_packet(streams[multicam_id].avctx.get(), qf.pts, stream_index, coded_buf, segment->size);
                if (streams.count(siphon_id)) {
                        write_mjpeg_packet(streams[siphon_id].avctx.get(), qf.pts, /*stream_index=*/0, coded_buf, segment->size);
                }

                va_status = vaUnmapBuffer(va_dpy->va_dpy, qf.resources.data_buffer);
                CHECK_VASTATUS(va_status, "vaUnmapBuffer");
        }
}

vector<uint8_t> MJPEGEncoder::encode_jpeg_libjpeg(const QueuedFrame &qf)
{
        unsigned width = qf.video_format.width;
        unsigned height = qf.video_format.height;

        VectorDestinationManager dest;
        jpeg_compress_struct cinfo;

        size_t field_start_line = qf.video_format.extra_lines_top;  // No interlacing support.

        PBOFrameAllocator::Userdata *userdata = (PBOFrameAllocator::Userdata *)qf.frame->userdata;
        if (userdata->pixel_format == PixelFormat_8BitYCbCr) {
                init_jpeg(width, height, qf.white_balance, &dest, &cinfo, /*y_h_samp_factor=*/2, /*y_v_samp_factor=*/1);

                assert(qf.frame->interleaved);
                size_t field_start = qf.cbcr_offset * 2 + qf.video_format.width * field_start_line * 2;

                JSAMPROW yptr[8], cbptr[8], crptr[8];
                JSAMPARRAY data[3] = { yptr, cbptr, crptr };
                for (unsigned y = 0; y < qf.video_format.height; y += 8) {
                        const uint8_t *src = qf.frame->data_copy + field_start + y * qf.video_format.width * 2;

                        memcpy_interleaved(tmp_cbcr, tmp_y, src, qf.video_format.width * 8 * 2);
                        memcpy_interleaved(tmp_cb, tmp_cr, tmp_cbcr, qf.video_format.width * 8);
                        for (unsigned yy = 0; yy < 8; ++yy) {
                                yptr[yy] = tmp_y + yy * width;
                                cbptr[yy] = tmp_cb + yy * width / 2;
                                crptr[yy] = tmp_cr + yy * width / 2;
                        }
                        jpeg_write_raw_data(&cinfo, data, /*num_lines=*/8);
                }
        } else {
                assert(userdata->pixel_format == PixelFormat_8BitYCbCrPlanar);

                const movit::YCbCrFormat &ycbcr = userdata->ycbcr_format;
                init_jpeg(width, height, qf.white_balance, &dest, &cinfo, ycbcr.chroma_subsampling_x, ycbcr.chroma_subsampling_y);
                assert(ycbcr.chroma_subsampling_y <= 2);  // Or we'd need larger JSAMPROW arrays below.

                const uint8_t *y_start = qf.frame->data + qf.video_format.width * field_start_line;
                const uint8_t *cb_start = y_start + width * height;
                const uint8_t *cr_start = cb_start + (width / ycbcr.chroma_subsampling_x) * (height / ycbcr.chroma_subsampling_y);

                size_t block_height_y = 8 * ycbcr.chroma_subsampling_y;
                size_t block_height_cbcr = 8;

                JSAMPROW yptr[16], cbptr[16], crptr[16];
                JSAMPARRAY data[3] = { yptr, cbptr, crptr };
                for (unsigned y = 0; y < qf.video_format.height; y += block_height_y) {
                        for (unsigned yy = 0; yy < block_height_y; ++yy) {
                                yptr[yy] = const_cast<JSAMPROW>(y_start) + (y + yy) * width;
                        }
                        unsigned cbcr_y = y / ycbcr.chroma_subsampling_y;
                        for (unsigned yy = 0; yy < block_height_cbcr; ++yy) {
                                cbptr[yy] = const_cast<JSAMPROW>(cb_start) + (cbcr_y + yy) * width / ycbcr.chroma_subsampling_x;
                                crptr[yy] = const_cast<JSAMPROW>(cr_start) + (cbcr_y + yy) * width / ycbcr.chroma_subsampling_x;
                        }
                        jpeg_write_raw_data(&cinfo, data, block_height_y);
                }
        }
        jpeg_finish_compress(&cinfo);

        return dest.dest;
}

void MJPEGEncoder::add_stream(HTTPD::StreamID stream_id)
{
        AVFormatContextWithCloser avctx;

        // Set up the mux. We don't use the Mux wrapper, because it's geared towards
        // a situation with only one video stream (and possibly one audio stream)
        // with known width/height, and we don't need the extra functionality it provides.
        avctx.reset(avformat_alloc_context());
        avctx->oformat = av_guess_format("nut", nullptr, nullptr);

        uint8_t *buf = (uint8_t *)av_malloc(MUX_BUFFER_SIZE);
        avctx->pb = avio_alloc_context(buf, MUX_BUFFER_SIZE, 1, &ffmpeg_contexts[stream_id], nullptr, nullptr, nullptr);
        avctx->pb->write_data_type = &MJPEGEncoder::write_packet2_thunk;
        avctx->flags = AVFMT_FLAG_CUSTOM_IO;

        if (stream_id.type == HTTPD::MULTICAM_STREAM) {
                for (unsigned card_idx = 0; card_idx < global_flags.card_to_mjpeg_stream_export.size(); ++card_idx) {
                        add_video_stream(avctx.get());
                }
                for (unsigned card_idx = 0; card_idx < global_flags.card_to_mjpeg_stream_export.size(); ++card_idx) {
                        add_audio_stream(avctx.get());
                }
        } else {
                assert(stream_id.type == HTTPD::SIPHON_STREAM);
                add_video_stream(avctx.get());
                add_audio_stream(avctx.get());
        }
        finalize_mux(avctx.get());

        Stream s;
        s.avctx = move(avctx);
        streams[stream_id] = move(s);
}

void MJPEGEncoder::update_siphon_streams()
{
        // Bring the list of streams into sync with what the clients need.
        for (auto it = streams.begin(); it != streams.end(); ) {
                if (it->first.type != HTTPD::SIPHON_STREAM) {
                        ++it;
                        continue;
                }
                if (httpd->get_num_connected_siphon_clients(it->first.index) == 0) {
                        av_free(it->second.avctx->pb->buffer);
                        streams.erase(it++);
                } else {
                        ++it;
                }
        }
        for (unsigned stream_idx = 0; stream_idx < MAX_VIDEO_CARDS; ++stream_idx) {
                HTTPD::StreamID stream_id{ HTTPD::SIPHON_STREAM, stream_idx };
                if (streams.count(stream_id) == 0 && httpd->get_num_connected_siphon_clients(stream_idx) > 0) {
                        add_stream(stream_id);
                }
        }
}

void MJPEGEncoder::create_ffmpeg_context(HTTPD::StreamID stream_id)
{
        ffmpeg_contexts.emplace(stream_id, WritePacket2Context{ this, stream_id });
}