#include "mjpeg_encoder.h"

#include <assert.h>
#include <jpeglib.h>
#include <unistd.h>
#if __SSE2__
#include <immintrin.h>
#endif
#include <list>

extern "C" {
#include <libavformat/avformat.h>
}

#include "defs.h"
#include "shared/ffmpeg_raii.h"
#include "flags.h"
#include "shared/httpd.h"
#include "shared/memcpy_interleaved.h"
#include "shared/metrics.h"
#include "pbo_frame_allocator.h"
#include "shared/timebase.h"
#include "va_display_with_cleanup.h"

#include <movit/colorspace_conversion_effect.h>

#include <va/va.h>
#include <va/va_drm.h>
#include <va/va_x11.h>

using namespace Eigen;
using namespace bmusb;
using namespace movit;
using namespace std;

static VAImageFormat uyvy_format;

extern void memcpy_with_pitch(uint8_t *dst, const uint8_t *src, size_t src_width, size_t dst_pitch, size_t height);

// From libjpeg (although it's of course identical between implementations).
static const int jpeg_natural_order[DCTSIZE2] = {
	 0,  1,  8, 16,  9,  2,  3, 10,
	17, 24, 32, 25, 18, 11,  4,  5,
	12, 19, 26, 33, 40, 48, 41, 34,
	27, 20, 13,  6,  7, 14, 21, 28,
	35, 42, 49, 56, 57, 50, 43, 36,
	29, 22, 15, 23, 30, 37, 44, 51,
	58, 59, 52, 45, 38, 31, 39, 46,
	53, 60, 61, 54, 47, 55, 62, 63,
};

struct VectorDestinationManager {
	jpeg_destination_mgr pub;
	std::vector<uint8_t> dest;

	VectorDestinationManager()
	{
		pub.init_destination = init_destination_thunk;
		pub.empty_output_buffer = empty_output_buffer_thunk;
		pub.term_destination = term_destination_thunk;
	}

	static void init_destination_thunk(j_compress_ptr ptr)
	{
		((VectorDestinationManager *)(ptr->dest))->init_destination();
	}

	inline void init_destination()
	{
		make_room(0);
	}

	static boolean empty_output_buffer_thunk(j_compress_ptr ptr)
	{
		return ((VectorDestinationManager *)(ptr->dest))->empty_output_buffer();
	}

	inline bool empty_output_buffer()
	{
		make_room(dest.size());  // libjpeg considers the whole buffer full at this point, so deliberately ignore pub.free_in_buffer.
		return true;
	}

	inline void make_room(size_t bytes_used)
	{
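		// Grow the vector, but then expand to whatever capacity the
		// allocator actually gave us, so that repeated calls grow the
		// buffer geometrically instead of 4 kB at a time.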
		dest.resize(bytes_used + 4096);
		dest.resize(dest.capacity());
		pub.next_output_byte = dest.data() + bytes_used;
		pub.free_in_buffer = dest.size() - bytes_used;
	}

	static void term_destination_thunk(j_compress_ptr ptr)
	{
		((VectorDestinationManager *)(ptr->dest))->term_destination();
	}

	inline void term_destination()
	{
		dest.resize(dest.size() - pub.free_in_buffer);
	}
};
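// The casts from jpeg_destination_mgr * above are only safe because 'pub'
// is the first member of a standard-layout type, so it shares the address
// of the whole struct.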
static_assert(std::is_standard_layout<VectorDestinationManager>::value, "");

int MJPEGEncoder::write_packet2_thunk(void *opaque, uint8_t *buf, int buf_size, AVIODataMarkerType type, int64_t time)
{
	WritePacket2Context *ctx = (WritePacket2Context *)opaque;
	return ctx->mjpeg_encoder->write_packet2(ctx->stream_id, buf, buf_size, type, time);
}

int MJPEGEncoder::write_packet2(HTTPD::StreamID stream_id, uint8_t *buf, int buf_size, AVIODataMarkerType type, int64_t time)
{
	string *mux_header = &streams[stream_id].mux_header;
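	// The mux header can arrive in multiple write callbacks, so accumulate
	// it into a string before handing it to the HTTP server.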
	if (type == AVIO_DATA_MARKER_HEADER) {
		mux_header->append((char *)buf, buf_size);
		httpd->set_header(stream_id, *mux_header);
	} else {
		httpd->add_data(stream_id, (char *)buf, buf_size, /*keyframe=*/true, AV_NOPTS_VALUE, AVRational{ AV_TIME_BASE, 1 });
	}
	return buf_size;
}

namespace {

void add_video_stream(AVFormatContext *avctx)
{
	AVStream *stream = avformat_new_stream(avctx, nullptr);
	if (stream == nullptr) {
		fprintf(stderr, "avformat_new_stream() failed\n");
		abort();
	}

	// FFmpeg is very picky about having audio at 1/48000 timebase,
	// no matter what we write. Even though we'd prefer our usual 1/120000,
	// put the video on the same one, so that we can have locked audio.
	stream->time_base = AVRational{ 1, OUTPUT_FREQUENCY };
	stream->codecpar->codec_type = AVMEDIA_TYPE_VIDEO;
	stream->codecpar->codec_id = AV_CODEC_ID_MJPEG;

	// Used for aspect ratio only. Can change without notice (the mux won't care).
	stream->codecpar->width = global_flags.width;
	stream->codecpar->height = global_flags.height;

	// TODO: We could perhaps use the interpretation for each card here
	// (or at least the command-line flags) instead of the defaults,
	// but what would we do when they change?
	stream->codecpar->color_primaries = AVCOL_PRI_BT709;
	stream->codecpar->color_trc = AVCOL_TRC_IEC61966_2_1;
	stream->codecpar->color_space = AVCOL_SPC_BT709;
	stream->codecpar->color_range = AVCOL_RANGE_MPEG;
	stream->codecpar->chroma_location = AVCHROMA_LOC_LEFT;
	stream->codecpar->field_order = AV_FIELD_PROGRESSIVE;
}

void add_audio_stream(AVFormatContext *avctx)
{
	AVStream *stream = avformat_new_stream(avctx, nullptr);
	if (stream == nullptr) {
		fprintf(stderr, "avformat_new_stream() failed\n");
		abort();
	}
	stream->time_base = AVRational{ 1, OUTPUT_FREQUENCY };
	stream->codecpar->codec_type = AVMEDIA_TYPE_AUDIO;
	stream->codecpar->codec_id = AV_CODEC_ID_PCM_S32LE;
	stream->codecpar->channel_layout = AV_CH_LAYOUT_STEREO;
	stream->codecpar->channels = 2;
	stream->codecpar->sample_rate = OUTPUT_FREQUENCY;
}

void finalize_mux(AVFormatContext *avctx)
{
	AVDictionary *options = NULL;
	vector<pair<string, string>> opts = MUX_OPTS;
	for (pair<string, string> opt : opts) {
		av_dict_set(&options, opt.first.c_str(), opt.second.c_str(), 0);
	}
	if (avformat_write_header(avctx, &options) < 0) {
		fprintf(stderr, "avformat_write_header() failed\n");
		abort();
	}
}

}  // namespace

MJPEGEncoder::MJPEGEncoder(HTTPD *httpd, const string &va_display)
	: httpd(httpd)
{
	create_ffmpeg_context(HTTPD::StreamID{ HTTPD::MULTICAM_STREAM, 0 });
	for (unsigned stream_idx = 0; stream_idx < MAX_VIDEO_CARDS; ++stream_idx) {
		create_ffmpeg_context(HTTPD::StreamID{ HTTPD::SIPHON_STREAM, stream_idx });
	}

	add_stream(HTTPD::StreamID{ HTTPD::MULTICAM_STREAM, 0 });

	// Initialize VA-API.
	string error;
	va_dpy = try_open_va(va_display, &error, &config_id);
	if (va_dpy == nullptr) {
		fprintf(stderr, "Could not initialize VA-API for MJPEG encoding: %s. JPEGs will be encoded in software if needed.\n", error.c_str());
	}

	encoder_thread = thread(&MJPEGEncoder::encoder_thread_func, this);
	if (va_dpy != nullptr) {
		va_receiver_thread = thread(&MJPEGEncoder::va_receiver_thread_func, this);
	}

	global_metrics.add("mjpeg_frames", {{ "status", "dropped" }, { "reason", "zero_size" }}, &metric_mjpeg_frames_zero_size_dropped);
	global_metrics.add("mjpeg_frames", {{ "status", "dropped" }, { "reason", "interlaced" }}, &metric_mjpeg_frames_interlaced_dropped);
	global_metrics.add("mjpeg_frames", {{ "status", "dropped" }, { "reason", "unsupported_pixel_format" }}, &metric_mjpeg_frames_unsupported_pixel_format_dropped);
	global_metrics.add("mjpeg_frames", {{ "status", "dropped" }, { "reason", "oversized" }}, &metric_mjpeg_frames_oversized_dropped);
	global_metrics.add("mjpeg_frames", {{ "status", "dropped" }, { "reason", "overrun" }}, &metric_mjpeg_overrun_dropped);
	global_metrics.add("mjpeg_frames", {{ "status", "submitted" }}, &metric_mjpeg_overrun_submitted);

	running = true;
}

MJPEGEncoder::~MJPEGEncoder()
{
	for (auto &id_and_stream : streams) {
		av_free(id_and_stream.second.avctx->pb->buffer);
	}

	global_metrics.remove("mjpeg_frames", {{ "status", "dropped" }, { "reason", "zero_size" }});
	global_metrics.remove("mjpeg_frames", {{ "status", "dropped" }, { "reason", "interlaced" }});
	global_metrics.remove("mjpeg_frames", {{ "status", "dropped" }, { "reason", "unsupported_pixel_format" }});
	global_metrics.remove("mjpeg_frames", {{ "status", "dropped" }, { "reason", "oversized" }});
	global_metrics.remove("mjpeg_frames", {{ "status", "dropped" }, { "reason", "overrun" }});
	global_metrics.remove("mjpeg_frames", {{ "status", "submitted" }});
}

void MJPEGEncoder::stop()
{
	if (!running) {
		return;
	}
	running = false;
	should_quit = true;
	any_frames_to_be_encoded.notify_all();
	any_frames_encoding.notify_all();
	encoder_thread.join();
	if (va_dpy != nullptr) {
		va_receiver_thread.join();
	}
}

unique_ptr<VADisplayWithCleanup> MJPEGEncoder::try_open_va(const string &va_display, string *error, VAConfigID *config_id)
{
	unique_ptr<VADisplayWithCleanup> va_dpy = va_open_display(va_display);
	if (va_dpy == nullptr) {
		if (error) *error = "Opening VA display failed";
		return nullptr;
	}
	int major_ver, minor_ver;
	VAStatus va_status = vaInitialize(va_dpy->va_dpy, &major_ver, &minor_ver);
	if (va_status != VA_STATUS_SUCCESS) {
		char buf[256];
		snprintf(buf, sizeof(buf), "vaInitialize() failed with status %d\n", va_status);
		if (error != nullptr) *error = buf;
		return nullptr;
	}

	VAConfigAttrib attr = { VAConfigAttribRTFormat, VA_RT_FORMAT_YUV422 };
	va_status = vaCreateConfig(va_dpy->va_dpy, VAProfileJPEGBaseline, VAEntrypointEncPicture,
		&attr, 1, config_id);
	if (va_status == VA_STATUS_ERROR_UNSUPPORTED_ENTRYPOINT) {
		if (error != nullptr) *error = "No hardware support";
		return nullptr;
	} else if (va_status != VA_STATUS_SUCCESS) {
		char buf[256];
		snprintf(buf, sizeof(buf), "vaCreateConfig() failed with status %d\n", va_status);
		if (error != nullptr) *error = buf;
		return nullptr;
	}

	// TODO: Unify with the code in Futatabi.
	int num_formats = vaMaxNumImageFormats(va_dpy->va_dpy);
	assert(num_formats > 0);

	unique_ptr<VAImageFormat[]> formats(new VAImageFormat[num_formats]);
	va_status = vaQueryImageFormats(va_dpy->va_dpy, formats.get(), &num_formats);
	if (va_status != VA_STATUS_SUCCESS) {
		char buf[256];
		snprintf(buf, sizeof(buf), "vaQueryImageFormats() failed with status %d\n", va_status);
		if (error != nullptr) *error = buf;
		return nullptr;
	}

	bool found = false;
	for (int i = 0; i < num_formats; ++i) {
		if (formats[i].fourcc == VA_FOURCC_UYVY) {
			memcpy(&uyvy_format, &formats[i], sizeof(VAImageFormat));
			found = true;
			break;
		}
	}
	if (!found) {
		if (error != nullptr) *error = "UYVY format not found";
		return nullptr;
	}

	return va_dpy;
}

void MJPEGEncoder::upload_frame(int64_t pts, unsigned card_index, RefCountedFrame frame, const bmusb::VideoFormat &video_format, size_t y_offset, size_t cbcr_offset, vector<int32_t> audio, const RGBTriplet &white_balance)
{
	PBOFrameAllocator::Userdata *userdata = (PBOFrameAllocator::Userdata *)frame->userdata;
	if (video_format.width == 0 || video_format.height == 0) {
		++metric_mjpeg_frames_zero_size_dropped;
		return;
	}
	if (video_format.interlaced) {
		fprintf(stderr, "Card %u: Ignoring JPEG encoding for interlaced frame\n", card_index);
		++metric_mjpeg_frames_interlaced_dropped;
		return;
	}
	if (userdata->pixel_format != PixelFormat_8BitYCbCr ||
	    !frame->interleaved) {
		fprintf(stderr, "Card %u: Ignoring JPEG encoding for unsupported pixel format\n", card_index);
		++metric_mjpeg_frames_unsupported_pixel_format_dropped;
		return;
	}
	if (video_format.width > 4096 || video_format.height > 4096) {
		fprintf(stderr, "Card %u: Ignoring JPEG encoding for oversized frame\n", card_index);
		++metric_mjpeg_frames_oversized_dropped;
		return;
	}

	lock_guard<mutex> lock(mu);
	if (frames_to_be_encoded.size() + frames_encoding.size() > 50) {
		fprintf(stderr, "WARNING: MJPEG encoding doesn't keep up, discarding frame.\n");
		++metric_mjpeg_overrun_dropped;
		return;
	}
	++metric_mjpeg_overrun_submitted;
	frames_to_be_encoded.push(QueuedFrame{ pts, card_index, frame, video_format, y_offset, cbcr_offset, move(audio), white_balance });
	any_frames_to_be_encoded.notify_all();
}

bool MJPEGEncoder::should_encode_mjpeg_for_card(unsigned card_index)
{
	// Only bother doing MJPEG encoding if there are any connected clients
	// that want the stream.
	if (httpd->get_num_connected_multicam_clients() == 0 &&
	    httpd->get_num_connected_siphon_clients(card_index) == 0) {
		return false;
	}

	auto it = global_flags.card_to_mjpeg_stream_export.find(card_index);
	return (it != global_flags.card_to_mjpeg_stream_export.end());
}

void MJPEGEncoder::encoder_thread_func()
{
	pthread_setname_np(pthread_self(), "MJPEG_Encode");
	posix_memalign((void **)&tmp_y, 4096, 4096 * 8);
	posix_memalign((void **)&tmp_cbcr, 4096, 4096 * 8);
	posix_memalign((void **)&tmp_cb, 4096, 4096 * 8);
	posix_memalign((void **)&tmp_cr, 4096, 4096 * 8);
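	// Each temporary buffer holds eight rows of up to 4096 pixels,
	// matching the frame size cap enforced in upload_frame().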

	for (;;) {
		QueuedFrame qf;
		{
			unique_lock<mutex> lock(mu);
			any_frames_to_be_encoded.wait(lock, [this] { return !frames_to_be_encoded.empty() || should_quit; });
			if (should_quit) break;
			qf = move(frames_to_be_encoded.front());
			frames_to_be_encoded.pop();
		}

		assert(global_flags.card_to_mjpeg_stream_export.count(qf.card_index));  // Or should_encode_mjpeg_for_card() would have returned false.
		int stream_index = global_flags.card_to_mjpeg_stream_export[qf.card_index];

		if (va_dpy != nullptr) {
			// Will call back in the receiver thread.
			encode_jpeg_va(move(qf));
		} else {
			update_siphon_streams();

			HTTPD::StreamID multicam_id{ HTTPD::MULTICAM_STREAM, 0 };
			HTTPD::StreamID siphon_id{ HTTPD::SIPHON_STREAM, qf.card_index };
			assert(streams.count(multicam_id));

			// Write audio before video, since Futatabi expects it.
			if (qf.audio.size() > 0) {
				write_audio_packet(streams[multicam_id].avctx.get(), qf.pts, stream_index + global_flags.card_to_mjpeg_stream_export.size(), qf.audio);
				if (streams.count(siphon_id)) {
					write_audio_packet(streams[siphon_id].avctx.get(), qf.pts, /*stream_index=*/1, qf.audio);
				}
			}

			// Encode synchronously, in the same thread.
			vector<uint8_t> jpeg = encode_jpeg_libjpeg(qf);
			write_mjpeg_packet(streams[multicam_id].avctx.get(), qf.pts, stream_index, jpeg.data(), jpeg.size());
			if (streams.count(siphon_id)) {
				write_mjpeg_packet(streams[siphon_id].avctx.get(), qf.pts, /*stream_index=*/0, jpeg.data(), jpeg.size());
			}
		}
	}

	free(tmp_y);
	free(tmp_cbcr);
	free(tmp_cb);
	free(tmp_cr);
}

void MJPEGEncoder::write_mjpeg_packet(AVFormatContext *avctx, int64_t pts, unsigned stream_index, const uint8_t *jpeg, size_t jpeg_size)
{
	AVPacket pkt;
	memset(&pkt, 0, sizeof(pkt));
	pkt.buf = nullptr;
	pkt.data = const_cast<uint8_t *>(jpeg);
	pkt.size = jpeg_size;
	pkt.stream_index = stream_index;
	pkt.flags = AV_PKT_FLAG_KEY;
	AVRational time_base = avctx->streams[pkt.stream_index]->time_base;
	pkt.pts = pkt.dts = av_rescale_q(pts, AVRational{ 1, TIMEBASE }, time_base);
	pkt.duration = 0;

	if (av_write_frame(avctx, &pkt) < 0) {
		fprintf(stderr, "av_write_frame() failed\n");
		abort();
	}
}

void MJPEGEncoder::write_audio_packet(AVFormatContext *avctx, int64_t pts, unsigned stream_index, const vector<int32_t> &audio)
{
	AVPacket pkt;
	memset(&pkt, 0, sizeof(pkt));
	pkt.buf = nullptr;
	pkt.data = reinterpret_cast<uint8_t *>(const_cast<int32_t *>(&audio[0]));
	pkt.size = audio.size() * sizeof(audio[0]);
	pkt.stream_index = stream_index;
	pkt.flags = AV_PKT_FLAG_KEY;
	AVRational time_base = avctx->streams[pkt.stream_index]->time_base;
	pkt.pts = pkt.dts = av_rescale_q(pts, AVRational{ 1, TIMEBASE }, time_base);
	size_t num_stereo_samples = audio.size() / 2;
	pkt.duration = av_rescale_q(num_stereo_samples, AVRational{ 1, OUTPUT_FREQUENCY }, time_base);

	if (av_write_frame(avctx, &pkt) < 0) {
		fprintf(stderr, "av_write_frame() failed\n");
		abort();
	}
}

class VABufferDestroyer {
public:
	VABufferDestroyer(VADisplay dpy, VABufferID buf)
		: dpy(dpy), buf(buf) {}

	~VABufferDestroyer() {
		VAStatus va_status = vaDestroyBuffer(dpy, buf);
		CHECK_VASTATUS(va_status, "vaDestroyBuffer");
	}

private:
	VADisplay dpy;
	VABufferID buf;
};

MJPEGEncoder::VAResources MJPEGEncoder::get_va_resources(unsigned width, unsigned height)
{
	{
		lock_guard<mutex> lock(va_resources_mutex);
		for (auto it = va_resources_freelist.begin(); it != va_resources_freelist.end(); ++it) {
			if (it->width == width && it->height == height) {
				VAResources ret = *it;
				va_resources_freelist.erase(it);
				return ret;
			}
		}
	}

	VAResources ret;

	ret.width = width;
	ret.height = height;

	VASurfaceAttrib attrib;
	attrib.flags = VA_SURFACE_ATTRIB_SETTABLE;
	attrib.type = VASurfaceAttribPixelFormat;
	attrib.value.type = VAGenericValueTypeInteger;
	attrib.value.value.i = VA_FOURCC_UYVY;

	VAStatus va_status = vaCreateSurfaces(va_dpy->va_dpy, VA_RT_FORMAT_YUV422,
		width, height,
		&ret.surface, 1, &attrib, 1);
	CHECK_VASTATUS(va_status, "vaCreateSurfaces");

	va_status = vaCreateContext(va_dpy->va_dpy, config_id, width, height, 0, &ret.surface, 1, &ret.context);
	CHECK_VASTATUS(va_status, "vaCreateContext");

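	// Coded output buffer; three bytes per pixel plus some slack is a
	// comfortable upper bound for any baseline JPEG of this size.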
	va_status = vaCreateBuffer(va_dpy->va_dpy, ret.context, VAEncCodedBufferType, width * height * 3 + 8192, 1, nullptr, &ret.data_buffer);
	CHECK_VASTATUS(va_status, "vaCreateBuffer");

	va_status = vaCreateImage(va_dpy->va_dpy, &uyvy_format, width, height, &ret.image);
	CHECK_VASTATUS(va_status, "vaCreateImage");

	return ret;
}

void MJPEGEncoder::release_va_resources(MJPEGEncoder::VAResources resources)
{
	lock_guard<mutex> lock(va_resources_mutex);
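	// Keep the freelist bounded; if it has grown past 50 entries,
	// destroy the entry at the back (the least recently returned one)
	// before putting this one at the front.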
	if (va_resources_freelist.size() > 50) {
		auto it = va_resources_freelist.end();
		--it;

		VAStatus va_status = vaDestroyBuffer(va_dpy->va_dpy, it->data_buffer);
		CHECK_VASTATUS(va_status, "vaDestroyBuffer");

		va_status = vaDestroyContext(va_dpy->va_dpy, it->context);
		CHECK_VASTATUS(va_status, "vaDestroyContext");

		va_status = vaDestroySurfaces(va_dpy->va_dpy, &it->surface, 1);
		CHECK_VASTATUS(va_status, "vaDestroySurfaces");

		va_status = vaDestroyImage(va_dpy->va_dpy, it->image.image_id);
		CHECK_VASTATUS(va_status, "vaDestroyImage");

		va_resources_freelist.erase(it);
	}

	va_resources_freelist.push_front(resources);
}

namespace {

void push16(uint16_t val, string *str)
{
	str->push_back(val >> 8);
	str->push_back(val & 0xff);
}

void push32(uint32_t val, string *str)
{
	str->push_back(val >> 24);
	str->push_back((val >> 16) & 0xff);
	str->push_back((val >> 8) & 0xff);
	str->push_back(val & 0xff);
}

}  // namespace

void MJPEGEncoder::init_jpeg_422(unsigned width, unsigned height, const RGBTriplet &white_balance, VectorDestinationManager *dest, jpeg_compress_struct *cinfo)
{
	jpeg_error_mgr jerr;
	cinfo->err = jpeg_std_error(&jerr);
	jpeg_create_compress(cinfo);

	cinfo->dest = (jpeg_destination_mgr *)dest;

	cinfo->input_components = 3;
	jpeg_set_defaults(cinfo);
	jpeg_set_quality(cinfo, quality, /*force_baseline=*/false);

	cinfo->image_width = width;
	cinfo->image_height = height;
	cinfo->raw_data_in = true;
	jpeg_set_colorspace(cinfo, JCS_YCbCr);
	cinfo->comp_info[0].h_samp_factor = 2;
	cinfo->comp_info[0].v_samp_factor = 1;
	cinfo->comp_info[1].h_samp_factor = 1;
	cinfo->comp_info[1].v_samp_factor = 1;
	cinfo->comp_info[2].h_samp_factor = 1;
	cinfo->comp_info[2].v_samp_factor = 1;
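	// 2x1 luma vs. 1x1 chroma sampling gives 4:2:2, matching the UYVY input.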
	cinfo->CCIR601_sampling = true;  // Seems to be mostly ignored by libjpeg, though.
	jpeg_start_compress(cinfo, true);

	if (fabs(white_balance.r - 1.0f) > 1e-3 ||
	    fabs(white_balance.g - 1.0f) > 1e-3 ||
	    fabs(white_balance.b - 1.0f) > 1e-3) {
		// Convert from (linear) RGB to XYZ.
		Matrix3d rgb_to_xyz_matrix = movit::ColorspaceConversionEffect::get_xyz_matrix(COLORSPACE_sRGB);
		Vector3d xyz = rgb_to_xyz_matrix * Vector3d(white_balance.r, white_balance.g, white_balance.b);

		// Convert from XYZ to xyz by normalizing.
		xyz /= (xyz[0] + xyz[1] + xyz[2]);

		// Create a very rudimentary EXIF header to hold our white point.
		string exif;

		// Exif header, followed by some padding.
		exif = "Exif";
		push16(0, &exif);

		// TIFF header first:
		exif += "MM";  // Big endian.

		// Magic number.
		push16(42, &exif);

		// Offset of first IFD (relative to the MM, immediately after the header).
		push32(exif.size() - 6 + 4, &exif);

		// Now the actual IFD.

		// One entry.
		push16(1, &exif);

		// WhitePoint tag ID.
		push16(0x13e, &exif);

		// Rational type.
		push16(5, &exif);

		// Two values (x and y; z is implicit due to normalization).
		push32(2, &exif);

		// Offset (relative to the MM, immediately after the last IFD).
		push32(exif.size() - 6 + 8, &exif);

		// No more IFDs.
		push32(0, &exif);

		// The actual values.
		push32(lrintf(xyz[0] * 10000.0f), &exif);
		push32(10000, &exif);
		push32(lrintf(xyz[1] * 10000.0f), &exif);
		push32(10000, &exif);

		jpeg_write_marker(cinfo, JPEG_APP0 + 1, (const JOCTET *)exif.data(), exif.size());
	}

	// This comment marker is private to FFmpeg. It signals limited Y'CbCr range
	// (and nothing else).
	jpeg_write_marker(cinfo, JPEG_COM, (const JOCTET *)"CS=ITU601", strlen("CS=ITU601"));
}

vector<uint8_t> MJPEGEncoder::get_jpeg_header(unsigned width, unsigned height, const RGBTriplet &white_balance, jpeg_compress_struct *cinfo)
{
	VectorDestinationManager dest;
	init_jpeg_422(width, height, white_balance, &dest, cinfo);

	// Make a dummy black image; there's seemingly no other easy way of
	// making libjpeg output all of its headers.
	JSAMPROW yptr[8], cbptr[8], crptr[8];
	JSAMPARRAY data[3] = { yptr, cbptr, crptr };
	memset(tmp_y, 0, 4096);
	memset(tmp_cb, 0, 4096);
	memset(tmp_cr, 0, 4096);
	for (unsigned yy = 0; yy < 8; ++yy) {
		yptr[yy] = tmp_y;
		cbptr[yy] = tmp_cb;
		crptr[yy] = tmp_cr;
	}
	for (unsigned y = 0; y < height; y += 8) {
		jpeg_write_raw_data(cinfo, data, /*num_lines=*/8);
	}
	jpeg_finish_compress(cinfo);

	// We're only interested in the header, not the data after it.
	dest.term_destination();
	for (size_t i = 0; i < dest.dest.size() - 1; ++i) {
		if (dest.dest[i] == 0xff && dest.dest[i + 1] == 0xda) {  // Start of scan (SOS).
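			// The segment length includes its own two length bytes
			// but not the marker, so marker + segment is len + 2 bytes.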
			unsigned len = dest.dest[i + 2] * 256 + dest.dest[i + 3];
			dest.dest.resize(i + len + 2);
			break;
		}
	}

	return dest.dest;
}

MJPEGEncoder::VAData MJPEGEncoder::get_va_data_for_parameters(unsigned width, unsigned height, const RGBTriplet &white_balance)
{
	VAKey key{width, height, white_balance};
	if (va_data_for_parameters.count(key)) {
		return va_data_for_parameters[key];
	}

	// Use libjpeg to generate a header and set sane defaults for e.g.
	// quantization tables. Then do the actual encode with VA-API.
	jpeg_compress_struct cinfo;
	vector<uint8_t> jpeg_header = get_jpeg_header(width, height, white_balance, &cinfo);

	// Picture parameters.
	VAEncPictureParameterBufferJPEG pic_param;
	memset(&pic_param, 0, sizeof(pic_param));
	pic_param.reconstructed_picture = VA_INVALID_ID;
	pic_param.picture_width = cinfo.image_width;
	pic_param.picture_height = cinfo.image_height;
	for (int component_idx = 0; component_idx < cinfo.num_components; ++component_idx) {
		const jpeg_component_info *comp = &cinfo.comp_info[component_idx];
		pic_param.component_id[component_idx] = comp->component_id;
		pic_param.quantiser_table_selector[component_idx] = comp->quant_tbl_no;
	}
	pic_param.num_components = cinfo.num_components;
	pic_param.num_scan = 1;
	pic_param.sample_bit_depth = 8;
	pic_param.coded_buf = VA_INVALID_ID;  // To be filled out by caller.
	pic_param.pic_flags.bits.huffman = 1;
	pic_param.quality = 50;  // Don't scale the given quantization matrices. (See gen8_mfc_jpeg_fqm_state)

	// Quantization matrices.
	VAQMatrixBufferJPEG q;
	memset(&q, 0, sizeof(q));

	q.load_lum_quantiser_matrix = true;
	q.load_chroma_quantiser_matrix = true;
	for (int quant_tbl_idx = 0; quant_tbl_idx < min(4, NUM_QUANT_TBLS); ++quant_tbl_idx) {
		const JQUANT_TBL *qtbl = cinfo.quant_tbl_ptrs[quant_tbl_idx];
		assert((qtbl == nullptr) == (quant_tbl_idx >= 2));
		if (qtbl == nullptr) continue;

		uint8_t *qmatrix = (quant_tbl_idx == 0) ? q.lum_quantiser_matrix : q.chroma_quantiser_matrix;
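		// libjpeg keeps quantval[] in natural (row-major) order; the
		// VA-API buffer wants zigzag scan order, so reindex through
		// jpeg_natural_order, the same way libjpeg's own emit_dqt() does.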
		for (int i = 0; i < 64; ++i) {
			if (qtbl->quantval[i] > 255) {
				fprintf(stderr, "Baseline JPEG only!\n");
				abort();
			}
			qmatrix[i] = qtbl->quantval[jpeg_natural_order[i]];
		}
	}

	// Huffman tables (arithmetic is not supported).
	VAHuffmanTableBufferJPEGBaseline huff;
	memset(&huff, 0, sizeof(huff));

	for (int huff_tbl_idx = 0; huff_tbl_idx < min(2, NUM_HUFF_TBLS); ++huff_tbl_idx) {
		const JHUFF_TBL *ac_hufftbl = cinfo.ac_huff_tbl_ptrs[huff_tbl_idx];
		const JHUFF_TBL *dc_hufftbl = cinfo.dc_huff_tbl_ptrs[huff_tbl_idx];
		if (ac_hufftbl == nullptr) {
			assert(dc_hufftbl == nullptr);
			huff.load_huffman_table[huff_tbl_idx] = 0;
		} else {
			assert(dc_hufftbl != nullptr);
			huff.load_huffman_table[huff_tbl_idx] = 1;

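			// libjpeg's bits[] array is 1-indexed: bits[k] holds the
			// number of codes of length k bits, so shift down by one.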
			for (int i = 0; i < 16; ++i) {
				huff.huffman_table[huff_tbl_idx].num_dc_codes[i] = dc_hufftbl->bits[i + 1];
			}
			for (int i = 0; i < 12; ++i) {
				huff.huffman_table[huff_tbl_idx].dc_values[i] = dc_hufftbl->huffval[i];
			}
			for (int i = 0; i < 16; ++i) {
				huff.huffman_table[huff_tbl_idx].num_ac_codes[i] = ac_hufftbl->bits[i + 1];
			}
			for (int i = 0; i < 162; ++i) {
				huff.huffman_table[huff_tbl_idx].ac_values[i] = ac_hufftbl->huffval[i];
			}
		}
	}

	// Slice parameters (metadata about the slice).
	VAEncSliceParameterBufferJPEG parms;
	memset(&parms, 0, sizeof(parms));
	for (int component_idx = 0; component_idx < cinfo.num_components; ++component_idx) {
		const jpeg_component_info *comp = &cinfo.comp_info[component_idx];
		parms.components[component_idx].component_selector = comp->component_id;
		parms.components[component_idx].dc_table_selector = comp->dc_tbl_no;
		parms.components[component_idx].ac_table_selector = comp->ac_tbl_no;
		if (parms.components[component_idx].dc_table_selector > 1 ||
		    parms.components[component_idx].ac_table_selector > 1) {
			fprintf(stderr, "Uses too many Huffman tables\n");
			abort();
		}
	}
	parms.num_components = cinfo.num_components;
	parms.restart_interval = cinfo.restart_interval;

	jpeg_destroy_compress(&cinfo);

	VAData ret;
	ret.jpeg_header = move(jpeg_header);
	ret.pic_param = pic_param;
	ret.q = q;
	ret.huff = huff;
	ret.parms = parms;
	va_data_for_parameters[key] = ret;
	return ret;
}

void MJPEGEncoder::encode_jpeg_va(QueuedFrame &&qf)
{
	PBOFrameAllocator::Userdata *userdata = (PBOFrameAllocator::Userdata *)qf.frame->userdata;
	unsigned width = qf.video_format.width;
	unsigned height = qf.video_format.height;

	VAResources resources;
	ReleaseVAResources release;
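	// If the frame was captured straight into a VA-API image (the zero-copy
	// path), take over its surface; otherwise allocate resources here and
	// upload the pixels further down.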
	if (userdata->data_copy_current_src == PBOFrameAllocator::Userdata::FROM_VA_API) {
		resources = move(userdata->va_resources);
		release = move(userdata->va_resources_release);
	} else {
		assert(userdata->data_copy_current_src == PBOFrameAllocator::Userdata::FROM_MALLOC);
		resources = get_va_resources(width, height);
		release = ReleaseVAResources(this, resources);
	}

	VAData va_data = get_va_data_for_parameters(width, height, qf.white_balance);
	va_data.pic_param.coded_buf = resources.data_buffer;

	VABufferID pic_param_buffer;
	VAStatus va_status = vaCreateBuffer(va_dpy->va_dpy, resources.context, VAEncPictureParameterBufferType, sizeof(va_data.pic_param), 1, &va_data.pic_param, &pic_param_buffer);
	CHECK_VASTATUS(va_status, "vaCreateBuffer");
	VABufferDestroyer destroy_pic_param(va_dpy->va_dpy, pic_param_buffer);

	VABufferID q_buffer;
	va_status = vaCreateBuffer(va_dpy->va_dpy, resources.context, VAQMatrixBufferType, sizeof(va_data.q), 1, &va_data.q, &q_buffer);
	CHECK_VASTATUS(va_status, "vaCreateBuffer");
	VABufferDestroyer destroy_iq(va_dpy->va_dpy, q_buffer);

	VABufferID huff_buffer;
	va_status = vaCreateBuffer(va_dpy->va_dpy, resources.context, VAHuffmanTableBufferType, sizeof(va_data.huff), 1, &va_data.huff, &huff_buffer);
	CHECK_VASTATUS(va_status, "vaCreateBuffer");
	VABufferDestroyer destroy_huff(va_dpy->va_dpy, huff_buffer);

	VABufferID slice_param_buffer;
	va_status = vaCreateBuffer(va_dpy->va_dpy, resources.context, VAEncSliceParameterBufferType, sizeof(va_data.parms), 1, &va_data.parms, &slice_param_buffer);
	CHECK_VASTATUS(va_status, "vaCreateBuffer");
	VABufferDestroyer destroy_slice_param(va_dpy->va_dpy, slice_param_buffer);

	if (userdata->data_copy_current_src == PBOFrameAllocator::Userdata::FROM_VA_API) {
		// The pixel data is already put into the image by the caller.
		va_status = vaUnmapBuffer(va_dpy->va_dpy, resources.image.buf);
		CHECK_VASTATUS(va_status, "vaUnmapBuffer");
	} else {
		assert(userdata->data_copy_current_src == PBOFrameAllocator::Userdata::FROM_MALLOC);

		// Upload the pixel data.
		uint8_t *surface_p = nullptr;
		vaMapBuffer(va_dpy->va_dpy, resources.image.buf, (void **)&surface_p);

		size_t field_start_line = qf.video_format.extra_lines_top;  // No interlacing support.
		size_t field_start = qf.cbcr_offset * 2 + qf.video_format.width * field_start_line * 2;

		{
			const uint8_t *src = qf.frame->data_copy + field_start;
			uint8_t *dst = (unsigned char *)surface_p + resources.image.offsets[0];
			memcpy_with_pitch(dst, src, qf.video_format.width * 2, resources.image.pitches[0], qf.video_format.height);
		}

		va_status = vaUnmapBuffer(va_dpy->va_dpy, resources.image.buf);
		CHECK_VASTATUS(va_status, "vaUnmapBuffer");
	}

	qf.frame->data_copy = nullptr;

	// Seemingly vaPutImage() (which triggers a GPU copy) is much nicer to the
	// CPU than vaDeriveImage() and copying directly into the GPU's buffers.
	// Exactly why is unclear, but it seems to involve L3 cache usage when there
	// are many high-res (1080p+) images in play.
	va_status = vaPutImage(va_dpy->va_dpy, resources.surface, resources.image.image_id, 0, 0, width, height, 0, 0, width, height);
	CHECK_VASTATUS(va_status, "vaPutImage");

	// Finally, stick in the JPEG header.
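	// get_jpeg_header() produced everything up to and including SOS;
	// we pass it through verbatim as packed raw data, and the hardware
	// then appends the entropy-coded scan after it.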
	VAEncPackedHeaderParameterBuffer header_parm;
	header_parm.type = VAEncPackedHeaderRawData;
	header_parm.bit_length = 8 * va_data.jpeg_header.size();

	VABufferID header_parm_buffer;
	va_status = vaCreateBuffer(va_dpy->va_dpy, resources.context, VAEncPackedHeaderParameterBufferType, sizeof(header_parm), 1, &header_parm, &header_parm_buffer);
	CHECK_VASTATUS(va_status, "vaCreateBuffer");
	VABufferDestroyer destroy_header(va_dpy->va_dpy, header_parm_buffer);

	VABufferID header_data_buffer;
	va_status = vaCreateBuffer(va_dpy->va_dpy, resources.context, VAEncPackedHeaderDataBufferType, va_data.jpeg_header.size(), 1, va_data.jpeg_header.data(), &header_data_buffer);
	CHECK_VASTATUS(va_status, "vaCreateBuffer");
	VABufferDestroyer destroy_header_data(va_dpy->va_dpy, header_data_buffer);

	va_status = vaBeginPicture(va_dpy->va_dpy, resources.context, resources.surface);
	CHECK_VASTATUS(va_status, "vaBeginPicture");
	va_status = vaRenderPicture(va_dpy->va_dpy, resources.context, &pic_param_buffer, 1);
	CHECK_VASTATUS(va_status, "vaRenderPicture(pic_param)");
	va_status = vaRenderPicture(va_dpy->va_dpy, resources.context, &q_buffer, 1);
	CHECK_VASTATUS(va_status, "vaRenderPicture(q)");
	va_status = vaRenderPicture(va_dpy->va_dpy, resources.context, &huff_buffer, 1);
	CHECK_VASTATUS(va_status, "vaRenderPicture(huff)");
	va_status = vaRenderPicture(va_dpy->va_dpy, resources.context, &slice_param_buffer, 1);
	CHECK_VASTATUS(va_status, "vaRenderPicture(slice_param)");
	va_status = vaRenderPicture(va_dpy->va_dpy, resources.context, &header_parm_buffer, 1);
	CHECK_VASTATUS(va_status, "vaRenderPicture(header_parm)");
	va_status = vaRenderPicture(va_dpy->va_dpy, resources.context, &header_data_buffer, 1);
	CHECK_VASTATUS(va_status, "vaRenderPicture(header_data)");
	va_status = vaEndPicture(va_dpy->va_dpy, resources.context);
	CHECK_VASTATUS(va_status, "vaEndPicture");

	qf.resources = move(resources);
	qf.resource_releaser = move(release);

	lock_guard<mutex> lock(mu);
	frames_encoding.push(move(qf));
	any_frames_encoding.notify_all();
}

void MJPEGEncoder::va_receiver_thread_func()
{
	pthread_setname_np(pthread_self(), "MJPEG_Receive");
	for (;;) {
		QueuedFrame qf;
		{
			unique_lock<mutex> lock(mu);
			any_frames_encoding.wait(lock, [this] { return !frames_encoding.empty() || should_quit; });
			if (should_quit) return;
			qf = move(frames_encoding.front());
			frames_encoding.pop();
		}

		update_siphon_streams();

		assert(global_flags.card_to_mjpeg_stream_export.count(qf.card_index));  // Or should_encode_mjpeg_for_card() would have returned false.
		int stream_index = global_flags.card_to_mjpeg_stream_export[qf.card_index];

		HTTPD::StreamID multicam_id{ HTTPD::MULTICAM_STREAM, 0 };
		HTTPD::StreamID siphon_id{ HTTPD::SIPHON_STREAM, qf.card_index };
		assert(streams.count(multicam_id));
		assert(streams[multicam_id].avctx != nullptr);

		// Write audio before video, since Futatabi expects it.
		if (qf.audio.size() > 0) {
			write_audio_packet(streams[multicam_id].avctx.get(), qf.pts, stream_index + global_flags.card_to_mjpeg_stream_export.size(), qf.audio);
			if (streams.count(siphon_id)) {
				write_audio_packet(streams[siphon_id].avctx.get(), qf.pts, /*stream_index=*/1, qf.audio);
			}
		}

		VAStatus va_status = vaSyncSurface(va_dpy->va_dpy, qf.resources.surface);
		CHECK_VASTATUS(va_status, "vaSyncSurface");

		VACodedBufferSegment *segment;
		va_status = vaMapBuffer(va_dpy->va_dpy, qf.resources.data_buffer, (void **)&segment);
		CHECK_VASTATUS(va_status, "vaMapBuffer");

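		// We only look at the first segment, assuming the driver returns
		// the entire coded JPEG in one go (the coded buffer is sized
		// generously enough for that).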
		const uint8_t *coded_buf = reinterpret_cast<uint8_t *>(segment->buf);
		write_mjpeg_packet(streams[multicam_id].avctx.get(), qf.pts, stream_index, coded_buf, segment->size);
		if (streams.count(siphon_id)) {
			write_mjpeg_packet(streams[siphon_id].avctx.get(), qf.pts, /*stream_index=*/0, coded_buf, segment->size);
		}

		va_status = vaUnmapBuffer(va_dpy->va_dpy, qf.resources.data_buffer);
		CHECK_VASTATUS(va_status, "vaUnmapBuffer");
	}
}

vector<uint8_t> MJPEGEncoder::encode_jpeg_libjpeg(const QueuedFrame &qf)
{
	unsigned width = qf.video_format.width;
	unsigned height = qf.video_format.height;

	VectorDestinationManager dest;
	jpeg_compress_struct cinfo;
	init_jpeg_422(width, height, qf.white_balance, &dest, &cinfo);

	size_t field_start_line = qf.video_format.extra_lines_top;  // No interlacing support.
	size_t field_start = qf.cbcr_offset * 2 + qf.video_format.width * field_start_line * 2;

	JSAMPROW yptr[8], cbptr[8], crptr[8];
	JSAMPARRAY data[3] = { yptr, cbptr, crptr };
	for (unsigned y = 0; y < qf.video_format.height; y += 8) {
		const uint8_t *src = qf.frame->data_copy + field_start + y * qf.video_format.width * 2;

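		// In UYVY, even bytes are interleaved chroma (Cb, Cr) and odd
		// bytes are luma, so first split into (CbCr, Y), then split the
		// chroma again into separate Cb and Cr planes for libjpeg.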
		memcpy_interleaved(tmp_cbcr, tmp_y, src, qf.video_format.width * 8 * 2);
		memcpy_interleaved(tmp_cb, tmp_cr, tmp_cbcr, qf.video_format.width * 8);
		for (unsigned yy = 0; yy < 8; ++yy) {
			yptr[yy] = tmp_y + yy * width;
			cbptr[yy] = tmp_cb + yy * width / 2;
			crptr[yy] = tmp_cr + yy * width / 2;
		}
		jpeg_write_raw_data(&cinfo, data, /*num_lines=*/8);
	}
	jpeg_finish_compress(&cinfo);

	return dest.dest;
}

void MJPEGEncoder::add_stream(HTTPD::StreamID stream_id)
{
	AVFormatContextWithCloser avctx;

	// Set up the mux. We don't use the Mux wrapper, because it's geared towards
	// a situation with only one video stream (and possibly one audio stream)
	// with known width/height, and we don't need the extra functionality it provides.
	avctx.reset(avformat_alloc_context());
	avctx->oformat = av_guess_format("nut", nullptr, nullptr);

	uint8_t *buf = (uint8_t *)av_malloc(MUX_BUFFER_SIZE);
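	// With custom I/O, avio will not free this buffer for us; it is freed
	// manually in the destructor and in update_siphon_streams().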
	avctx->pb = avio_alloc_context(buf, MUX_BUFFER_SIZE, 1, &ffmpeg_contexts[stream_id], nullptr, nullptr, nullptr);
	avctx->pb->write_data_type = &MJPEGEncoder::write_packet2_thunk;
	avctx->flags = AVFMT_FLAG_CUSTOM_IO;

	if (stream_id.type == HTTPD::MULTICAM_STREAM) {
		for (unsigned card_idx = 0; card_idx < global_flags.card_to_mjpeg_stream_export.size(); ++card_idx) {
			add_video_stream(avctx.get());
		}
		for (unsigned card_idx = 0; card_idx < global_flags.card_to_mjpeg_stream_export.size(); ++card_idx) {
			add_audio_stream(avctx.get());
		}
	} else {
		assert(stream_id.type == HTTPD::SIPHON_STREAM);
		add_video_stream(avctx.get());
		add_audio_stream(avctx.get());
	}
	finalize_mux(avctx.get());

	Stream s;
	s.avctx = move(avctx);
	streams[stream_id] = move(s);
}

void MJPEGEncoder::update_siphon_streams()
{
	// Bring the list of streams into sync with what the clients need.
	for (auto it = streams.begin(); it != streams.end(); ) {
		if (it->first.type != HTTPD::SIPHON_STREAM) {
			++it;
			continue;
		}
		if (httpd->get_num_connected_siphon_clients(it->first.index) == 0) {
			av_free(it->second.avctx->pb->buffer);
			streams.erase(it++);
		} else {
			++it;
		}
	}
	for (unsigned stream_idx = 0; stream_idx < MAX_VIDEO_CARDS; ++stream_idx) {
		HTTPD::StreamID stream_id{ HTTPD::SIPHON_STREAM, stream_idx };
		if (streams.count(stream_id) == 0 && httpd->get_num_connected_siphon_clients(stream_idx) > 0) {
			add_stream(stream_id);
		}
	}
}

void MJPEGEncoder::create_ffmpeg_context(HTTPD::StreamID stream_id)
{
	ffmpeg_contexts.emplace(stream_id, WritePacket2Context{ this, stream_id });
}