#include "video_stream.h"

extern "C" {
#include <libavformat/avformat.h>
#include <libavformat/avio.h>
}

#include "chroma_subsampler.h"
#include "context.h"
#include "flow.h"
#include "httpd.h"
#include "jpeg_frame_view.h"
#include "movit/util.h"
#include "mux.h"
#include "player.h"
#include "util.h"
#include "ycbcr_converter.h"

#include <epoxy/glx.h>
#include <inttypes.h>
#include <jpeglib.h>
#include <unistd.h>

using namespace std;

extern HTTPD *global_httpd;

namespace {

string read_file(const string &filename)
{
	FILE *fp = fopen(filename.c_str(), "rb");
	if (fp == nullptr) {
		perror(filename.c_str());
		return "";
	}

	fseek(fp, 0, SEEK_END);
	long len = ftell(fp);
	rewind(fp);

	string ret;
	ret.resize(len);
	if (len > 0 && fread(&ret[0], len, 1, fp) != 1) {
		fprintf(stderr, "Short read when reading from %s\n", filename.c_str());
		fclose(fp);
		return "";
	}
	fclose(fp);
	return ret;
}

}  // namespace

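// A libjpeg destination manager that writes the compressed output into a
// growing std::vector<uint8_t>, so that we get the entire JPEG in memory
// without going through a temporary file. libjpeg casts the
// jpeg_destination_mgr pointer back to this struct, which is why it must be
// standard-layout with “pub” as the first member (see the static_assert below).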
struct VectorDestinationManager {
	jpeg_destination_mgr pub;
	std::vector<uint8_t> dest;

	VectorDestinationManager()
	{
		pub.init_destination = init_destination_thunk;
		pub.empty_output_buffer = empty_output_buffer_thunk;
		pub.term_destination = term_destination_thunk;
	}

	static void init_destination_thunk(j_compress_ptr ptr)
	{
		((VectorDestinationManager *)(ptr->dest))->init_destination();
	}

	inline void init_destination()
	{
		make_room(0);
	}

	static boolean empty_output_buffer_thunk(j_compress_ptr ptr)
	{
		return ((VectorDestinationManager *)(ptr->dest))->empty_output_buffer();
	}

	inline bool empty_output_buffer()
	{
		make_room(dest.size());  // Should ignore pub.free_in_buffer!
		return true;
	}

	inline void make_room(size_t bytes_used)
	{
		dest.resize(bytes_used + 4096);
		dest.resize(dest.capacity());
		pub.next_output_byte = dest.data() + bytes_used;
		pub.free_in_buffer = dest.size() - bytes_used;
	}

	static void term_destination_thunk(j_compress_ptr ptr)
	{
		((VectorDestinationManager *)(ptr->dest))->term_destination();
	}

	inline void term_destination()
	{
		dest.resize(dest.size() - pub.free_in_buffer);
	}
};
static_assert(std::is_standard_layout<VectorDestinationManager>::value, "");

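// Encode a 4:2:2 planar Y'CbCr image (full-width Y', half-width Cb and Cr)
// directly to JPEG, without any intermediate RGB conversion.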
vector<uint8_t> encode_jpeg(const uint8_t *y_data, const uint8_t *cb_data, const uint8_t *cr_data, unsigned width, unsigned height)
{
	VectorDestinationManager dest;

	jpeg_compress_struct cinfo;
	jpeg_error_mgr jerr;
	cinfo.err = jpeg_std_error(&jerr);
	jpeg_create_compress(&cinfo);

	cinfo.dest = (jpeg_destination_mgr *)&dest;
	cinfo.input_components = 3;
	cinfo.in_color_space = JCS_RGB;
	jpeg_set_defaults(&cinfo);
	constexpr int quality = 90;
	jpeg_set_quality(&cinfo, quality, /*force_baseline=*/false);

	cinfo.image_width = width;
	cinfo.image_height = height;
	cinfo.raw_data_in = true;
	jpeg_set_colorspace(&cinfo, JCS_YCbCr);
	cinfo.comp_info[0].h_samp_factor = 2;
	cinfo.comp_info[0].v_samp_factor = 1;
	cinfo.comp_info[1].h_samp_factor = 1;
	cinfo.comp_info[1].v_samp_factor = 1;
	cinfo.comp_info[2].h_samp_factor = 1;
	cinfo.comp_info[2].v_samp_factor = 1;
	cinfo.CCIR601_sampling = true;  // Seems to be mostly ignored by libjpeg, though.
	jpeg_start_compress(&cinfo, true);

	JSAMPROW yptr[8], cbptr[8], crptr[8];
	JSAMPARRAY data[3] = { yptr, cbptr, crptr };
	for (unsigned y = 0; y < height; y += 8) {
		for (unsigned yy = 0; yy < 8; ++yy) {
			yptr[yy] = const_cast<JSAMPROW>(&y_data[(y + yy) * width]);
			cbptr[yy] = const_cast<JSAMPROW>(&cb_data[(y + yy) * width / 2]);
			crptr[yy] = const_cast<JSAMPROW>(&cr_data[(y + yy) * width / 2]);
		}

		jpeg_write_raw_data(&cinfo, data, /*num_lines=*/8);
	}

	jpeg_finish_compress(&cinfo);
	jpeg_destroy_compress(&cinfo);

	return move(dest.dest);
}

VideoStream::VideoStream()
{
	ycbcr_converter.reset(new YCbCrConverter(YCbCrConverter::OUTPUT_TO_DUAL_YCBCR, /*resource_pool=*/nullptr));
	ycbcr_semiplanar_converter.reset(new YCbCrConverter(YCbCrConverter::OUTPUT_TO_SEMIPLANAR, /*resource_pool=*/nullptr));

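	// Pre-allocate a fixed pool of GPU resources (textures, FBOs and a
	// persistently mapped readback PBO) for frames in flight; slots are
	// handed out by the schedule_* functions and put back on
	// interpolate_resources once the encode thread is done with them.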
	GLuint input_tex[num_interpolate_slots], gray_tex[num_interpolate_slots];
	GLuint fade_y_output_tex[num_interpolate_slots], fade_cbcr_output_tex[num_interpolate_slots];
	GLuint cb_tex[num_interpolate_slots], cr_tex[num_interpolate_slots];

	glCreateTextures(GL_TEXTURE_2D_ARRAY, num_interpolate_slots, input_tex);
	glCreateTextures(GL_TEXTURE_2D_ARRAY, num_interpolate_slots, gray_tex);
	glCreateTextures(GL_TEXTURE_2D, num_interpolate_slots, fade_y_output_tex);
	glCreateTextures(GL_TEXTURE_2D, num_interpolate_slots, fade_cbcr_output_tex);
	glCreateTextures(GL_TEXTURE_2D, num_interpolate_slots, cb_tex);
	glCreateTextures(GL_TEXTURE_2D, num_interpolate_slots, cr_tex);
	check_error();

	constexpr size_t width = 1280, height = 720;  // FIXME: adjustable width, height
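	// The input and grayscale textures get full mipmap chains, since the
	// optical flow is computed on a coarse-to-fine image pyramid.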
	int levels = find_num_levels(width, height);
	for (size_t i = 0; i < num_interpolate_slots; ++i) {
		glTextureStorage3D(input_tex[i], levels, GL_RGBA8, width, height, 2);
		check_error();
		glTextureStorage3D(gray_tex[i], levels, GL_R8, width, height, 2);
		check_error();
		glTextureStorage2D(fade_y_output_tex[i], 1, GL_R8, width, height);
		check_error();
		glTextureStorage2D(fade_cbcr_output_tex[i], 1, GL_RG8, width, height);
		check_error();
		glTextureStorage2D(cb_tex[i], 1, GL_R8, width / 2, height);
		check_error();
		glTextureStorage2D(cr_tex[i], 1, GL_R8, width / 2, height);
		check_error();

		InterpolatedFrameResources resource;
		resource.input_tex = input_tex[i];
		resource.gray_tex = gray_tex[i];
		resource.fade_y_output_tex = fade_y_output_tex[i];
		resource.fade_cbcr_output_tex = fade_cbcr_output_tex[i];
		resource.cb_tex = cb_tex[i];
		resource.cr_tex = cr_tex[i];
		glCreateFramebuffers(2, resource.input_fbos);
		check_error();
		glCreateFramebuffers(1, &resource.fade_fbo);
		check_error();

		glNamedFramebufferTextureLayer(resource.input_fbos[0], GL_COLOR_ATTACHMENT0, input_tex[i], 0, 0);
		check_error();
		glNamedFramebufferTextureLayer(resource.input_fbos[0], GL_COLOR_ATTACHMENT1, gray_tex[i], 0, 0);
		check_error();
		glNamedFramebufferTextureLayer(resource.input_fbos[1], GL_COLOR_ATTACHMENT0, input_tex[i], 0, 1);
		check_error();
		glNamedFramebufferTextureLayer(resource.input_fbos[1], GL_COLOR_ATTACHMENT1, gray_tex[i], 0, 1);
		check_error();
		glNamedFramebufferTexture(resource.fade_fbo, GL_COLOR_ATTACHMENT0, fade_y_output_tex[i], 0);
		check_error();
		glNamedFramebufferTexture(resource.fade_fbo, GL_COLOR_ATTACHMENT1, fade_cbcr_output_tex[i], 0);
		check_error();

		GLuint bufs[] = { GL_COLOR_ATTACHMENT0, GL_COLOR_ATTACHMENT1 };
		glNamedFramebufferDrawBuffers(resource.input_fbos[0], 2, bufs);
		check_error();
		glNamedFramebufferDrawBuffers(resource.input_fbos[1], 2, bufs);
		check_error();
		glNamedFramebufferDrawBuffers(resource.fade_fbo, 2, bufs);
		check_error();

		glCreateBuffers(1, &resource.pbo);
		check_error();
		glNamedBufferStorage(resource.pbo, width * height * 4, nullptr, GL_MAP_READ_BIT | GL_MAP_PERSISTENT_BIT);
		check_error();
		resource.pbo_contents = glMapNamedBufferRange(resource.pbo, 0, width * height * 4, GL_MAP_READ_BIT | GL_MAP_PERSISTENT_BIT);
		interpolate_resources.push_back(resource);
	}

	check_error();

	compute_flow.reset(new DISComputeFlow(width, height, operating_point2));
	interpolate.reset(new Interpolate(operating_point2, /*split_ycbcr_output=*/true));
	interpolate_no_split.reset(new Interpolate(operating_point2, /*split_ycbcr_output=*/false));
	chroma_subsampler.reset(new ChromaSubsampler);
	check_error();

	// The “last frame” is initially black.
	unique_ptr<uint8_t[]> y(new uint8_t[1280 * 720]);
	unique_ptr<uint8_t[]> cb_or_cr(new uint8_t[640 * 720]);
	memset(y.get(), 16, 1280 * 720);
	memset(cb_or_cr.get(), 128, 640 * 720);
	last_frame = encode_jpeg(y.get(), cb_or_cr.get(), cb_or_cr.get(), 1280, 720);
}

VideoStream::~VideoStream() {}

void VideoStream::start()
{
	AVFormatContext *avctx = avformat_alloc_context();
	avctx->oformat = av_guess_format("nut", nullptr, nullptr);

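	// Use custom AVIO, so that everything the muxer writes is handed to
	// write_packet2(), which forwards it to the HTTP server.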
	uint8_t *buf = (uint8_t *)av_malloc(MUX_BUFFER_SIZE);
	avctx->pb = avio_alloc_context(buf, MUX_BUFFER_SIZE, 1, this, nullptr, nullptr, nullptr);
	avctx->pb->write_data_type = &VideoStream::write_packet2_thunk;
	avctx->pb->ignore_boundary_point = 1;

	Mux::Codec video_codec = Mux::CODEC_MJPEG;

	avctx->flags = AVFMT_FLAG_CUSTOM_IO;

	string video_extradata;

	constexpr int width = 1280, height = 720;  // Doesn't matter for MJPEG.
	stream_mux.reset(new Mux(avctx, width, height, video_codec, video_extradata, /*audio_codec_parameters=*/nullptr, COARSE_TIMEBASE,
		/*write_callback=*/nullptr, Mux::WRITE_FOREGROUND, {}));

	encode_thread = thread(&VideoStream::encode_thread_func, this);
}

void VideoStream::stop()
{
	encode_thread.join();
}

void VideoStream::schedule_original_frame(int64_t output_pts, unsigned stream_idx, int64_t input_pts)
{
	fprintf(stderr, "output_pts=%" PRId64 "  original      input_pts=%" PRId64 "\n", output_pts, input_pts);

	QueuedFrame qf;
	qf.type = QueuedFrame::ORIGINAL;
	qf.output_pts = output_pts;
	qf.stream_idx = stream_idx;
	qf.input_first_pts = input_pts;

	unique_lock<mutex> lock(queue_lock);
	frame_queue.push_back(qf);
	queue_nonempty.notify_all();
}

void VideoStream::schedule_faded_frame(int64_t output_pts, unsigned stream_idx, int64_t input_pts, int secondary_stream_idx, int64_t secondary_input_pts, float fade_alpha)
{
	fprintf(stderr, "output_pts=%" PRId64 "  faded         input_pts=%" PRId64 ",%" PRId64 "  fade_alpha=%.2f\n", output_pts, input_pts, secondary_input_pts, fade_alpha);

	// Get the temporary OpenGL resources we need for doing the fade.
	// (We share these with interpolated frames, which is slightly
	// overkill, but there's no need to waste resources on keeping
	// separate pools around.)
	InterpolatedFrameResources resources;
	{
		unique_lock<mutex> lock(queue_lock);
		if (interpolate_resources.empty()) {
			fprintf(stderr, "WARNING: Too many interpolated frames already in transit; dropping one.\n");
			return;
		}
		resources = interpolate_resources.front();
		interpolate_resources.pop_front();
	}

	bool did_decode;

	JPEGID jpeg_id1;
	jpeg_id1.stream_idx = stream_idx;
	jpeg_id1.pts = input_pts;
	jpeg_id1.interpolated = false;
	shared_ptr<Frame> frame1 = decode_jpeg_with_cache(jpeg_id1, DECODE_IF_NOT_IN_CACHE, &did_decode);

	JPEGID jpeg_id2;
	jpeg_id2.stream_idx = secondary_stream_idx;
	jpeg_id2.pts = secondary_input_pts;
	jpeg_id2.interpolated = false;
	shared_ptr<Frame> frame2 = decode_jpeg_with_cache(jpeg_id2, DECODE_IF_NOT_IN_CACHE, &did_decode);

	ycbcr_semiplanar_converter->prepare_chain_for_fade(frame1, frame2, fade_alpha)->render_to_fbo(resources.fade_fbo, 1280, 720);

	QueuedFrame qf;
	qf.type = QueuedFrame::FADED;
	qf.output_pts = output_pts;
	qf.stream_idx = stream_idx;
	qf.resources = resources;
	qf.input_first_pts = input_pts;

	qf.secondary_stream_idx = secondary_stream_idx;
	qf.secondary_input_pts = secondary_input_pts;

	// Subsample and split Cb/Cr.
	chroma_subsampler->subsample_chroma(resources.fade_cbcr_output_tex, 1280, 720, resources.cb_tex, resources.cr_tex);

	// Read it down (asynchronously) to the CPU.
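	// The PBO holds the planes back-to-back: full-width Y' first, then
	// half-width Cb, then half-width Cr. The size argument passed to
	// glGetTextureImage() is just the space remaining in the buffer,
	// not the number of bytes actually written.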
	glPixelStorei(GL_PACK_ROW_LENGTH, 0);
	glBindBuffer(GL_PIXEL_PACK_BUFFER, resources.pbo);
	check_error();
	glGetTextureImage(resources.fade_y_output_tex, 0, GL_RED, GL_UNSIGNED_BYTE, 1280 * 720 * 4, BUFFER_OFFSET(0));
	check_error();
	glGetTextureImage(resources.cb_tex, 0, GL_RED, GL_UNSIGNED_BYTE, 1280 * 720 * 3, BUFFER_OFFSET(1280 * 720));
	check_error();
	glGetTextureImage(resources.cr_tex, 0, GL_RED, GL_UNSIGNED_BYTE, 1280 * 720 * 3 - 640 * 720, BUFFER_OFFSET(1280 * 720 + 640 * 720));
	check_error();
	glBindBuffer(GL_PIXEL_PACK_BUFFER, 0);

	// Set a fence we can wait for to make sure the CPU sees the read.
	glMemoryBarrier(GL_CLIENT_MAPPED_BUFFER_BARRIER_BIT);
	check_error();
	qf.fence = RefCountedGLsync(GL_SYNC_GPU_COMMANDS_COMPLETE, /*flags=*/0);
	check_error();

	unique_lock<mutex> lock(queue_lock);
	frame_queue.push_back(qf);
	queue_nonempty.notify_all();
}

void VideoStream::schedule_interpolated_frame(int64_t output_pts, unsigned stream_idx, int64_t input_first_pts, int64_t input_second_pts, float alpha, int secondary_stream_idx, int64_t secondary_input_pts, float fade_alpha)
{
	if (secondary_stream_idx != -1) {
		fprintf(stderr, "output_pts=%" PRId64 "  interpolated  input_pts1=%" PRId64 " input_pts2=%" PRId64 " alpha=%.3f  secondary_pts=%" PRId64 "  fade_alpha=%.2f\n", output_pts, input_first_pts, input_second_pts, alpha, secondary_input_pts, fade_alpha);
	} else {
		fprintf(stderr, "output_pts=%" PRId64 "  interpolated  input_pts1=%" PRId64 " input_pts2=%" PRId64 " alpha=%.3f\n", output_pts, input_first_pts, input_second_pts, alpha);
	}

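	// Interpolated frames are also shown in the UI; make an ID that
	// JPEGFrameView can use to pick up the finished frame (it is inserted
	// by the encode thread once the readback is done).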
	JPEGID id;
	if (secondary_stream_idx == -1) {
		id = JPEGID{ stream_idx, output_pts, /*interpolated=*/true };
	} else {
		id = create_jpegid_for_interpolated_fade(stream_idx, output_pts, secondary_stream_idx, secondary_input_pts);
	}

	// Get the temporary OpenGL resources we need for doing the interpolation.
	InterpolatedFrameResources resources;
	{
		unique_lock<mutex> lock(queue_lock);
		if (interpolate_resources.empty()) {
			fprintf(stderr, "WARNING: Too many interpolated frames already in transit; dropping one.\n");
			JPEGFrameView::insert_interpolated_frame(id, nullptr);
			return;
		}
		resources = interpolate_resources.front();
		interpolate_resources.pop_front();
	}

	QueuedFrame qf;
	qf.type = (secondary_stream_idx == -1) ? QueuedFrame::INTERPOLATED : QueuedFrame::FADED_INTERPOLATED;
	qf.output_pts = output_pts;
	qf.stream_idx = stream_idx;
	qf.resources = resources;
	qf.id = id;

	check_error();

	// Convert frame0 and frame1 to OpenGL textures.
	for (size_t frame_no = 0; frame_no < 2; ++frame_no) {
		JPEGID jpeg_id;
		jpeg_id.stream_idx = stream_idx;
		jpeg_id.pts = frame_no == 1 ? input_second_pts : input_first_pts;
		jpeg_id.interpolated = false;
		bool did_decode;
		shared_ptr<Frame> frame = decode_jpeg_with_cache(jpeg_id, DECODE_IF_NOT_IN_CACHE, &did_decode);
		ycbcr_converter->prepare_chain_for_conversion(frame)->render_to_fbo(resources.input_fbos[frame_no], 1280, 720);
	}

	glGenerateTextureMipmap(resources.input_tex);
	check_error();
	glGenerateTextureMipmap(resources.gray_tex);
	check_error();

	// Compute the interpolated frame.
	qf.flow_tex = compute_flow->exec(resources.gray_tex, DISComputeFlow::FORWARD_AND_BACKWARD, DISComputeFlow::DO_NOT_RESIZE_FLOW);
	check_error();

	if (secondary_stream_idx != -1) {
		// Fade. First kick off the interpolation.
		tie(qf.output_tex, ignore) = interpolate_no_split->exec(resources.input_tex, resources.gray_tex, qf.flow_tex, 1280, 720, alpha);
		check_error();

		// Now decode the image we are fading against.
		JPEGID jpeg_id;
		jpeg_id.stream_idx = secondary_stream_idx;
		jpeg_id.pts = secondary_input_pts;
		jpeg_id.interpolated = false;
		bool did_decode;
		shared_ptr<Frame> frame2 = decode_jpeg_with_cache(jpeg_id, DECODE_IF_NOT_IN_CACHE, &did_decode);

		// Then fade against it, putting it into the fade Y' and CbCr textures.
		ycbcr_semiplanar_converter->prepare_chain_for_fade_from_texture(qf.output_tex, frame2, fade_alpha)->render_to_fbo(resources.fade_fbo, 1280, 720);

		// Subsample and split Cb/Cr.
		chroma_subsampler->subsample_chroma(resources.fade_cbcr_output_tex, 1280, 720, resources.cb_tex, resources.cr_tex);
	} else {
		tie(qf.output_tex, qf.cbcr_tex) = interpolate->exec(resources.input_tex, resources.gray_tex, qf.flow_tex, 1280, 720, alpha);
		check_error();

		// Subsample and split Cb/Cr.
		chroma_subsampler->subsample_chroma(qf.cbcr_tex, 1280, 720, resources.cb_tex, resources.cr_tex);
	}

	// We could have released qf.flow_tex here, but to make sure we don't cause a stall
	// when trying to reuse it for the next frame, we can just as well hold on to it
	// and release it only when the readback is done.

	// Read it down (asynchronously) to the CPU.
	glPixelStorei(GL_PACK_ROW_LENGTH, 0);
	glBindBuffer(GL_PIXEL_PACK_BUFFER, resources.pbo);
	check_error();
	if (secondary_stream_idx != -1) {
		glGetTextureImage(resources.fade_y_output_tex, 0, GL_RED, GL_UNSIGNED_BYTE, 1280 * 720 * 4, BUFFER_OFFSET(0));
	} else {
		glGetTextureImage(qf.output_tex, 0, GL_RED, GL_UNSIGNED_BYTE, 1280 * 720 * 4, BUFFER_OFFSET(0));
	}
	check_error();
	glGetTextureImage(resources.cb_tex, 0, GL_RED, GL_UNSIGNED_BYTE, 1280 * 720 * 3, BUFFER_OFFSET(1280 * 720));
	check_error();
	glGetTextureImage(resources.cr_tex, 0, GL_RED, GL_UNSIGNED_BYTE, 1280 * 720 * 3 - 640 * 720, BUFFER_OFFSET(1280 * 720 + 640 * 720));
	check_error();
	glBindBuffer(GL_PIXEL_PACK_BUFFER, 0);

	// Set a fence we can wait for to make sure the CPU sees the read.
	glMemoryBarrier(GL_CLIENT_MAPPED_BUFFER_BARRIER_BIT);
	check_error();
	qf.fence = RefCountedGLsync(GL_SYNC_GPU_COMMANDS_COMPLETE, /*flags=*/0);
	check_error();

	unique_lock<mutex> lock(queue_lock);
	frame_queue.push_back(qf);
	queue_nonempty.notify_all();
}

void VideoStream::schedule_refresh_frame(int64_t output_pts)
{
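	// Re-send the last frame we encoded, presumably so that the output
	// stream keeps getting data even when no new frame is being produced.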
	AVPacket pkt;
	av_init_packet(&pkt);
	pkt.stream_index = 0;
	pkt.data = (uint8_t *)last_frame.data();
	pkt.size = last_frame.size();
	stream_mux->add_packet(pkt, output_pts, output_pts);
}

namespace {

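// Copy the contents of the readback PBO (planar Y', then Cb, then Cr, as
// written by the glGetTextureImage() calls above) into a new Frame.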
shared_ptr<Frame> frame_from_pbo(void *contents, size_t width, size_t height)
{
	size_t chroma_width = width / 2;

	const uint8_t *y = (const uint8_t *)contents;
	const uint8_t *cb = (const uint8_t *)contents + width * height;
	const uint8_t *cr = (const uint8_t *)contents + width * height + chroma_width * height;

	shared_ptr<Frame> frame(new Frame);
	frame->y.reset(new uint8_t[width * height]);
	frame->cb.reset(new uint8_t[chroma_width * height]);
	frame->cr.reset(new uint8_t[chroma_width * height]);
	for (unsigned yy = 0; yy < height; ++yy) {
		memcpy(frame->y.get() + width * yy, y + width * yy, width);
		memcpy(frame->cb.get() + chroma_width * yy, cb + chroma_width * yy, chroma_width);
		memcpy(frame->cr.get() + chroma_width * yy, cr + chroma_width * yy, chroma_width);
	}
	frame->is_semiplanar = false;
	frame->width = width;
	frame->height = height;
	frame->chroma_subsampling_x = 2;
	frame->chroma_subsampling_y = 1;
	frame->pitch_y = width;
	frame->pitch_chroma = chroma_width;
	return frame;
}

}  // namespace

void VideoStream::encode_thread_func()
{
	pthread_setname_np(pthread_self(), "VideoStream");
	QSurface *surface = create_surface();
	QOpenGLContext *context = create_context(surface);
	bool ok = make_current(context, surface);
	if (!ok) {
		fprintf(stderr, "Video stream couldn't get an OpenGL context\n");
		exit(1);
	}

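	// Consume the frame queue; for GPU-produced frames, wait for the fence
	// before touching the mapped PBO contents, then JPEG-encode the result
	// and send it on to the mux.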
	for ( ;; ) {
		QueuedFrame qf;
		{
			unique_lock<mutex> lock(queue_lock);
			queue_nonempty.wait(lock, [this]{
				return !frame_queue.empty();
			});
			qf = frame_queue.front();
			frame_queue.pop_front();
		}

		if (qf.type == QueuedFrame::ORIGINAL) {
			// Send the JPEG frame on, unchanged.
			string jpeg = read_file(filename_for_frame(qf.stream_idx, qf.input_first_pts));
			AVPacket pkt;
			av_init_packet(&pkt);
			pkt.stream_index = 0;
			pkt.data = (uint8_t *)jpeg.data();
			pkt.size = jpeg.size();
			stream_mux->add_packet(pkt, qf.output_pts, qf.output_pts);

			last_frame.assign(&jpeg[0], &jpeg[0] + jpeg.size());
		} else if (qf.type == QueuedFrame::FADED) {
			glClientWaitSync(qf.fence.get(), /*flags=*/0, GL_TIMEOUT_IGNORED);

			shared_ptr<Frame> frame = frame_from_pbo(qf.resources.pbo_contents, 1280, 720);

			// Now JPEG encode it, and send it on to the stream.
			vector<uint8_t> jpeg = encode_jpeg(frame->y.get(), frame->cb.get(), frame->cr.get(), 1280, 720);

			AVPacket pkt;
			av_init_packet(&pkt);
			pkt.stream_index = 0;
			pkt.data = (uint8_t *)jpeg.data();
			pkt.size = jpeg.size();
			stream_mux->add_packet(pkt, qf.output_pts, qf.output_pts);
			last_frame = move(jpeg);

			// Put the frame resources back.
			unique_lock<mutex> lock(queue_lock);
			interpolate_resources.push_back(qf.resources);
		} else if (qf.type == QueuedFrame::INTERPOLATED || qf.type == QueuedFrame::FADED_INTERPOLATED) {
			glClientWaitSync(qf.fence.get(), /*flags=*/0, GL_TIMEOUT_IGNORED);

			// Send a copy of the frame on to display.
			shared_ptr<Frame> frame = frame_from_pbo(qf.resources.pbo_contents, 1280, 720);
			JPEGFrameView::insert_interpolated_frame(qf.id, frame);

			// Now JPEG encode it, and send it on to the stream.
			vector<uint8_t> jpeg = encode_jpeg(frame->y.get(), frame->cb.get(), frame->cr.get(), 1280, 720);
			compute_flow->release_texture(qf.flow_tex);
			if (qf.type != QueuedFrame::FADED_INTERPOLATED) {
				interpolate->release_texture(qf.output_tex);
				interpolate->release_texture(qf.cbcr_tex);
			}

			AVPacket pkt;
			av_init_packet(&pkt);
			pkt.stream_index = 0;
			pkt.data = (uint8_t *)jpeg.data();
			pkt.size = jpeg.size();
			stream_mux->add_packet(pkt, qf.output_pts, qf.output_pts);
			last_frame = move(jpeg);

			// Put the frame resources back.
			unique_lock<mutex> lock(queue_lock);
			interpolate_resources.push_back(qf.resources);
		} else {
			assert(false);
		}
	}
}

int VideoStream::write_packet2_thunk(void *opaque, uint8_t *buf, int buf_size, AVIODataMarkerType type, int64_t time)
{
	VideoStream *video_stream = (VideoStream *)opaque;
	return video_stream->write_packet2(buf, buf_size, type, time);
}

int VideoStream::write_packet2(uint8_t *buf, int buf_size, AVIODataMarkerType type, int64_t time)
{
	if (type == AVIO_DATA_MARKER_SYNC_POINT || type == AVIO_DATA_MARKER_BOUNDARY_POINT) {
		seen_sync_markers = true;
	} else if (type == AVIO_DATA_MARKER_UNKNOWN && !seen_sync_markers) {
		// We don't know if this is a keyframe or not (the muxer could
		// avoid marking it), so we just have to make the best of it.
		type = AVIO_DATA_MARKER_SYNC_POINT;
	}

	if (type == AVIO_DATA_MARKER_HEADER) {
		stream_mux_header.append((char *)buf, buf_size);
		global_httpd->set_header(stream_mux_header);
	} else {
		global_httpd->add_data((char *)buf, buf_size, type == AVIO_DATA_MARKER_SYNC_POINT, time, AVRational{ AV_TIME_BASE, 1 });
	}
	return buf_size;
}