Do not try to show a frame until we've computed it (do it in a callback instead).
#include "video_stream.h"

extern "C" {
#include <libavformat/avformat.h>
#include <libavformat/avio.h>
}

#include "chroma_subsampler.h"
#include "context.h"
#include "flags.h"
#include "flow.h"
#include "httpd.h"
#include "jpeg_frame_view.h"
#include "movit/util.h"
#include "mux.h"
#include "player.h"
#include "util.h"
#include "ycbcr_converter.h"

#include <epoxy/glx.h>
#include <jpeglib.h>
#include <unistd.h>

using namespace std;

extern HTTPD *global_httpd;

namespace {

string read_file(const string &filename)
{
	FILE *fp = fopen(filename.c_str(), "rb");
	if (fp == nullptr) {
		perror(filename.c_str());
		return "";
	}

	fseek(fp, 0, SEEK_END);
	long len = ftell(fp);
	rewind(fp);

	string ret;
	ret.resize(len);
	if (len > 0 && fread(&ret[0], len, 1, fp) != 1) {
		perror(filename.c_str());
		fclose(fp);
		return "";
	}
	fclose(fp);
	return ret;
}

}  // namespace

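// A libjpeg destination manager that writes the compressed output into a
// growable std::vector<uint8_t>. libjpeg drives it through three callbacks:
// init_destination() before the first byte, empty_output_buffer() whenever
// the buffer it was handed is full, and term_destination() at the end.
// The struct must be standard-layout (checked below), since libjpeg only
// sees the embedded jpeg_destination_mgr, and the thunks cast that pointer
// back to the full object.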
struct VectorDestinationManager {
	jpeg_destination_mgr pub;
	std::vector<uint8_t> dest;

	VectorDestinationManager()
	{
		pub.init_destination = init_destination_thunk;
		pub.empty_output_buffer = empty_output_buffer_thunk;
		pub.term_destination = term_destination_thunk;
	}

	static void init_destination_thunk(j_compress_ptr ptr)
	{
		((VectorDestinationManager *)(ptr->dest))->init_destination();
	}

	inline void init_destination()
	{
		make_room(0);
	}

	static boolean empty_output_buffer_thunk(j_compress_ptr ptr)
	{
		return ((VectorDestinationManager *)(ptr->dest))->empty_output_buffer();
	}

	inline bool empty_output_buffer()
	{
		make_room(dest.size());  // Should ignore pub.free_in_buffer!
		return true;
	}

	inline void make_room(size_t bytes_used)
	{
		dest.resize(bytes_used + 4096);
		dest.resize(dest.capacity());
		pub.next_output_byte = dest.data() + bytes_used;
		pub.free_in_buffer = dest.size() - bytes_used;
	}

	static void term_destination_thunk(j_compress_ptr ptr)
	{
		((VectorDestinationManager *)(ptr->dest))->term_destination();
	}

	inline void term_destination()
	{
		dest.resize(dest.size() - pub.free_in_buffer);
	}
};
static_assert(std::is_standard_layout<VectorDestinationManager>::value, "");

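// Encode a planar 4:2:2 Y'CbCr image to JPEG. The data goes to libjpeg via
// jpeg_write_raw_data(), so no colorspace conversion happens here. With this
// sampling, libjpeg wants max_v_samp_factor * DCTSIZE = 8 rows per call,
// which is why the loop below assumes the height is divisible by 8 (720 is).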
vector<uint8_t> encode_jpeg(const uint8_t *y_data, const uint8_t *cb_data, const uint8_t *cr_data, unsigned width, unsigned height)
{
	VectorDestinationManager dest;

	jpeg_compress_struct cinfo;
	jpeg_error_mgr jerr;
	cinfo.err = jpeg_std_error(&jerr);
	jpeg_create_compress(&cinfo);

	cinfo.dest = (jpeg_destination_mgr *)&dest;
	cinfo.input_components = 3;
	cinfo.in_color_space = JCS_RGB;
	jpeg_set_defaults(&cinfo);
	constexpr int quality = 90;
	jpeg_set_quality(&cinfo, quality, /*force_baseline=*/false);

	cinfo.image_width = width;
	cinfo.image_height = height;
	cinfo.raw_data_in = true;
	jpeg_set_colorspace(&cinfo, JCS_YCbCr);
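	// 2x1 subsampling of Cb and Cr relative to Y', i.e., 4:2:2.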
	cinfo.comp_info[0].h_samp_factor = 2;
	cinfo.comp_info[0].v_samp_factor = 1;
	cinfo.comp_info[1].h_samp_factor = 1;
	cinfo.comp_info[1].v_samp_factor = 1;
	cinfo.comp_info[2].h_samp_factor = 1;
	cinfo.comp_info[2].v_samp_factor = 1;
	cinfo.CCIR601_sampling = true;  // Seems to be mostly ignored by libjpeg, though.
	jpeg_start_compress(&cinfo, true);

	JSAMPROW yptr[8], cbptr[8], crptr[8];
	JSAMPARRAY data[3] = { yptr, cbptr, crptr };
	for (unsigned y = 0; y < height; y += 8) {
		for (unsigned yy = 0; yy < 8; ++yy) {
			yptr[yy] = const_cast<JSAMPROW>(&y_data[(y + yy) * width]);
			cbptr[yy] = const_cast<JSAMPROW>(&cb_data[(y + yy) * width / 2]);
			crptr[yy] = const_cast<JSAMPROW>(&cr_data[(y + yy) * width / 2]);
		}

		jpeg_write_raw_data(&cinfo, data, /*num_lines=*/8);
	}

	jpeg_finish_compress(&cinfo);
	jpeg_destroy_compress(&cinfo);

	return move(dest.dest);
}

VideoStream::VideoStream()
{
	ycbcr_converter.reset(new YCbCrConverter(YCbCrConverter::OUTPUT_TO_DUAL_YCBCR, /*resource_pool=*/nullptr));
	ycbcr_semiplanar_converter.reset(new YCbCrConverter(YCbCrConverter::OUTPUT_TO_SEMIPLANAR, /*resource_pool=*/nullptr));

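	// Per-slot resources for frames in flight: a two-layer input texture
	// (the two source frames), a grayscale copy for optical flow, output
	// textures for fades, split subsampled chroma, matching FBOs, and a
	// persistently mapped PBO that the encode thread reads the result from.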
	GLuint input_tex[num_interpolate_slots], gray_tex[num_interpolate_slots];
	GLuint fade_y_output_tex[num_interpolate_slots], fade_cbcr_output_tex[num_interpolate_slots];
	GLuint cb_tex[num_interpolate_slots], cr_tex[num_interpolate_slots];

	glCreateTextures(GL_TEXTURE_2D_ARRAY, num_interpolate_slots, input_tex);
	glCreateTextures(GL_TEXTURE_2D_ARRAY, num_interpolate_slots, gray_tex);
	glCreateTextures(GL_TEXTURE_2D, num_interpolate_slots, fade_y_output_tex);
	glCreateTextures(GL_TEXTURE_2D, num_interpolate_slots, fade_cbcr_output_tex);
	glCreateTextures(GL_TEXTURE_2D, num_interpolate_slots, cb_tex);
	glCreateTextures(GL_TEXTURE_2D, num_interpolate_slots, cr_tex);
	check_error();

	constexpr size_t width = 1280, height = 720;  // FIXME: adjustable width, height
	int levels = find_num_levels(width, height);
	for (size_t i = 0; i < num_interpolate_slots; ++i) {
		glTextureStorage3D(input_tex[i], levels, GL_RGBA8, width, height, 2);
		check_error();
		glTextureStorage3D(gray_tex[i], levels, GL_R8, width, height, 2);
		check_error();
		glTextureStorage2D(fade_y_output_tex[i], 1, GL_R8, width, height);
		check_error();
		glTextureStorage2D(fade_cbcr_output_tex[i], 1, GL_RG8, width, height);
		check_error();
		glTextureStorage2D(cb_tex[i], 1, GL_R8, width / 2, height);
		check_error();
		glTextureStorage2D(cr_tex[i], 1, GL_R8, width / 2, height);
		check_error();

		InterpolatedFrameResources resource;
		resource.input_tex = input_tex[i];
		resource.gray_tex = gray_tex[i];
		resource.fade_y_output_tex = fade_y_output_tex[i];
		resource.fade_cbcr_output_tex = fade_cbcr_output_tex[i];
		resource.cb_tex = cb_tex[i];
		resource.cr_tex = cr_tex[i];
		glCreateFramebuffers(2, resource.input_fbos);
		check_error();
		glCreateFramebuffers(1, &resource.fade_fbo);
		check_error();

		glNamedFramebufferTextureLayer(resource.input_fbos[0], GL_COLOR_ATTACHMENT0, input_tex[i], 0, 0);
		check_error();
		glNamedFramebufferTextureLayer(resource.input_fbos[0], GL_COLOR_ATTACHMENT1, gray_tex[i], 0, 0);
		check_error();
		glNamedFramebufferTextureLayer(resource.input_fbos[1], GL_COLOR_ATTACHMENT0, input_tex[i], 0, 1);
		check_error();
		glNamedFramebufferTextureLayer(resource.input_fbos[1], GL_COLOR_ATTACHMENT1, gray_tex[i], 0, 1);
		check_error();
		glNamedFramebufferTexture(resource.fade_fbo, GL_COLOR_ATTACHMENT0, fade_y_output_tex[i], 0);
		check_error();
		glNamedFramebufferTexture(resource.fade_fbo, GL_COLOR_ATTACHMENT1, fade_cbcr_output_tex[i], 0);
		check_error();

		GLenum bufs[] = { GL_COLOR_ATTACHMENT0, GL_COLOR_ATTACHMENT1 };
		glNamedFramebufferDrawBuffers(resource.input_fbos[0], 2, bufs);
		check_error();
		glNamedFramebufferDrawBuffers(resource.input_fbos[1], 2, bufs);
		check_error();
		glNamedFramebufferDrawBuffers(resource.fade_fbo, 2, bufs);
		check_error();

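		// The PBO is mapped persistently, so the encode thread can read the
		// downloaded frame without mapping and unmapping it each time; the
		// GL_CLIENT_MAPPED_BUFFER_BARRIER_BIT barrier plus the fence set at
		// readback time make the GPU's writes visible to the CPU.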
		glCreateBuffers(1, &resource.pbo);
		check_error();
		glNamedBufferStorage(resource.pbo, width * height * 4, nullptr, GL_MAP_READ_BIT | GL_MAP_PERSISTENT_BIT);
		check_error();
		resource.pbo_contents = glMapNamedBufferRange(resource.pbo, 0, width * height * 4, GL_MAP_READ_BIT | GL_MAP_PERSISTENT_BIT);
		interpolate_resources.push_back(resource);
	}

	check_error();

	OperatingPoint op;
	if (global_flags.interpolation_quality == 1) {
		op = operating_point1;
	} else if (global_flags.interpolation_quality == 2) {
		op = operating_point2;
	} else if (global_flags.interpolation_quality == 3) {
		op = operating_point3;
	} else if (global_flags.interpolation_quality == 4) {
		op = operating_point4;
	} else {
		assert(false);
	}

	compute_flow.reset(new DISComputeFlow(width, height, op));
	interpolate.reset(new Interpolate(op, /*split_ycbcr_output=*/true));
	interpolate_no_split.reset(new Interpolate(op, /*split_ycbcr_output=*/false));
	chroma_subsampler.reset(new ChromaSubsampler);
	check_error();

	// The “last frame” is initially black.
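	// (Y' = 16, Cb = Cr = 128 is black in limited-range Y'CbCr.)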
	unique_ptr<uint8_t[]> y(new uint8_t[1280 * 720]);
	unique_ptr<uint8_t[]> cb_or_cr(new uint8_t[640 * 720]);
	memset(y.get(), 16, 1280 * 720);
	memset(cb_or_cr.get(), 128, 640 * 720);
	last_frame = encode_jpeg(y.get(), cb_or_cr.get(), cb_or_cr.get(), 1280, 720);
}

VideoStream::~VideoStream() {}

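// Set up a NUT mux running over custom AVIO, so that everything the muxer
// writes ends up in write_packet2(), which forwards it to the HTTP server.
// The video codec is MJPEG; each queued frame becomes one JPEG packet.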
void VideoStream::start()
{
	AVFormatContext *avctx = avformat_alloc_context();
	avctx->oformat = av_guess_format("nut", nullptr, nullptr);

	uint8_t *buf = (uint8_t *)av_malloc(MUX_BUFFER_SIZE);
	avctx->pb = avio_alloc_context(buf, MUX_BUFFER_SIZE, 1, this, nullptr, nullptr, nullptr);
	avctx->pb->write_data_type = &VideoStream::write_packet2_thunk;
	avctx->pb->ignore_boundary_point = 1;

	Mux::Codec video_codec = Mux::CODEC_MJPEG;

	avctx->flags = AVFMT_FLAG_CUSTOM_IO;

	string video_extradata;

	constexpr int width = 1280, height = 720;  // Doesn't matter for MJPEG.
	stream_mux.reset(new Mux(avctx, width, height, video_codec, video_extradata, /*audio_codec_parameters=*/nullptr, COARSE_TIMEBASE,
		/*write_callback=*/nullptr, Mux::WRITE_FOREGROUND, {}));

	encode_thread = thread(&VideoStream::encode_thread_func, this);
}

void VideoStream::stop()
{
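	// Note: encode_thread_func() has no quit flag in this version, so this
	// join only returns if the thread exits some other way.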
	encode_thread.join();
}

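// ORIGINAL frames are not re-encoded; the encode thread reads the JPEG
// straight off disk and remuxes it as-is.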
void VideoStream::schedule_original_frame(int64_t output_pts, function<void()> &&display_func, unsigned stream_idx, int64_t input_pts)
{
	fprintf(stderr, "output_pts=%ld  original      input_pts=%ld\n", output_pts, input_pts);

	QueuedFrame qf;
	qf.type = QueuedFrame::ORIGINAL;
	qf.output_pts = output_pts;
	qf.stream_idx = stream_idx;
	qf.input_first_pts = input_pts;
	qf.display_func = move(display_func);

	unique_lock<mutex> lock(queue_lock);
	frame_queue.push_back(qf);
	queue_nonempty.notify_all();
}

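// A FADED frame is a crossfade between two already-encoded frames: both
// JPEGs are decoded (with caching), blended on the GPU at fade_alpha,
// chroma-subsampled, and then read back asynchronously for re-encoding.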
void VideoStream::schedule_faded_frame(int64_t output_pts, function<void()> &&display_func, unsigned stream_idx, int64_t input_pts, int secondary_stream_idx, int64_t secondary_input_pts, float fade_alpha)
{
	fprintf(stderr, "output_pts=%ld  faded         input_pts=%ld,%ld  fade_alpha=%.2f\n", output_pts, input_pts, secondary_input_pts, fade_alpha);

	// Get the temporary OpenGL resources we need for doing the fade.
	// (We share these with interpolated frames, which is slightly
	// overkill, but there's no need to waste resources on keeping
	// separate pools around.)
	InterpolatedFrameResources resources;
	{
		unique_lock<mutex> lock(queue_lock);
		if (interpolate_resources.empty()) {
			fprintf(stderr, "WARNING: Too many interpolated frames already in transit; dropping one.\n");
			return;
		}
		resources = interpolate_resources.front();
		interpolate_resources.pop_front();
	}

	bool did_decode;

	JPEGID jpeg_id1;
	jpeg_id1.stream_idx = stream_idx;
	jpeg_id1.pts = input_pts;
	jpeg_id1.interpolated = false;
	shared_ptr<Frame> frame1 = decode_jpeg_with_cache(jpeg_id1, DECODE_IF_NOT_IN_CACHE, &did_decode);

	JPEGID jpeg_id2;
	jpeg_id2.stream_idx = secondary_stream_idx;
	jpeg_id2.pts = secondary_input_pts;
	jpeg_id2.interpolated = false;
	shared_ptr<Frame> frame2 = decode_jpeg_with_cache(jpeg_id2, DECODE_IF_NOT_IN_CACHE, &did_decode);

	ycbcr_semiplanar_converter->prepare_chain_for_fade(frame1, frame2, fade_alpha)->render_to_fbo(resources.fade_fbo, 1280, 720);

	QueuedFrame qf;
	qf.type = QueuedFrame::FADED;
	qf.output_pts = output_pts;
	qf.stream_idx = stream_idx;
	qf.resources = resources;
	qf.input_first_pts = input_pts;
	qf.display_func = move(display_func);

	qf.secondary_stream_idx = secondary_stream_idx;
	qf.secondary_input_pts = secondary_input_pts;

	// Subsample and split Cb/Cr.
	chroma_subsampler->subsample_chroma(resources.fade_cbcr_output_tex, 1280, 720, resources.cb_tex, resources.cr_tex);

	// Read it down (asynchronously) to the CPU.
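	// (The size argument to glGetTextureImage() is only the buffer space
	// available past the offset, not the amount actually written; Y' takes
	// 1280*720 bytes and Cb/Cr 640*720 bytes each.)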
	glPixelStorei(GL_PACK_ROW_LENGTH, 0);
	glBindBuffer(GL_PIXEL_PACK_BUFFER, resources.pbo);
	check_error();
	glGetTextureImage(resources.fade_y_output_tex, 0, GL_RED, GL_UNSIGNED_BYTE, 1280 * 720 * 4, BUFFER_OFFSET(0));
	check_error();
	glGetTextureImage(resources.cb_tex, 0, GL_RED, GL_UNSIGNED_BYTE, 1280 * 720 * 3, BUFFER_OFFSET(1280 * 720));
	check_error();
	glGetTextureImage(resources.cr_tex, 0, GL_RED, GL_UNSIGNED_BYTE, 1280 * 720 * 3 - 640 * 720, BUFFER_OFFSET(1280 * 720 + 640 * 720));
	check_error();
	glBindBuffer(GL_PIXEL_PACK_BUFFER, 0);

	// Set a fence we can wait for to make sure the CPU sees the read.
	glMemoryBarrier(GL_CLIENT_MAPPED_BUFFER_BARRIER_BIT);
	check_error();
	qf.fence = RefCountedGLsync(GL_SYNC_GPU_COMMANDS_COMPLETE, /*flags=*/0);
	check_error();

	unique_lock<mutex> lock(queue_lock);
	frame_queue.push_back(qf);
	queue_nonempty.notify_all();
}

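// An INTERPOLATED frame is synthesized between two source frames: both are
// decoded and uploaded, optical flow is computed between their grayscale
// versions, and the interpolation is evaluated at the given alpha. With a
// secondary stream (FADED_INTERPOLATED), the result is additionally faded
// against a third decoded frame before readback.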
void VideoStream::schedule_interpolated_frame(int64_t output_pts, function<void()> &&display_func, unsigned stream_idx, int64_t input_first_pts, int64_t input_second_pts, float alpha, int secondary_stream_idx, int64_t secondary_input_pts, float fade_alpha)
{
	if (secondary_stream_idx != -1) {
		fprintf(stderr, "output_pts=%ld  interpolated  input_pts1=%ld input_pts2=%ld alpha=%.3f  secondary_pts=%ld  fade_alpha=%.2f\n", output_pts, input_first_pts, input_second_pts, alpha, secondary_input_pts, fade_alpha);
	} else {
		fprintf(stderr, "output_pts=%ld  interpolated  input_pts1=%ld input_pts2=%ld alpha=%.3f\n", output_pts, input_first_pts, input_second_pts, alpha);
	}

	JPEGID id;
	if (secondary_stream_idx == -1) {
		id = JPEGID{ stream_idx, output_pts, /*interpolated=*/true };
	} else {
		id = create_jpegid_for_interpolated_fade(stream_idx, output_pts, secondary_stream_idx, secondary_input_pts);
	}

	// Get the temporary OpenGL resources we need for doing the interpolation.
	InterpolatedFrameResources resources;
	{
		unique_lock<mutex> lock(queue_lock);
		if (interpolate_resources.empty()) {
			fprintf(stderr, "WARNING: Too many interpolated frames already in transit; dropping one.\n");
			return;
		}
		resources = interpolate_resources.front();
		interpolate_resources.pop_front();
	}

	QueuedFrame qf;
	qf.type = (secondary_stream_idx == -1) ? QueuedFrame::INTERPOLATED : QueuedFrame::FADED_INTERPOLATED;
	qf.output_pts = output_pts;
	qf.stream_idx = stream_idx;
	qf.resources = resources;
	qf.id = id;
	qf.display_func = move(display_func);

	check_error();

	// Convert frame0 and frame1 to OpenGL textures.
	for (size_t frame_no = 0; frame_no < 2; ++frame_no) {
		JPEGID jpeg_id;
		jpeg_id.stream_idx = stream_idx;
		jpeg_id.pts = frame_no == 1 ? input_second_pts : input_first_pts;
		jpeg_id.interpolated = false;
		bool did_decode;
		shared_ptr<Frame> frame = decode_jpeg_with_cache(jpeg_id, DECODE_IF_NOT_IN_CACHE, &did_decode);
		ycbcr_converter->prepare_chain_for_conversion(frame)->render_to_fbo(resources.input_fbos[frame_no], 1280, 720);
	}

	glGenerateTextureMipmap(resources.input_tex);
	check_error();
	glGenerateTextureMipmap(resources.gray_tex);
	check_error();

	// Compute the interpolated frame.
	qf.flow_tex = compute_flow->exec(resources.gray_tex, DISComputeFlow::FORWARD_AND_BACKWARD, DISComputeFlow::DO_NOT_RESIZE_FLOW);
	check_error();

	if (secondary_stream_idx != -1) {
		// Fade. First kick off the interpolation.
		tie(qf.output_tex, ignore) = interpolate_no_split->exec(resources.input_tex, resources.gray_tex, qf.flow_tex, 1280, 720, alpha);
		check_error();

		// Now decode the image we are fading against.
		JPEGID jpeg_id;
		jpeg_id.stream_idx = secondary_stream_idx;
		jpeg_id.pts = secondary_input_pts;
		jpeg_id.interpolated = false;
		bool did_decode;
		shared_ptr<Frame> frame2 = decode_jpeg_with_cache(jpeg_id, DECODE_IF_NOT_IN_CACHE, &did_decode);

		// Then fade against it, putting it into the fade Y' and CbCr textures.
		ycbcr_semiplanar_converter->prepare_chain_for_fade_from_texture(qf.output_tex, frame2, fade_alpha)->render_to_fbo(resources.fade_fbo, 1280, 720);

		// Subsample and split Cb/Cr.
		chroma_subsampler->subsample_chroma(resources.fade_cbcr_output_tex, 1280, 720, resources.cb_tex, resources.cr_tex);
	} else {
		tie(qf.output_tex, qf.cbcr_tex) = interpolate->exec(resources.input_tex, resources.gray_tex, qf.flow_tex, 1280, 720, alpha);
		check_error();

		// Subsample and split Cb/Cr.
		chroma_subsampler->subsample_chroma(qf.cbcr_tex, 1280, 720, resources.cb_tex, resources.cr_tex);
	}

	// We could have released qf.flow_tex here, but to make sure we don't cause a stall
	// when trying to reuse it for the next frame, we can just as well hold on to it
	// and release it only when the readback is done.

	// Read it down (asynchronously) to the CPU.
	glPixelStorei(GL_PACK_ROW_LENGTH, 0);
	glBindBuffer(GL_PIXEL_PACK_BUFFER, resources.pbo);
	check_error();
	if (secondary_stream_idx != -1) {
		glGetTextureImage(resources.fade_y_output_tex, 0, GL_RED, GL_UNSIGNED_BYTE, 1280 * 720 * 4, BUFFER_OFFSET(0));
	} else {
		glGetTextureImage(qf.output_tex, 0, GL_RED, GL_UNSIGNED_BYTE, 1280 * 720 * 4, BUFFER_OFFSET(0));
	}
	check_error();
	glGetTextureImage(resources.cb_tex, 0, GL_RED, GL_UNSIGNED_BYTE, 1280 * 720 * 3, BUFFER_OFFSET(1280 * 720));
	check_error();
	glGetTextureImage(resources.cr_tex, 0, GL_RED, GL_UNSIGNED_BYTE, 1280 * 720 * 3 - 640 * 720, BUFFER_OFFSET(1280 * 720 + 640 * 720));
	check_error();
	glBindBuffer(GL_PIXEL_PACK_BUFFER, 0);

	// Set a fence we can wait for to make sure the CPU sees the read.
	glMemoryBarrier(GL_CLIENT_MAPPED_BUFFER_BARRIER_BIT);
	check_error();
	qf.fence = RefCountedGLsync(GL_SYNC_GPU_COMMANDS_COMPLETE, /*flags=*/0);
	check_error();

	unique_lock<mutex> lock(queue_lock);
	frame_queue.push_back(qf);
	queue_nonempty.notify_all();
}

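// A REFRESH frame simply re-sends the last encoded frame with a new pts,
// presumably to keep the output stream ticking when nothing new is ready.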
void VideoStream::schedule_refresh_frame(int64_t output_pts, function<void()> &&display_func)
{
	QueuedFrame qf;
	qf.type = QueuedFrame::REFRESH;
	qf.output_pts = output_pts;
	qf.display_func = move(display_func);

	unique_lock<mutex> lock(queue_lock);
	frame_queue.push_back(qf);
	queue_nonempty.notify_all();
}

namespace {

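// Reconstruct a planar 4:2:2 frame from the PBO contents. The layout matches
// the offsets used at readback time: Y' first (width * height bytes), then
// Cb and Cr, each at half the horizontal resolution.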
shared_ptr<Frame> frame_from_pbo(void *contents, size_t width, size_t height)
{
	size_t chroma_width = width / 2;

	const uint8_t *y = (const uint8_t *)contents;
	const uint8_t *cb = (const uint8_t *)contents + width * height;
	const uint8_t *cr = (const uint8_t *)contents + width * height + chroma_width * height;

	shared_ptr<Frame> frame(new Frame);
	frame->y.reset(new uint8_t[width * height]);
	frame->cb.reset(new uint8_t[chroma_width * height]);
	frame->cr.reset(new uint8_t[chroma_width * height]);
	for (unsigned yy = 0; yy < height; ++yy) {
		memcpy(frame->y.get() + width * yy, y + width * yy, width);
		memcpy(frame->cb.get() + chroma_width * yy, cb + chroma_width * yy, chroma_width);
		memcpy(frame->cr.get() + chroma_width * yy, cr + chroma_width * yy, chroma_width);
	}
	frame->is_semiplanar = false;
	frame->width = width;
	frame->height = height;
	frame->chroma_subsampling_x = 2;
	frame->chroma_subsampling_y = 1;
	frame->pitch_y = width;
	frame->pitch_chroma = chroma_width;
	return frame;
}

}  // namespace

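// The encode thread gets its own OpenGL context (it needs one to wait on
// fences and to read back the persistently mapped PBOs), pops frames off the
// queue, waits for each frame's fence before touching its PBO contents,
// JPEG-encodes where needed, and hands the packet to the mux.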
void VideoStream::encode_thread_func()
{
	pthread_setname_np(pthread_self(), "VideoStream");
	QSurface *surface = create_surface();
	QOpenGLContext *context = create_context(surface);
	bool ok = make_current(context, surface);
	if (!ok) {
		fprintf(stderr, "Video stream couldn't get an OpenGL context\n");
		exit(1);
	}

	for ( ;; ) {
		QueuedFrame qf;
		{
			unique_lock<mutex> lock(queue_lock);
			queue_nonempty.wait(lock, [this]{
				return !frame_queue.empty();
			});
			qf = frame_queue.front();
			frame_queue.pop_front();
		}

		if (qf.type == QueuedFrame::ORIGINAL) {
			// Send the JPEG frame on, unchanged.
			string jpeg = read_file(filename_for_frame(qf.stream_idx, qf.input_first_pts));
			AVPacket pkt;
			av_init_packet(&pkt);
			pkt.stream_index = 0;
			pkt.data = (uint8_t *)jpeg.data();
			pkt.size = jpeg.size();
			stream_mux->add_packet(pkt, qf.output_pts, qf.output_pts);

			last_frame.assign(jpeg.data(), jpeg.data() + jpeg.size());
		} else if (qf.type == QueuedFrame::FADED) {
			glClientWaitSync(qf.fence.get(), /*flags=*/0, GL_TIMEOUT_IGNORED);

			shared_ptr<Frame> frame = frame_from_pbo(qf.resources.pbo_contents, 1280, 720);

			// Now JPEG encode it, and send it on to the stream.
			vector<uint8_t> jpeg = encode_jpeg(frame->y.get(), frame->cb.get(), frame->cr.get(), 1280, 720);

			AVPacket pkt;
			av_init_packet(&pkt);
			pkt.stream_index = 0;
			pkt.data = (uint8_t *)jpeg.data();
			pkt.size = jpeg.size();
			stream_mux->add_packet(pkt, qf.output_pts, qf.output_pts);
			last_frame = move(jpeg);

			// Put the frame resources back.
			unique_lock<mutex> lock(queue_lock);
			interpolate_resources.push_back(qf.resources);
		} else if (qf.type == QueuedFrame::INTERPOLATED || qf.type == QueuedFrame::FADED_INTERPOLATED) {
			glClientWaitSync(qf.fence.get(), /*flags=*/0, GL_TIMEOUT_IGNORED);

			// Send a copy of the frame on to display.
			shared_ptr<Frame> frame = frame_from_pbo(qf.resources.pbo_contents, 1280, 720);
			JPEGFrameView::insert_interpolated_frame(qf.id, frame);

			// Now JPEG encode it, and send it on to the stream.
			vector<uint8_t> jpeg = encode_jpeg(frame->y.get(), frame->cb.get(), frame->cr.get(), 1280, 720);
			compute_flow->release_texture(qf.flow_tex);
			if (qf.type != QueuedFrame::FADED_INTERPOLATED) {
				interpolate->release_texture(qf.output_tex);
				interpolate->release_texture(qf.cbcr_tex);
			}

			AVPacket pkt;
			av_init_packet(&pkt);
			pkt.stream_index = 0;
			pkt.data = (uint8_t *)jpeg.data();
			pkt.size = jpeg.size();
			stream_mux->add_packet(pkt, qf.output_pts, qf.output_pts);
			last_frame = move(jpeg);

			// Put the frame resources back.
			unique_lock<mutex> lock(queue_lock);
			interpolate_resources.push_back(qf.resources);
		} else if (qf.type == QueuedFrame::REFRESH) {
			AVPacket pkt;
			av_init_packet(&pkt);
			pkt.stream_index = 0;
			pkt.data = (uint8_t *)last_frame.data();
			pkt.size = last_frame.size();
			stream_mux->add_packet(pkt, qf.output_pts, qf.output_pts);
		} else {
			assert(false);
		}
		if (qf.display_func != nullptr) {
			qf.display_func();
		}
	}
}

int VideoStream::write_packet2_thunk(void *opaque, uint8_t *buf, int buf_size, AVIODataMarkerType type, int64_t time)
{
	VideoStream *video_stream = (VideoStream *)opaque;
	return video_stream->write_packet2(buf, buf_size, type, time);
}

int VideoStream::write_packet2(uint8_t *buf, int buf_size, AVIODataMarkerType type, int64_t time)
{
	if (type == AVIO_DATA_MARKER_SYNC_POINT || type == AVIO_DATA_MARKER_BOUNDARY_POINT) {
		seen_sync_markers = true;
	} else if (type == AVIO_DATA_MARKER_UNKNOWN && !seen_sync_markers) {
		// We don't know if this is a keyframe or not (the muxer could
		// avoid marking it), so we just have to make the best of it.
		type = AVIO_DATA_MARKER_SYNC_POINT;
	}

	if (type == AVIO_DATA_MARKER_HEADER) {
		stream_mux_header.append((char *)buf, buf_size);
		global_httpd->set_header(stream_mux_header);
	} else {
		global_httpd->add_data((char *)buf, buf_size, type == AVIO_DATA_MARKER_SYNC_POINT, time, AVRational{ AV_TIME_BASE, 1 });
	}
	return buf_size;
}