#include "video_stream.h"

extern "C" {
#include <libavformat/avformat.h>
#include <libavformat/avio.h>
}

#include "chroma_subsampler.h"
#include "context.h"
#include "flags.h"
#include "flow.h"
#include "httpd.h"
#include "jpeg_frame_view.h"
#include "movit/util.h"
#include "mux.h"
#include "player.h"
#include "util.h"
#include "ycbcr_converter.h"

#include <epoxy/glx.h>
#include <jpeglib.h>
#include <unistd.h>

using namespace std;
using namespace std::chrono;

extern HTTPD *global_httpd;

namespace {

string read_file(const string &filename)
{
	FILE *fp = fopen(filename.c_str(), "rb");
	if (fp == nullptr) {
		perror(filename.c_str());
		return "";
	}

	fseek(fp, 0, SEEK_END);
	long len = ftell(fp);
	rewind(fp);

	string ret;
	ret.resize(len);
	if (fread(&ret[0], 1, len, fp) != size_t(len)) {
		perror(filename.c_str());
		fclose(fp);
		return "";
	}
	fclose(fp);
	return ret;
}

}  // namespace

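// A custom libjpeg destination manager that writes the compressed output
// into a growable std::vector<uint8_t> instead of a FILE *. libjpeg calls
// back through the function pointers in the embedded jpeg_destination_mgr;
// since the struct is standard-layout and “pub” is its first member, the
// dest pointer handed to the thunks can safely be cast back to the full
// object (hence the static_assert below).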
struct VectorDestinationManager {
	jpeg_destination_mgr pub;
	std::vector<uint8_t> dest;

	VectorDestinationManager()
	{
		pub.init_destination = init_destination_thunk;
		pub.empty_output_buffer = empty_output_buffer_thunk;
		pub.term_destination = term_destination_thunk;
	}

	static void init_destination_thunk(j_compress_ptr ptr)
	{
		((VectorDestinationManager *)(ptr->dest))->init_destination();
	}

	inline void init_destination()
	{
		make_room(0);
	}

	static boolean empty_output_buffer_thunk(j_compress_ptr ptr)
	{
		return ((VectorDestinationManager *)(ptr->dest))->empty_output_buffer();
	}

	inline bool empty_output_buffer()
	{
		make_room(dest.size());  // Should ignore pub.free_in_buffer!
		return true;
	}

	inline void make_room(size_t bytes_used)
	{
		dest.resize(bytes_used + 4096);
		dest.resize(dest.capacity());
		pub.next_output_byte = dest.data() + bytes_used;
		pub.free_in_buffer = dest.size() - bytes_used;
	}

	static void term_destination_thunk(j_compress_ptr ptr)
	{
		((VectorDestinationManager *)(ptr->dest))->term_destination();
	}

	inline void term_destination()
	{
		dest.resize(dest.size() - pub.free_in_buffer);
	}
};
static_assert(std::is_standard_layout<VectorDestinationManager>::value, "");

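// Encode a planar 4:2:2 YCbCr image (full-width Y, half-width Cb/Cr planes)
// to JPEG. We use libjpeg's raw-data path (jpeg_write_raw_data), so the
// planes are consumed as-is with no RGB round-trip; the 2x1/1x1/1x1 sampling
// factors below describe the 4:2:2 layout. Since all v_samp_factors are 1,
// libjpeg takes DCTSIZE (8) rows per call, so height is assumed to be a
// multiple of 8 (which holds for the 1280x720 frames used throughout).
//
// A minimal usage sketch (buffer sizes assumed to match the layout above):
//
//   vector<uint8_t> jpeg = encode_jpeg(y, cb, cr, 1280, 720);
//   fwrite(jpeg.data(), jpeg.size(), 1, fp);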
vector<uint8_t> encode_jpeg(const uint8_t *y_data, const uint8_t *cb_data, const uint8_t *cr_data, unsigned width, unsigned height)
{
	VectorDestinationManager dest;

	jpeg_compress_struct cinfo;
	jpeg_error_mgr jerr;
	cinfo.err = jpeg_std_error(&jerr);
	jpeg_create_compress(&cinfo);

	cinfo.dest = (jpeg_destination_mgr *)&dest;
	cinfo.input_components = 3;
	cinfo.in_color_space = JCS_RGB;
	jpeg_set_defaults(&cinfo);
	constexpr int quality = 90;
	jpeg_set_quality(&cinfo, quality, /*force_baseline=*/false);

	cinfo.image_width = width;
	cinfo.image_height = height;
	cinfo.raw_data_in = true;
	jpeg_set_colorspace(&cinfo, JCS_YCbCr);
	cinfo.comp_info[0].h_samp_factor = 2;
	cinfo.comp_info[0].v_samp_factor = 1;
	cinfo.comp_info[1].h_samp_factor = 1;
	cinfo.comp_info[1].v_samp_factor = 1;
	cinfo.comp_info[2].h_samp_factor = 1;
	cinfo.comp_info[2].v_samp_factor = 1;
	cinfo.CCIR601_sampling = true;  // Seems to be mostly ignored by libjpeg, though.
	jpeg_start_compress(&cinfo, true);

	JSAMPROW yptr[8], cbptr[8], crptr[8];
	JSAMPARRAY data[3] = { yptr, cbptr, crptr };
	for (unsigned y = 0; y < height; y += 8) {
		for (unsigned yy = 0; yy < 8; ++yy) {
			yptr[yy] = const_cast<JSAMPROW>(&y_data[(y + yy) * width]);
			cbptr[yy] = const_cast<JSAMPROW>(&cb_data[(y + yy) * width / 2]);
			crptr[yy] = const_cast<JSAMPROW>(&cr_data[(y + yy) * width / 2]);
		}

		jpeg_write_raw_data(&cinfo, data, /*num_lines=*/8);
	}

	jpeg_finish_compress(&cinfo);
	jpeg_destroy_compress(&cinfo);

	return move(dest.dest);
}

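// Set up the pools of OpenGL resources used for interpolated and faded
// frames: per-slot input/gray texture arrays (two layers, one per source
// frame), fade output textures, subsampled chroma textures, the matching
// FBOs, and a persistently mapped PBO that readbacks land in asynchronously.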
VideoStream::VideoStream()
{
	ycbcr_converter.reset(new YCbCrConverter(YCbCrConverter::OUTPUT_TO_DUAL_YCBCR, /*resource_pool=*/nullptr));
	ycbcr_semiplanar_converter.reset(new YCbCrConverter(YCbCrConverter::OUTPUT_TO_SEMIPLANAR, /*resource_pool=*/nullptr));

	GLuint input_tex[num_interpolate_slots], gray_tex[num_interpolate_slots];
	GLuint fade_y_output_tex[num_interpolate_slots], fade_cbcr_output_tex[num_interpolate_slots];
	GLuint cb_tex[num_interpolate_slots], cr_tex[num_interpolate_slots];

	glCreateTextures(GL_TEXTURE_2D_ARRAY, num_interpolate_slots, input_tex);
	glCreateTextures(GL_TEXTURE_2D_ARRAY, num_interpolate_slots, gray_tex);
	glCreateTextures(GL_TEXTURE_2D, num_interpolate_slots, fade_y_output_tex);
	glCreateTextures(GL_TEXTURE_2D, num_interpolate_slots, fade_cbcr_output_tex);
	glCreateTextures(GL_TEXTURE_2D, num_interpolate_slots, cb_tex);
	glCreateTextures(GL_TEXTURE_2D, num_interpolate_slots, cr_tex);
	check_error();

	constexpr size_t width = 1280, height = 720;  // FIXME: adjustable width, height
	int levels = find_num_levels(width, height);
	for (size_t i = 0; i < num_interpolate_slots; ++i) {
		glTextureStorage3D(input_tex[i], levels, GL_RGBA8, width, height, 2);
		check_error();
		glTextureStorage3D(gray_tex[i], levels, GL_R8, width, height, 2);
		check_error();
		glTextureStorage2D(fade_y_output_tex[i], 1, GL_R8, width, height);
		check_error();
		glTextureStorage2D(fade_cbcr_output_tex[i], 1, GL_RG8, width, height);
		check_error();
		glTextureStorage2D(cb_tex[i], 1, GL_R8, width / 2, height);
		check_error();
		glTextureStorage2D(cr_tex[i], 1, GL_R8, width / 2, height);
		check_error();

		unique_ptr<InterpolatedFrameResources> resource(new InterpolatedFrameResources);
		resource->owner = this;
		resource->input_tex = input_tex[i];
		resource->gray_tex = gray_tex[i];
		resource->fade_y_output_tex = fade_y_output_tex[i];
		resource->fade_cbcr_output_tex = fade_cbcr_output_tex[i];
		resource->cb_tex = cb_tex[i];
		resource->cr_tex = cr_tex[i];
		glCreateFramebuffers(2, resource->input_fbos);
		check_error();
		glCreateFramebuffers(1, &resource->fade_fbo);
		check_error();

		glNamedFramebufferTextureLayer(resource->input_fbos[0], GL_COLOR_ATTACHMENT0, input_tex[i], 0, 0);
		check_error();
		glNamedFramebufferTextureLayer(resource->input_fbos[0], GL_COLOR_ATTACHMENT1, gray_tex[i], 0, 0);
		check_error();
		glNamedFramebufferTextureLayer(resource->input_fbos[1], GL_COLOR_ATTACHMENT0, input_tex[i], 0, 1);
		check_error();
		glNamedFramebufferTextureLayer(resource->input_fbos[1], GL_COLOR_ATTACHMENT1, gray_tex[i], 0, 1);
		check_error();
		glNamedFramebufferTexture(resource->fade_fbo, GL_COLOR_ATTACHMENT0, fade_y_output_tex[i], 0);
		check_error();
		glNamedFramebufferTexture(resource->fade_fbo, GL_COLOR_ATTACHMENT1, fade_cbcr_output_tex[i], 0);
		check_error();

		GLuint bufs[] = { GL_COLOR_ATTACHMENT0, GL_COLOR_ATTACHMENT1 };
		glNamedFramebufferDrawBuffers(resource->input_fbos[0], 2, bufs);
		check_error();
		glNamedFramebufferDrawBuffers(resource->input_fbos[1], 2, bufs);
		check_error();
		glNamedFramebufferDrawBuffers(resource->fade_fbo, 2, bufs);
		check_error();

		glCreateBuffers(1, &resource->pbo);
		check_error();
		glNamedBufferStorage(resource->pbo, width * height * 4, nullptr, GL_MAP_READ_BIT | GL_MAP_PERSISTENT_BIT);
		check_error();
		resource->pbo_contents = glMapNamedBufferRange(resource->pbo, 0, width * height * 4, GL_MAP_READ_BIT | GL_MAP_PERSISTENT_BIT);
		interpolate_resources.push_back(move(resource));
	}

	check_error();

	OperatingPoint op;
	if (global_flags.interpolation_quality == 1) {
		op = operating_point1;
	} else if (global_flags.interpolation_quality == 2) {
		op = operating_point2;
	} else if (global_flags.interpolation_quality == 3) {
		op = operating_point3;
	} else if (global_flags.interpolation_quality == 4) {
		op = operating_point4;
	} else {
		assert(false);
	}

	compute_flow.reset(new DISComputeFlow(width, height, op));
	interpolate.reset(new Interpolate(op, /*split_ycbcr_output=*/true));
	interpolate_no_split.reset(new Interpolate(op, /*split_ycbcr_output=*/false));
	chroma_subsampler.reset(new ChromaSubsampler);
	check_error();

	// The “last frame” is initially black.
	unique_ptr<uint8_t[]> y(new uint8_t[1280 * 720]);
	unique_ptr<uint8_t[]> cb_or_cr(new uint8_t[640 * 720]);
	memset(y.get(), 16, 1280 * 720);
	memset(cb_or_cr.get(), 128, 640 * 720);
	last_frame = encode_jpeg(y.get(), cb_or_cr.get(), cb_or_cr.get(), 1280, 720);
}

VideoStream::~VideoStream() {}

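// Start streaming: mux MJPEG into a NUT container, written through a custom
// AVIO callback (write_packet2_thunk) that forwards the muxed bytes to the
// HTTP server instead of a file.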
void VideoStream::start()
{
	AVFormatContext *avctx = avformat_alloc_context();
	avctx->oformat = av_guess_format("nut", nullptr, nullptr);

	uint8_t *buf = (uint8_t *)av_malloc(MUX_BUFFER_SIZE);
	avctx->pb = avio_alloc_context(buf, MUX_BUFFER_SIZE, 1, this, nullptr, nullptr, nullptr);
	avctx->pb->write_data_type = &VideoStream::write_packet2_thunk;
	avctx->pb->ignore_boundary_point = 1;

	Mux::Codec video_codec = Mux::CODEC_MJPEG;

	avctx->flags = AVFMT_FLAG_CUSTOM_IO;

	string video_extradata;

	constexpr int width = 1280, height = 720;  // Doesn't matter for MJPEG.
	stream_mux.reset(new Mux(avctx, width, height, video_codec, video_extradata, /*audio_codec_parameters=*/nullptr, COARSE_TIMEBASE,
		/*write_callback=*/nullptr, Mux::WRITE_FOREGROUND, {}));

	encode_thread = thread(&VideoStream::encode_thread_func, this);
}

void VideoStream::stop()
{
	encode_thread.join();
}

void VideoStream::clear_queue()
{
	deque<QueuedFrame> q;

	{
		unique_lock<mutex> lock(queue_lock);
		q = move(frame_queue);
	}

	// These are not RAII-ed, unfortunately, so we'll need to clean them ourselves.
	// Note that release_texture() is thread-safe.
	for (const QueuedFrame &qf : q) {
		if (qf.type == QueuedFrame::INTERPOLATED ||
		    qf.type == QueuedFrame::FADED_INTERPOLATED) {
			compute_flow->release_texture(qf.flow_tex);
		}
		if (qf.type == QueuedFrame::INTERPOLATED) {
			interpolate->release_texture(qf.output_tex);
			interpolate->release_texture(qf.cbcr_tex);
		}
	}

	// q is destroyed here, outside the mutex; destroying it with the lock
	// held would be a double-lock.
}

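// Queue an unmodified source frame for output. local_pts is the steady_clock
// time at which the encode thread should actually send the frame; output_pts
// is the stream timestamp, and input_pts identifies the JPEG on disk.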
void VideoStream::schedule_original_frame(steady_clock::time_point local_pts,
                                          int64_t output_pts, function<void()> &&display_func,
                                          QueueSpotHolder &&queue_spot_holder,
                                          unsigned stream_idx, int64_t input_pts)
{
	fprintf(stderr, "output_pts=%ld  original      input_pts=%ld\n", output_pts, input_pts);

	// Preload the file from disk, so that the encoder thread does not get stalled.
	// TODO: Consider sending it through the queue instead.
	(void)read_file(filename_for_frame(stream_idx, input_pts));

	QueuedFrame qf;
	qf.local_pts = local_pts;
	qf.type = QueuedFrame::ORIGINAL;
	qf.output_pts = output_pts;
	qf.stream_idx = stream_idx;
	qf.input_first_pts = input_pts;
	qf.display_func = move(display_func);
	qf.queue_spot_holder = move(queue_spot_holder);

	unique_lock<mutex> lock(queue_lock);
	frame_queue.push_back(move(qf));
	queue_changed.notify_all();
}

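// Queue a crossfade between two source frames. The fade itself is rendered
// on the GPU right away (into the borrowed fade FBO) and read back
// asynchronously through the PBO; the encode thread consumes the pixels
// only after the fence signals.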
void VideoStream::schedule_faded_frame(steady_clock::time_point local_pts, int64_t output_pts,
                                       function<void()> &&display_func,
                                       QueueSpotHolder &&queue_spot_holder,
                                       unsigned stream_idx, int64_t input_pts, int secondary_stream_idx,
                                       int64_t secondary_input_pts, float fade_alpha)
{
	fprintf(stderr, "output_pts=%ld  faded         input_pts=%ld,%ld  fade_alpha=%.2f\n", output_pts, input_pts, secondary_input_pts, fade_alpha);

	// Get the temporary OpenGL resources we need for doing the fade.
	// (We share these with interpolated frames, which is slightly
	// overkill, but there's no need to waste resources on keeping
	// separate pools around.)
	BorrowedInterpolatedFrameResources resources;
	{
		unique_lock<mutex> lock(queue_lock);
		if (interpolate_resources.empty()) {
			fprintf(stderr, "WARNING: Too many interpolated frames already in transit; dropping one.\n");
			return;
		}
		resources = BorrowedInterpolatedFrameResources(interpolate_resources.front().release());
		interpolate_resources.pop_front();
	}

	bool did_decode;

	JPEGID jpeg_id1;
	jpeg_id1.stream_idx = stream_idx;
	jpeg_id1.pts = input_pts;
	jpeg_id1.interpolated = false;
	shared_ptr<Frame> frame1 = decode_jpeg_with_cache(jpeg_id1, DECODE_IF_NOT_IN_CACHE, &did_decode);

	JPEGID jpeg_id2;
	jpeg_id2.stream_idx = secondary_stream_idx;
	jpeg_id2.pts = secondary_input_pts;
	jpeg_id2.interpolated = false;
	shared_ptr<Frame> frame2 = decode_jpeg_with_cache(jpeg_id2, DECODE_IF_NOT_IN_CACHE, &did_decode);

	ycbcr_semiplanar_converter->prepare_chain_for_fade(frame1, frame2, fade_alpha)->render_to_fbo(resources->fade_fbo, 1280, 720);

	QueuedFrame qf;
	qf.local_pts = local_pts;
	qf.type = QueuedFrame::FADED;
	qf.output_pts = output_pts;
	qf.stream_idx = stream_idx;
	qf.input_first_pts = input_pts;
	qf.display_func = move(display_func);
	qf.queue_spot_holder = move(queue_spot_holder);

	qf.secondary_stream_idx = secondary_stream_idx;
	qf.secondary_input_pts = secondary_input_pts;

	// Subsample and split Cb/Cr.
	chroma_subsampler->subsample_chroma(resources->fade_cbcr_output_tex, 1280, 720, resources->cb_tex, resources->cr_tex);

	// Read it down (asynchronously) to the CPU.
	glPixelStorei(GL_PACK_ROW_LENGTH, 0);
	glBindBuffer(GL_PIXEL_PACK_BUFFER, resources->pbo);
	check_error();
	glGetTextureImage(resources->fade_y_output_tex, 0, GL_RED, GL_UNSIGNED_BYTE, 1280 * 720 * 4, BUFFER_OFFSET(0));
	check_error();
	glGetTextureImage(resources->cb_tex, 0, GL_RED, GL_UNSIGNED_BYTE, 1280 * 720 * 3, BUFFER_OFFSET(1280 * 720));
	check_error();
	glGetTextureImage(resources->cr_tex, 0, GL_RED, GL_UNSIGNED_BYTE, 1280 * 720 * 3 - 640 * 720, BUFFER_OFFSET(1280 * 720 + 640 * 720));
	check_error();
	glBindBuffer(GL_PIXEL_PACK_BUFFER, 0);

	// Set a fence we can wait for to make sure the CPU sees the read.
	glMemoryBarrier(GL_CLIENT_MAPPED_BUFFER_BARRIER_BIT);
	check_error();
	qf.fence = RefCountedGLsync(GL_SYNC_GPU_COMMANDS_COMPLETE, /*flags=*/0);
	check_error();
	qf.resources = move(resources);

	unique_lock<mutex> lock(queue_lock);
	frame_queue.push_back(move(qf));
	queue_changed.notify_all();
}

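// Queue an interpolated frame: optical flow is computed between the two
// source frames, and a new frame at fractional position alpha is synthesized
// from it. If secondary_stream_idx != -1, the interpolated result is
// additionally faded against a frame from another stream before readback.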
void VideoStream::schedule_interpolated_frame(steady_clock::time_point local_pts,
                                              int64_t output_pts, function<void()> &&display_func,
                                              QueueSpotHolder &&queue_spot_holder,
                                              unsigned stream_idx, int64_t input_first_pts,
                                              int64_t input_second_pts, float alpha,
                                              int secondary_stream_idx, int64_t secondary_input_pts,
                                              float fade_alpha)
{
	if (secondary_stream_idx != -1) {
		fprintf(stderr, "output_pts=%ld  interpolated  input_pts1=%ld input_pts2=%ld alpha=%.3f  secondary_pts=%ld  fade_alpha=%.2f\n", output_pts, input_first_pts, input_second_pts, alpha, secondary_input_pts, fade_alpha);
	} else {
		fprintf(stderr, "output_pts=%ld  interpolated  input_pts1=%ld input_pts2=%ld alpha=%.3f\n", output_pts, input_first_pts, input_second_pts, alpha);
	}

	JPEGID id;
	if (secondary_stream_idx == -1) {
		id = JPEGID{ stream_idx, output_pts, /*interpolated=*/true };
	} else {
		id = create_jpegid_for_interpolated_fade(stream_idx, output_pts, secondary_stream_idx, secondary_input_pts);
	}

	// Get the temporary OpenGL resources we need for doing the interpolation.
	BorrowedInterpolatedFrameResources resources;
	{
		unique_lock<mutex> lock(queue_lock);
		if (interpolate_resources.empty()) {
			fprintf(stderr, "WARNING: Too many interpolated frames already in transit; dropping one.\n");
			return;
		}
		resources = BorrowedInterpolatedFrameResources(interpolate_resources.front().release());
		interpolate_resources.pop_front();
	}

	QueuedFrame qf;
	qf.type = (secondary_stream_idx == -1) ? QueuedFrame::INTERPOLATED : QueuedFrame::FADED_INTERPOLATED;
	qf.output_pts = output_pts;
	qf.stream_idx = stream_idx;
	qf.id = id;
	qf.display_func = move(display_func);
	qf.queue_spot_holder = move(queue_spot_holder);
	qf.local_pts = local_pts;

	check_error();

	// Convert frame0 and frame1 to OpenGL textures.
	for (size_t frame_no = 0; frame_no < 2; ++frame_no) {
		JPEGID jpeg_id;
		jpeg_id.stream_idx = stream_idx;
		jpeg_id.pts = frame_no == 1 ? input_second_pts : input_first_pts;
		jpeg_id.interpolated = false;
		bool did_decode;
		shared_ptr<Frame> frame = decode_jpeg_with_cache(jpeg_id, DECODE_IF_NOT_IN_CACHE, &did_decode);
		ycbcr_converter->prepare_chain_for_conversion(frame)->render_to_fbo(resources->input_fbos[frame_no], 1280, 720);
	}

	glGenerateTextureMipmap(resources->input_tex);
	check_error();
	glGenerateTextureMipmap(resources->gray_tex);
	check_error();

	// Compute the interpolated frame.
	qf.flow_tex = compute_flow->exec(resources->gray_tex, DISComputeFlow::FORWARD_AND_BACKWARD, DISComputeFlow::DO_NOT_RESIZE_FLOW);
	check_error();

	if (secondary_stream_idx != -1) {
		// Fade. First kick off the interpolation.
		tie(qf.output_tex, ignore) = interpolate_no_split->exec(resources->input_tex, resources->gray_tex, qf.flow_tex, 1280, 720, alpha);
		check_error();

		// Now decode the image we are fading against.
		JPEGID jpeg_id;
		jpeg_id.stream_idx = secondary_stream_idx;
		jpeg_id.pts = secondary_input_pts;
		jpeg_id.interpolated = false;
		bool did_decode;
		shared_ptr<Frame> frame2 = decode_jpeg_with_cache(jpeg_id, DECODE_IF_NOT_IN_CACHE, &did_decode);

		// Then fade against it, putting it into the fade Y' and CbCr textures.
		ycbcr_semiplanar_converter->prepare_chain_for_fade_from_texture(qf.output_tex, frame2, fade_alpha)->render_to_fbo(resources->fade_fbo, 1280, 720);

		// Subsample and split Cb/Cr.
		chroma_subsampler->subsample_chroma(resources->fade_cbcr_output_tex, 1280, 720, resources->cb_tex, resources->cr_tex);

		interpolate_no_split->release_texture(qf.output_tex);
	} else {
		tie(qf.output_tex, qf.cbcr_tex) = interpolate->exec(resources->input_tex, resources->gray_tex, qf.flow_tex, 1280, 720, alpha);
		check_error();

		// Subsample and split Cb/Cr.
		chroma_subsampler->subsample_chroma(qf.cbcr_tex, 1280, 720, resources->cb_tex, resources->cr_tex);
	}

	// We could have released qf.flow_tex here, but to make sure we don't cause a stall
	// when trying to reuse it for the next frame, we can just as well hold on to it
	// and release it only when the readback is done.

	// Read it down (asynchronously) to the CPU.
	glPixelStorei(GL_PACK_ROW_LENGTH, 0);
	glBindBuffer(GL_PIXEL_PACK_BUFFER, resources->pbo);
	check_error();
	if (secondary_stream_idx != -1) {
		glGetTextureImage(resources->fade_y_output_tex, 0, GL_RED, GL_UNSIGNED_BYTE, 1280 * 720 * 4, BUFFER_OFFSET(0));
	} else {
		glGetTextureImage(qf.output_tex, 0, GL_RED, GL_UNSIGNED_BYTE, 1280 * 720 * 4, BUFFER_OFFSET(0));
	}
	check_error();
	glGetTextureImage(resources->cb_tex, 0, GL_RED, GL_UNSIGNED_BYTE, 1280 * 720 * 3, BUFFER_OFFSET(1280 * 720));
	check_error();
	glGetTextureImage(resources->cr_tex, 0, GL_RED, GL_UNSIGNED_BYTE, 1280 * 720 * 3 - 640 * 720, BUFFER_OFFSET(1280 * 720 + 640 * 720));
	check_error();
	glBindBuffer(GL_PIXEL_PACK_BUFFER, 0);

	// Set a fence we can wait for to make sure the CPU sees the read.
	glMemoryBarrier(GL_CLIENT_MAPPED_BUFFER_BARRIER_BIT);
	check_error();
	qf.fence = RefCountedGLsync(GL_SYNC_GPU_COMMANDS_COMPLETE, /*flags=*/0);
	check_error();
	qf.resources = move(resources);

	unique_lock<mutex> lock(queue_lock);
	frame_queue.push_back(move(qf));
	queue_changed.notify_all();
}

void VideoStream::schedule_refresh_frame(steady_clock::time_point local_pts,
                                         int64_t output_pts, function<void()> &&display_func,
                                         QueueSpotHolder &&queue_spot_holder)
{
	QueuedFrame qf;
	qf.type = QueuedFrame::REFRESH;
	qf.output_pts = output_pts;
	qf.display_func = move(display_func);
	qf.queue_spot_holder = move(queue_spot_holder);

	unique_lock<mutex> lock(queue_lock);
	frame_queue.push_back(move(qf));
	queue_changed.notify_all();
}

namespace {

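// Reassemble a Frame from the persistently mapped PBO. The readbacks in the
// schedule_* functions place the planes back-to-back:
//
//   offset 0:                    Y   (width x height)
//   offset width*height:         Cb  (width/2 x height)
//   offset width*height * 3/2:   Cr  (width/2 x height)
//
// The copy out of the mapping is deliberate: the PBO is reused for another
// frame as soon as its resources are returned to the pool.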
shared_ptr<Frame> frame_from_pbo(void *contents, size_t width, size_t height)
{
	size_t chroma_width = width / 2;

	const uint8_t *y = (const uint8_t *)contents;
	const uint8_t *cb = (const uint8_t *)contents + width * height;
	const uint8_t *cr = (const uint8_t *)contents + width * height + chroma_width * height;

	shared_ptr<Frame> frame(new Frame);
	frame->y.reset(new uint8_t[width * height]);
	frame->cb.reset(new uint8_t[chroma_width * height]);
	frame->cr.reset(new uint8_t[chroma_width * height]);
	for (unsigned yy = 0; yy < height; ++yy) {
		memcpy(frame->y.get() + width * yy, y + width * yy, width);
		memcpy(frame->cb.get() + chroma_width * yy, cb + chroma_width * yy, chroma_width);
		memcpy(frame->cr.get() + chroma_width * yy, cr + chroma_width * yy, chroma_width);
	}
	frame->is_semiplanar = false;
	frame->width = width;
	frame->height = height;
	frame->chroma_subsampling_x = 2;
	frame->chroma_subsampling_y = 1;
	frame->pitch_y = width;
	frame->pitch_chroma = chroma_width;
	return frame;
}

}  // namespace

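// The encode thread: owns its own OpenGL context, pops frames off the queue,
// sleeps until each frame's local_pts, and then encodes and muxes it. The
// wait_until predicate also fires if clear_queue() swapped the queue out from
// under us, in which case the frame is dropped instead of played.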
void VideoStream::encode_thread_func()
{
	pthread_setname_np(pthread_self(), "VideoStream");
	QSurface *surface = create_surface();
	QOpenGLContext *context = create_context(surface);
	bool ok = make_current(context, surface);
	if (!ok) {
		fprintf(stderr, "Video stream couldn't get an OpenGL context\n");
		exit(1);
	}

	for ( ;; ) {
		QueuedFrame qf;
		{
			unique_lock<mutex> lock(queue_lock);

			// Wait until we have a frame to play.
			queue_changed.wait(lock, [this]{
				return !frame_queue.empty();
			});
			steady_clock::time_point frame_start = frame_queue.front().local_pts;

			// Now sleep until the frame is supposed to start (the usual case),
			// _or_ clear_queue() happened.
			bool aborted = queue_changed.wait_until(lock, frame_start, [this, frame_start]{
				return frame_queue.empty() || frame_queue.front().local_pts != frame_start;
			});
			if (aborted) {
				// clear_queue() happened, so don't play this frame after all.
				continue;
			}
			qf = move(frame_queue.front());
			frame_queue.pop_front();
		}

		if (qf.type == QueuedFrame::ORIGINAL) {
			// Send the JPEG frame on, unchanged.
			string jpeg = read_file(filename_for_frame(qf.stream_idx, qf.input_first_pts));
			AVPacket pkt;
			av_init_packet(&pkt);
			pkt.stream_index = 0;
			pkt.data = (uint8_t *)jpeg.data();
			pkt.size = jpeg.size();
			stream_mux->add_packet(pkt, qf.output_pts, qf.output_pts);

			last_frame.assign(&jpeg[0], &jpeg[0] + jpeg.size());
		} else if (qf.type == QueuedFrame::FADED) {
			glClientWaitSync(qf.fence.get(), /*flags=*/0, GL_TIMEOUT_IGNORED);

			shared_ptr<Frame> frame = frame_from_pbo(qf.resources->pbo_contents, 1280, 720);

			// Now JPEG encode it, and send it on to the stream.
			vector<uint8_t> jpeg = encode_jpeg(frame->y.get(), frame->cb.get(), frame->cr.get(), 1280, 720);

			AVPacket pkt;
			av_init_packet(&pkt);
			pkt.stream_index = 0;
			pkt.data = (uint8_t *)jpeg.data();
			pkt.size = jpeg.size();
			stream_mux->add_packet(pkt, qf.output_pts, qf.output_pts);
			last_frame = move(jpeg);
		} else if (qf.type == QueuedFrame::INTERPOLATED || qf.type == QueuedFrame::FADED_INTERPOLATED) {
			glClientWaitSync(qf.fence.get(), /*flags=*/0, GL_TIMEOUT_IGNORED);

			// Send a copy of the frame on to display.
			shared_ptr<Frame> frame = frame_from_pbo(qf.resources->pbo_contents, 1280, 720);
			JPEGFrameView::insert_interpolated_frame(qf.id, frame);

			// Now JPEG encode it, and send it on to the stream.
			vector<uint8_t> jpeg = encode_jpeg(frame->y.get(), frame->cb.get(), frame->cr.get(), 1280, 720);
			compute_flow->release_texture(qf.flow_tex);
			if (qf.type != QueuedFrame::FADED_INTERPOLATED) {
				interpolate->release_texture(qf.output_tex);
				interpolate->release_texture(qf.cbcr_tex);
			}

			AVPacket pkt;
			av_init_packet(&pkt);
			pkt.stream_index = 0;
			pkt.data = (uint8_t *)jpeg.data();
			pkt.size = jpeg.size();
			stream_mux->add_packet(pkt, qf.output_pts, qf.output_pts);
			last_frame = move(jpeg);
		} else if (qf.type == QueuedFrame::REFRESH) {
			AVPacket pkt;
			av_init_packet(&pkt);
			pkt.stream_index = 0;
			pkt.data = (uint8_t *)last_frame.data();
			pkt.size = last_frame.size();
			stream_mux->add_packet(pkt, qf.output_pts, qf.output_pts);
		} else {
			assert(false);
		}
		if (qf.display_func != nullptr) {
			qf.display_func();
		}
	}
}

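// AVIO write callback (a C-style thunk into the member function below). The
// muxer hands us each chunk of the NUT byte stream together with a marker
// type: headers go to the HTTPD as the stream header, everything else as
// stream data, with sync points flagged so new HTTP clients can join there.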
int VideoStream::write_packet2_thunk(void *opaque, uint8_t *buf, int buf_size, AVIODataMarkerType type, int64_t time)
{
	VideoStream *video_stream = (VideoStream *)opaque;
	return video_stream->write_packet2(buf, buf_size, type, time);
}

int VideoStream::write_packet2(uint8_t *buf, int buf_size, AVIODataMarkerType type, int64_t time)
{
	if (type == AVIO_DATA_MARKER_SYNC_POINT || type == AVIO_DATA_MARKER_BOUNDARY_POINT) {
		seen_sync_markers = true;
	} else if (type == AVIO_DATA_MARKER_UNKNOWN && !seen_sync_markers) {
		// We don't know if this is a keyframe or not (the muxer could
		// avoid marking it), so we just have to make the best of it.
		type = AVIO_DATA_MARKER_SYNC_POINT;
	}

	if (type == AVIO_DATA_MARKER_HEADER) {
		stream_mux_header.append((char *)buf, buf_size);
		global_httpd->set_header(stream_mux_header);
	} else {
		global_httpd->add_data((char *)buf, buf_size, type == AVIO_DATA_MARKER_SYNC_POINT, time, AVRational{ AV_TIME_BASE, 1 });
	}
	return buf_size;
}