[nageru] / futatabi / video_stream.cpp
Commit: Make multitrack export include audio.
#include "video_stream.h"

extern "C" {
#include <libavformat/avformat.h>
#include <libavformat/avio.h>
}

#include "chroma_subsampler.h"
#include "flags.h"
#include "flow.h"
#include "jpeg_frame_view.h"
#include "movit/util.h"
#include "player.h"
#include "shared/context.h"
#include "shared/httpd.h"
#include "shared/mux.h"
#include "util.h"
#include "ycbcr_converter.h"

#include <epoxy/glx.h>
#include <jpeglib.h>
#include <unistd.h>

using namespace std;
using namespace std::chrono;

extern HTTPD *global_httpd;

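// A libjpeg destination manager that writes the compressed JPEG into a
// std::string instead of a stdio file. libjpeg only ever sees the embedded
// jpeg_destination_mgr, so the struct must be standard-layout for the casts
// in the thunks below to be valid (checked by the static_assert after it).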
struct VectorDestinationManager {
        jpeg_destination_mgr pub;
        string dest;

        VectorDestinationManager()
        {
                pub.init_destination = init_destination_thunk;
                pub.empty_output_buffer = empty_output_buffer_thunk;
                pub.term_destination = term_destination_thunk;
        }

        static void init_destination_thunk(j_compress_ptr ptr)
        {
                ((VectorDestinationManager *)(ptr->dest))->init_destination();
        }

        inline void init_destination()
        {
                make_room(0);
        }

        static boolean empty_output_buffer_thunk(j_compress_ptr ptr)
        {
                return ((VectorDestinationManager *)(ptr->dest))->empty_output_buffer();
        }

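        // Called by libjpeg when the output buffer is full. Per the libjpeg
        // contract, the entire buffer must be considered written at this point,
        // regardless of what free_in_buffer claims (hence the comment below).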
        inline bool empty_output_buffer()
        {
                make_room(dest.size());  // Should ignore pub.free_in_buffer!
                return true;
        }

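        // Grow the string so that there is always free space to write into,
        // then resize up to capacity() so we use whatever slack the allocator
        // handed us beyond the 4 kB we asked for.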
        inline void make_room(size_t bytes_used)
        {
                dest.resize(bytes_used + 4096);
                dest.resize(dest.capacity());
                pub.next_output_byte = (uint8_t *)dest.data() + bytes_used;
                pub.free_in_buffer = dest.size() - bytes_used;
        }

        static void term_destination_thunk(j_compress_ptr ptr)
        {
                ((VectorDestinationManager *)(ptr->dest))->term_destination();
        }

        inline void term_destination()
        {
                dest.resize(dest.size() - pub.free_in_buffer);
        }
};
static_assert(std::is_standard_layout<VectorDestinationManager>::value, "");

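// Encode planar 4:2:2 Y'CbCr (full-height, half-width chroma planes) to JPEG.
// The data is fed to libjpeg pre-subsampled (raw_data_in), so libjpeg does no
// color conversion or chroma downsampling of its own.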
string encode_jpeg(const uint8_t *y_data, const uint8_t *cb_data, const uint8_t *cr_data, unsigned width, unsigned height)
{
        VectorDestinationManager dest;

        jpeg_compress_struct cinfo;
        jpeg_error_mgr jerr;
        cinfo.err = jpeg_std_error(&jerr);
        jpeg_create_compress(&cinfo);

        cinfo.dest = (jpeg_destination_mgr *)&dest;
        cinfo.input_components = 3;
        cinfo.in_color_space = JCS_RGB;
        jpeg_set_defaults(&cinfo);
        constexpr int quality = 90;
        jpeg_set_quality(&cinfo, quality, /*force_baseline=*/false);

        cinfo.image_width = width;
        cinfo.image_height = height;
        cinfo.raw_data_in = true;
        jpeg_set_colorspace(&cinfo, JCS_YCbCr);
        cinfo.comp_info[0].h_samp_factor = 2;
        cinfo.comp_info[0].v_samp_factor = 1;
        cinfo.comp_info[1].h_samp_factor = 1;
        cinfo.comp_info[1].v_samp_factor = 1;
        cinfo.comp_info[2].h_samp_factor = 1;
        cinfo.comp_info[2].v_samp_factor = 1;
        cinfo.CCIR601_sampling = true;  // Seems to be mostly ignored by libjpeg, though.
        jpeg_start_compress(&cinfo, true);

        // This comment marker is private to FFmpeg. It signals limited Y'CbCr range
        // (and nothing else).
        jpeg_write_marker(&cinfo, JPEG_COM, (const JOCTET *)"CS=ITU601", strlen("CS=ITU601"));

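        // Feed libjpeg one MCU row (eight scanlines, since v_samp_factor is 1
        // for all components) at a time; this assumes height is divisible by 8.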
        JSAMPROW yptr[8], cbptr[8], crptr[8];
        JSAMPARRAY data[3] = { yptr, cbptr, crptr };
        for (unsigned y = 0; y < height; y += 8) {
                for (unsigned yy = 0; yy < 8; ++yy) {
                        yptr[yy] = const_cast<JSAMPROW>(&y_data[(y + yy) * width]);
                        cbptr[yy] = const_cast<JSAMPROW>(&cb_data[(y + yy) * width / 2]);
                        crptr[yy] = const_cast<JSAMPROW>(&cr_data[(y + yy) * width / 2]);
                }

                jpeg_write_raw_data(&cinfo, data, /*num_lines=*/8);
        }

        jpeg_finish_compress(&cinfo);
        jpeg_destroy_compress(&cinfo);

        return move(dest.dest);
}

VideoStream::VideoStream(AVFormatContext *file_avctx)
        : avctx(file_avctx), output_fast_forward(file_avctx != nullptr)
{
        ycbcr_converter.reset(new YCbCrConverter(YCbCrConverter::OUTPUT_TO_DUAL_YCBCR, /*resource_pool=*/nullptr));
        ycbcr_semiplanar_converter.reset(new YCbCrConverter(YCbCrConverter::OUTPUT_TO_SEMIPLANAR, /*resource_pool=*/nullptr));

        GLuint input_tex[num_interpolate_slots], gray_tex[num_interpolate_slots];
        GLuint fade_y_output_tex[num_interpolate_slots], fade_cbcr_output_tex[num_interpolate_slots];
        GLuint cb_tex[num_interpolate_slots], cr_tex[num_interpolate_slots];

        glCreateTextures(GL_TEXTURE_2D_ARRAY, num_interpolate_slots, input_tex);
        glCreateTextures(GL_TEXTURE_2D_ARRAY, num_interpolate_slots, gray_tex);
        glCreateTextures(GL_TEXTURE_2D, num_interpolate_slots, fade_y_output_tex);
        glCreateTextures(GL_TEXTURE_2D, num_interpolate_slots, fade_cbcr_output_tex);
        glCreateTextures(GL_TEXTURE_2D, num_interpolate_slots, cb_tex);
        glCreateTextures(GL_TEXTURE_2D, num_interpolate_slots, cr_tex);
        check_error();

        size_t width = global_flags.width, height = global_flags.height;
        int levels = find_num_levels(width, height);
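        // Set up one set of resources per interpolation slot; each slot holds
        // the textures, FBOs and readback PBO for one frame in flight.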
        for (size_t i = 0; i < num_interpolate_slots; ++i) {
                glTextureStorage3D(input_tex[i], levels, GL_RGBA8, width, height, 2);
                check_error();
                glTextureStorage3D(gray_tex[i], levels, GL_R8, width, height, 2);
                check_error();
                glTextureStorage2D(fade_y_output_tex[i], 1, GL_R8, width, height);
                check_error();
                glTextureStorage2D(fade_cbcr_output_tex[i], 1, GL_RG8, width, height);
                check_error();
                glTextureStorage2D(cb_tex[i], 1, GL_R8, width / 2, height);
                check_error();
                glTextureStorage2D(cr_tex[i], 1, GL_R8, width / 2, height);
                check_error();

                unique_ptr<InterpolatedFrameResources> resource(new InterpolatedFrameResources);
                resource->owner = this;
                resource->input_tex = input_tex[i];
                resource->gray_tex = gray_tex[i];
                resource->fade_y_output_tex = fade_y_output_tex[i];
                resource->fade_cbcr_output_tex = fade_cbcr_output_tex[i];
                resource->cb_tex = cb_tex[i];
                resource->cr_tex = cr_tex[i];
                glCreateFramebuffers(2, resource->input_fbos);
                check_error();
                glCreateFramebuffers(1, &resource->fade_fbo);
                check_error();

                glNamedFramebufferTextureLayer(resource->input_fbos[0], GL_COLOR_ATTACHMENT0, input_tex[i], 0, 0);
                check_error();
                glNamedFramebufferTextureLayer(resource->input_fbos[0], GL_COLOR_ATTACHMENT1, gray_tex[i], 0, 0);
                check_error();
                glNamedFramebufferTextureLayer(resource->input_fbos[1], GL_COLOR_ATTACHMENT0, input_tex[i], 0, 1);
                check_error();
                glNamedFramebufferTextureLayer(resource->input_fbos[1], GL_COLOR_ATTACHMENT1, gray_tex[i], 0, 1);
                check_error();
                glNamedFramebufferTexture(resource->fade_fbo, GL_COLOR_ATTACHMENT0, fade_y_output_tex[i], 0);
                check_error();
                glNamedFramebufferTexture(resource->fade_fbo, GL_COLOR_ATTACHMENT1, fade_cbcr_output_tex[i], 0);
                check_error();

                GLuint bufs[] = { GL_COLOR_ATTACHMENT0, GL_COLOR_ATTACHMENT1 };
                glNamedFramebufferDrawBuffers(resource->input_fbos[0], 2, bufs);
                check_error();
                glNamedFramebufferDrawBuffers(resource->input_fbos[1], 2, bufs);
                check_error();
                glNamedFramebufferDrawBuffers(resource->fade_fbo, 2, bufs);
                check_error();

                glCreateBuffers(1, &resource->pbo);
                check_error();
                glNamedBufferStorage(resource->pbo, width * height * 4, nullptr, GL_MAP_READ_BIT | GL_MAP_PERSISTENT_BIT);
                check_error();
                resource->pbo_contents = glMapNamedBufferRange(resource->pbo, 0, width * height * 4, GL_MAP_READ_BIT | GL_MAP_PERSISTENT_BIT);
                interpolate_resources.push_back(move(resource));
        }

        check_error();

        OperatingPoint op;
        if (global_flags.interpolation_quality == 0 ||
            global_flags.interpolation_quality == 1) {
                op = operating_point1;
        } else if (global_flags.interpolation_quality == 2) {
                op = operating_point2;
        } else if (global_flags.interpolation_quality == 3) {
                op = operating_point3;
        } else if (global_flags.interpolation_quality == 4) {
                op = operating_point4;
        } else {
                // Quality 0 will be changed to 1 in flags.cpp.
                assert(false);
        }

        compute_flow.reset(new DISComputeFlow(width, height, op));
        interpolate.reset(new Interpolate(op, /*split_ycbcr_output=*/true));
        interpolate_no_split.reset(new Interpolate(op, /*split_ycbcr_output=*/false));
        chroma_subsampler.reset(new ChromaSubsampler);
        check_error();

        // The “last frame” is initially black.
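        // (Y' = 16 with Cb = Cr = 128 is black in limited-range Y'CbCr.)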
        unique_ptr<uint8_t[]> y(new uint8_t[global_flags.width * global_flags.height]);
        unique_ptr<uint8_t[]> cb_or_cr(new uint8_t[(global_flags.width / 2) * global_flags.height]);
        memset(y.get(), 16, global_flags.width * global_flags.height);
        memset(cb_or_cr.get(), 128, (global_flags.width / 2) * global_flags.height);
        last_frame = encode_jpeg(y.get(), cb_or_cr.get(), cb_or_cr.get(), global_flags.width, global_flags.height);
}

VideoStream::~VideoStream()
{
        if (last_flow_tex != 0) {
                compute_flow->release_texture(last_flow_tex);
        }

        for (const unique_ptr<InterpolatedFrameResources> &resource : interpolate_resources) {
                glUnmapNamedBuffer(resource->pbo);
                check_error();
                glDeleteBuffers(1, &resource->pbo);
                check_error();
                glDeleteFramebuffers(2, resource->input_fbos);
                check_error();
                glDeleteFramebuffers(1, &resource->fade_fbo);
                check_error();
                glDeleteTextures(1, &resource->input_tex);
                check_error();
                glDeleteTextures(1, &resource->gray_tex);
                check_error();
                glDeleteTextures(1, &resource->fade_y_output_tex);
                check_error();
                glDeleteTextures(1, &resource->fade_cbcr_output_tex);
                check_error();
                glDeleteTextures(1, &resource->cb_tex);
                check_error();
                glDeleteTextures(1, &resource->cr_tex);
                check_error();
        }
        assert(interpolate_resources.size() == num_interpolate_slots);
}

void VideoStream::start()
{
        if (avctx == nullptr) {
                avctx = avformat_alloc_context();

                // We use Matroska, because it's pretty much the only mux where FFmpeg
                // allows writing chroma location to override JFIF's default center placement.
                // (Note that at the time of writing, however, FFmpeg does not correctly
                // _read_ this information!)
                avctx->oformat = av_guess_format("matroska", nullptr, nullptr);

                uint8_t *buf = (uint8_t *)av_malloc(MUX_BUFFER_SIZE);
                avctx->pb = avio_alloc_context(buf, MUX_BUFFER_SIZE, 1, this, nullptr, nullptr, nullptr);
                avctx->pb->write_data_type = &VideoStream::write_packet2_thunk;
                avctx->pb->ignore_boundary_point = 1;
285
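                // AVFMT_FLAG_CUSTOM_IO tells FFmpeg that we own the AVIOContext;
                // everything the muxer writes goes through write_packet2_thunk()
                // below, which forwards it to the HTTP stream.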
                avctx->flags = AVFMT_FLAG_CUSTOM_IO;
        }

        size_t width = global_flags.width, height = global_flags.height;  // Doesn't matter for MJPEG.
        mux.reset(new Mux(avctx, width, height, Mux::CODEC_MJPEG, /*video_extradata=*/"", /*audio_codec_parameters=*/nullptr,
                          AVCOL_SPC_BT709, COARSE_TIMEBASE, /*write_callback=*/nullptr, Mux::WRITE_FOREGROUND, {}, Mux::WITH_SUBTITLES));

        encode_thread = thread(&VideoStream::encode_thread_func, this);
}

void VideoStream::stop()
{
        should_quit = true;
        queue_changed.notify_all();
        clear_queue();
        encode_thread.join();
}

void VideoStream::clear_queue()
{
        deque<QueuedFrame> q;

        {
                lock_guard<mutex> lock(queue_lock);
                q = move(frame_queue);
        }

        // These are not RAII-ed, unfortunately, so we'll need to clean them ourselves.
        // Note that release_texture() is thread-safe.
        for (const QueuedFrame &qf : q) {
                if (qf.type == QueuedFrame::INTERPOLATED ||
                    qf.type == QueuedFrame::FADED_INTERPOLATED) {
                        if (qf.flow_tex != 0) {
                                compute_flow->release_texture(qf.flow_tex);
                        }
                }
                if (qf.type == QueuedFrame::INTERPOLATED) {
                        interpolate->release_texture(qf.output_tex);
                        interpolate->release_texture(qf.cbcr_tex);
                }
        }

        // Destroy q outside the mutex, since destroying its elements may take
        // queue_lock again (which would be a double-lock).
}

void VideoStream::schedule_original_frame(steady_clock::time_point local_pts,
                                          int64_t output_pts, function<void()> &&display_func,
                                          QueueSpotHolder &&queue_spot_holder,
                                          FrameOnDisk frame, const string &subtitle)
{
        fprintf(stderr, "output_pts=%" PRId64 "  original      input_pts=%" PRId64 "\n", output_pts, frame.pts);

        // TODO: Write audio if at the right speed.

        QueuedFrame qf;
        qf.local_pts = local_pts;
        qf.type = QueuedFrame::ORIGINAL;
        qf.output_pts = output_pts;
        qf.display_func = move(display_func);
        qf.queue_spot_holder = move(queue_spot_holder);
        qf.subtitle = subtitle;
        qf.encoded_jpeg.reset(new string(frame_reader.read_frame(frame, /*read_audio=*/false).video));

        lock_guard<mutex> lock(queue_lock);
        frame_queue.push_back(move(qf));
        queue_changed.notify_all();
}

void VideoStream::schedule_faded_frame(steady_clock::time_point local_pts, int64_t output_pts,
                                       function<void()> &&display_func,
                                       QueueSpotHolder &&queue_spot_holder,
                                       FrameOnDisk frame1_spec, FrameOnDisk frame2_spec,
                                       float fade_alpha, const string &subtitle)
{
        fprintf(stderr, "output_pts=%" PRId64 "  faded         input_pts=%" PRId64 ",%" PRId64 "  fade_alpha=%.2f\n", output_pts, frame1_spec.pts, frame2_spec.pts, fade_alpha);

        // Get the temporary OpenGL resources we need for doing the fade.
        // (We share these with interpolated frames, which is slightly
        // overkill, but there's no need to waste resources on keeping
        // separate pools around.)
        BorrowedInterpolatedFrameResources resources;
        {
                lock_guard<mutex> lock(queue_lock);
                if (interpolate_resources.empty()) {
                        fprintf(stderr, "WARNING: Too many interpolated frames already in transit; dropping one.\n");
                        return;
                }
                resources = BorrowedInterpolatedFrameResources(interpolate_resources.front().release());
                interpolate_resources.pop_front();
        }

        bool did_decode;

        shared_ptr<Frame> frame1 = decode_jpeg_with_cache(frame1_spec, DECODE_IF_NOT_IN_CACHE, &frame_reader, &did_decode);
        shared_ptr<Frame> frame2 = decode_jpeg_with_cache(frame2_spec, DECODE_IF_NOT_IN_CACHE, &frame_reader, &did_decode);

        ycbcr_semiplanar_converter->prepare_chain_for_fade(frame1, frame2, fade_alpha)->render_to_fbo(resources->fade_fbo, global_flags.width, global_flags.height);

        QueuedFrame qf;
        qf.local_pts = local_pts;
        qf.type = QueuedFrame::FADED;
        qf.output_pts = output_pts;
        qf.frame1 = frame1_spec;
        qf.display_func = move(display_func);
        qf.queue_spot_holder = move(queue_spot_holder);
        qf.subtitle = subtitle;

        qf.secondary_frame = frame2_spec;

        // Subsample and split Cb/Cr.
        chroma_subsampler->subsample_chroma(resources->fade_cbcr_output_tex, global_flags.width, global_flags.height, resources->cb_tex, resources->cr_tex);

        // Read it down (asynchronously) to the CPU.
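        // (The size arguments to glGetTextureImage() are the number of bytes
        // remaining in the PBO past each write offset, not exact plane sizes;
        // GL only needs an upper bound for its robustness check.)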
        glPixelStorei(GL_PACK_ROW_LENGTH, 0);
        glBindBuffer(GL_PIXEL_PACK_BUFFER, resources->pbo);
        check_error();
        glGetTextureImage(resources->fade_y_output_tex, 0, GL_RED, GL_UNSIGNED_BYTE, global_flags.width * global_flags.height * 4, BUFFER_OFFSET(0));
        check_error();
        glGetTextureImage(resources->cb_tex, 0, GL_RED, GL_UNSIGNED_BYTE, global_flags.width * global_flags.height * 3, BUFFER_OFFSET(global_flags.width * global_flags.height));
        check_error();
        glGetTextureImage(resources->cr_tex, 0, GL_RED, GL_UNSIGNED_BYTE, global_flags.width * global_flags.height * 3 - (global_flags.width / 2) * global_flags.height, BUFFER_OFFSET(global_flags.width * global_flags.height + (global_flags.width / 2) * global_flags.height));
        check_error();
        glBindBuffer(GL_PIXEL_PACK_BUFFER, 0);

        // Set a fence we can wait for to make sure the CPU sees the read.
        glMemoryBarrier(GL_CLIENT_MAPPED_BUFFER_BARRIER_BIT);
        check_error();
        qf.fence = RefCountedGLsync(GL_SYNC_GPU_COMMANDS_COMPLETE, /*flags=*/0);
        check_error();
        qf.resources = move(resources);

        lock_guard<mutex> lock(queue_lock);
        frame_queue.push_back(move(qf));
        queue_changed.notify_all();
}

void VideoStream::schedule_interpolated_frame(steady_clock::time_point local_pts,
                                              int64_t output_pts, function<void(shared_ptr<Frame>)> &&display_func,
                                              QueueSpotHolder &&queue_spot_holder,
                                              FrameOnDisk frame1, FrameOnDisk frame2,
                                              float alpha, FrameOnDisk secondary_frame, float fade_alpha, const string &subtitle)
{
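        // alpha is the interpolation position between frame1 and frame2 (0..1);
        // fade_alpha is used only when fading against secondary_frame
        // (signaled by secondary_frame.pts != -1).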
        if (secondary_frame.pts != -1) {
                fprintf(stderr, "output_pts=%" PRId64 "  interpolated  input_pts1=%" PRId64 " input_pts2=%" PRId64 " alpha=%.3f  secondary_pts=%" PRId64 "  fade_alpha=%.2f\n", output_pts, frame1.pts, frame2.pts, alpha, secondary_frame.pts, fade_alpha);
        } else {
                fprintf(stderr, "output_pts=%" PRId64 "  interpolated  input_pts1=%" PRId64 " input_pts2=%" PRId64 " alpha=%.3f\n", output_pts, frame1.pts, frame2.pts, alpha);
        }

        // Get the temporary OpenGL resources we need for doing the interpolation.
        BorrowedInterpolatedFrameResources resources;
        {
                lock_guard<mutex> lock(queue_lock);
                if (interpolate_resources.empty()) {
                        fprintf(stderr, "WARNING: Too many interpolated frames already in transit; dropping one.\n");
                        return;
                }
                resources = BorrowedInterpolatedFrameResources(interpolate_resources.front().release());
                interpolate_resources.pop_front();
        }

        QueuedFrame qf;
        qf.type = (secondary_frame.pts == -1) ? QueuedFrame::INTERPOLATED : QueuedFrame::FADED_INTERPOLATED;
        qf.output_pts = output_pts;
        qf.display_decoded_func = move(display_func);
        qf.queue_spot_holder = move(queue_spot_holder);
        qf.local_pts = local_pts;
        qf.subtitle = subtitle;

        check_error();

        // Convert frame1 and frame2 to OpenGL textures.
        for (size_t frame_no = 0; frame_no < 2; ++frame_no) {
                FrameOnDisk frame_spec = frame_no == 1 ? frame2 : frame1;
                bool did_decode;
                shared_ptr<Frame> frame = decode_jpeg_with_cache(frame_spec, DECODE_IF_NOT_IN_CACHE, &frame_reader, &did_decode);
                ycbcr_converter->prepare_chain_for_conversion(frame)->render_to_fbo(resources->input_fbos[frame_no], global_flags.width, global_flags.height);
        }

        glGenerateTextureMipmap(resources->input_tex);
        check_error();
        glGenerateTextureMipmap(resources->gray_tex);
        check_error();

        GLuint flow_tex;
        if (last_flow_tex != 0 && frame1 == last_frame1 && frame2 == last_frame2) {
                // Reuse the flow from previous computation. This frequently happens
                // if we slow down by more than 2x, so that there are multiple interpolated
                // frames between each original.
                flow_tex = last_flow_tex;
                qf.flow_tex = 0;
        } else {
                // Cache miss, so release last_flow_tex.
                qf.flow_tex = last_flow_tex;

                // Compute the flow.
                flow_tex = compute_flow->exec(resources->gray_tex, DISComputeFlow::FORWARD_AND_BACKWARD, DISComputeFlow::DO_NOT_RESIZE_FLOW);
                check_error();

                // Store the flow texture for possible reuse next frame.
                last_flow_tex = flow_tex;
                last_frame1 = frame1;
                last_frame2 = frame2;
        }

        if (secondary_frame.pts != -1) {
                // Fade. First kick off the interpolation.
                tie(qf.output_tex, ignore) = interpolate_no_split->exec(resources->input_tex, resources->gray_tex, flow_tex, global_flags.width, global_flags.height, alpha);
                check_error();

                // Now decode the image we are fading against.
                bool did_decode;
                shared_ptr<Frame> frame2 = decode_jpeg_with_cache(secondary_frame, DECODE_IF_NOT_IN_CACHE, &frame_reader, &did_decode);

                // Then fade against it, putting it into the fade Y' and CbCr textures.
                ycbcr_semiplanar_converter->prepare_chain_for_fade_from_texture(qf.output_tex, global_flags.width, global_flags.height, frame2, fade_alpha)->render_to_fbo(resources->fade_fbo, global_flags.width, global_flags.height);

                // Subsample and split Cb/Cr.
                chroma_subsampler->subsample_chroma(resources->fade_cbcr_output_tex, global_flags.width, global_flags.height, resources->cb_tex, resources->cr_tex);

                interpolate_no_split->release_texture(qf.output_tex);
        } else {
                tie(qf.output_tex, qf.cbcr_tex) = interpolate->exec(resources->input_tex, resources->gray_tex, flow_tex, global_flags.width, global_flags.height, alpha);
                check_error();

                // Subsample and split Cb/Cr.
                chroma_subsampler->subsample_chroma(qf.cbcr_tex, global_flags.width, global_flags.height, resources->cb_tex, resources->cr_tex);
        }

        // We could have released qf.flow_tex here, but to make sure we don't cause a stall
        // when trying to reuse it for the next frame, we can just as well hold on to it
        // and release it only when the readback is done.
        //
        // TODO: This is maybe less relevant now that qf.flow_tex contains the texture we used
        // _last_ frame, not this one.

        // Read it down (asynchronously) to the CPU.
        glPixelStorei(GL_PACK_ROW_LENGTH, 0);
        glBindBuffer(GL_PIXEL_PACK_BUFFER, resources->pbo);
        check_error();
        if (secondary_frame.pts != -1) {
                glGetTextureImage(resources->fade_y_output_tex, 0, GL_RED, GL_UNSIGNED_BYTE, global_flags.width * global_flags.height * 4, BUFFER_OFFSET(0));
        } else {
                glGetTextureImage(qf.output_tex, 0, GL_RED, GL_UNSIGNED_BYTE, global_flags.width * global_flags.height * 4, BUFFER_OFFSET(0));
        }
        check_error();
        glGetTextureImage(resources->cb_tex, 0, GL_RED, GL_UNSIGNED_BYTE, global_flags.width * global_flags.height * 3, BUFFER_OFFSET(global_flags.width * global_flags.height));
        check_error();
        glGetTextureImage(resources->cr_tex, 0, GL_RED, GL_UNSIGNED_BYTE, global_flags.width * global_flags.height * 3 - (global_flags.width / 2) * global_flags.height, BUFFER_OFFSET(global_flags.width * global_flags.height + (global_flags.width / 2) * global_flags.height));
        check_error();
        glBindBuffer(GL_PIXEL_PACK_BUFFER, 0);

        // Set a fence we can wait for to make sure the CPU sees the read.
        glMemoryBarrier(GL_CLIENT_MAPPED_BUFFER_BARRIER_BIT);
        check_error();
        qf.fence = RefCountedGLsync(GL_SYNC_GPU_COMMANDS_COMPLETE, /*flags=*/0);
        check_error();
        qf.resources = move(resources);

        lock_guard<mutex> lock(queue_lock);
        frame_queue.push_back(move(qf));
        queue_changed.notify_all();
}

void VideoStream::schedule_refresh_frame(steady_clock::time_point local_pts,
                                         int64_t output_pts, function<void()> &&display_func,
                                         QueueSpotHolder &&queue_spot_holder, const string &subtitle)
{
        QueuedFrame qf;
        qf.type = QueuedFrame::REFRESH;
        qf.output_pts = output_pts;
        qf.display_func = move(display_func);
        qf.queue_spot_holder = move(queue_spot_holder);
        qf.subtitle = subtitle;

        lock_guard<mutex> lock(queue_lock);
        frame_queue.push_back(move(qf));
        queue_changed.notify_all();
}

namespace {

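// Reassemble a Frame from the PBO contents written by the readbacks above:
// a full-resolution Y' plane, followed by half-width Cb and Cr planes (4:2:2).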
shared_ptr<Frame> frame_from_pbo(void *contents, size_t width, size_t height)
{
        size_t chroma_width = width / 2;

        const uint8_t *y = (const uint8_t *)contents;
        const uint8_t *cb = (const uint8_t *)contents + width * height;
        const uint8_t *cr = (const uint8_t *)contents + width * height + chroma_width * height;

        shared_ptr<Frame> frame(new Frame);
        frame->y.reset(new uint8_t[width * height]);
        frame->cb.reset(new uint8_t[chroma_width * height]);
        frame->cr.reset(new uint8_t[chroma_width * height]);
        for (unsigned yy = 0; yy < height; ++yy) {
                memcpy(frame->y.get() + width * yy, y + width * yy, width);
                memcpy(frame->cb.get() + chroma_width * yy, cb + chroma_width * yy, chroma_width);
                memcpy(frame->cr.get() + chroma_width * yy, cr + chroma_width * yy, chroma_width);
        }
        frame->is_semiplanar = false;
        frame->width = width;
        frame->height = height;
        frame->chroma_subsampling_x = 2;
        frame->chroma_subsampling_y = 1;
        frame->pitch_y = width;
        frame->pitch_chroma = chroma_width;
        return frame;
}

}  // namespace

void VideoStream::encode_thread_func()
{
        pthread_setname_np(pthread_self(), "VideoStream");
        QSurface *surface = create_surface();
        QOpenGLContext *context = create_context(surface);
        bool ok = make_current(context, surface);
        if (!ok) {
                fprintf(stderr, "Video stream couldn't get an OpenGL context\n");
                abort();
        }

        while (!should_quit) {
                QueuedFrame qf;
                {
                        unique_lock<mutex> lock(queue_lock);

                        // Wait until we have a frame to play.
                        queue_changed.wait(lock, [this] {
                                return !frame_queue.empty() || should_quit;
                        });
                        if (should_quit) {
                                break;
                        }
                        steady_clock::time_point frame_start = frame_queue.front().local_pts;

                        // Now sleep until the frame is supposed to start (the usual case),
                        // _or_ clear_queue() happened.
                        bool aborted;
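                        // When exporting to file (output_fast_forward), don't wait
                        // for the wall clock; just check whether clear_queue()
                        // already invalidated the frame.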
                        if (output_fast_forward) {
                                aborted = frame_queue.empty() || frame_queue.front().local_pts != frame_start;
                        } else {
                                aborted = queue_changed.wait_until(lock, frame_start, [this, frame_start] {
                                        return frame_queue.empty() || frame_queue.front().local_pts != frame_start;
                                });
                        }
                        if (aborted) {
                                // clear_queue() happened, so don't play this frame after all.
                                continue;
                        }
                        qf = move(frame_queue.front());
                        frame_queue.pop_front();
                }

                // Hack: We mux the subtitle packet one time unit before the actual frame,
                // so that Nageru is sure to get it first.
                if (!qf.subtitle.empty()) {
                        AVPacket pkt;
                        av_init_packet(&pkt);
                        pkt.stream_index = mux->get_subtitle_stream_idx();
                        assert(pkt.stream_index != -1);
                        pkt.data = (uint8_t *)qf.subtitle.data();
                        pkt.size = qf.subtitle.size();
                        pkt.flags = 0;
                        pkt.duration = lrint(TIMEBASE / global_flags.output_framerate);  // Doesn't really matter for Nageru.
                        mux->add_packet(pkt, qf.output_pts - 1, qf.output_pts - 1);
                }

                if (qf.type == QueuedFrame::ORIGINAL) {
                        // Send the JPEG frame on, unchanged.
                        string jpeg = move(*qf.encoded_jpeg);
                        AVPacket pkt;
                        av_init_packet(&pkt);
                        pkt.stream_index = 0;
                        pkt.data = (uint8_t *)jpeg.data();
                        pkt.size = jpeg.size();
                        pkt.flags = AV_PKT_FLAG_KEY;
                        mux->add_packet(pkt, qf.output_pts, qf.output_pts);
                        last_frame = move(jpeg);
                } else if (qf.type == QueuedFrame::FADED) {
                        glClientWaitSync(qf.fence.get(), /*flags=*/0, GL_TIMEOUT_IGNORED);

                        shared_ptr<Frame> frame = frame_from_pbo(qf.resources->pbo_contents, global_flags.width, global_flags.height);

                        // Now JPEG encode it, and send it on to the stream.
                        string jpeg = encode_jpeg(frame->y.get(), frame->cb.get(), frame->cr.get(), global_flags.width, global_flags.height);

                        AVPacket pkt;
                        av_init_packet(&pkt);
                        pkt.stream_index = 0;
                        pkt.data = (uint8_t *)jpeg.data();
                        pkt.size = jpeg.size();
                        pkt.flags = AV_PKT_FLAG_KEY;
                        mux->add_packet(pkt, qf.output_pts, qf.output_pts);
                        last_frame = move(jpeg);
                } else if (qf.type == QueuedFrame::INTERPOLATED || qf.type == QueuedFrame::FADED_INTERPOLATED) {
                        glClientWaitSync(qf.fence.get(), /*flags=*/0, GL_TIMEOUT_IGNORED);

                        // Send it on to display.
                        shared_ptr<Frame> frame = frame_from_pbo(qf.resources->pbo_contents, global_flags.width, global_flags.height);
                        if (qf.display_decoded_func != nullptr) {
                                qf.display_decoded_func(frame);
                        }

                        // Now JPEG encode it, and send it on to the stream.
                        string jpeg = encode_jpeg(frame->y.get(), frame->cb.get(), frame->cr.get(), global_flags.width, global_flags.height);
                        if (qf.flow_tex != 0) {
                                compute_flow->release_texture(qf.flow_tex);
                        }
                        if (qf.type != QueuedFrame::FADED_INTERPOLATED) {
                                interpolate->release_texture(qf.output_tex);
                                interpolate->release_texture(qf.cbcr_tex);
                        }

                        AVPacket pkt;
                        av_init_packet(&pkt);
                        pkt.stream_index = 0;
                        pkt.data = (uint8_t *)jpeg.data();
                        pkt.size = jpeg.size();
                        pkt.flags = AV_PKT_FLAG_KEY;
                        mux->add_packet(pkt, qf.output_pts, qf.output_pts);
                        last_frame = move(jpeg);
                } else if (qf.type == QueuedFrame::REFRESH) {
                        AVPacket pkt;
                        av_init_packet(&pkt);
                        pkt.stream_index = 0;
                        pkt.data = (uint8_t *)last_frame.data();
                        pkt.size = last_frame.size();
                        pkt.flags = AV_PKT_FLAG_KEY;
                        mux->add_packet(pkt, qf.output_pts, qf.output_pts);
                } else {
                        assert(false);
                }
                if (qf.display_func != nullptr) {
                        qf.display_func();
                }
        }
}

int VideoStream::write_packet2_thunk(void *opaque, uint8_t *buf, int buf_size, AVIODataMarkerType type, int64_t time)
{
        VideoStream *video_stream = (VideoStream *)opaque;
        return video_stream->write_packet2(buf, buf_size, type, time);
}

int VideoStream::write_packet2(uint8_t *buf, int buf_size, AVIODataMarkerType type, int64_t time)
{
        if (type == AVIO_DATA_MARKER_SYNC_POINT || type == AVIO_DATA_MARKER_BOUNDARY_POINT) {
                seen_sync_markers = true;
        } else if (type == AVIO_DATA_MARKER_UNKNOWN && !seen_sync_markers) {
                // We don't know if this is a keyframe or not (the muxer could
                // avoid marking it), so we just have to make the best of it.
                type = AVIO_DATA_MARKER_SYNC_POINT;
        }

        if (type == AVIO_DATA_MARKER_HEADER) {
                stream_mux_header.append((char *)buf, buf_size);
                global_httpd->set_header(HTTPD::MAIN_STREAM, stream_mux_header);
        } else {
                global_httpd->add_data(HTTPD::MAIN_STREAM, (char *)buf, buf_size, type == AVIO_DATA_MARKER_SYNC_POINT, time, AVRational{ AV_TIME_BASE, 1 });
        }
        return buf_size;
}