#include "video_stream.h"

extern "C" {
#include <libavformat/avformat.h>
#include <libavformat/avio.h>
}

#include "chroma_subsampler.h"
#include "exif_parser.h"
#include "flags.h"
#include "flow.h"
#include "jpeg_frame_view.h"
#include "movit/util.h"
#include "player.h"
#include "shared/context.h"
#include "shared/httpd.h"
#include "shared/shared_defs.h"
#include "shared/mux.h"
#include "util.h"
#include "ycbcr_converter.h"

#include <epoxy/glx.h>
#include <jpeglib.h>
#include <unistd.h>

using namespace movit;
using namespace std;
using namespace std::chrono;

extern HTTPD *global_httpd;

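// libjpeg destination manager that appends the compressed output to a
// std::string instead of a file. The thunks below cast cinfo.dest back to
// this struct, so the jpeg_destination_mgr member must sit at offset 0;
// hence the standard-layout static_assert after the struct.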
struct VectorDestinationManager {
        jpeg_destination_mgr pub;
        string dest;

        VectorDestinationManager()
        {
                pub.init_destination = init_destination_thunk;
                pub.empty_output_buffer = empty_output_buffer_thunk;
                pub.term_destination = term_destination_thunk;
        }

        static void init_destination_thunk(j_compress_ptr ptr)
        {
                ((VectorDestinationManager *)(ptr->dest))->init_destination();
        }

        inline void init_destination()
        {
                make_room(0);
        }

        static boolean empty_output_buffer_thunk(j_compress_ptr ptr)
        {
                return ((VectorDestinationManager *)(ptr->dest))->empty_output_buffer();
        }

        inline bool empty_output_buffer()
        {
                make_room(dest.size());  // Should ignore pub.free_in_buffer!
                return true;
        }

        inline void make_room(size_t bytes_used)
        {
                dest.resize(bytes_used + 4096);
                dest.resize(dest.capacity());
                pub.next_output_byte = (uint8_t *)dest.data() + bytes_used;
                pub.free_in_buffer = dest.size() - bytes_used;
        }

        static void term_destination_thunk(j_compress_ptr ptr)
        {
                ((VectorDestinationManager *)(ptr->dest))->term_destination();
        }

        inline void term_destination()
        {
                dest.resize(dest.size() - pub.free_in_buffer);
        }
};
static_assert(std::is_standard_layout<VectorDestinationManager>::value, "");

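// Encodes planar 4:2:2 Y'CbCr data (full-height, half-width chroma planes)
// into an in-memory JPEG at quality 90, tagged as limited-range through the
// FFmpeg-private "CS=ITU601" comment marker, optionally with Exif data attached.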
string encode_jpeg(const uint8_t *y_data, const uint8_t *cb_data, const uint8_t *cr_data, unsigned width, unsigned height, const string exif_data)
{
        VectorDestinationManager dest;

        jpeg_compress_struct cinfo;
        jpeg_error_mgr jerr;
        cinfo.err = jpeg_std_error(&jerr);
        jpeg_create_compress(&cinfo);

        cinfo.dest = (jpeg_destination_mgr *)&dest;
        cinfo.input_components = 3;
        cinfo.in_color_space = JCS_RGB;
        jpeg_set_defaults(&cinfo);
        constexpr int quality = 90;
        jpeg_set_quality(&cinfo, quality, /*force_baseline=*/false);

        cinfo.image_width = width;
        cinfo.image_height = height;
        cinfo.raw_data_in = true;
        jpeg_set_colorspace(&cinfo, JCS_YCbCr);
        cinfo.comp_info[0].h_samp_factor = 2;
        cinfo.comp_info[0].v_samp_factor = 1;
        cinfo.comp_info[1].h_samp_factor = 1;
        cinfo.comp_info[1].v_samp_factor = 1;
        cinfo.comp_info[2].h_samp_factor = 1;
        cinfo.comp_info[2].v_samp_factor = 1;
        cinfo.CCIR601_sampling = true;  // Seems to be mostly ignored by libjpeg, though.
        jpeg_start_compress(&cinfo, true);

        // This comment marker is private to FFmpeg. It signals limited Y'CbCr range
        // (and nothing else).
        jpeg_write_marker(&cinfo, JPEG_COM, (const JOCTET *)"CS=ITU601", strlen("CS=ITU601"));

        if (!exif_data.empty()) {
                jpeg_write_marker(&cinfo, JPEG_APP0 + 1, (const JOCTET *)exif_data.data(), exif_data.size());
        }

        JSAMPROW yptr[8], cbptr[8], crptr[8];
        JSAMPARRAY data[3] = { yptr, cbptr, crptr };
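        // With v_samp_factor = 1 for every component, jpeg_write_raw_data()
        // consumes DCTSIZE = 8 rows per component per call; the height is
        // assumed to be a multiple of 8.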
        for (unsigned y = 0; y < height; y += 8) {
                for (unsigned yy = 0; yy < 8; ++yy) {
                        yptr[yy] = const_cast<JSAMPROW>(&y_data[(y + yy) * width]);
                        cbptr[yy] = const_cast<JSAMPROW>(&cb_data[(y + yy) * width / 2]);
                        crptr[yy] = const_cast<JSAMPROW>(&cr_data[(y + yy) * width / 2]);
                }

                jpeg_write_raw_data(&cinfo, data, /*num_lines=*/8);
        }

        jpeg_finish_compress(&cinfo);
        jpeg_destroy_compress(&cinfo);

        return move(dest.dest);
}

VideoStream::VideoStream(AVFormatContext *file_avctx)
        : avctx(file_avctx), output_fast_forward(file_avctx != nullptr)
{
        ycbcr_converter.reset(new YCbCrConverter(YCbCrConverter::OUTPUT_TO_DUAL_YCBCR, /*resource_pool=*/nullptr));
        ycbcr_semiplanar_converter.reset(new YCbCrConverter(YCbCrConverter::OUTPUT_TO_SEMIPLANAR, /*resource_pool=*/nullptr));

        GLuint input_tex[num_interpolate_slots], gray_tex[num_interpolate_slots];
        GLuint fade_y_output_tex[num_interpolate_slots], fade_cbcr_output_tex[num_interpolate_slots];
        GLuint cb_tex[num_interpolate_slots], cr_tex[num_interpolate_slots];

        glCreateTextures(GL_TEXTURE_2D_ARRAY, num_interpolate_slots, input_tex);
        glCreateTextures(GL_TEXTURE_2D_ARRAY, num_interpolate_slots, gray_tex);
        glCreateTextures(GL_TEXTURE_2D, num_interpolate_slots, fade_y_output_tex);
        glCreateTextures(GL_TEXTURE_2D, num_interpolate_slots, fade_cbcr_output_tex);
        glCreateTextures(GL_TEXTURE_2D, num_interpolate_slots, cb_tex);
        glCreateTextures(GL_TEXTURE_2D, num_interpolate_slots, cr_tex);
        check_error();

        size_t width = global_flags.width, height = global_flags.height;
        int levels = find_num_levels(width, height);
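        // Each interpolation slot gets a pair of 2-layer array textures for the
        // two source frames (RGBA and grayscale, with full mipmap chains), FBOs
        // to render into them, output textures for fades and subsampled chroma,
        // and a persistently mapped PBO that the encode thread later reads the
        // finished planes from.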
        for (size_t i = 0; i < num_interpolate_slots; ++i) {
                glTextureStorage3D(input_tex[i], levels, GL_RGBA8, width, height, 2);
                check_error();
                glTextureStorage3D(gray_tex[i], levels, GL_R8, width, height, 2);
                check_error();
                glTextureStorage2D(fade_y_output_tex[i], 1, GL_R8, width, height);
                check_error();
                glTextureStorage2D(fade_cbcr_output_tex[i], 1, GL_RG8, width, height);
                check_error();
                glTextureStorage2D(cb_tex[i], 1, GL_R8, width / 2, height);
                check_error();
                glTextureStorage2D(cr_tex[i], 1, GL_R8, width / 2, height);
                check_error();

                unique_ptr<InterpolatedFrameResources> resource(new InterpolatedFrameResources);
                resource->owner = this;
                resource->input_tex = input_tex[i];
                resource->gray_tex = gray_tex[i];
                resource->fade_y_output_tex = fade_y_output_tex[i];
                resource->fade_cbcr_output_tex = fade_cbcr_output_tex[i];
                resource->cb_tex = cb_tex[i];
                resource->cr_tex = cr_tex[i];
                glCreateFramebuffers(2, resource->input_fbos);
                check_error();
                glCreateFramebuffers(1, &resource->fade_fbo);
                check_error();

                glNamedFramebufferTextureLayer(resource->input_fbos[0], GL_COLOR_ATTACHMENT0, input_tex[i], 0, 0);
                check_error();
                glNamedFramebufferTextureLayer(resource->input_fbos[0], GL_COLOR_ATTACHMENT1, gray_tex[i], 0, 0);
                check_error();
                glNamedFramebufferTextureLayer(resource->input_fbos[1], GL_COLOR_ATTACHMENT0, input_tex[i], 0, 1);
                check_error();
                glNamedFramebufferTextureLayer(resource->input_fbos[1], GL_COLOR_ATTACHMENT1, gray_tex[i], 0, 1);
                check_error();
                glNamedFramebufferTexture(resource->fade_fbo, GL_COLOR_ATTACHMENT0, fade_y_output_tex[i], 0);
                check_error();
                glNamedFramebufferTexture(resource->fade_fbo, GL_COLOR_ATTACHMENT1, fade_cbcr_output_tex[i], 0);
                check_error();

                GLuint bufs[] = { GL_COLOR_ATTACHMENT0, GL_COLOR_ATTACHMENT1 };
                glNamedFramebufferDrawBuffers(resource->input_fbos[0], 2, bufs);
                check_error();
                glNamedFramebufferDrawBuffers(resource->input_fbos[1], 2, bufs);
                check_error();
                glNamedFramebufferDrawBuffers(resource->fade_fbo, 2, bufs);
                check_error();

                glCreateBuffers(1, &resource->pbo);
                check_error();
                glNamedBufferStorage(resource->pbo, width * height * 4, nullptr, GL_MAP_READ_BIT | GL_MAP_PERSISTENT_BIT);
                check_error();
                resource->pbo_contents = glMapNamedBufferRange(resource->pbo, 0, width * height * 4, GL_MAP_READ_BIT | GL_MAP_PERSISTENT_BIT);
                interpolate_resources.push_back(move(resource));
        }

        check_error();

        OperatingPoint op;
        if (global_flags.interpolation_quality == 0 ||
            global_flags.interpolation_quality == 1) {
                op = operating_point1;
        } else if (global_flags.interpolation_quality == 2) {
                op = operating_point2;
        } else if (global_flags.interpolation_quality == 3) {
                op = operating_point3;
        } else if (global_flags.interpolation_quality == 4) {
                op = operating_point4;
        } else {
                // Quality 0 will be changed to 1 in flags.cpp.
                assert(false);
        }

        compute_flow.reset(new DISComputeFlow(width, height, op));
        interpolate.reset(new Interpolate(op, /*split_ycbcr_output=*/true));
        interpolate_no_split.reset(new Interpolate(op, /*split_ycbcr_output=*/false));
        chroma_subsampler.reset(new ChromaSubsampler);
        check_error();

        // The “last frame” is initially black.
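        // (16, 128, 128) is black in limited-range Y'CbCr.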
        unique_ptr<uint8_t[]> y(new uint8_t[global_flags.width * global_flags.height]);
        unique_ptr<uint8_t[]> cb_or_cr(new uint8_t[(global_flags.width / 2) * global_flags.height]);
        memset(y.get(), 16, global_flags.width * global_flags.height);
        memset(cb_or_cr.get(), 128, (global_flags.width / 2) * global_flags.height);
        last_frame = encode_jpeg(y.get(), cb_or_cr.get(), cb_or_cr.get(), global_flags.width, global_flags.height, /*exif_data=*/"");

        if (file_avctx != nullptr) {
                with_subtitles = Mux::WITHOUT_SUBTITLES;
        } else {
                with_subtitles = Mux::WITH_SUBTITLES;
        }
}

VideoStream::~VideoStream()
{
        if (last_flow_tex != 0) {
                compute_flow->release_texture(last_flow_tex);
        }

        for (const unique_ptr<InterpolatedFrameResources> &resource : interpolate_resources) {
                glUnmapNamedBuffer(resource->pbo);
                check_error();
                glDeleteBuffers(1, &resource->pbo);
                check_error();
                glDeleteFramebuffers(2, resource->input_fbos);
                check_error();
                glDeleteFramebuffers(1, &resource->fade_fbo);
                check_error();
                glDeleteTextures(1, &resource->input_tex);
                check_error();
                glDeleteTextures(1, &resource->gray_tex);
                check_error();
                glDeleteTextures(1, &resource->fade_y_output_tex);
                check_error();
                glDeleteTextures(1, &resource->fade_cbcr_output_tex);
                check_error();
                glDeleteTextures(1, &resource->cb_tex);
                check_error();
                glDeleteTextures(1, &resource->cr_tex);
                check_error();
        }
        assert(interpolate_resources.size() == num_interpolate_slots);
}

void VideoStream::start()
{
        if (avctx == nullptr) {
                avctx = avformat_alloc_context();

                // We use Matroska, because it's pretty much the only mux where FFmpeg
                // allows writing chroma location to override JFIF's default center placement.
                // (Note that at the time of writing, however, FFmpeg does not correctly
                // _read_ this information!)
                avctx->oformat = av_guess_format("matroska", nullptr, nullptr);

                uint8_t *buf = (uint8_t *)av_malloc(MUX_BUFFER_SIZE);
                avctx->pb = avio_alloc_context(buf, MUX_BUFFER_SIZE, 1, this, nullptr, nullptr, nullptr);
                avctx->pb->write_data_type = &VideoStream::write_packet2_thunk;
                avctx->pb->ignore_boundary_point = 1;

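                // We supply the AVIOContext ourselves, so tell FFmpeg not to
                // open or close one on its own.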
                avctx->flags = AVFMT_FLAG_CUSTOM_IO;
        }

        AVCodecParameters *audio_codecpar = avcodec_parameters_alloc();

        audio_codecpar->codec_type = AVMEDIA_TYPE_AUDIO;
        audio_codecpar->codec_id = AV_CODEC_ID_PCM_S32LE;
        audio_codecpar->channel_layout = AV_CH_LAYOUT_STEREO;
        audio_codecpar->channels = 2;
        audio_codecpar->sample_rate = OUTPUT_FREQUENCY;

        size_t width = global_flags.width, height = global_flags.height;  // Doesn't matter for MJPEG.
        mux.reset(new Mux(avctx, width, height, Mux::CODEC_MJPEG, /*video_extradata=*/"", audio_codecpar,
                          AVCOL_SPC_BT709, COARSE_TIMEBASE, /*write_callback=*/nullptr, Mux::WRITE_FOREGROUND, {}, with_subtitles));

        avcodec_parameters_free(&audio_codecpar);
        encode_thread = thread(&VideoStream::encode_thread_func, this);
}

void VideoStream::stop()
{
        should_quit = true;
        queue_changed.notify_all();
        clear_queue();
        encode_thread.join();
}

void VideoStream::clear_queue()
{
        deque<QueuedFrame> q;

        {
                lock_guard<mutex> lock(queue_lock);
                q = move(frame_queue);
        }

        // These are not RAII-ed, unfortunately, so we'll need to clean them ourselves.
        // Note that release_texture() is thread-safe.
        for (const QueuedFrame &qf : q) {
                if (qf.type == QueuedFrame::INTERPOLATED ||
                    qf.type == QueuedFrame::FADED_INTERPOLATED) {
                        if (qf.flow_tex != 0) {
                                compute_flow->release_texture(qf.flow_tex);
                        }
                }
                if (qf.type == QueuedFrame::INTERPOLATED) {
                        interpolate->release_texture(qf.output_tex);
                        interpolate->release_texture(qf.cbcr_tex);
                }
        }

        // Destroy q outside the mutex; destroying it while holding the lock would be a double-lock.
}

void VideoStream::schedule_original_frame(steady_clock::time_point local_pts,
                                          int64_t output_pts, function<void()> &&display_func,
                                          QueueSpotHolder &&queue_spot_holder,
                                          FrameOnDisk frame, const string &subtitle, bool include_audio)
{
        fprintf(stderr, "output_pts=%" PRId64 "  original      input_pts=%" PRId64 "\n", output_pts, frame.pts);

        QueuedFrame qf;
        qf.local_pts = local_pts;
        qf.type = QueuedFrame::ORIGINAL;
        qf.output_pts = output_pts;
        qf.display_func = move(display_func);
        qf.queue_spot_holder = move(queue_spot_holder);
        qf.subtitle = subtitle;
        FrameReader::Frame read_frame = frame_reader.read_frame(frame, /*read_video=*/true, include_audio);
        qf.encoded_jpeg.reset(new string(move(read_frame.video)));
        qf.audio = move(read_frame.audio);

        lock_guard<mutex> lock(queue_lock);
        frame_queue.push_back(move(qf));
        queue_changed.notify_all();
}

void VideoStream::schedule_faded_frame(steady_clock::time_point local_pts, int64_t output_pts,
                                       function<void()> &&display_func,
                                       QueueSpotHolder &&queue_spot_holder,
                                       FrameOnDisk frame1_spec, FrameOnDisk frame2_spec,
                                       float fade_alpha, const string &subtitle)
{
        fprintf(stderr, "output_pts=%" PRId64 "  faded         input_pts=%" PRId64 ",%" PRId64 "  fade_alpha=%.2f\n", output_pts, frame1_spec.pts, frame2_spec.pts, fade_alpha);

        // Get the temporary OpenGL resources we need for doing the fade.
        // (We share these with interpolated frames, which is slightly
        // overkill, but there's no need to waste resources on keeping
        // separate pools around.)
        BorrowedInterpolatedFrameResources resources;
        {
                lock_guard<mutex> lock(queue_lock);
                if (interpolate_resources.empty()) {
                        fprintf(stderr, "WARNING: Too many interpolated frames already in transit; dropping one.\n");
                        return;
                }
                resources = BorrowedInterpolatedFrameResources(interpolate_resources.front().release());
                interpolate_resources.pop_front();
        }

        bool did_decode;

        shared_ptr<Frame> frame1 = decode_jpeg_with_cache(frame1_spec, DECODE_IF_NOT_IN_CACHE, &frame_reader, &did_decode);
        shared_ptr<Frame> frame2 = decode_jpeg_with_cache(frame2_spec, DECODE_IF_NOT_IN_CACHE, &frame_reader, &did_decode);

        ycbcr_semiplanar_converter->prepare_chain_for_fade(frame1, frame2, fade_alpha)->render_to_fbo(resources->fade_fbo, global_flags.width, global_flags.height);

        QueuedFrame qf;
        qf.local_pts = local_pts;
        qf.type = QueuedFrame::FADED;
        qf.output_pts = output_pts;
        qf.frame1 = frame1_spec;
        qf.display_func = move(display_func);
        qf.queue_spot_holder = move(queue_spot_holder);
        qf.subtitle = subtitle;

        qf.secondary_frame = frame2_spec;

        // Subsample and split Cb/Cr.
        chroma_subsampler->subsample_chroma(resources->fade_cbcr_output_tex, global_flags.width, global_flags.height, resources->cb_tex, resources->cr_tex);

        // Read it down (asynchronously) to the CPU.
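        // The PBO is laid out as the full-resolution Y' plane, then the
        // half-width Cb plane, then the half-width Cr plane, back to back.
        // The size arguments below are just the number of bytes left in the
        // PBO from each offset; the actual plane sizes are width*height and
        // (width/2)*height.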
        glPixelStorei(GL_PACK_ROW_LENGTH, 0);
        glBindBuffer(GL_PIXEL_PACK_BUFFER, resources->pbo);
        check_error();
        glGetTextureImage(resources->fade_y_output_tex, 0, GL_RED, GL_UNSIGNED_BYTE, global_flags.width * global_flags.height * 4, BUFFER_OFFSET(0));
        check_error();
        glGetTextureImage(resources->cb_tex, 0, GL_RED, GL_UNSIGNED_BYTE, global_flags.width * global_flags.height * 3, BUFFER_OFFSET(global_flags.width * global_flags.height));
        check_error();
        glGetTextureImage(resources->cr_tex, 0, GL_RED, GL_UNSIGNED_BYTE, global_flags.width * global_flags.height * 3 - (global_flags.width / 2) * global_flags.height, BUFFER_OFFSET(global_flags.width * global_flags.height + (global_flags.width / 2) * global_flags.height));
        check_error();
        glBindBuffer(GL_PIXEL_PACK_BUFFER, 0);

        // Set a fence we can wait for to make sure the CPU sees the read.
        glMemoryBarrier(GL_CLIENT_MAPPED_BUFFER_BARRIER_BIT);
        check_error();
        qf.fence = RefCountedGLsync(GL_SYNC_GPU_COMMANDS_COMPLETE, /*flags=*/0);
        check_error();
        qf.resources = move(resources);
        qf.local_pts = local_pts;

        lock_guard<mutex> lock(queue_lock);
        frame_queue.push_back(move(qf));
        queue_changed.notify_all();
}

void VideoStream::schedule_interpolated_frame(steady_clock::time_point local_pts,
                                              int64_t output_pts, function<void(shared_ptr<Frame>)> &&display_func,
                                              QueueSpotHolder &&queue_spot_holder,
                                              FrameOnDisk frame1, FrameOnDisk frame2,
                                              float alpha, FrameOnDisk secondary_frame, float fade_alpha, const string &subtitle,
                                              bool play_audio)
{
        if (secondary_frame.pts != -1) {
                fprintf(stderr, "output_pts=%" PRId64 "  interpolated  input_pts1=%" PRId64 " input_pts2=%" PRId64 " alpha=%.3f  secondary_pts=%" PRId64 "  fade_alpha=%.2f\n", output_pts, frame1.pts, frame2.pts, alpha, secondary_frame.pts, fade_alpha);
        } else {
                fprintf(stderr, "output_pts=%" PRId64 "  interpolated  input_pts1=%" PRId64 " input_pts2=%" PRId64 " alpha=%.3f\n", output_pts, frame1.pts, frame2.pts, alpha);
        }

        // Get the temporary OpenGL resources we need for doing the interpolation.
        BorrowedInterpolatedFrameResources resources;
        {
                lock_guard<mutex> lock(queue_lock);
                if (interpolate_resources.empty()) {
                        fprintf(stderr, "WARNING: Too many interpolated frames already in transit; dropping one.\n");
                        return;
                }
                resources = BorrowedInterpolatedFrameResources(interpolate_resources.front().release());
                interpolate_resources.pop_front();
        }

        QueuedFrame qf;
        qf.type = (secondary_frame.pts == -1) ? QueuedFrame::INTERPOLATED : QueuedFrame::FADED_INTERPOLATED;
        qf.output_pts = output_pts;
        qf.display_decoded_func = move(display_func);
        qf.queue_spot_holder = move(queue_spot_holder);
        qf.local_pts = local_pts;
        qf.subtitle = subtitle;

        if (play_audio) {
                qf.audio = frame_reader.read_frame(frame1, /*read_video=*/false, /*read_audio=*/true).audio;
        }

        check_error();

        // Convert frame1 and frame2 to OpenGL textures.
        for (size_t frame_no = 0; frame_no < 2; ++frame_no) {
                FrameOnDisk frame_spec = frame_no == 1 ? frame2 : frame1;
                bool did_decode;
                shared_ptr<Frame> frame = decode_jpeg_with_cache(frame_spec, DECODE_IF_NOT_IN_CACHE, &frame_reader, &did_decode);
                ycbcr_converter->prepare_chain_for_conversion(frame)->render_to_fbo(resources->input_fbos[frame_no], global_flags.width, global_flags.height);
                if (frame_no == 1) {
                        qf.exif_data = frame->exif_data;  // Use the white point from the last frame.
                }
        }

        glGenerateTextureMipmap(resources->input_tex);
        check_error();
        glGenerateTextureMipmap(resources->gray_tex);
        check_error();

        GLuint flow_tex;
        if (last_flow_tex != 0 && frame1 == last_frame1 && frame2 == last_frame2) {
                // Reuse the flow from previous computation. This frequently happens
                // if we slow down by more than 2x, so that there are multiple interpolated
                // frames between each original.
                flow_tex = last_flow_tex;
                qf.flow_tex = 0;
        } else {
                // Cache miss, so release last_flow_tex.
                qf.flow_tex = last_flow_tex;

                // Compute the flow.
                flow_tex = compute_flow->exec(resources->gray_tex, DISComputeFlow::FORWARD_AND_BACKWARD, DISComputeFlow::DO_NOT_RESIZE_FLOW);
                check_error();

                // Store the flow texture for possible reuse next frame.
                last_flow_tex = flow_tex;
                last_frame1 = frame1;
                last_frame2 = frame2;
        }

        if (secondary_frame.pts != -1) {
                // Fade. First kick off the interpolation.
                tie(qf.output_tex, ignore) = interpolate_no_split->exec(resources->input_tex, resources->gray_tex, flow_tex, global_flags.width, global_flags.height, alpha);
                check_error();

                // Now decode the image we are fading against.
                bool did_decode;
                shared_ptr<Frame> frame2 = decode_jpeg_with_cache(secondary_frame, DECODE_IF_NOT_IN_CACHE, &frame_reader, &did_decode);

                // Then fade against it, putting it into the fade Y' and CbCr textures.
                RGBTriplet neutral_color = get_neutral_color(qf.exif_data);
                ycbcr_semiplanar_converter->prepare_chain_for_fade_from_texture(qf.output_tex, neutral_color, global_flags.width, global_flags.height, frame2, fade_alpha)->render_to_fbo(resources->fade_fbo, global_flags.width, global_flags.height);

                // Subsample and split Cb/Cr.
                chroma_subsampler->subsample_chroma(resources->fade_cbcr_output_tex, global_flags.width, global_flags.height, resources->cb_tex, resources->cr_tex);

                interpolate_no_split->release_texture(qf.output_tex);

                // We already applied the white balance, so don't have the client redo it.
                qf.exif_data.clear();
        } else {
                tie(qf.output_tex, qf.cbcr_tex) = interpolate->exec(resources->input_tex, resources->gray_tex, flow_tex, global_flags.width, global_flags.height, alpha);
                check_error();

                // Subsample and split Cb/Cr.
                chroma_subsampler->subsample_chroma(qf.cbcr_tex, global_flags.width, global_flags.height, resources->cb_tex, resources->cr_tex);
        }

        // We could have released qf.flow_tex here, but to make sure we don't cause a stall
        // when trying to reuse it for the next frame, we can just as well hold on to it
        // and release it only when the readback is done.
        //
        // TODO: This is maybe less relevant now that qf.flow_tex contains the texture we used
        // _last_ frame, not this one.

        // Read it down (asynchronously) to the CPU.
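        // Same PBO layout as in schedule_faded_frame(): Y' plane, then Cb, then Cr.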
        glPixelStorei(GL_PACK_ROW_LENGTH, 0);
        glBindBuffer(GL_PIXEL_PACK_BUFFER, resources->pbo);
        check_error();
        if (secondary_frame.pts != -1) {
                glGetTextureImage(resources->fade_y_output_tex, 0, GL_RED, GL_UNSIGNED_BYTE, global_flags.width * global_flags.height * 4, BUFFER_OFFSET(0));
        } else {
                glGetTextureImage(qf.output_tex, 0, GL_RED, GL_UNSIGNED_BYTE, global_flags.width * global_flags.height * 4, BUFFER_OFFSET(0));
        }
        check_error();
        glGetTextureImage(resources->cb_tex, 0, GL_RED, GL_UNSIGNED_BYTE, global_flags.width * global_flags.height * 3, BUFFER_OFFSET(global_flags.width * global_flags.height));
        check_error();
        glGetTextureImage(resources->cr_tex, 0, GL_RED, GL_UNSIGNED_BYTE, global_flags.width * global_flags.height * 3 - (global_flags.width / 2) * global_flags.height, BUFFER_OFFSET(global_flags.width * global_flags.height + (global_flags.width / 2) * global_flags.height));
        check_error();
        glBindBuffer(GL_PIXEL_PACK_BUFFER, 0);

        // Set a fence we can wait for to make sure the CPU sees the read.
        glMemoryBarrier(GL_CLIENT_MAPPED_BUFFER_BARRIER_BIT);
        check_error();
        qf.fence = RefCountedGLsync(GL_SYNC_GPU_COMMANDS_COMPLETE, /*flags=*/0);
        check_error();
        qf.resources = move(resources);

        lock_guard<mutex> lock(queue_lock);
        frame_queue.push_back(move(qf));
        queue_changed.notify_all();
}

void VideoStream::schedule_refresh_frame(steady_clock::time_point local_pts,
                                         int64_t output_pts, function<void()> &&display_func,
                                         QueueSpotHolder &&queue_spot_holder, const string &subtitle)
{
        QueuedFrame qf;
        qf.type = QueuedFrame::REFRESH;
        qf.output_pts = output_pts;
        qf.display_func = move(display_func);
        qf.queue_spot_holder = move(queue_spot_holder);
        qf.subtitle = subtitle;

        lock_guard<mutex> lock(queue_lock);
        frame_queue.push_back(move(qf));
        queue_changed.notify_all();
}

void VideoStream::schedule_silence(steady_clock::time_point local_pts, int64_t output_pts,
                                   int64_t length_pts, QueueSpotHolder &&queue_spot_holder)
{
        QueuedFrame qf;
        qf.type = QueuedFrame::SILENCE;
        qf.output_pts = output_pts;
        qf.queue_spot_holder = move(queue_spot_holder);
        qf.silence_length_pts = length_pts;

        lock_guard<mutex> lock(queue_lock);
        frame_queue.push_back(move(qf));
        queue_changed.notify_all();
}

namespace {

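// Reassembles the planar data that was read back into the PBO (Y' plane,
// then Cb, then Cr) into a Frame that the display path and the JPEG encoder
// can consume.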
shared_ptr<Frame> frame_from_pbo(void *contents, size_t width, size_t height)
{
        size_t chroma_width = width / 2;

        const uint8_t *y = (const uint8_t *)contents;
        const uint8_t *cb = (const uint8_t *)contents + width * height;
        const uint8_t *cr = (const uint8_t *)contents + width * height + chroma_width * height;

        shared_ptr<Frame> frame(new Frame);
        frame->y.reset(new uint8_t[width * height]);
        frame->cb.reset(new uint8_t[chroma_width * height]);
        frame->cr.reset(new uint8_t[chroma_width * height]);
        for (unsigned yy = 0; yy < height; ++yy) {
                memcpy(frame->y.get() + width * yy, y + width * yy, width);
                memcpy(frame->cb.get() + chroma_width * yy, cb + chroma_width * yy, chroma_width);
                memcpy(frame->cr.get() + chroma_width * yy, cr + chroma_width * yy, chroma_width);
        }
        frame->is_semiplanar = false;
        frame->width = width;
        frame->height = height;
        frame->chroma_subsampling_x = 2;
        frame->chroma_subsampling_y = 1;
        frame->pitch_y = width;
        frame->pitch_chroma = chroma_width;
        return frame;
}

}  // namespace

void VideoStream::encode_thread_func()
{
        pthread_setname_np(pthread_self(), "VideoStream");
        QSurface *surface = create_surface();
        QOpenGLContext *context = create_context(surface);
        bool ok = make_current(context, surface);
        if (!ok) {
                fprintf(stderr, "Video stream couldn't get an OpenGL context\n");
                abort();
        }

        while (!should_quit) {
                QueuedFrame qf;
                {
                        unique_lock<mutex> lock(queue_lock);

                        // Wait until we have a frame to play.
                        queue_changed.wait(lock, [this] {
                                return !frame_queue.empty() || should_quit;
                        });
                        if (should_quit) {
                                break;
                        }
                        steady_clock::time_point frame_start = frame_queue.front().local_pts;

                        // Now sleep until the frame is supposed to start (the usual case),
                        // _or_ clear_queue() happened.
                        bool aborted;
                        if (output_fast_forward) {
                                aborted = frame_queue.empty() || frame_queue.front().local_pts != frame_start;
                        } else {
                                aborted = queue_changed.wait_until(lock, frame_start, [this, frame_start] {
                                        return frame_queue.empty() || frame_queue.front().local_pts != frame_start;
                                });
                        }
                        if (aborted) {
                                // clear_queue() happened, so don't play this frame after all.
                                continue;
                        }
                        qf = move(frame_queue.front());
                        frame_queue.pop_front();
                }

                // Hack: We mux the subtitle packet one time unit before the actual frame,
                // so that Nageru is sure to get it first.
                if (!qf.subtitle.empty() && with_subtitles == Mux::WITH_SUBTITLES) {
                        AVPacket pkt;
                        av_init_packet(&pkt);
                        pkt.stream_index = mux->get_subtitle_stream_idx();
                        assert(pkt.stream_index != -1);
                        pkt.data = (uint8_t *)qf.subtitle.data();
                        pkt.size = qf.subtitle.size();
                        pkt.flags = 0;
                        pkt.duration = lrint(TIMEBASE / global_flags.output_framerate);  // Doesn't really matter for Nageru.
                        mux->add_packet(pkt, qf.output_pts - 1, qf.output_pts - 1);
                }

                if (qf.type == QueuedFrame::ORIGINAL) {
                        // Send the JPEG frame on, unchanged.
                        string jpeg = move(*qf.encoded_jpeg);
                        AVPacket pkt;
                        av_init_packet(&pkt);
                        pkt.stream_index = 0;
                        pkt.data = (uint8_t *)jpeg.data();
                        pkt.size = jpeg.size();
                        pkt.flags = AV_PKT_FLAG_KEY;
                        mux->add_packet(pkt, qf.output_pts, qf.output_pts);
                        last_frame = move(jpeg);

                        add_audio_or_silence(qf);
                } else if (qf.type == QueuedFrame::FADED) {
                        glClientWaitSync(qf.fence.get(), /*flags=*/0, GL_TIMEOUT_IGNORED);

                        shared_ptr<Frame> frame = frame_from_pbo(qf.resources->pbo_contents, global_flags.width, global_flags.height);
                        assert(frame->exif_data.empty());

                        // Now JPEG encode it, and send it on to the stream.
                        string jpeg = encode_jpeg(frame->y.get(), frame->cb.get(), frame->cr.get(), global_flags.width, global_flags.height, /*exif_data=*/"");

                        AVPacket pkt;
                        av_init_packet(&pkt);
                        pkt.stream_index = 0;
                        pkt.data = (uint8_t *)jpeg.data();
                        pkt.size = jpeg.size();
                        pkt.flags = AV_PKT_FLAG_KEY;
                        mux->add_packet(pkt, qf.output_pts, qf.output_pts);
                        last_frame = move(jpeg);

                        add_audio_or_silence(qf);
                } else if (qf.type == QueuedFrame::INTERPOLATED || qf.type == QueuedFrame::FADED_INTERPOLATED) {
                        glClientWaitSync(qf.fence.get(), /*flags=*/0, GL_TIMEOUT_IGNORED);

                        // Send it on to display.
                        shared_ptr<Frame> frame = frame_from_pbo(qf.resources->pbo_contents, global_flags.width, global_flags.height);
                        if (qf.display_decoded_func != nullptr) {
                                qf.display_decoded_func(frame);
                        }

                        // Now JPEG encode it, and send it on to the stream.
                        string jpeg = encode_jpeg(frame->y.get(), frame->cb.get(), frame->cr.get(), global_flags.width, global_flags.height, move(qf.exif_data));
                        if (qf.flow_tex != 0) {
                                compute_flow->release_texture(qf.flow_tex);
                        }
                        if (qf.type != QueuedFrame::FADED_INTERPOLATED) {
                                interpolate->release_texture(qf.output_tex);
                                interpolate->release_texture(qf.cbcr_tex);
                        }

                        AVPacket pkt;
                        av_init_packet(&pkt);
                        pkt.stream_index = 0;
                        pkt.data = (uint8_t *)jpeg.data();
                        pkt.size = jpeg.size();
                        pkt.flags = AV_PKT_FLAG_KEY;
                        mux->add_packet(pkt, qf.output_pts, qf.output_pts);
                        last_frame = move(jpeg);

                        add_audio_or_silence(qf);
                } else if (qf.type == QueuedFrame::REFRESH) {
                        AVPacket pkt;
                        av_init_packet(&pkt);
                        pkt.stream_index = 0;
                        pkt.data = (uint8_t *)last_frame.data();
                        pkt.size = last_frame.size();
                        pkt.flags = AV_PKT_FLAG_KEY;
                        mux->add_packet(pkt, qf.output_pts, qf.output_pts);

                        add_audio_or_silence(qf);  // Definitely silence.
                } else if (qf.type == QueuedFrame::SILENCE) {
                        add_silence(qf.output_pts, qf.silence_length_pts);
                } else {
                        assert(false);
                }
                if (qf.display_func != nullptr) {
                        qf.display_func();
                }
        }
}

int VideoStream::write_packet2_thunk(void *opaque, uint8_t *buf, int buf_size, AVIODataMarkerType type, int64_t time)
{
        VideoStream *video_stream = (VideoStream *)opaque;
        return video_stream->write_packet2(buf, buf_size, type, time);
}

int VideoStream::write_packet2(uint8_t *buf, int buf_size, AVIODataMarkerType type, int64_t time)
{
        if (type == AVIO_DATA_MARKER_SYNC_POINT || type == AVIO_DATA_MARKER_BOUNDARY_POINT) {
                seen_sync_markers = true;
        } else if (type == AVIO_DATA_MARKER_UNKNOWN && !seen_sync_markers) {
                // We don't know if this is a keyframe or not (the muxer could
                // avoid marking it), so we just have to make the best of it.
                type = AVIO_DATA_MARKER_SYNC_POINT;
        }

        HTTPD::StreamID stream_id{ HTTPD::MAIN_STREAM, 0 };
        if (type == AVIO_DATA_MARKER_HEADER) {
                stream_mux_header.append((char *)buf, buf_size);
                global_httpd->set_header(stream_id, stream_mux_header);
        } else {
                global_httpd->add_data(stream_id, (char *)buf, buf_size, type == AVIO_DATA_MARKER_SYNC_POINT, time, AVRational{ AV_TIME_BASE, 1 });
        }
        return buf_size;
}

void VideoStream::add_silence(int64_t pts, int64_t length_pts)
{
        // At 59.94, this will never quite add up (even discounting refresh frames,
        // which have unpredictable length), but hopefully, the player at the other
        // end should be able to stretch silence easily enough.
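        // num_samples counts individual samples across both stereo channels,
        // hence the factor of two.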
        long num_samples = lrint(length_pts * double(OUTPUT_FREQUENCY) / double(TIMEBASE)) * 2;
        uint8_t *zero = (uint8_t *)calloc(num_samples, sizeof(int32_t));

        AVPacket pkt;
        av_init_packet(&pkt);
        pkt.stream_index = 1;
        pkt.data = zero;
        pkt.size = num_samples * sizeof(int32_t);
        pkt.flags = AV_PKT_FLAG_KEY;
        mux->add_packet(pkt, pts, pts);

        free(zero);
}

void VideoStream::add_audio_or_silence(const QueuedFrame &qf)
{
        if (qf.audio.empty()) {
                int64_t frame_length = lrint(double(TIMEBASE) / global_flags.output_framerate);
                add_silence(qf.output_pts, frame_length);
        } else {
                AVPacket pkt;
                av_init_packet(&pkt);
                pkt.stream_index = 1;
                pkt.data = (uint8_t *)qf.audio.data();
                pkt.size = qf.audio.size();
                pkt.flags = AV_PKT_FLAG_KEY;
                mux->add_packet(pkt, qf.output_pts, qf.output_pts);
        }
}