#include "video_stream.h"

extern "C" {
#include <libavformat/avformat.h>
#include <libavformat/avio.h>
#include <libavutil/channel_layout.h>
}

#include "chroma_subsampler.h"
#include "exif_parser.h"
#include "flags.h"
#include "flow.h"
#include "jpeg_frame_view.h"
#include "movit/util.h"
#include "shared/context.h"
#include "shared/httpd.h"
#include "shared/metrics.h"
#include "shared/shared_defs.h"
#include "shared/mux.h"
#include "shared/timebase.h"
#include "ycbcr_converter.h"

#include <epoxy/glx.h>
#include <inttypes.h>
#include <jpeglib.h>
#include <pthread.h>
#include <string.h>

using namespace movit;
using namespace std;
using namespace std::chrono;
namespace {

once_flag video_metrics_inited;
Summary metric_jpeg_encode_time_seconds;
Summary metric_fade_latency_seconds;
Summary metric_interpolation_latency_seconds;
Summary metric_fade_fence_wait_time_seconds;
Summary metric_interpolation_fence_wait_time_seconds;
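
// The Summary metrics above are Prometheus-style summaries; their quantiles
// are set up on first VideoStream construction (see the call_once in the
// constructor below).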

void wait_for_upload(shared_ptr<Frame> &frame)
{
	if (frame->uploaded_interpolation != nullptr) {
		glWaitSync(frame->uploaded_interpolation.get(), /*flags=*/0, GL_TIMEOUT_IGNORED);
		frame->uploaded_interpolation.reset();
	}
}

}  // namespace

extern HTTPD *global_httpd;

struct VectorDestinationManager {
	jpeg_destination_mgr pub;
	string dest;

	VectorDestinationManager()
	{
		pub.init_destination = init_destination_thunk;
		pub.empty_output_buffer = empty_output_buffer_thunk;
		pub.term_destination = term_destination_thunk;
	}
	static void init_destination_thunk(j_compress_ptr ptr)
	{
		((VectorDestinationManager *)(ptr->dest))->init_destination();
	}
	inline void init_destination()
	{
		make_room(0);
	}
	static boolean empty_output_buffer_thunk(j_compress_ptr ptr)
	{
		return ((VectorDestinationManager *)(ptr->dest))->empty_output_buffer();
	}
	inline bool empty_output_buffer()
	{
		make_room(dest.size());  // Should ignore pub.free_in_buffer!
		return true;
	}
	inline void make_room(size_t bytes_used)
	{
		dest.resize(bytes_used + 4096);
		dest.resize(dest.capacity());
		pub.next_output_byte = (uint8_t *)dest.data() + bytes_used;
		pub.free_in_buffer = dest.size() - bytes_used;
	}
	static void term_destination_thunk(j_compress_ptr ptr)
	{
		((VectorDestinationManager *)(ptr->dest))->term_destination();
	}
	inline void term_destination()
	{
		dest.resize(dest.size() - pub.free_in_buffer);
	}
};
static_assert(std::is_standard_layout<VectorDestinationManager>::value, "");
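
// Rough sketch of how libjpeg drives this destination manager (the calls
// below are made by the library itself, from inside encode_jpeg()):
//
//   VectorDestinationManager dest;
//   cinfo.dest = (jpeg_destination_mgr *)&dest;  // OK because of the standard-layout check above.
//   jpeg_start_compress(&cinfo, true);   // Calls init_destination() -> make_room(0).
//   jpeg_write_raw_data(&cinfo, ...);    // Fills dest.dest; calls empty_output_buffer() when full.
//   jpeg_finish_compress(&cinfo);        // Calls term_destination(), trimming the unused tail.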

string encode_jpeg(const uint8_t *y_data, const uint8_t *cb_data, const uint8_t *cr_data, unsigned width, unsigned height, string exif_data)
{
	steady_clock::time_point start = steady_clock::now();
	VectorDestinationManager dest;

	jpeg_compress_struct cinfo;
	jpeg_error_mgr jerr;
	cinfo.err = jpeg_std_error(&jerr);
	jpeg_create_compress(&cinfo);

	cinfo.dest = (jpeg_destination_mgr *)&dest;
	cinfo.input_components = 3;
	cinfo.in_color_space = JCS_RGB;
	jpeg_set_defaults(&cinfo);
	constexpr int quality = 90;
	jpeg_set_quality(&cinfo, quality, /*force_baseline=*/false);

	cinfo.image_width = width;
	cinfo.image_height = height;
	cinfo.raw_data_in = true;
	jpeg_set_colorspace(&cinfo, JCS_YCbCr);
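
	// 2x1 luma sampling with 1x1 chroma gives 4:2:2; Cb and Cr are stored at
	// half horizontal but full vertical resolution, matching the planes we
	// get from the GPU readback.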
	cinfo.comp_info[0].h_samp_factor = 2;
	cinfo.comp_info[0].v_samp_factor = 1;
	cinfo.comp_info[1].h_samp_factor = 1;
	cinfo.comp_info[1].v_samp_factor = 1;
	cinfo.comp_info[2].h_samp_factor = 1;
	cinfo.comp_info[2].v_samp_factor = 1;
	cinfo.CCIR601_sampling = true;  // Seems to be mostly ignored by libjpeg, though.
	jpeg_start_compress(&cinfo, true);

	// This comment marker is private to FFmpeg. It signals limited Y'CbCr range
	// (and nothing else).
	jpeg_write_marker(&cinfo, JPEG_COM, (const JOCTET *)"CS=ITU601", strlen("CS=ITU601"));
	if (!exif_data.empty()) {
		jpeg_write_marker(&cinfo, JPEG_APP0 + 1, (const JOCTET *)exif_data.data(), exif_data.size());
	}

	JSAMPROW yptr[8], cbptr[8], crptr[8];
	JSAMPARRAY data[3] = { yptr, cbptr, crptr };
	for (unsigned y = 0; y < height; y += 8) {
		for (unsigned yy = 0; yy < 8; ++yy) {
			yptr[yy] = const_cast<JSAMPROW>(&y_data[(y + yy) * width]);
			cbptr[yy] = const_cast<JSAMPROW>(&cb_data[(y + yy) * width / 2]);
			crptr[yy] = const_cast<JSAMPROW>(&cr_data[(y + yy) * width / 2]);
		}
		jpeg_write_raw_data(&cinfo, data, /*num_lines=*/8);
	}

	jpeg_finish_compress(&cinfo);
	jpeg_destroy_compress(&cinfo);

	steady_clock::time_point stop = steady_clock::now();
	metric_jpeg_encode_time_seconds.count_event(duration<double>(stop - start).count());

	return move(dest.dest);
}

string encode_jpeg_from_pbo(void *contents, unsigned width, unsigned height, string exif_data)
{
	unsigned chroma_width = width / 2;
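
	// The PBO is laid out as three tightly packed 4:2:2 planes:
	//
	//   offset 0:                    Y  (width x height bytes)
	//   offset width * height:       Cb (width/2 x height bytes)
	//   offset 3/2 * width * height: Cr (width/2 x height bytes)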
	const uint8_t *y = (const uint8_t *)contents;
	const uint8_t *cb = (const uint8_t *)contents + width * height;
	const uint8_t *cr = (const uint8_t *)contents + width * height + chroma_width * height;
	return encode_jpeg(y, cb, cr, width, height, move(exif_data));
}

VideoStream::VideoStream(AVFormatContext *file_avctx)
	: avctx(file_avctx), output_fast_forward(file_avctx != nullptr)
{
	call_once(video_metrics_inited, [] {
		vector<double> quantiles{ 0.01, 0.1, 0.25, 0.5, 0.75, 0.9, 0.99 };
		metric_jpeg_encode_time_seconds.init(quantiles, 60.0);
		global_metrics.add("jpeg_encode_time_seconds", &metric_jpeg_encode_time_seconds);
		metric_fade_fence_wait_time_seconds.init(quantiles, 60.0);
		global_metrics.add("fade_fence_wait_time_seconds", &metric_fade_fence_wait_time_seconds);
		metric_interpolation_fence_wait_time_seconds.init(quantiles, 60.0);
		global_metrics.add("interpolation_fence_wait_time_seconds", &metric_interpolation_fence_wait_time_seconds);
		metric_fade_latency_seconds.init(quantiles, 60.0);
		global_metrics.add("fade_latency_seconds", &metric_fade_latency_seconds);
		metric_interpolation_latency_seconds.init(quantiles, 60.0);
		global_metrics.add("interpolation_latency_seconds", &metric_interpolation_latency_seconds);
	});

	ycbcr_converter.reset(new YCbCrConverter(YCbCrConverter::OUTPUT_TO_DUAL_YCBCR, /*resource_pool=*/nullptr));
	ycbcr_semiplanar_converter.reset(new YCbCrConverter(YCbCrConverter::OUTPUT_TO_SEMIPLANAR, /*resource_pool=*/nullptr));

	GLuint input_tex[num_interpolate_slots], gray_tex[num_interpolate_slots];
	GLuint fade_y_output_tex[num_interpolate_slots], fade_cbcr_output_tex[num_interpolate_slots];
	GLuint cb_tex[num_interpolate_slots], cr_tex[num_interpolate_slots];

	glCreateTextures(GL_TEXTURE_2D_ARRAY, num_interpolate_slots, input_tex);
	glCreateTextures(GL_TEXTURE_2D_ARRAY, num_interpolate_slots, gray_tex);
	glCreateTextures(GL_TEXTURE_2D, num_interpolate_slots, fade_y_output_tex);
	glCreateTextures(GL_TEXTURE_2D, num_interpolate_slots, fade_cbcr_output_tex);
	glCreateTextures(GL_TEXTURE_2D, num_interpolate_slots, cb_tex);
	glCreateTextures(GL_TEXTURE_2D, num_interpolate_slots, cr_tex);

	size_t width = global_flags.width, height = global_flags.height;
	int levels = find_num_levels(width, height);
	for (size_t i = 0; i < num_interpolate_slots; ++i) {
		glTextureStorage3D(input_tex[i], levels, GL_RGBA8, width, height, 2);
		glTextureStorage3D(gray_tex[i], levels, GL_R8, width, height, 2);
		glTextureStorage2D(fade_y_output_tex[i], 1, GL_R8, width, height);
		glTextureStorage2D(fade_cbcr_output_tex[i], 1, GL_RG8, width, height);
		glTextureStorage2D(cb_tex[i], 1, GL_R8, width / 2, height);
		glTextureStorage2D(cr_tex[i], 1, GL_R8, width / 2, height);

		unique_ptr<InterpolatedFrameResources> resource(new InterpolatedFrameResources);
		resource->owner = this;
		resource->input_tex = input_tex[i];
		resource->gray_tex = gray_tex[i];
		resource->fade_y_output_tex = fade_y_output_tex[i];
		resource->fade_cbcr_output_tex = fade_cbcr_output_tex[i];
		resource->cb_tex = cb_tex[i];
		resource->cr_tex = cr_tex[i];
		glCreateFramebuffers(2, resource->input_fbos);
		glCreateFramebuffers(1, &resource->fade_fbo);
		glNamedFramebufferTextureLayer(resource->input_fbos[0], GL_COLOR_ATTACHMENT0, input_tex[i], 0, 0);
		glNamedFramebufferTextureLayer(resource->input_fbos[0], GL_COLOR_ATTACHMENT1, gray_tex[i], 0, 0);
		glNamedFramebufferTextureLayer(resource->input_fbos[1], GL_COLOR_ATTACHMENT0, input_tex[i], 0, 1);
		glNamedFramebufferTextureLayer(resource->input_fbos[1], GL_COLOR_ATTACHMENT1, gray_tex[i], 0, 1);
		glNamedFramebufferTexture(resource->fade_fbo, GL_COLOR_ATTACHMENT0, fade_y_output_tex[i], 0);
		glNamedFramebufferTexture(resource->fade_fbo, GL_COLOR_ATTACHMENT1, fade_cbcr_output_tex[i], 0);

		GLuint bufs[] = { GL_COLOR_ATTACHMENT0, GL_COLOR_ATTACHMENT1 };
		glNamedFramebufferDrawBuffers(resource->input_fbos[0], 2, bufs);
		glNamedFramebufferDrawBuffers(resource->input_fbos[1], 2, bufs);
		glNamedFramebufferDrawBuffers(resource->fade_fbo, 2, bufs);

		glCreateBuffers(1, &resource->pbo);
		glNamedBufferStorage(resource->pbo, width * height * 4, nullptr, GL_MAP_READ_BIT | GL_MAP_PERSISTENT_BIT);
		resource->pbo_contents = glMapNamedBufferRange(resource->pbo, 0, width * height * 4, GL_MAP_READ_BIT | GL_MAP_PERSISTENT_BIT);
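
		// The PBO stays persistently mapped for the lifetime of the stream,
		// so once a readback fence signals, the encoder thread can read the
		// downloaded pixels straight out of pbo_contents without remapping.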
		interpolate_resources.push_back(move(resource));
	}

	OperatingPoint op;
	if (global_flags.interpolation_quality == 0 ||
	    global_flags.interpolation_quality == 1) {
		op = operating_point1;
	} else if (global_flags.interpolation_quality == 2) {
		op = operating_point2;
	} else if (global_flags.interpolation_quality == 3) {
		op = operating_point3;
	} else if (global_flags.interpolation_quality == 4) {
		op = operating_point4;
	} else {
		// Quality 0 will be changed to 1 in flags.cpp.
		assert(false);
	}

	compute_flow.reset(new DISComputeFlow(width, height, op));
	interpolate.reset(new Interpolate(op, /*split_ycbcr_output=*/true));
	interpolate_no_split.reset(new Interpolate(op, /*split_ycbcr_output=*/false));
	chroma_subsampler.reset(new ChromaSubsampler);

	// The “last frame” is initially black.
	unique_ptr<uint8_t[]> y(new uint8_t[global_flags.width * global_flags.height]);
	unique_ptr<uint8_t[]> cb_or_cr(new uint8_t[(global_flags.width / 2) * global_flags.height]);
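	// 16 is black for Y' and 128 is the neutral chroma value
	// in limited-range (16..235) Y'CbCr.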
	memset(y.get(), 16, global_flags.width * global_flags.height);
	memset(cb_or_cr.get(), 128, (global_flags.width / 2) * global_flags.height);
	last_frame = encode_jpeg(y.get(), cb_or_cr.get(), cb_or_cr.get(), global_flags.width, global_flags.height, /*exif_data=*/"");

	if (file_avctx != nullptr) {
		with_subtitles = Mux::WITHOUT_SUBTITLES;
	} else {
		with_subtitles = Mux::WITH_SUBTITLES;
	}
}

VideoStream::~VideoStream()
{
	if (last_flow_tex != 0) {
		compute_flow->release_texture(last_flow_tex);
	}

	for (const unique_ptr<InterpolatedFrameResources> &resource : interpolate_resources) {
		glUnmapNamedBuffer(resource->pbo);
		glDeleteBuffers(1, &resource->pbo);
		glDeleteFramebuffers(2, resource->input_fbos);
		glDeleteFramebuffers(1, &resource->fade_fbo);
		glDeleteTextures(1, &resource->input_tex);
		glDeleteTextures(1, &resource->gray_tex);
		glDeleteTextures(1, &resource->fade_y_output_tex);
		glDeleteTextures(1, &resource->fade_cbcr_output_tex);
		glDeleteTextures(1, &resource->cb_tex);
		glDeleteTextures(1, &resource->cr_tex);
	}
	assert(interpolate_resources.size() == num_interpolate_slots);
}

void VideoStream::start()
{
	if (avctx == nullptr) {
		avctx = avformat_alloc_context();

		// We use Matroska, because it's pretty much the only mux where FFmpeg
		// allows writing chroma location to override JFIF's default center placement.
		// (Note that at the time of writing, however, FFmpeg does not correctly
		// _read_ this information!)
		avctx->oformat = av_guess_format("matroska", nullptr, nullptr);

		uint8_t *buf = (uint8_t *)av_malloc(MUX_BUFFER_SIZE);
		avctx->pb = avio_alloc_context(buf, MUX_BUFFER_SIZE, 1, this, nullptr, nullptr, nullptr);
		avctx->pb->write_data_type = &VideoStream::write_packet2_thunk;
		avctx->pb->ignore_boundary_point = 1;

		avctx->flags = AVFMT_FLAG_CUSTOM_IO;
	}

	AVCodecParameters *audio_codecpar = avcodec_parameters_alloc();
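
	// The audio is raw 32-bit PCM, stereo, at OUTPUT_FREQUENCY.
	// (The ch_layout fields are FFmpeg's newer channel-layout API.)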
	audio_codecpar->codec_type = AVMEDIA_TYPE_AUDIO;
	audio_codecpar->codec_id = AV_CODEC_ID_PCM_S32LE;
	audio_codecpar->ch_layout.order = AV_CHANNEL_ORDER_NATIVE;
	audio_codecpar->ch_layout.nb_channels = 2;
	audio_codecpar->ch_layout.u.mask = AV_CH_LAYOUT_STEREO;
	audio_codecpar->sample_rate = OUTPUT_FREQUENCY;

	size_t width = global_flags.width, height = global_flags.height;  // Doesn't matter for MJPEG.
	mux.reset(new Mux(avctx, width, height, Mux::CODEC_MJPEG, /*video_extradata=*/"", audio_codecpar,
	                  AVCOL_SPC_BT709, COARSE_TIMEBASE, /*write_callback=*/nullptr, Mux::WRITE_FOREGROUND, {}, with_subtitles));
	avcodec_parameters_free(&audio_codecpar);

	encode_thread = thread(&VideoStream::encode_thread_func, this);
}

void VideoStream::stop()
{
	should_quit = true;
	queue_changed.notify_all();
	encode_thread.join();
}

void VideoStream::clear_queue()
{
	deque<QueuedFrame> q;

	{
		lock_guard<mutex> lock(queue_lock);
		q = move(frame_queue);
	}

	// These are not RAII-ed, unfortunately, so we'll need to clean them ourselves.
	// Note that release_texture() is thread-safe.
	for (const QueuedFrame &qf : q) {
		if (qf.type == QueuedFrame::INTERPOLATED ||
		    qf.type == QueuedFrame::FADED_INTERPOLATED) {
			if (qf.flow_tex != 0) {
				compute_flow->release_texture(qf.flow_tex);
			}
		}
		if (qf.type == QueuedFrame::INTERPOLATED) {
			interpolate->release_texture(qf.output_tex);
			interpolate->release_texture(qf.cbcr_tex);
		}
	}

	// Destroy q outside the mutex, as that would be a double-lock.
}

void VideoStream::schedule_original_frame(steady_clock::time_point local_pts,
                                          int64_t output_pts, function<void()> &&display_func,
                                          QueueSpotHolder &&queue_spot_holder,
                                          FrameOnDisk frame, const string &subtitle, bool include_audio)
{
	fprintf(stderr, "output_pts=%" PRId64 " original input_pts=%" PRId64 "\n", output_pts, frame.pts);

	QueuedFrame qf;
	qf.local_pts = local_pts;
	qf.type = QueuedFrame::ORIGINAL;
	qf.output_pts = output_pts;
	qf.display_func = move(display_func);
	qf.queue_spot_holder = move(queue_spot_holder);
	qf.subtitle = subtitle;
	FrameReader::Frame read_frame = frame_reader.read_frame(frame, /*read_video=*/true, include_audio);
	qf.encoded_jpeg.reset(new string(move(read_frame.video)));
	qf.audio = move(read_frame.audio);

	lock_guard<mutex> lock(queue_lock);
	frame_queue.push_back(move(qf));
	queue_changed.notify_all();
}

void VideoStream::schedule_faded_frame(steady_clock::time_point local_pts, int64_t output_pts,
                                       function<void()> &&display_func,
                                       QueueSpotHolder &&queue_spot_holder,
                                       FrameOnDisk frame1_spec, FrameOnDisk frame2_spec,
                                       float fade_alpha, const string &subtitle)
{
	fprintf(stderr, "output_pts=%" PRId64 " faded input_pts=%" PRId64 ",%" PRId64 " fade_alpha=%.2f\n", output_pts, frame1_spec.pts, frame2_spec.pts, fade_alpha);

	// Get the temporary OpenGL resources we need for doing the fade.
	// (We share these with interpolated frames, which is slightly
	// overkill, but there's no need to waste resources on keeping
	// separate pools around.)
	BorrowedInterpolatedFrameResources resources;
	{
		lock_guard<mutex> lock(queue_lock);
		if (interpolate_resources.empty()) {
			fprintf(stderr, "WARNING: Too many interpolated frames already in transit; dropping one.\n");
			return;
		}
		resources = BorrowedInterpolatedFrameResources(interpolate_resources.front().release());
		interpolate_resources.pop_front();
	}

	bool did_decode;

	shared_ptr<Frame> frame1 = decode_jpeg_with_cache(frame1_spec, DECODE_IF_NOT_IN_CACHE, &frame_reader, &did_decode);
	shared_ptr<Frame> frame2 = decode_jpeg_with_cache(frame2_spec, DECODE_IF_NOT_IN_CACHE, &frame_reader, &did_decode);
	wait_for_upload(frame1);
	wait_for_upload(frame2);

	ycbcr_semiplanar_converter->prepare_chain_for_fade(frame1, frame2, fade_alpha)->render_to_fbo(resources->fade_fbo, global_flags.width, global_flags.height);

	QueuedFrame qf;
	qf.local_pts = local_pts;
	qf.type = QueuedFrame::FADED;
	qf.output_pts = output_pts;
	qf.frame1 = frame1_spec;
	qf.display_func = move(display_func);
	qf.queue_spot_holder = move(queue_spot_holder);
	qf.subtitle = subtitle;

	qf.secondary_frame = frame2_spec;

	// Subsample and split Cb/Cr.
	chroma_subsampler->subsample_chroma(resources->fade_cbcr_output_tex, global_flags.width, global_flags.height, resources->cb_tex, resources->cr_tex);

	// Read it down (asynchronously) to the CPU.
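	// Note that the size arguments to glGetTextureImage() below are only
	// upper bounds on how much may be written (the space left in the PBO),
	// not exact plane sizes; the offsets follow the Y/Cb/Cr layout that
	// encode_jpeg_from_pbo() expects.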
	glPixelStorei(GL_PACK_ROW_LENGTH, 0);
	glBindBuffer(GL_PIXEL_PACK_BUFFER, resources->pbo);
	glGetTextureImage(resources->fade_y_output_tex, 0, GL_RED, GL_UNSIGNED_BYTE, global_flags.width * global_flags.height * 4, BUFFER_OFFSET(0));
	glGetTextureImage(resources->cb_tex, 0, GL_RED, GL_UNSIGNED_BYTE, global_flags.width * global_flags.height * 3, BUFFER_OFFSET(global_flags.width * global_flags.height));
	glGetTextureImage(resources->cr_tex, 0, GL_RED, GL_UNSIGNED_BYTE, global_flags.width * global_flags.height * 3 - (global_flags.width / 2) * global_flags.height, BUFFER_OFFSET(global_flags.width * global_flags.height + (global_flags.width / 2) * global_flags.height));
	glBindBuffer(GL_PIXEL_PACK_BUFFER, 0);

	// Set a fence we can wait for to make sure the CPU sees the read.
	glMemoryBarrier(GL_CLIENT_MAPPED_BUFFER_BARRIER_BIT);
	qf.fence_created = steady_clock::now();
	qf.fence = RefCountedGLsync(GL_SYNC_GPU_COMMANDS_COMPLETE, /*flags=*/0);
	qf.resources = move(resources);

	lock_guard<mutex> lock(queue_lock);
	frame_queue.push_back(move(qf));
	queue_changed.notify_all();
}

void VideoStream::schedule_interpolated_frame(steady_clock::time_point local_pts,
                                              int64_t output_pts, function<void(shared_ptr<Frame>)> &&display_func,
                                              QueueSpotHolder &&queue_spot_holder,
                                              FrameOnDisk frame1, FrameOnDisk frame2,
                                              float alpha, FrameOnDisk secondary_frame, float fade_alpha, const string &subtitle,
                                              bool play_audio)
{
	if (secondary_frame.pts != -1) {
		fprintf(stderr, "output_pts=%" PRId64 " interpolated input_pts1=%" PRId64 " input_pts2=%" PRId64 " alpha=%.3f secondary_pts=%" PRId64 " fade_alpha=%.2f\n", output_pts, frame1.pts, frame2.pts, alpha, secondary_frame.pts, fade_alpha);
	} else {
		fprintf(stderr, "output_pts=%" PRId64 " interpolated input_pts1=%" PRId64 " input_pts2=%" PRId64 " alpha=%.3f\n", output_pts, frame1.pts, frame2.pts, alpha);
	}

	// Get the temporary OpenGL resources we need for doing the interpolation.
	BorrowedInterpolatedFrameResources resources;
	{
		lock_guard<mutex> lock(queue_lock);
		if (interpolate_resources.empty()) {
			fprintf(stderr, "WARNING: Too many interpolated frames already in transit; dropping one.\n");
			return;
		}
		resources = BorrowedInterpolatedFrameResources(interpolate_resources.front().release());
		interpolate_resources.pop_front();
	}

	QueuedFrame qf;
	qf.type = (secondary_frame.pts == -1) ? QueuedFrame::INTERPOLATED : QueuedFrame::FADED_INTERPOLATED;
	qf.output_pts = output_pts;
	qf.display_decoded_func = move(display_func);
	qf.queue_spot_holder = move(queue_spot_holder);
	qf.local_pts = local_pts;
	qf.subtitle = subtitle;

	if (play_audio) {
		qf.audio = frame_reader.read_frame(frame1, /*read_video=*/false, /*read_audio=*/true).audio;
	}

	// Convert frame0 and frame1 to OpenGL textures.
	for (size_t frame_no = 0; frame_no < 2; ++frame_no) {
		FrameOnDisk frame_spec = frame_no == 1 ? frame2 : frame1;
		bool did_decode;
		shared_ptr<Frame> frame = decode_jpeg_with_cache(frame_spec, DECODE_IF_NOT_IN_CACHE, &frame_reader, &did_decode);
		wait_for_upload(frame);
		ycbcr_converter->prepare_chain_for_conversion(frame)->render_to_fbo(resources->input_fbos[frame_no], global_flags.width, global_flags.height);
		if (frame_no == 1) {
			qf.exif_data = frame->exif_data;  // Use the white point from the last frame.
		}
	}

	glGenerateTextureMipmap(resources->input_tex);
	glGenerateTextureMipmap(resources->gray_tex);

	GLuint flow_tex;
	if (last_flow_tex != 0 && frame1 == last_frame1 && frame2 == last_frame2) {
		// Reuse the flow from previous computation. This frequently happens
		// if we slow down by more than 2x, so that there are multiple interpolated
		// frames between each original.
		flow_tex = last_flow_tex;
		qf.flow_tex = 0;
	} else {
		// Cache miss, so release last_flow_tex.
		qf.flow_tex = last_flow_tex;

		flow_tex = compute_flow->exec(resources->gray_tex, DISComputeFlow::FORWARD_AND_BACKWARD, DISComputeFlow::DO_NOT_RESIZE_FLOW);

		// Store the flow texture for possible reuse next frame.
		last_flow_tex = flow_tex;
		last_frame1 = frame1;
		last_frame2 = frame2;
	}

	if (secondary_frame.pts != -1) {
		// Fade. First kick off the interpolation.
		tie(qf.output_tex, ignore) = interpolate_no_split->exec(resources->input_tex, resources->gray_tex, flow_tex, global_flags.width, global_flags.height, alpha);

		// Now decode the image we are fading against.
		bool did_decode;
		shared_ptr<Frame> frame2 = decode_jpeg_with_cache(secondary_frame, DECODE_IF_NOT_IN_CACHE, &frame_reader, &did_decode);
		wait_for_upload(frame2);

		// Then fade against it, putting it into the fade Y' and CbCr textures.
		RGBTriplet neutral_color = get_neutral_color(qf.exif_data);
		ycbcr_semiplanar_converter->prepare_chain_for_fade_from_texture(qf.output_tex, neutral_color, global_flags.width, global_flags.height, frame2, fade_alpha)->render_to_fbo(resources->fade_fbo, global_flags.width, global_flags.height);

		// Subsample and split Cb/Cr.
		chroma_subsampler->subsample_chroma(resources->fade_cbcr_output_tex, global_flags.width, global_flags.height, resources->cb_tex, resources->cr_tex);

		interpolate_no_split->release_texture(qf.output_tex);

		// We already applied the white balance, so don't have the client redo it.
		qf.exif_data.clear();
	} else {
		tie(qf.output_tex, qf.cbcr_tex) = interpolate->exec(resources->input_tex, resources->gray_tex, flow_tex, global_flags.width, global_flags.height, alpha);

		// Subsample and split Cb/Cr.
		chroma_subsampler->subsample_chroma(qf.cbcr_tex, global_flags.width, global_flags.height, resources->cb_tex, resources->cr_tex);
	}

	// We could have released qf.flow_tex here, but to make sure we don't cause a stall
	// when trying to reuse it for the next frame, we can just as well hold on to it
	// and release it only when the readback is done.
	//
	// TODO: This is maybe less relevant now that qf.flow_tex contains the texture we used
	// _last_ frame, not this one.

	// Read it down (asynchronously) to the CPU.
	glPixelStorei(GL_PACK_ROW_LENGTH, 0);
	glBindBuffer(GL_PIXEL_PACK_BUFFER, resources->pbo);
	if (secondary_frame.pts != -1) {
		glGetTextureImage(resources->fade_y_output_tex, 0, GL_RED, GL_UNSIGNED_BYTE, global_flags.width * global_flags.height * 4, BUFFER_OFFSET(0));
	} else {
		glGetTextureImage(qf.output_tex, 0, GL_RED, GL_UNSIGNED_BYTE, global_flags.width * global_flags.height * 4, BUFFER_OFFSET(0));
	}
	glGetTextureImage(resources->cb_tex, 0, GL_RED, GL_UNSIGNED_BYTE, global_flags.width * global_flags.height * 3, BUFFER_OFFSET(global_flags.width * global_flags.height));
	glGetTextureImage(resources->cr_tex, 0, GL_RED, GL_UNSIGNED_BYTE, global_flags.width * global_flags.height * 3 - (global_flags.width / 2) * global_flags.height, BUFFER_OFFSET(global_flags.width * global_flags.height + (global_flags.width / 2) * global_flags.height));
	glBindBuffer(GL_PIXEL_PACK_BUFFER, 0);

	// Set a fence we can wait for to make sure the CPU sees the read.
	glMemoryBarrier(GL_CLIENT_MAPPED_BUFFER_BARRIER_BIT);
	qf.fence_created = steady_clock::now();
	qf.fence = RefCountedGLsync(GL_SYNC_GPU_COMMANDS_COMPLETE, /*flags=*/0);
	qf.resources = move(resources);

	lock_guard<mutex> lock(queue_lock);
	frame_queue.push_back(move(qf));
	queue_changed.notify_all();
}

void VideoStream::schedule_refresh_frame(steady_clock::time_point local_pts,
                                         int64_t output_pts, function<void()> &&display_func,
                                         QueueSpotHolder &&queue_spot_holder, const string &subtitle)
{
	QueuedFrame qf;
	qf.type = QueuedFrame::REFRESH;
	qf.local_pts = local_pts;
	qf.output_pts = output_pts;
	qf.display_func = move(display_func);
	qf.queue_spot_holder = move(queue_spot_holder);
	qf.subtitle = subtitle;

	lock_guard<mutex> lock(queue_lock);
	frame_queue.push_back(move(qf));
	queue_changed.notify_all();
}

void VideoStream::schedule_silence(steady_clock::time_point local_pts, int64_t output_pts,
                                   int64_t length_pts, QueueSpotHolder &&queue_spot_holder)
{
	QueuedFrame qf;
	qf.type = QueuedFrame::SILENCE;
	qf.local_pts = local_pts;
	qf.output_pts = output_pts;
	qf.queue_spot_holder = move(queue_spot_holder);
	qf.silence_length_pts = length_pts;

	lock_guard<mutex> lock(queue_lock);
	frame_queue.push_back(move(qf));
	queue_changed.notify_all();
}
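
// Make a standalone copy of a single-channel (R8) texture, so that the UI
// thread can keep displaying the interpolated frame even after we have
// returned the per-slot resources to the pool.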
RefCountedTexture clone_r8_texture(GLuint src_tex, unsigned width, unsigned height)
{
	GLuint tex;
	glCreateTextures(GL_TEXTURE_2D, 1, &tex);
	glTextureStorage2D(tex, 1, GL_R8, width, height);
	glCopyImageSubData(src_tex, GL_TEXTURE_2D, 0, 0, 0, 0,
	                   tex, GL_TEXTURE_2D, 0, 0, 0, 0,
	                   width, height, 1);
	glTextureParameteri(tex, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
	glTextureParameteri(tex, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
	glTextureParameteri(tex, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
	glTextureParameteri(tex, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);

	return RefCountedTexture(new GLuint(tex), TextureDeleter());
}

void VideoStream::encode_thread_func()
{
	pthread_setname_np(pthread_self(), "VideoStream");
	QSurface *surface = create_surface();
	QOpenGLContext *context = create_context(surface);
	bool ok = make_current(context, surface);
	if (!ok) {
		fprintf(stderr, "Video stream couldn't get an OpenGL context\n");
		abort();
	}

	while (!should_quit) {
		QueuedFrame qf;
		{
			unique_lock<mutex> lock(queue_lock);

			// Wait until we have a frame to play.
			queue_changed.wait(lock, [this] {
				return !frame_queue.empty() || should_quit;
			});
			if (should_quit) {
				break;
			}
			steady_clock::time_point frame_start = frame_queue.front().local_pts;

			// Now sleep until the frame is supposed to start (the usual case),
			// _or_ clear_queue() happened.
			bool aborted;
			if (output_fast_forward) {
				aborted = frame_queue.empty() || frame_queue.front().local_pts != frame_start;
			} else {
				aborted = queue_changed.wait_until(lock, frame_start, [this, frame_start] {
					return frame_queue.empty() || frame_queue.front().local_pts != frame_start;
				});
			}
			if (aborted) {
				// clear_queue() happened, so don't play this frame after all.
				continue;
			}
			qf = move(frame_queue.front());
			frame_queue.pop_front();
		}

		// Hack: We mux the subtitle packet one time unit before the actual frame,
		// so that Nageru is sure to get it first.
		if (!qf.subtitle.empty() && with_subtitles == Mux::WITH_SUBTITLES) {
			AVPacket pkt;
			av_init_packet(&pkt);
			pkt.stream_index = mux->get_subtitle_stream_idx();
			assert(pkt.stream_index != -1);
			pkt.data = (uint8_t *)qf.subtitle.data();
			pkt.size = qf.subtitle.size();
			pkt.duration = lrint(TIMEBASE / global_flags.output_framerate);  // Doesn't really matter for Nageru.
			mux->add_packet(pkt, qf.output_pts - 1, qf.output_pts - 1);
		}

		if (qf.type == QueuedFrame::ORIGINAL) {
			// Send the JPEG frame on, unchanged.
			string jpeg = move(*qf.encoded_jpeg);
			AVPacket pkt;
			av_init_packet(&pkt);
			pkt.stream_index = 0;
			pkt.data = (uint8_t *)jpeg.data();
			pkt.size = jpeg.size();
			pkt.flags = AV_PKT_FLAG_KEY;
			mux->add_packet(pkt, qf.output_pts, qf.output_pts);
			last_frame = move(jpeg);

			add_audio_or_silence(qf);
		} else if (qf.type == QueuedFrame::FADED) {
			steady_clock::time_point start = steady_clock::now();
			glClientWaitSync(qf.fence.get(), /*flags=*/0, GL_TIMEOUT_IGNORED);
			steady_clock::time_point stop = steady_clock::now();
			metric_fade_fence_wait_time_seconds.count_event(duration<double>(stop - start).count());
			metric_fade_latency_seconds.count_event(duration<double>(stop - qf.fence_created).count());

			// Now JPEG encode it, and send it on to the stream.
			string jpeg = encode_jpeg_from_pbo(qf.resources->pbo_contents, global_flags.width, global_flags.height, /*exif_data=*/"");

			AVPacket pkt;
			av_init_packet(&pkt);
			pkt.stream_index = 0;
			pkt.data = (uint8_t *)jpeg.data();
			pkt.size = jpeg.size();
			pkt.flags = AV_PKT_FLAG_KEY;
			mux->add_packet(pkt, qf.output_pts, qf.output_pts);
			last_frame = move(jpeg);

			add_audio_or_silence(qf);
		} else if (qf.type == QueuedFrame::INTERPOLATED || qf.type == QueuedFrame::FADED_INTERPOLATED) {
			steady_clock::time_point start = steady_clock::now();
			glClientWaitSync(qf.fence.get(), /*flags=*/0, GL_TIMEOUT_IGNORED);
			steady_clock::time_point stop = steady_clock::now();
			metric_interpolation_fence_wait_time_seconds.count_event(duration<double>(stop - start).count());
			metric_interpolation_latency_seconds.count_event(duration<double>(stop - qf.fence_created).count());

			// Send it on to display.
			if (qf.display_decoded_func != nullptr) {
				shared_ptr<Frame> frame(new Frame);
				if (qf.type == QueuedFrame::FADED_INTERPOLATED) {
					frame->y = clone_r8_texture(qf.resources->fade_y_output_tex, global_flags.width, global_flags.height);
				} else {
					frame->y = clone_r8_texture(qf.output_tex, global_flags.width, global_flags.height);
				}
				frame->cb = clone_r8_texture(qf.resources->cb_tex, global_flags.width / 2, global_flags.height);
				frame->cr = clone_r8_texture(qf.resources->cr_tex, global_flags.width / 2, global_flags.height);
				frame->width = global_flags.width;
				frame->height = global_flags.height;
				frame->chroma_subsampling_x = 2;
				frame->chroma_subsampling_y = 1;
				frame->uploaded_ui_thread = RefCountedGLsync(GL_SYNC_GPU_COMMANDS_COMPLETE, /*flags=*/0);
				qf.display_decoded_func(move(frame));
			}

			// Now JPEG encode it, and send it on to the stream.
			string jpeg = encode_jpeg_from_pbo(qf.resources->pbo_contents, global_flags.width, global_flags.height, move(qf.exif_data));
			if (qf.flow_tex != 0) {
				compute_flow->release_texture(qf.flow_tex);
			}
			if (qf.type != QueuedFrame::FADED_INTERPOLATED) {
				interpolate->release_texture(qf.output_tex);
				interpolate->release_texture(qf.cbcr_tex);
			}

			AVPacket pkt;
			av_init_packet(&pkt);
			pkt.stream_index = 0;
			pkt.data = (uint8_t *)jpeg.data();
			pkt.size = jpeg.size();
			pkt.flags = AV_PKT_FLAG_KEY;
			mux->add_packet(pkt, qf.output_pts, qf.output_pts);
			last_frame = move(jpeg);

			add_audio_or_silence(qf);
		} else if (qf.type == QueuedFrame::REFRESH) {
			AVPacket pkt;
			av_init_packet(&pkt);
			pkt.stream_index = 0;
			pkt.data = (uint8_t *)last_frame.data();
			pkt.size = last_frame.size();
			pkt.flags = AV_PKT_FLAG_KEY;
			mux->add_packet(pkt, qf.output_pts, qf.output_pts);

			add_audio_or_silence(qf);  // Definitely silence.
		} else if (qf.type == QueuedFrame::SILENCE) {
			add_silence(qf.output_pts, qf.silence_length_pts);
		} else {
			assert(false);
		}

		if (qf.display_func != nullptr) {
			qf.display_func();
		}
	}
}

int VideoStream::write_packet2_thunk(void *opaque, uint8_t *buf, int buf_size, AVIODataMarkerType type, int64_t time)
{
	VideoStream *video_stream = (VideoStream *)opaque;
	return video_stream->write_packet2(buf, buf_size, type, time);
}

int VideoStream::write_packet2(uint8_t *buf, int buf_size, AVIODataMarkerType type, int64_t time)
{
	if (type == AVIO_DATA_MARKER_SYNC_POINT || type == AVIO_DATA_MARKER_BOUNDARY_POINT) {
		seen_sync_markers = true;
	} else if (type == AVIO_DATA_MARKER_UNKNOWN && !seen_sync_markers) {
		// We don't know if this is a keyframe or not (the muxer could
		// avoid marking it), so we just have to make the best of it.
		type = AVIO_DATA_MARKER_SYNC_POINT;
	}

	HTTPD::StreamID stream_id{ HTTPD::MAIN_STREAM, 0 };
	if (type == AVIO_DATA_MARKER_HEADER) {
		stream_mux_header.append((char *)buf, buf_size);
		global_httpd->set_header(stream_id, stream_mux_header);
	} else {
		global_httpd->add_data(stream_id, (char *)buf, buf_size, type == AVIO_DATA_MARKER_SYNC_POINT, time, AVRational{ AV_TIME_BASE, 1 });
	}
	return buf_size;
}

void VideoStream::add_silence(int64_t pts, int64_t length_pts)
{
	// At 59.94, this will never quite add up (even discounting refresh frames,
	// which have unpredictable length), but hopefully, the player in the other
	// end should be able to stretch silence easily enough.
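	// (The factor of two below is because num_samples counts individual
	// samples across both channels of the stereo stream.)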
	long num_samples = lrint(length_pts * double(OUTPUT_FREQUENCY) / double(TIMEBASE)) * 2;
	uint8_t *zero = (uint8_t *)calloc(num_samples, sizeof(int32_t));

	AVPacket pkt;
	av_init_packet(&pkt);
	pkt.stream_index = 1;
	pkt.data = zero;
	pkt.size = num_samples * sizeof(int32_t);
	pkt.flags = AV_PKT_FLAG_KEY;
	mux->add_packet(pkt, pts, pts);

	free(zero);
}

void VideoStream::add_audio_or_silence(const QueuedFrame &qf)
{
	if (qf.audio.empty()) {
		int64_t frame_length = lrint(double(TIMEBASE) / global_flags.output_framerate);
		add_silence(qf.output_pts, frame_length);
	} else {
		AVPacket pkt;
		av_init_packet(&pkt);
		pkt.stream_index = 1;
		pkt.data = (uint8_t *)qf.audio.data();
		pkt.size = qf.audio.size();
		pkt.flags = AV_PKT_FLAG_KEY;
		mux->add_packet(pkt, qf.output_pts, qf.output_pts);
	}
}