#include "ffmpeg_capture.h"
#include "defs.h"
#include "shared/shared_defs.h"

#include <assert.h>
#include <cerrno>
#include <ctime>
#include <limits>
#include <map>
#include <memory>
#include <movit/effect.h>
#include <movit/image_format.h>
#include <movit/ycbcr.h>
#include <mutex>
#include <pthread.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <string>
#include <sys/stat.h>
#include <thread>

extern "C" {
#include <libavcodec/avcodec.h>
#include <libavcodec/codec.h>
#include <libavcodec/codec_id.h>
#include <libavcodec/codec_par.h>
#include <libavformat/avformat.h>
#include <libavutil/avutil.h>
#include <libavutil/buffer.h>
#include <libavutil/channel_layout.h>
#include <libavutil/common.h>
#include <libavutil/dict.h>
#include <libavutil/error.h>
#include <libavutil/frame.h>
#include <libavutil/hwcontext.h>
#include <libavutil/mathematics.h>
#include <libavutil/pixdesc.h>
#include <libavutil/pixfmt.h>
#include <libavutil/rational.h>
#include <libavutil/samplefmt.h>
#include <libavutil/version.h>
#include <libswresample/swresample.h>
#include <libswscale/swscale.h>
}

#include <chrono>
#include <cstdint>
#include <utility>
#include <vector>
#include <unordered_set>

#include <Eigen/Core>
#include <Eigen/LU>
#include <movit/colorspace_conversion_effect.h>

#include "bmusb/bmusb.h"
#include "shared/ffmpeg_raii.h"
#include "ffmpeg_util.h"
#include "flags.h"
#include "ref_counted_frame.h"
#include "shared/timebase.h"

#ifdef HAVE_SRT
#include <srt/srt.h>
#endif

using namespace std;
using namespace std::chrono;
using namespace bmusb;
using namespace movit;
using namespace Eigen;

// Avoid deprecation warnings, but we don't want to drop FFmpeg 5.1 support just yet.
#if LIBAVUTIL_VERSION_INT >= AV_VERSION_INT(57, 30, 100)
#define pkt_duration duration
#endif

namespace {
steady_clock::time_point compute_frame_start(int64_t frame_pts, int64_t pts_origin, const AVRational &video_timebase, const steady_clock::time_point &origin, double rate)
{
        const duration<double> pts((frame_pts - pts_origin) * double(video_timebase.num) / double(video_timebase.den));
        return origin + duration_cast<steady_clock::duration>(pts / rate);
}

bool changed_since(const std::string &pathname, const timespec &ts)
{
        if (ts.tv_sec < 0) {
                return false;
        }
        struct stat buf;
        if (stat(pathname.c_str(), &buf) != 0) {
                fprintf(stderr, "%s: Couldn't check for new version, leaving the old in place.\n", pathname.c_str());
                return false;
        }
        return (buf.st_mtim.tv_sec != ts.tv_sec || buf.st_mtim.tv_nsec != ts.tv_nsec);
}

bool is_full_range(const AVPixFmtDescriptor *desc)
{
        // This is horrible, but there's no better way that I know of.
        return (strchr(desc->name, 'j') != nullptr);
}

AVPixelFormat decide_dst_format(AVPixelFormat src_format, bmusb::PixelFormat dst_format_type)
{
        if (dst_format_type == bmusb::PixelFormat_8BitBGRA) {
                return AV_PIX_FMT_BGRA;
        }
        if (dst_format_type == FFmpegCapture::PixelFormat_NV12) {
                return AV_PIX_FMT_NV12;
        }

        assert(dst_format_type == bmusb::PixelFormat_8BitYCbCrPlanar);

        // If this is a non-Y'CbCr format, just convert to 4:4:4 Y'CbCr
        // and be done with it. It's too strange to spend a lot of time on.
        // (Let's hope there's no alpha.)
        const AVPixFmtDescriptor *src_desc = av_pix_fmt_desc_get(src_format);
        if (src_desc == nullptr ||
            src_desc->nb_components != 3 ||
            (src_desc->flags & AV_PIX_FMT_FLAG_RGB)) {
                return AV_PIX_FMT_YUV444P;
        }

        // The best for us would be Cb and Cr together if possible,
        // but FFmpeg doesn't support that except in the special case of
        // NV12, so we need to go to planar even for the case of NV12.
        // Thus, look for the closest (but no worse) 8-bit planar Y'CbCr format
        // that matches in color range. (This will also include the case of
        // the source format already being acceptable.)
        bool src_full_range = is_full_range(src_desc);
        const char *best_format = "yuv444p";
        unsigned best_score = numeric_limits<unsigned>::max();
        for (const AVPixFmtDescriptor *desc = av_pix_fmt_desc_next(nullptr);
             desc;
             desc = av_pix_fmt_desc_next(desc)) {
                // Find planar Y'CbCr formats only.
                if (desc->nb_components != 3) continue;
                if (desc->flags & AV_PIX_FMT_FLAG_RGB) continue;
                if (!(desc->flags & AV_PIX_FMT_FLAG_PLANAR)) continue;
                if (desc->comp[0].plane != 0 ||
                    desc->comp[1].plane != 1 ||
                    desc->comp[2].plane != 2) continue;

                // 8-bit formats only.
                if (desc->flags & AV_PIX_FMT_FLAG_BE) continue;
                if (desc->comp[0].depth != 8) continue;

                // Same or better chroma resolution only.
                int chroma_w_diff = src_desc->log2_chroma_w - desc->log2_chroma_w;
                int chroma_h_diff = src_desc->log2_chroma_h - desc->log2_chroma_h;
                if (chroma_w_diff < 0 || chroma_h_diff < 0)
                        continue;

                // Matching full/limited range only.
                if (is_full_range(desc) != src_full_range)
                        continue;

                // Pick something with as little excess chroma resolution as possible.
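                // (The score is 2^(chroma_w_diff + chroma_h_diff), i.e., the factor
                // of excess chroma samples the candidate carries versus the source.)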
                unsigned score = (1 << (chroma_w_diff)) << chroma_h_diff;
                if (score < best_score) {
                        best_score = score;
                        best_format = desc->name;
                }
        }
        return av_get_pix_fmt(best_format);
}

YCbCrFormat decode_ycbcr_format(const AVPixFmtDescriptor *desc, const AVFrame *frame, bool is_mjpeg, AVColorSpace *last_colorspace, AVChromaLocation *last_chroma_location)
{
        YCbCrFormat format;
        AVColorSpace colorspace = frame->colorspace;
        switch (colorspace) {
        case AVCOL_SPC_BT709:
                format.luma_coefficients = YCBCR_REC_709;
                break;
        case AVCOL_SPC_BT470BG:
        case AVCOL_SPC_SMPTE170M:
        case AVCOL_SPC_SMPTE240M:
                format.luma_coefficients = YCBCR_REC_601;
                break;
        case AVCOL_SPC_BT2020_NCL:
                format.luma_coefficients = YCBCR_REC_2020;
                break;
        case AVCOL_SPC_UNSPECIFIED:
                format.luma_coefficients = (frame->height >= 720 ? YCBCR_REC_709 : YCBCR_REC_601);
                break;
        default:
                if (colorspace != *last_colorspace) {
                        fprintf(stderr, "Unknown Y'CbCr coefficient enum %d from FFmpeg; choosing Rec. 709.\n",
                                colorspace);
                }
                format.luma_coefficients = YCBCR_REC_709;
                break;
        }
        *last_colorspace = colorspace;

        format.full_range = is_full_range(desc);
        format.num_levels = 1 << desc->comp[0].depth;
        format.chroma_subsampling_x = 1 << desc->log2_chroma_w;
        format.chroma_subsampling_y = 1 << desc->log2_chroma_h;

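        // Map FFmpeg's chroma siting enum onto movit's normalized positions,
        // where 0.0 is co-sited with the first luma sample, 0.5 is centered
        // between two luma samples, and 1.0 is co-sited with the next one.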
        switch (frame->chroma_location) {
        case AVCHROMA_LOC_LEFT:
                format.cb_x_position = 0.0;
                format.cb_y_position = 0.5;
                break;
        case AVCHROMA_LOC_CENTER:
                format.cb_x_position = 0.5;
                format.cb_y_position = 0.5;
                break;
        case AVCHROMA_LOC_TOPLEFT:
                format.cb_x_position = 0.0;
                format.cb_y_position = 0.0;
                break;
        case AVCHROMA_LOC_TOP:
                format.cb_x_position = 0.5;
                format.cb_y_position = 0.0;
                break;
        case AVCHROMA_LOC_BOTTOMLEFT:
                format.cb_x_position = 0.0;
                format.cb_y_position = 1.0;
                break;
        case AVCHROMA_LOC_BOTTOM:
                format.cb_x_position = 0.5;
                format.cb_y_position = 1.0;
                break;
        default:
                if (frame->chroma_location != *last_chroma_location) {
                        fprintf(stderr, "Unknown chroma location coefficient enum %d from FFmpeg; choosing center.\n",
                                frame->chroma_location);
                }
                format.cb_x_position = 0.5;
                format.cb_y_position = 0.5;
                break;
        }
        *last_chroma_location = frame->chroma_location;

        if (is_mjpeg && !format.full_range) {
                // Limited-range MJPEG is only detected by FFmpeg whenever a special
                // JPEG comment is set, which means that in practice, the stream is
                // almost certainly generated by Futatabi. Override FFmpeg's forced
                // MJPEG defaults (it disregards the values set in the mux) with what
                // Futatabi sets.
                format.luma_coefficients = YCBCR_REC_709;
                format.cb_x_position = 0.0;
                format.cb_y_position = 0.5;
        }

        format.cr_x_position = format.cb_x_position;
        format.cr_y_position = format.cb_y_position;
        return format;
}

RGBTriplet get_neutral_color(AVDictionary *metadata)
{
        if (metadata == nullptr) {
                return RGBTriplet(1.0f, 1.0f, 1.0f);
        }
        AVDictionaryEntry *entry = av_dict_get(metadata, "WhitePoint", nullptr, 0);
        if (entry == nullptr) {
                return RGBTriplet(1.0f, 1.0f, 1.0f);
        }

        unsigned x_nom, x_den, y_nom, y_den;
        if (sscanf(entry->value, " %u:%u , %u:%u", &x_nom, &x_den, &y_nom, &y_den) != 4) {
                fprintf(stderr, "WARNING: Unable to parse white point '%s', using default white point\n", entry->value);
                return RGBTriplet(1.0f, 1.0f, 1.0f);
        }

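        // The metadata gives the white point as a CIE xy chromaticity;
        // z follows from x + y + z = 1. Inverting the sRGB-to-XYZ matrix
        // then gives the corresponding (unnormalized) RGB values.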
        double x = double(x_nom) / x_den;
        double y = double(y_nom) / y_den;
        double z = 1.0 - x - y;

        Matrix3d rgb_to_xyz_matrix = movit::ColorspaceConversionEffect::get_xyz_matrix(COLORSPACE_sRGB);
        Vector3d rgb = rgb_to_xyz_matrix.inverse() * Vector3d(x, y, z);

        return RGBTriplet(rgb[0], rgb[1], rgb[2]);
}

}  // namespace

FFmpegCapture::FFmpegCapture(const string &filename, unsigned width, unsigned height)
        : filename(filename), width(width), height(height), video_timebase{1, 1}
{
        description = "Video: " + filename;

        last_frame = steady_clock::now();

        avformat_network_init();  // In case someone wants this.
}

#ifdef HAVE_SRT
FFmpegCapture::FFmpegCapture(int srt_sock, const string &stream_id)
        : srt_sock(srt_sock),
          width(0),  // Don't resize; SRT streams typically have stable resolution, and should behave much like regular cards in general.
          height(0),
          pixel_format(bmusb::PixelFormat_8BitYCbCrPlanar),
          video_timebase{1, 1}
{
        if (stream_id.empty()) {
                description = "SRT stream";
        } else {
                description = stream_id;
        }
        play_as_fast_as_possible = true;
        play_once = true;
        last_frame = steady_clock::now();
}
#endif

FFmpegCapture::~FFmpegCapture()
{
        if (has_dequeue_callbacks) {
                dequeue_cleanup_callback();
        }
        swr_free(&resampler);
#ifdef HAVE_SRT
        if (srt_sock != -1) {
                srt_close(srt_sock);
        }
#endif
}

void FFmpegCapture::configure_card()
{
        if (video_frame_allocator == nullptr) {
                owned_video_frame_allocator.reset(new MallocFrameAllocator(FRAME_SIZE, NUM_QUEUED_VIDEO_FRAMES));
                set_video_frame_allocator(owned_video_frame_allocator.get());
        }
        if (audio_frame_allocator == nullptr) {
                // Audio can come out in pretty large chunks, so increase from the default 1 MB.
                owned_audio_frame_allocator.reset(new MallocFrameAllocator(1 << 20, NUM_QUEUED_AUDIO_FRAMES));
                set_audio_frame_allocator(owned_audio_frame_allocator.get());
        }
}

void FFmpegCapture::start_bm_capture()
{
        if (running) {
                return;
        }
        running = true;
        producer_thread_should_quit.unquit();
        producer_thread = thread(&FFmpegCapture::producer_thread_func, this);
}

void FFmpegCapture::stop_dequeue_thread()
{
        if (!running) {
                return;
        }
        running = false;
        producer_thread_should_quit.quit();
        producer_thread.join();
}

std::map<uint32_t, VideoMode> FFmpegCapture::get_available_video_modes() const
{
        // Note: This will never really be shown in the UI.
        VideoMode mode;

        char buf[256];
        snprintf(buf, sizeof(buf), "%ux%u", sws_last_width, sws_last_height);
        mode.name = buf;

        mode.autodetect = false;
        mode.width = sws_last_width;
        mode.height = sws_last_height;
        mode.frame_rate_num = 60;
        mode.frame_rate_den = 1;
        mode.interlaced = false;

        return {{ 0, mode }};
}

void FFmpegCapture::producer_thread_func()
{
        char thread_name[16];
        snprintf(thread_name, sizeof(thread_name), "FFmpeg_C_%d", card_index);
        pthread_setname_np(pthread_self(), thread_name);

        while (!producer_thread_should_quit.should_quit()) {
                string filename_copy;
                {
                        lock_guard<mutex> lock(filename_mu);
                        filename_copy = filename;
                }

                string pathname;
                if (srt_sock == -1) {
                        pathname = search_for_file(filename_copy);
                } else {
                        pathname = description;
                }
                if (pathname.empty()) {
                        send_disconnected_frame();
                        if (play_once) {
                                break;
                        }
                        producer_thread_should_quit.sleep_for(seconds(1));
                        fprintf(stderr, "%s not found, sleeping one second and trying again...\n", filename_copy.c_str());
                        continue;
                }
                should_interrupt = false;
                if (!play_video(pathname)) {
                        // Error.
                        send_disconnected_frame();
                        if (play_once) {
                                break;
                        }
                        fprintf(stderr, "Error when playing %s, sleeping one second and trying again...\n", pathname.c_str());
                        producer_thread_should_quit.sleep_for(seconds(1));
                        continue;
                }

                if (play_once) {
                        send_disconnected_frame();
                        break;
                }

                // Probably just EOF, will exit the loop above on next test.
        }

        if (has_dequeue_callbacks) {
                dequeue_cleanup_callback();
                has_dequeue_callbacks = false;
        }
}

void FFmpegCapture::send_disconnected_frame()
{
        // Send an empty frame to signal that we have no signal anymore.
        FrameAllocator::Frame video_frame = video_frame_allocator->alloc_frame();
        size_t frame_width = width == 0 ? global_flags.width : width;
        size_t frame_height = height == 0 ? global_flags.height : height;
        if (video_frame.data) {
                VideoFormat video_format;
                video_format.width = frame_width;
                video_format.height = frame_height;
                video_format.frame_rate_nom = 60;
                video_format.frame_rate_den = 1;
                video_format.is_connected = false;
                if (pixel_format == bmusb::PixelFormat_8BitBGRA) {
                        video_format.stride = frame_width * 4;
                        video_frame.len = frame_width * frame_height * 4;
                        memset(video_frame.data, 0, video_frame.len);
                } else {
                        video_format.stride = frame_width;
                        current_frame_ycbcr_format.luma_coefficients = YCBCR_REC_709;
                        current_frame_ycbcr_format.full_range = true;
                        current_frame_ycbcr_format.num_levels = 256;
                        current_frame_ycbcr_format.chroma_subsampling_x = 2;
                        current_frame_ycbcr_format.chroma_subsampling_y = 2;
                        current_frame_ycbcr_format.cb_x_position = 0.0f;
                        current_frame_ycbcr_format.cb_y_position = 0.0f;
                        current_frame_ycbcr_format.cr_x_position = 0.0f;
                        current_frame_ycbcr_format.cr_y_position = 0.0f;
                        video_frame.len = frame_width * frame_height * 2;
                        memset(video_frame.data, 0, frame_width * frame_height);
                        memset(video_frame.data + frame_width * frame_height, 128, frame_width * frame_height);  // Valid for both NV12 and planar.
                }

                if (frame_callback != nullptr) {
                        frame_callback(-1, AVRational{1, TIMEBASE}, -1, AVRational{1, TIMEBASE}, timecode++,
                                video_frame, /*video_offset=*/0, video_format,
                                FrameAllocator::Frame(), /*audio_offset=*/0, AudioFormat());
                }
                last_frame_was_connected = false;
        }

        if (play_once) {
                disconnected = true;
                if (card_disconnected_callback != nullptr) {
                        card_disconnected_callback();
                }
        }
}

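// get_format callback for AVCodecContext: FFmpeg hands us an AV_PIX_FMT_NONE-terminated
// list of candidate pixel formats, and we pick the hardware format for <type> if some
// decoder config actually supports it. Otherwise, we fall back to a software format.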
template<AVHWDeviceType type>
AVPixelFormat get_hw_format(AVCodecContext *ctx, const AVPixelFormat *fmt)
{
        bool found_config_of_right_type = false;
        for (int i = 0;; ++i) {  // Termination condition inside loop.
                const AVCodecHWConfig *config = avcodec_get_hw_config(ctx->codec, i);
                if (config == nullptr) {  // End of list.
                        break;
                }
                if (!(config->methods & AV_CODEC_HW_CONFIG_METHOD_HW_DEVICE_CTX) ||
                    config->device_type != type) {
                        // Not interesting for us.
                        continue;
                }

                // We have a config of the right type, but does it actually support
                // the pixel format we want? (Seemingly, FFmpeg's way of signaling errors
                // is to just replace the pixel format with a software-decoded one,
                // such as yuv420p.)
                found_config_of_right_type = true;
                for (const AVPixelFormat *fmt_ptr = fmt; *fmt_ptr != -1; ++fmt_ptr) {
                        if (config->pix_fmt == *fmt_ptr) {
                                fprintf(stderr, "Initialized '%s' hardware decoding for codec '%s'.\n",
                                        av_hwdevice_get_type_name(type), ctx->codec->name);
                                if (ctx->profile == FF_PROFILE_H264_BASELINE) {
                                        fprintf(stderr, "WARNING: Stream claims to be H.264 Baseline, which is generally poorly supported in hardware decoders.\n");
                                        fprintf(stderr, "         Consider encoding it as Constrained Baseline, Main or High instead.\n");
                                        fprintf(stderr, "         Decoding might fail and fall back to software.\n");
                                }
                                return config->pix_fmt;
                        }
                }
                fprintf(stderr, "Decoder '%s' supports only these pixel formats:", ctx->codec->name);
                unordered_set<AVPixelFormat> seen;
                for (const AVPixelFormat *fmt_ptr = fmt; *fmt_ptr != -1; ++fmt_ptr) {
                        if (!seen.count(*fmt_ptr)) {
                                fprintf(stderr, " %s", av_get_pix_fmt_name(*fmt_ptr));
                                seen.insert(*fmt_ptr);
                        }
                }
                fprintf(stderr, " (wanted %s for hardware acceleration)\n", av_get_pix_fmt_name(config->pix_fmt));
        }

        if (!found_config_of_right_type) {
                fprintf(stderr, "Decoder '%s' does not support device type '%s'.\n", ctx->codec->name, av_hwdevice_get_type_name(type));
        }

        // We found no acceptable hardware formats, so take the first software format.
        for (const AVPixelFormat *fmt_ptr = fmt; *fmt_ptr != -1; ++fmt_ptr) {
                if ((av_pix_fmt_desc_get(*fmt_ptr)->flags & AV_PIX_FMT_FLAG_HWACCEL) == 0) {
                        fprintf(stderr, "Falling back to software format %s.\n", av_get_pix_fmt_name(*fmt_ptr));
                        return *fmt_ptr;
                }
        }

        // Fallback: Just return anything. (Should never really happen.)
        return fmt[0];
}

bool FFmpegCapture::play_video(const string &pathname)
{
        // Note: Call before open, not after; otherwise, there's a race.
        // (There is now, too, but it tips the correct way. We could use fstat()
        // if we had the file descriptor.)
        timespec last_modified;
        struct stat buf;
        if (stat(pathname.c_str(), &buf) != 0) {
                // Probably some sort of protocol, so can't stat.
                last_modified.tv_sec = -1;
        } else {
                last_modified = buf.st_mtim;
        }
        last_colorspace = static_cast<AVColorSpace>(-1);
        last_chroma_location = static_cast<AVChromaLocation>(-1);

        AVFormatContextWithCloser format_ctx;
        if (srt_sock == -1) {
                // Regular file (or stream).
                frame_timeout_started = steady_clock::now();
                frame_timeout_valid = true;
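                // The interrupt callback lets interrupt_cb_thunk() abort a blocking
                // open or read (e.g., on a stalled network stream); frame_timeout_*
                // bounds how long we are willing to wait for it.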
                format_ctx = avformat_open_input_unique(pathname.c_str(), /*fmt=*/nullptr,
                        /*options=*/nullptr,
                        AVIOInterruptCB{ &FFmpegCapture::interrupt_cb_thunk, this });
                frame_timeout_valid = false;
        } else {
#ifdef HAVE_SRT
                // SRT socket, already opened.
                const AVInputFormat *mpegts_fmt = av_find_input_format("mpegts");
                format_ctx = avformat_open_input_unique(&FFmpegCapture::read_srt_thunk, this,
                        mpegts_fmt, /*options=*/nullptr,
                        AVIOInterruptCB{ &FFmpegCapture::interrupt_cb_thunk, this });
#else
                assert(false);
#endif
        }
        if (format_ctx == nullptr) {
                fprintf(stderr, "%s: Error opening file\n", pathname.c_str());
                return false;
        }

        if (avformat_find_stream_info(format_ctx.get(), nullptr) < 0) {
                fprintf(stderr, "%s: Error finding stream info\n", pathname.c_str());
                return false;
        }

        int video_stream_index = find_stream_index(format_ctx.get(), AVMEDIA_TYPE_VIDEO);
        if (video_stream_index == -1) {
                fprintf(stderr, "%s: No video stream found\n", pathname.c_str());
                return false;
        }

        int audio_stream_index = find_stream_index(format_ctx.get(), AVMEDIA_TYPE_AUDIO);
        int subtitle_stream_index = find_stream_index(format_ctx.get(), AVMEDIA_TYPE_SUBTITLE);
        has_last_subtitle = false;

        // Open video decoder.
        const AVCodecParameters *video_codecpar = format_ctx->streams[video_stream_index]->codecpar;
        const AVCodec *video_codec = avcodec_find_decoder(video_codecpar->codec_id);

        video_timebase = format_ctx->streams[video_stream_index]->time_base;
        AVCodecContextWithDeleter video_codec_ctx = avcodec_alloc_context3_unique(nullptr);
        if (avcodec_parameters_to_context(video_codec_ctx.get(), video_codecpar) < 0) {
                fprintf(stderr, "%s: Cannot fill video codec parameters\n", pathname.c_str());
                return false;
        }
        if (video_codec == nullptr) {
                fprintf(stderr, "%s: Cannot find video decoder\n", pathname.c_str());
                return false;
        }

        // Seemingly, it's not too easy to make something that just initializes
        // “whatever goes”, so we don't get CUDA or VULKAN or whatever here
        // without enumerating through several different types.
        // VA-API and VDPAU will do for now. We prioritize VDPAU for the
        // simple reason that there's a VA-API-via-VDPAU emulation for NVidia
        // cards that seems to work, but just hangs when trying to transfer the frame.
        //
        // Note that we don't actually check codec support beforehand,
        // so if you have a low-end VDPAU device but a high-end VA-API device,
        // you lose out on the extra codec support from the latter.
        AVBufferRef *hw_device_ctx = nullptr;
        if (av_hwdevice_ctx_create(&hw_device_ctx, AV_HWDEVICE_TYPE_VDPAU, nullptr, nullptr, 0) >= 0) {
                video_codec_ctx->hw_device_ctx = av_buffer_ref(hw_device_ctx);
                video_codec_ctx->get_format = get_hw_format<AV_HWDEVICE_TYPE_VDPAU>;
        } else if (av_hwdevice_ctx_create(&hw_device_ctx, AV_HWDEVICE_TYPE_VAAPI, nullptr, nullptr, 0) >= 0) {
                video_codec_ctx->hw_device_ctx = av_buffer_ref(hw_device_ctx);
                video_codec_ctx->get_format = get_hw_format<AV_HWDEVICE_TYPE_VAAPI>;
        } else {
                fprintf(stderr, "Failed to initialize VA-API or VDPAU for FFmpeg acceleration. Decoding video in software.\n");
        }

        if (avcodec_open2(video_codec_ctx.get(), video_codec, nullptr) < 0) {
                fprintf(stderr, "%s: Cannot open video decoder\n", pathname.c_str());
                return false;
        }
        unique_ptr<AVCodecContext, decltype(avcodec_close)*> video_codec_ctx_cleanup(
                video_codec_ctx.get(), avcodec_close);

        // Used in decode_ycbcr_format().
        is_mjpeg = video_codecpar->codec_id == AV_CODEC_ID_MJPEG;

        // Open audio decoder, if we have audio.
        AVCodecContextWithDeleter audio_codec_ctx;
        if (audio_stream_index != -1) {
                audio_codec_ctx = avcodec_alloc_context3_unique(nullptr);
                const AVCodecParameters *audio_codecpar = format_ctx->streams[audio_stream_index]->codecpar;
                audio_timebase = format_ctx->streams[audio_stream_index]->time_base;
                if (avcodec_parameters_to_context(audio_codec_ctx.get(), audio_codecpar) < 0) {
                        fprintf(stderr, "%s: Cannot fill audio codec parameters\n", pathname.c_str());
                        return false;
                }
                const AVCodec *audio_codec = avcodec_find_decoder(audio_codecpar->codec_id);
                if (audio_codec == nullptr) {
                        fprintf(stderr, "%s: Cannot find audio decoder\n", pathname.c_str());
                        return false;
                }
                if (avcodec_open2(audio_codec_ctx.get(), audio_codec, nullptr) < 0) {
                        fprintf(stderr, "%s: Cannot open audio decoder\n", pathname.c_str());
                        return false;
                }
        }
        unique_ptr<AVCodecContext, decltype(avcodec_close)*> audio_codec_ctx_cleanup(
                audio_codec_ctx.get(), avcodec_close);

        internal_rewind();

        // Main loop.
        bool first_frame = true;
        int consecutive_errors = 0;
        while (!producer_thread_should_quit.should_quit()) {
                if (process_queued_commands(format_ctx.get(), pathname, last_modified, /*rewound=*/nullptr)) {
                        return true;
                }
                if (should_interrupt.load()) {
                        // Check as a failsafe, so that we don't need to rely on avio if we don't have to.
                        return false;
                }
                UniqueFrame audio_frame = audio_frame_allocator->alloc_frame();
                AudioFormat audio_format;

                int64_t audio_pts;
                bool error;
                frame_timeout_started = steady_clock::now();
                frame_timeout_valid = true;
                AVFrameWithDeleter frame = decode_frame(format_ctx.get(), video_codec_ctx.get(), audio_codec_ctx.get(),
                        pathname, video_stream_index, audio_stream_index, subtitle_stream_index, audio_frame.get(), &audio_format, &audio_pts, &error);
                frame_timeout_valid = false;
                if (should_interrupt.load()) {
                        // Abort no matter whether we got a frame or not.
                        return false;
                }
                if (error) {
                        if (++consecutive_errors >= 100) {
                                fprintf(stderr, "More than 100 consecutive error video frames, aborting playback.\n");
                                return false;
                        } else {
                                continue;
                        }
                } else {
                        consecutive_errors = 0;
                }
                if (frame == nullptr) {
                        // EOF. Loop back to the start if we can.
                        if (format_ctx->pb != nullptr && format_ctx->pb->seekable == 0) {
                                // Not seekable (but seemingly, sometimes av_seek_frame() would return 0 anyway,
                                // so don't try).
                                return true;
                        }
                        if (av_seek_frame(format_ctx.get(), /*stream_index=*/-1, /*timestamp=*/0, /*flags=*/0) < 0) {
                                fprintf(stderr, "%s: Rewind failed, not looping.\n", pathname.c_str());
                                return true;
                        }
                        if (video_codec_ctx != nullptr) {
                                avcodec_flush_buffers(video_codec_ctx.get());
                        }
                        if (audio_codec_ctx != nullptr) {
                                avcodec_flush_buffers(audio_codec_ctx.get());
                        }
                        // If the file has changed since last time, return to get it reloaded.
                        // Note that depending on how you move the file into place, you might
                        // end up corrupting the one you're already playing, so this path
                        // might not trigger.
                        if (changed_since(pathname, last_modified)) {
                                return true;
                        }
                        internal_rewind();
                        continue;
                }

                VideoFormat video_format = construct_video_format(frame.get(), video_timebase);
                if (video_format.frame_rate_nom == 0 || video_format.frame_rate_den == 0) {
                        // Invalid frame rate; try constructing it from the previous frame length.
                        // (This is especially important if we are the master card, for SRT,
                        // since it affects audio. Not all senders have good timebases
                        // (e.g., Larix rounds first to timebase 1000 and then multiplies by
                        // 90 from there, it seems), but it's much better to have an oscillating
                        // value than just locking at 60.)
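                        // With pts_diff timebase ticks per frame, the frame rate is
                        // 1 / (pts_diff * num/den) = den / (num * pts_diff) frames/sec.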
                        if (last_pts != 0 && frame->pts > last_pts) {
                                int64_t pts_diff = frame->pts - last_pts;
                                video_format.frame_rate_nom = video_timebase.den;
                                video_format.frame_rate_den = video_timebase.num * pts_diff;
                        } else {
                                video_format.frame_rate_nom = 60;
                                video_format.frame_rate_den = 1;
                        }
                }
                UniqueFrame video_frame = make_video_frame(frame.get(), pathname, &error);
                if (error) {
                        return false;
                }

                for ( ;; ) {
                        if (last_pts == 0 && pts_origin == 0) {
                                pts_origin = frame->pts;
                        }
                        steady_clock::time_point now = steady_clock::now();
                        if (play_as_fast_as_possible) {
                                video_frame->received_timestamp = now;
                                audio_frame->received_timestamp = now;
                                next_frame_start = now;
                        } else {
                                next_frame_start = compute_frame_start(frame->pts, pts_origin, video_timebase, start, rate);
                                if (first_frame && last_frame_was_connected) {
                                        // If reconnect took more than one second, this is probably a live feed,
                                        // and we should reset the resampler. (Or the rate is really, really low,
                                        // in which case a reset on the first frame is fine anyway.)
                                        if (duration<double>(next_frame_start - last_frame).count() >= 1.0) {
                                                last_frame_was_connected = false;
                                        }
                                }
                                video_frame->received_timestamp = next_frame_start;

                                // The easiest way to get all the rate conversions etc. right is to move the
                                // audio PTS into the video PTS timebase and go from there. (We'll get some
                                // rounding issues, but they should not be a big problem.)
                                int64_t audio_pts_as_video_pts = av_rescale_q(audio_pts, audio_timebase, video_timebase);
                                audio_frame->received_timestamp = compute_frame_start(audio_pts_as_video_pts, pts_origin, video_timebase, start, rate);

                                if (audio_frame->len != 0) {
                                        // The received timestamps in Nageru are measured after we've just received the frame.
                                        // However, pts (especially audio pts) is at the _beginning_ of the frame.
                                        // If we have locked audio, the distinction doesn't really matter, as pts is
                                        // on a relative scale and a fixed offset is fine. But if we don't, we will have
                                        // a different number of samples each time, which will cause huge audio jitter
                                        // and throw off the resampler.
                                        //
                                        // In a sense, we should have compensated by adding the frame and audio lengths
                                        // to video_frame->received_timestamp and audio_frame->received_timestamp respectively,
                                        // but that would mean extra waiting in sleep_until(). All we need is that they
                                        // are correct relative to each other, though (and to the other frames we send),
                                        // so just align the end of the audio frame, and we're fine.
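                                        // That is, shift the audio timestamp by
                                        // (audio frame length) - (video frame length), in seconds: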
                                        size_t num_samples = (audio_frame->len * 8) / audio_format.bits_per_sample / audio_format.num_channels;
                                        double offset = double(num_samples) / OUTPUT_FREQUENCY -
                                                double(video_format.frame_rate_den) / video_format.frame_rate_nom;
                                        audio_frame->received_timestamp += duration_cast<steady_clock::duration>(duration<double>(offset));
                                }

                                if (duration<double>(now - next_frame_start).count() >= 0.1) {
                                        // If we don't have enough CPU to keep up, or if we have a live stream
                                        // where the initial origin was somehow wrong, we could be behind indefinitely.
                                        // In particular, this will give the audio resampler problems as it tries
                                        // to speed up to reduce the delay, hitting the low end of the buffer every time.
                                        fprintf(stderr, "%s: Playback %.0f ms behind, resetting time scale\n",
                                                pathname.c_str(),
                                                1e3 * duration<double>(now - next_frame_start).count());
                                        pts_origin = frame->pts;
                                        start = next_frame_start = now;
                                        timecode += TYPICAL_FPS * 2 + 1;
                                }
                        }
                        bool finished_wakeup;
                        if (play_as_fast_as_possible) {
                                finished_wakeup = !producer_thread_should_quit.should_quit();
                        } else {
                                finished_wakeup = producer_thread_should_quit.sleep_until(next_frame_start);
                        }
                        if (finished_wakeup) {
                                if (audio_frame->len > 0) {
                                        assert(audio_pts != -1);
                                }
                                if (!last_frame_was_connected) {
                                        // We're recovering from an error (or really slow load, see above).
                                        // Make sure to get the audio resampler reset. (This is a hack;
                                        // ideally, the frame callback should just accept a way to signal
                                        // audio discontinuity.)
                                        timecode += TYPICAL_FPS * 2 + 1;
                                }
                                last_neutral_color = get_neutral_color(frame->metadata);
                                if (frame_callback != nullptr) {
                                        frame_callback(frame->pts, video_timebase, audio_pts, audio_timebase, timecode++,
                                                video_frame.get_and_release(), 0, video_format,
                                                audio_frame.get_and_release(), 0, audio_format);
                                }
                                first_frame = false;
                                last_frame = steady_clock::now();
                                last_frame_was_connected = true;
                                break;
                        } else {
                                if (producer_thread_should_quit.should_quit()) break;

                                bool rewound = false;
                                if (process_queued_commands(format_ctx.get(), pathname, last_modified, &rewound)) {
                                        return true;
                                }
                                // If we just rewound, drop this frame on the floor and be done.
                                if (rewound) {
                                        break;
                                }
                                // OK, we didn't, so probably a rate change. Recalculate next_frame_start,
                                // but if it's now in the past, we'll reset the origin, so that we don't
                                // generate a huge backlog of frames that we need to run through quickly.
                                next_frame_start = compute_frame_start(frame->pts, pts_origin, video_timebase, start, rate);
                                steady_clock::time_point now = steady_clock::now();
                                if (next_frame_start < now) {
                                        pts_origin = frame->pts;
                                        start = next_frame_start = now;
                                }
                        }
                }
                last_pts = frame->pts;
        }
        return true;
}

void FFmpegCapture::internal_rewind()
{
        pts_origin = last_pts = 0;
        start = next_frame_start = steady_clock::now();
}

bool FFmpegCapture::process_queued_commands(AVFormatContext *format_ctx, const std::string &pathname, timespec last_modified, bool *rewound)
{
        // Process any queued commands from other threads.
        vector<QueuedCommand> commands;
        {
                lock_guard<mutex> lock(queue_mu);
                swap(commands, command_queue);
        }
        for (const QueuedCommand &cmd : commands) {
                switch (cmd.command) {
                case QueuedCommand::REWIND:
                        if (av_seek_frame(format_ctx, /*stream_index=*/-1, /*timestamp=*/0, /*flags=*/0) < 0) {
                                fprintf(stderr, "%s: Rewind failed, stopping play.\n", pathname.c_str());
                        }
                        // If the file has changed since last time, return to get it reloaded.
                        // Note that depending on how you move the file into place, you might
                        // end up corrupting the one you're already playing, so this path
                        // might not trigger.
                        if (changed_since(pathname, last_modified)) {
                                return true;
                        }
                        internal_rewind();
                        if (rewound != nullptr) {
                                *rewound = true;
                        }
                        break;

                case QueuedCommand::CHANGE_RATE:
                        // Change the origin to the last played frame.
                        start = compute_frame_start(last_pts, pts_origin, video_timebase, start, rate);
                        pts_origin = last_pts;
                        rate = cmd.new_rate;
                        play_as_fast_as_possible = (rate >= 10.0);
                        break;
                }
        }
        return false;
}
AVFrameWithDeleter FFmpegCapture::decode_frame(AVFormatContext *format_ctx, AVCodecContext *video_codec_ctx, AVCodecContext *audio_codec_ctx,
        const std::string &pathname, int video_stream_index, int audio_stream_index, int subtitle_stream_index,
        FrameAllocator::Frame *audio_frame, AudioFormat *audio_format, int64_t *audio_pts, bool *error)
{
        *error = false;

        // Read packets until we have a frame or there are none left.
        bool frame_finished = false;
        AVFrameWithDeleter audio_avframe = av_frame_alloc_unique();
        AVFrameWithDeleter video_avframe = av_frame_alloc_unique();
        bool eof = false;
        *audio_pts = -1;
        bool has_audio = false;
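        // Standard send/receive decoding loop: feed each demuxed packet to the
        // right decoder with avcodec_send_packet(), then drain decoded frames with
        // avcodec_receive_frame() until it returns AVERROR(EAGAIN) (needs more input).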
        do {
                AVPacketWithDeleter pkt = av_packet_alloc_unique();
                pkt->data = nullptr;
                pkt->size = 0;
                if (av_read_frame(format_ctx, pkt.get()) == 0) {
                        if (pkt->stream_index == audio_stream_index && audio_callback != nullptr) {
                                audio_callback(pkt.get(), format_ctx->streams[audio_stream_index]->time_base);
                        }
                        if (pkt->stream_index == video_stream_index && video_callback != nullptr) {
                                video_callback(pkt.get(), format_ctx->streams[video_stream_index]->time_base);
                        }
                        if (pkt->stream_index == video_stream_index && global_flags.transcode_video) {
                                if (avcodec_send_packet(video_codec_ctx, pkt.get()) < 0) {
                                        fprintf(stderr, "%s: Cannot send packet to video codec.\n", pathname.c_str());
                                        *error = true;
                                        return AVFrameWithDeleter(nullptr);
                                }
                        } else if (pkt->stream_index == audio_stream_index && global_flags.transcode_audio) {
                                has_audio = true;
                                if (avcodec_send_packet(audio_codec_ctx, pkt.get()) < 0) {
                                        fprintf(stderr, "%s: Cannot send packet to audio codec.\n", pathname.c_str());
                                        *error = true;
                                        return AVFrameWithDeleter(nullptr);
                                }
                        } else if (pkt->stream_index == subtitle_stream_index) {
                                last_subtitle = string(reinterpret_cast<const char *>(pkt->data), pkt->size);
                                has_last_subtitle = true;
                        }
                } else {
                        eof = true;  // Or error, but ignore that for the time being.
                }

                // Decode audio, if any.
                if (has_audio) {
                        for ( ;; ) {
                                int err = avcodec_receive_frame(audio_codec_ctx, audio_avframe.get());
                                if (err == 0) {
                                        if (*audio_pts == -1) {
                                                *audio_pts = audio_avframe->pts;
                                        }
                                        convert_audio(audio_avframe.get(), audio_frame, audio_format);
                                } else if (err == AVERROR(EAGAIN)) {
                                        break;
                                } else {
                                        fprintf(stderr, "%s: Cannot receive frame from audio codec.\n", pathname.c_str());
                                        *error = true;
                                        return AVFrameWithDeleter(nullptr);
                                }
                        }
                }

                // Decode video, if we have a frame.
                int err = avcodec_receive_frame(video_codec_ctx, video_avframe.get());
                if (err == 0) {
                        if (video_avframe->format == AV_PIX_FMT_VAAPI ||
                            video_avframe->format == AV_PIX_FMT_VDPAU) {
                                // Get the frame down to the CPU. (TODO: See if we can keep it
                                // on the GPU all the way, since it will be going up again later.
                                // However, this only works if the OpenGL GPU is the same one.)
                                AVFrameWithDeleter sw_frame = av_frame_alloc_unique();
                                int err = av_hwframe_transfer_data(sw_frame.get(), video_avframe.get(), 0);
                                if (err != 0) {
                                        fprintf(stderr, "%s: Cannot transfer hardware video frame to software.\n", pathname.c_str());
                                        *error = true;
                                        return AVFrameWithDeleter(nullptr);
                                }
                                sw_frame->pts = video_avframe->pts;
                                sw_frame->pkt_duration = video_avframe->pkt_duration;
                                video_avframe = move(sw_frame);
                        }
                        frame_finished = true;
                        break;
                } else if (err != AVERROR(EAGAIN)) {
                        fprintf(stderr, "%s: Cannot receive frame from video codec.\n", pathname.c_str());
                        *error = true;
                        return AVFrameWithDeleter(nullptr);
                }
        } while (!eof);

        if (frame_finished)
                return video_avframe;
        else
                return AVFrameWithDeleter(nullptr);
}

void FFmpegCapture::convert_audio(const AVFrame *audio_avframe, FrameAllocator::Frame *audio_frame, AudioFormat *audio_format)
{
        // Decide on a format. If there already is one in this audio frame,
        // we're pretty much forced to use it. If not, we try to find an exact match.
        // If that still doesn't work, we default to 32-bit signed chunked
        // (float would be nice, but there's really no way to signal that yet).
        AVSampleFormat dst_format;
        if (audio_format->bits_per_sample == 0) {
                switch (audio_avframe->format) {
                case AV_SAMPLE_FMT_S16:
                case AV_SAMPLE_FMT_S16P:
                        audio_format->bits_per_sample = 16;
                        dst_format = AV_SAMPLE_FMT_S16;
                        break;
                case AV_SAMPLE_FMT_S32:
                case AV_SAMPLE_FMT_S32P:
                default:
                        audio_format->bits_per_sample = 32;
                        dst_format = AV_SAMPLE_FMT_S32;
                        break;
                }
        } else if (audio_format->bits_per_sample == 16) {
                dst_format = AV_SAMPLE_FMT_S16;
        } else if (audio_format->bits_per_sample == 32) {
                dst_format = AV_SAMPLE_FMT_S32;
        } else {
                assert(false);
        }
        audio_format->num_channels = 2;

        AVChannelLayout channel_layout = audio_avframe->ch_layout;
        if (!av_channel_layout_check(&channel_layout) ||
            channel_layout.order == AV_CHANNEL_ORDER_UNSPEC) {
                av_channel_layout_default(&channel_layout, audio_avframe->ch_layout.nb_channels);
        }

        if (resampler == nullptr ||
            audio_avframe->format != last_src_format ||
            dst_format != last_dst_format ||
            av_channel_layout_compare(&channel_layout, &last_channel_layout) != 0 ||
            audio_avframe->sample_rate != last_sample_rate) {
                // TODO: When we get C++20, use AV_CHANNEL_LAYOUT_STEREO_DOWNMIX.
                AVChannelLayout stereo_downmix;
                stereo_downmix.order = AV_CHANNEL_ORDER_NATIVE;
                stereo_downmix.nb_channels = 2;
                stereo_downmix.u.mask = AV_CH_LAYOUT_STEREO_DOWNMIX;

                swr_free(&resampler);
                resampler = nullptr;
                int err = swr_alloc_set_opts2(&resampler,
                                              /*out_ch_layout=*/&stereo_downmix,
                                              /*out_sample_fmt=*/dst_format,
                                              /*out_sample_rate=*/OUTPUT_FREQUENCY,
                                              /*in_ch_layout=*/&channel_layout,
                                              /*in_sample_fmt=*/AVSampleFormat(audio_avframe->format),
                                              /*in_sample_rate=*/audio_avframe->sample_rate,
                                              /*log_offset=*/0,
                                              /*log_ctx=*/nullptr);

                if (err != 0 || resampler == nullptr) {
                        fprintf(stderr, "Allocating resampler failed.\n");
                        abort();
                }

                if (swr_init(resampler) < 0) {
                        fprintf(stderr, "Could not open resample context.\n");
                        abort();
                }

                last_src_format = AVSampleFormat(audio_avframe->format);
                last_dst_format = dst_format;
                last_channel_layout = channel_layout;
                last_sample_rate = audio_avframe->sample_rate;
        }

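        // bits_per_sample is per channel, and we always output two channels,
        // so each output sample frame takes (bits_per_sample / 8) * 2 bytes.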
	size_t bytes_per_sample = (audio_format->bits_per_sample / 8) * 2;
	size_t num_samples_room = (audio_frame->size - audio_frame->len) / bytes_per_sample;

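	// swr_convert() resamples to OUTPUT_FREQUENCY and downmixes to stereo
	// in one go, appending the result right after any samples already
	// in the frame.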
	uint8_t *data = audio_frame->data + audio_frame->len;
	int out_samples = swr_convert(resampler, &data, num_samples_room,
		const_cast<const uint8_t **>(audio_avframe->data), audio_avframe->nb_samples);
	if (out_samples < 0) {
		fprintf(stderr, "Audio conversion failed.\n");
		abort();
	}

	audio_frame->len += out_samples * bytes_per_sample;
}

VideoFormat FFmpegCapture::construct_video_format(const AVFrame *frame, AVRational video_timebase)
{
	VideoFormat video_format;
	video_format.width = frame_width(frame);
	video_format.height = frame_height(frame);
	if (pixel_format == bmusb::PixelFormat_8BitBGRA) {
		video_format.stride = frame_width(frame) * 4;
	} else if (pixel_format == FFmpegCapture::PixelFormat_NV12) {
		video_format.stride = frame_width(frame);
	} else {
		assert(pixel_format == bmusb::PixelFormat_8BitYCbCrPlanar);
		video_format.stride = frame_width(frame);
	}
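	// Derive the frame rate from the packet duration:
	// fps = timebase.den / (pkt_duration * timebase.num).
	// E.g., with a 1/90000 timebase and pkt_duration == 3600,
	// this gives 90000 / 3600 = 25 fps.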
	video_format.frame_rate_nom = video_timebase.den;
	video_format.frame_rate_den = frame->pkt_duration * video_timebase.num;
	video_format.has_signal = true;
	video_format.is_connected = true;
	return video_format;
}

UniqueFrame FFmpegCapture::make_video_frame(const AVFrame *frame, const string &pathname, bool *error)
{
	*error = false;

	UniqueFrame video_frame(video_frame_allocator->alloc_frame());
	if (video_frame->data == nullptr) {
		return video_frame;
	}

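	// Recreate the scaler context only when the source geometry or pixel
	// format changes; creating one per frame would be much too expensive.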
	if (sws_ctx == nullptr ||
	    sws_last_width != frame->width ||
	    sws_last_height != frame->height ||
	    sws_last_src_format != frame->format) {
		sws_dst_format = decide_dst_format(AVPixelFormat(frame->format), pixel_format);
		sws_ctx.reset(
			sws_getContext(frame->width, frame->height, AVPixelFormat(frame->format),
				frame_width(frame), frame_height(frame), sws_dst_format,
				SWS_BICUBIC, nullptr, nullptr, nullptr));
		sws_last_width = frame->width;
		sws_last_height = frame->height;
		sws_last_src_format = frame->format;
	}
	if (sws_ctx == nullptr) {
		fprintf(stderr, "%s: Could not create scaler context\n", pathname.c_str());
		*error = true;
		return video_frame;
	}

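	// The bmusb frame is a single contiguous buffer, so lay out the
	// destination planes within it by hand, and pass the resulting
	// plane pointers and strides to sws_scale().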
	uint8_t *pic_data[4] = { nullptr, nullptr, nullptr, nullptr };
	int linesizes[4] = { 0, 0, 0, 0 };
	if (pixel_format == bmusb::PixelFormat_8BitBGRA) {
		pic_data[0] = video_frame->data;
		linesizes[0] = frame_width(frame) * 4;
		video_frame->len = (frame_width(frame) * 4) * frame_height(frame);
	} else if (pixel_format == PixelFormat_NV12) {
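		// NV12: a full-resolution luma plane, followed by a single
		// half-resolution plane of interleaved Cb/Cr pairs.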
		pic_data[0] = video_frame->data;
		linesizes[0] = frame_width(frame);

		pic_data[1] = pic_data[0] + frame_width(frame) * frame_height(frame);
		linesizes[1] = frame_width(frame);

		video_frame->len = (frame_width(frame) * 2) * frame_height(frame);

		const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(sws_dst_format);
		current_frame_ycbcr_format = decode_ycbcr_format(desc, frame, is_mjpeg, &last_colorspace, &last_chroma_location);
	} else {
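		// Fully planar Y'CbCr: three consecutive planes (Y, then Cb,
		// then Cr), with the chroma dimensions given by the format
		// descriptor's subsampling shifts.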
		assert(pixel_format == bmusb::PixelFormat_8BitYCbCrPlanar);
		const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(sws_dst_format);

		int chroma_width = AV_CEIL_RSHIFT(int(frame_width(frame)), desc->log2_chroma_w);
		int chroma_height = AV_CEIL_RSHIFT(int(frame_height(frame)), desc->log2_chroma_h);

		pic_data[0] = video_frame->data;
		linesizes[0] = frame_width(frame);

		pic_data[1] = pic_data[0] + frame_width(frame) * frame_height(frame);
		linesizes[1] = chroma_width;

		pic_data[2] = pic_data[1] + chroma_width * chroma_height;
		linesizes[2] = chroma_width;

		video_frame->len = frame_width(frame) * frame_height(frame) + 2 * chroma_width * chroma_height;

		current_frame_ycbcr_format = decode_ycbcr_format(desc, frame, is_mjpeg, &last_colorspace, &last_chroma_location);
	}

	// FIXME: Currently, if the video is too high-res for one of the allocated
	// frames, we simply refuse to scale it here to avoid crashes. It would be better
	// if we could somehow signal getting larger frames, especially as 4K is a thing now.
	if (video_frame->len > FRAME_SIZE) {
		fprintf(stderr, "%s: Decoded frame would be larger than supported FRAME_SIZE (%zu > %u), not decoding.\n", pathname.c_str(), video_frame->len, FRAME_SIZE);
		*error = true;
		return video_frame;
	}

	sws_scale(sws_ctx.get(), frame->data, frame->linesize, 0, frame->height, pic_data, linesizes);

	return video_frame;
}

int FFmpegCapture::interrupt_cb_thunk(void *opaque)
{
	return reinterpret_cast<FFmpegCapture *>(opaque)->interrupt_cb();
}

int FFmpegCapture::interrupt_cb()
{
	// If ten seconds go by without anything happening, we assume that
	// we are in a network stream that died and FFmpeg just didn't
	// pick it up (or perhaps it just hung, keeping the connection open).
	// We get called back approximately every 100 ms if something is hanging,
	// which is more than enough accuracy for our purposes.
	if (!should_interrupt && frame_timeout_valid &&
	    duration<double>(steady_clock::now() - frame_timeout_started).count() >= 10.0) {
		string filename_copy;
		{
			lock_guard<mutex> lock(filename_mu);
			filename_copy = filename;
		}
		fprintf(stderr, "%s: No frame for more than 10 seconds, restarting stream.\n", filename_copy.c_str());
		should_interrupt = true;
	}
	return should_interrupt.load();
}

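// A configured width or height of 0 means "follow the native resolution
// of the stream"; otherwise, we scale to the configured size.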
unsigned FFmpegCapture::frame_width(const AVFrame *frame) const
{
	if (width == 0) {
		return frame->width;
	} else {
		return width;
	}
}

unsigned FFmpegCapture::frame_height(const AVFrame *frame) const
{
	if (height == 0) {
		return frame->height;
	} else {
		return height;
	}
}

#ifdef HAVE_SRT
int FFmpegCapture::read_srt_thunk(void *opaque, uint8_t *buf, int buf_size)
{
	return reinterpret_cast<FFmpegCapture *>(opaque)->read_srt(buf, buf_size);
}

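// Blocking read callback handed to FFmpeg's AVIO layer. srt_recvmsg2()
// returns the number of bytes received, or a negative value on error,
// which FFmpeg in turn treats as an I/O error on the stream.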
int FFmpegCapture::read_srt(uint8_t *buf, int buf_size)
{
	SRT_MSGCTRL mc = srt_msgctrl_default;
	return srt_recvmsg2(srt_sock, reinterpret_cast<char *>(buf), buf_size, &mc);
}
#endif