nageru/ffmpeg_capture.cpp
#include "ffmpeg_capture.h"

#include <assert.h>
#include <pthread.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/stat.h>
#include <unistd.h>

extern "C" {
#include <libavcodec/avcodec.h>
#include <libavformat/avformat.h>
#include <libavutil/avutil.h>
#include <libavutil/error.h>
#include <libavutil/frame.h>
#include <libavutil/imgutils.h>
#include <libavutil/mem.h>
#include <libavutil/pixfmt.h>
#include <libavutil/opt.h>
#include <libswscale/swscale.h>
}

#include <chrono>
#include <cstdint>
#include <limits>
#include <unordered_set>
#include <utility>
#include <vector>

#include <Eigen/Core>
#include <Eigen/LU>
#include <movit/colorspace_conversion_effect.h>

#include "bmusb/bmusb.h"
#include "shared/ffmpeg_raii.h"
#include "ffmpeg_util.h"
#include "flags.h"
#include "ref_counted_frame.h"
#include "shared/timebase.h"

#ifdef HAVE_SRT
#include <srt/srt.h>
#endif

using namespace std;
using namespace std::chrono;
using namespace bmusb;
using namespace movit;
using namespace Eigen;

// Avoid deprecation warnings, but we don't want to drop FFmpeg 5.1 support just yet.
#if LIBAVUTIL_VERSION_INT >= AV_VERSION_INT(57, 30, 100)
#define pkt_duration duration
#endif

namespace {

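// Map a frame's pts to the wall-clock time it should be shown at:
// (frame_pts - pts_origin) ticks of the video timebase, scaled by the
// playback rate, relative to `origin`. E.g., with a 1/90000 timebase,
// a pts 3000 ticks past the origin at rate 1.0 starts 33.3 ms after it.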
steady_clock::time_point compute_frame_start(int64_t frame_pts, int64_t pts_origin, const AVRational &video_timebase, const steady_clock::time_point &origin, double rate)
{
        const duration<double> pts((frame_pts - pts_origin) * double(video_timebase.num) / double(video_timebase.den));
        return origin + duration_cast<steady_clock::duration>(pts / rate);
}

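// Returns true if the file at `pathname` has a different mtime than `ts`,
// so that playback can reload a file that was replaced on disk.
// A negative tv_sec means we never managed to stat() the file in the first
// place (e.g., it is some kind of network protocol), in which case it is
// never considered changed.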
bool changed_since(const std::string &pathname, const timespec &ts)
{
        if (ts.tv_sec < 0) {
                return false;
        }
        struct stat buf;
        if (stat(pathname.c_str(), &buf) != 0) {
                fprintf(stderr, "%s: Couldn't check for new version, leaving the old in place.\n", pathname.c_str());
                return false;
        }
        return (buf.st_mtim.tv_sec != ts.tv_sec || buf.st_mtim.tv_nsec != ts.tv_nsec);
}

bool is_full_range(const AVPixFmtDescriptor *desc)
{
        // This is horrible, but there's no better way that I know of.
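        // (FFmpeg names its full-range variants with a 'j', e.g. yuvj420p vs. yuv420p.)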
        return (strchr(desc->name, 'j') != nullptr);
}

AVPixelFormat decide_dst_format(AVPixelFormat src_format, bmusb::PixelFormat dst_format_type)
{
        if (dst_format_type == bmusb::PixelFormat_8BitBGRA) {
                return AV_PIX_FMT_BGRA;
        }
        if (dst_format_type == FFmpegCapture::PixelFormat_NV12) {
                return AV_PIX_FMT_NV12;
        }

        assert(dst_format_type == bmusb::PixelFormat_8BitYCbCrPlanar);

        // If this is a non-Y'CbCr format, just convert to 4:4:4 Y'CbCr
        // and be done with it. It's too strange to spend a lot of time on.
        // (Let's hope there's no alpha.)
        const AVPixFmtDescriptor *src_desc = av_pix_fmt_desc_get(src_format);
        if (src_desc == nullptr ||
            src_desc->nb_components != 3 ||
            (src_desc->flags & AV_PIX_FMT_FLAG_RGB)) {
                return AV_PIX_FMT_YUV444P;
        }

        // The best for us would be Cb and Cr together if possible,
        // but FFmpeg doesn't support that except in the special case of
        // NV12, so we need to go to planar even for the case of NV12.
        // Thus, look for the closest (but no worse) 8-bit planar Y'CbCr format
        // that matches in color range. (This will also include the case of
        // the source format already being acceptable.)
        bool src_full_range = is_full_range(src_desc);
        const char *best_format = "yuv444p";
        unsigned best_score = numeric_limits<unsigned>::max();
        for (const AVPixFmtDescriptor *desc = av_pix_fmt_desc_next(nullptr);
             desc;
             desc = av_pix_fmt_desc_next(desc)) {
                // Find planar Y'CbCr formats only.
                if (desc->nb_components != 3) continue;
                if (desc->flags & AV_PIX_FMT_FLAG_RGB) continue;
                if (!(desc->flags & AV_PIX_FMT_FLAG_PLANAR)) continue;
                if (desc->comp[0].plane != 0 ||
                    desc->comp[1].plane != 1 ||
                    desc->comp[2].plane != 2) continue;

                // 8-bit formats only.
                if (desc->flags & AV_PIX_FMT_FLAG_BE) continue;
                if (desc->comp[0].depth != 8) continue;

                // Same or better chroma resolution only.
                int chroma_w_diff = src_desc->log2_chroma_w - desc->log2_chroma_w;
                int chroma_h_diff = src_desc->log2_chroma_h - desc->log2_chroma_h;
                if (chroma_w_diff < 0 || chroma_h_diff < 0)
                        continue;

                // Matching full/limited range only.
                if (is_full_range(desc) != src_full_range)
                        continue;

                // Pick something with as little excess chroma resolution as possible.
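                // (E.g., converting 4:2:0 to 4:2:0 scores 1; 4:2:0 to 4:4:4 scores 4.)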
                unsigned score = 1 << (chroma_w_diff + chroma_h_diff);
                if (score < best_score) {
                        best_score = score;
                        best_format = desc->name;
                }
        }
        return av_get_pix_fmt(best_format);
}

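// Translate the colorspace and chroma-location metadata on a decoded frame
// into bmusb's YCbCrFormat. Warnings about unknown enum values are only
// printed when the value changes (tracked in *last_colorspace and
// *last_chroma_location), so that we do not log once per frame.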
YCbCrFormat decode_ycbcr_format(const AVPixFmtDescriptor *desc, const AVFrame *frame, bool is_mjpeg, AVColorSpace *last_colorspace, AVChromaLocation *last_chroma_location)
{
        YCbCrFormat format;
        AVColorSpace colorspace = frame->colorspace;
        switch (colorspace) {
        case AVCOL_SPC_BT709:
                format.luma_coefficients = YCBCR_REC_709;
                break;
        case AVCOL_SPC_BT470BG:
        case AVCOL_SPC_SMPTE170M:
        case AVCOL_SPC_SMPTE240M:
                format.luma_coefficients = YCBCR_REC_601;
                break;
        case AVCOL_SPC_BT2020_NCL:
                format.luma_coefficients = YCBCR_REC_2020;
                break;
        case AVCOL_SPC_UNSPECIFIED:
                format.luma_coefficients = (frame->height >= 720 ? YCBCR_REC_709 : YCBCR_REC_601);
                break;
        default:
                if (colorspace != *last_colorspace) {
                        fprintf(stderr, "Unknown Y'CbCr coefficient enum %d from FFmpeg; choosing Rec. 709.\n",
                                colorspace);
                }
                format.luma_coefficients = YCBCR_REC_709;
                break;
        }
        *last_colorspace = colorspace;

        format.full_range = is_full_range(desc);
        format.num_levels = 1 << desc->comp[0].depth;
        format.chroma_subsampling_x = 1 << desc->log2_chroma_w;
        format.chroma_subsampling_y = 1 << desc->log2_chroma_h;

        switch (frame->chroma_location) {
        case AVCHROMA_LOC_LEFT:
                format.cb_x_position = 0.0;
                format.cb_y_position = 0.5;
                break;
        case AVCHROMA_LOC_CENTER:
                format.cb_x_position = 0.5;
                format.cb_y_position = 0.5;
                break;
        case AVCHROMA_LOC_TOPLEFT:
                format.cb_x_position = 0.0;
                format.cb_y_position = 0.0;
                break;
        case AVCHROMA_LOC_TOP:
                format.cb_x_position = 0.5;
                format.cb_y_position = 0.0;
                break;
        case AVCHROMA_LOC_BOTTOMLEFT:
                format.cb_x_position = 0.0;
                format.cb_y_position = 1.0;
                break;
        case AVCHROMA_LOC_BOTTOM:
                format.cb_x_position = 0.5;
                format.cb_y_position = 1.0;
                break;
        default:
                if (frame->chroma_location != *last_chroma_location) {
                        fprintf(stderr, "Unknown chroma location enum %d from FFmpeg; choosing center.\n",
                                frame->chroma_location);
                }
                format.cb_x_position = 0.5;
                format.cb_y_position = 0.5;
                break;
        }
        *last_chroma_location = frame->chroma_location;

        if (is_mjpeg && !format.full_range) {
                // Limited-range MJPEG is only detected by FFmpeg whenever a special
                // JPEG comment is set, which means that in practice, the stream is
                // almost certainly generated by Futatabi. Override FFmpeg's forced
                // MJPEG defaults (it disregards the values set in the mux) with what
                // Futatabi sets.
                format.luma_coefficients = YCBCR_REC_709;
                format.cb_x_position = 0.0;
                format.cb_y_position = 0.5;
        }

        format.cr_x_position = format.cb_x_position;
        format.cr_y_position = format.cb_y_position;
        return format;
}

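// Parse the white point from the stream metadata, if any. The value is
// expected on the form "x_num:x_den , y_num:y_den" (an xy chromaticity
// given as rationals); it is converted into linear RGB gains through the
// inverse of the sRGB-to-XYZ matrix. Returns pure white (no correction)
// if the metadata is missing or unparseable.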
RGBTriplet get_neutral_color(AVDictionary *metadata)
{
        if (metadata == nullptr) {
                return RGBTriplet(1.0f, 1.0f, 1.0f);
        }
        AVDictionaryEntry *entry = av_dict_get(metadata, "WhitePoint", nullptr, 0);
        if (entry == nullptr) {
                return RGBTriplet(1.0f, 1.0f, 1.0f);
        }

        unsigned x_nom, x_den, y_nom, y_den;
        if (sscanf(entry->value, " %u:%u , %u:%u", &x_nom, &x_den, &y_nom, &y_den) != 4) {
                fprintf(stderr, "WARNING: Unable to parse white point '%s', using default white point\n", entry->value);
                return RGBTriplet(1.0f, 1.0f, 1.0f);
        }

        double x = double(x_nom) / x_den;
        double y = double(y_nom) / y_den;
        double z = 1.0 - x - y;

        Matrix3d rgb_to_xyz_matrix = movit::ColorspaceConversionEffect::get_xyz_matrix(COLORSPACE_sRGB);
        Vector3d rgb = rgb_to_xyz_matrix.inverse() * Vector3d(x, y, z);

        return RGBTriplet(rgb[0], rgb[1], rgb[2]);
}

}  // namespace

FFmpegCapture::FFmpegCapture(const string &filename, unsigned width, unsigned height)
        : filename(filename), width(width), height(height), video_timebase{1, 1}
{
        description = "Video: " + filename;

        last_frame = steady_clock::now();

        avformat_network_init();  // In case someone wants this.
}

#ifdef HAVE_SRT
FFmpegCapture::FFmpegCapture(int srt_sock, const string &stream_id)
        : srt_sock(srt_sock),
          width(0),  // Don't resize; SRT streams typically have stable resolution, and should behave much like regular cards in general.
          height(0),
          pixel_format(bmusb::PixelFormat_8BitYCbCrPlanar),
          video_timebase{1, 1}
{
        if (stream_id.empty()) {
                description = "SRT stream";
        } else {
                description = stream_id;
        }
        play_as_fast_as_possible = true;
        play_once = true;
        last_frame = steady_clock::now();
}
#endif

FFmpegCapture::~FFmpegCapture()
{
        if (has_dequeue_callbacks) {
                dequeue_cleanup_callback();
        }
        swr_free(&resampler);
#ifdef HAVE_SRT
        if (srt_sock != -1) {
                srt_close(srt_sock);
        }
#endif
}

void FFmpegCapture::configure_card()
{
        if (video_frame_allocator == nullptr) {
                owned_video_frame_allocator.reset(new MallocFrameAllocator(FRAME_SIZE, NUM_QUEUED_VIDEO_FRAMES));
                set_video_frame_allocator(owned_video_frame_allocator.get());
        }
        if (audio_frame_allocator == nullptr) {
                // Audio can come out in pretty large chunks, so increase from the default 1 MB.
                owned_audio_frame_allocator.reset(new MallocFrameAllocator(1 << 20, NUM_QUEUED_AUDIO_FRAMES));
                set_audio_frame_allocator(owned_audio_frame_allocator.get());
        }
}

void FFmpegCapture::start_bm_capture()
{
        if (running) {
                return;
        }
        running = true;
        producer_thread_should_quit.unquit();
        producer_thread = thread(&FFmpegCapture::producer_thread_func, this);
}

void FFmpegCapture::stop_dequeue_thread()
{
        if (!running) {
                return;
        }
        running = false;
        producer_thread_should_quit.quit();
        producer_thread.join();
}

std::map<uint32_t, VideoMode> FFmpegCapture::get_available_video_modes() const
{
        // Note: This will never really be shown in the UI.
        VideoMode mode;

        char buf[256];
        snprintf(buf, sizeof(buf), "%ux%u", sws_last_width, sws_last_height);
        mode.name = buf;

        mode.autodetect = false;
        mode.width = sws_last_width;
        mode.height = sws_last_height;
        mode.frame_rate_num = 60;
        mode.frame_rate_den = 1;
        mode.interlaced = false;

        return {{ 0, mode }};
}

void FFmpegCapture::producer_thread_func()
{
        char thread_name[16];
        snprintf(thread_name, sizeof(thread_name), "FFmpeg_C_%d", card_index);
        pthread_setname_np(pthread_self(), thread_name);

        while (!producer_thread_should_quit.should_quit()) {
                string filename_copy;
                {
                        lock_guard<mutex> lock(filename_mu);
                        filename_copy = filename;
                }

                string pathname;
                if (srt_sock == -1) {
                        pathname = search_for_file(filename_copy);
                } else {
                        pathname = description;
                }
                if (pathname.empty()) {
                        send_disconnected_frame();
                        if (play_once) {
                                break;
                        }
                        fprintf(stderr, "%s not found, sleeping one second and trying again...\n", filename_copy.c_str());
                        producer_thread_should_quit.sleep_for(seconds(1));
                        continue;
                }
                should_interrupt = false;
                if (!play_video(pathname)) {
                        // Error.
                        send_disconnected_frame();
                        if (play_once) {
                                break;
                        }
                        fprintf(stderr, "Error when playing %s, sleeping one second and trying again...\n", pathname.c_str());
                        producer_thread_should_quit.sleep_for(seconds(1));
                        continue;
                }

                if (play_once) {
                        send_disconnected_frame();
                        break;
                }

                // Probably just EOF, will exit the loop above on next test.
        }

        if (has_dequeue_callbacks) {
                dequeue_cleanup_callback();
                has_dequeue_callbacks = false;
        }
}

void FFmpegCapture::send_disconnected_frame()
{
        // Send an empty frame to signal that we have no signal anymore.
        FrameAllocator::Frame video_frame = video_frame_allocator->alloc_frame();
        size_t frame_width = width == 0 ? global_flags.width : width;
        size_t frame_height = height == 0 ? global_flags.height : height;
        if (video_frame.data) {
                VideoFormat video_format;
                video_format.width = frame_width;
                video_format.height = frame_height;
                video_format.frame_rate_nom = 60;
                video_format.frame_rate_den = 1;
                video_format.is_connected = false;
                if (pixel_format == bmusb::PixelFormat_8BitBGRA) {
                        video_format.stride = frame_width * 4;
                        video_frame.len = frame_width * frame_height * 4;
                        memset(video_frame.data, 0, video_frame.len);
                } else {
                        video_format.stride = frame_width;
                        current_frame_ycbcr_format.luma_coefficients = YCBCR_REC_709;
                        current_frame_ycbcr_format.full_range = true;
                        current_frame_ycbcr_format.num_levels = 256;
                        current_frame_ycbcr_format.chroma_subsampling_x = 2;
                        current_frame_ycbcr_format.chroma_subsampling_y = 2;
                        current_frame_ycbcr_format.cb_x_position = 0.0f;
                        current_frame_ycbcr_format.cb_y_position = 0.0f;
                        current_frame_ycbcr_format.cr_x_position = 0.0f;
                        current_frame_ycbcr_format.cr_y_position = 0.0f;
                        video_frame.len = frame_width * frame_height * 2;
                        memset(video_frame.data, 0, frame_width * frame_height);
                        memset(video_frame.data + frame_width * frame_height, 128, frame_width * frame_height);  // Valid for both NV12 and planar.
                }

                if (frame_callback != nullptr) {
                        frame_callback(-1, AVRational{1, TIMEBASE}, -1, AVRational{1, TIMEBASE}, timecode++,
                                video_frame, /*video_offset=*/0, video_format,
                                FrameAllocator::Frame(), /*audio_offset=*/0, AudioFormat());
                }
                last_frame_was_connected = false;
        }

        if (play_once) {
                disconnected = true;
                if (card_disconnected_callback != nullptr) {
                        card_disconnected_callback();
                }
        }
}

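// get_format callback for FFmpeg's hardware decoding: FFmpeg hands us the
// list of pixel formats the decoder can output, and we pick the hardware
// format matching the wanted device type if the codec supports it, or else
// fall back to the first software format in the list.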
template<AVHWDeviceType type>
AVPixelFormat get_hw_format(AVCodecContext *ctx, const AVPixelFormat *fmt)
{
        bool found_config_of_right_type = false;
        for (int i = 0;; ++i) {  // Termination condition inside loop.
                const AVCodecHWConfig *config = avcodec_get_hw_config(ctx->codec, i);
                if (config == nullptr) {  // End of list.
                        break;
                }
                if (!(config->methods & AV_CODEC_HW_CONFIG_METHOD_HW_DEVICE_CTX) ||
                    config->device_type != type) {
                        // Not interesting for us.
                        continue;
                }

                // We have a config of the right type, but does it actually support
                // the pixel format we want? (Seemingly, FFmpeg's way of signaling errors
                // is to just replace the pixel format with a software-decoded one,
                // such as yuv420p.)
                found_config_of_right_type = true;
                for (const AVPixelFormat *fmt_ptr = fmt; *fmt_ptr != AV_PIX_FMT_NONE; ++fmt_ptr) {
                        if (config->pix_fmt == *fmt_ptr) {
                                fprintf(stderr, "Initialized '%s' hardware decoding for codec '%s'.\n",
                                        av_hwdevice_get_type_name(type), ctx->codec->name);
                                if (ctx->profile == FF_PROFILE_H264_BASELINE) {
                                        fprintf(stderr, "WARNING: Stream claims to be H.264 Baseline, which is generally poorly supported in hardware decoders.\n");
                                        fprintf(stderr, "         Consider encoding it as Constrained Baseline, Main or High instead.\n");
                                        fprintf(stderr, "         Decoding might fail and fall back to software.\n");
                                }
                                return config->pix_fmt;
                        }
                }
                fprintf(stderr, "Decoder '%s' supports only these pixel formats:", ctx->codec->name);
                unordered_set<AVPixelFormat> seen;
                for (const AVPixelFormat *fmt_ptr = fmt; *fmt_ptr != AV_PIX_FMT_NONE; ++fmt_ptr) {
                        if (!seen.count(*fmt_ptr)) {
                                fprintf(stderr, " %s", av_get_pix_fmt_name(*fmt_ptr));
                                seen.insert(*fmt_ptr);
                        }
                }
                fprintf(stderr, " (wanted %s for hardware acceleration)\n", av_get_pix_fmt_name(config->pix_fmt));
        }

        if (!found_config_of_right_type) {
                fprintf(stderr, "Decoder '%s' does not support device type '%s'.\n", ctx->codec->name, av_hwdevice_get_type_name(type));
        }

        // We found no usable hardware formats, so take the first software format.
        for (const AVPixelFormat *fmt_ptr = fmt; *fmt_ptr != AV_PIX_FMT_NONE; ++fmt_ptr) {
                if ((av_pix_fmt_desc_get(*fmt_ptr)->flags & AV_PIX_FMT_FLAG_HWACCEL) == 0) {
                        fprintf(stderr, "Falling back to software format %s.\n", av_get_pix_fmt_name(*fmt_ptr));
                        return *fmt_ptr;
                }
        }

        // Fallback: Just return anything. (Should never really happen.)
        return fmt[0];
}

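// Open the given path (or SRT socket), set up video and audio decoders with
// hardware acceleration if available, and run the playback loop until EOF,
// an error, an interrupt, or a queued command tells us to stop. Returns
// false on errors (the caller sends a disconnected frame and retries),
// true on EOF or when asked to stop.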
bool FFmpegCapture::play_video(const string &pathname)
{
        // Note: Call before open, not after; otherwise, there's a race.
        // (There is now, too, but it tips the correct way. We could use fstat()
        // if we had the file descriptor.)
        timespec last_modified;
        struct stat buf;
        if (stat(pathname.c_str(), &buf) != 0) {
                // Probably some sort of protocol, so can't stat.
                last_modified.tv_sec = -1;
        } else {
                last_modified = buf.st_mtim;
        }
        last_colorspace = static_cast<AVColorSpace>(-1);
        last_chroma_location = static_cast<AVChromaLocation>(-1);

        AVFormatContextWithCloser format_ctx;
        if (srt_sock == -1) {
                // Regular file (or stream).
                frame_timeout_started = steady_clock::now();
                frame_timeout_valid = true;
                format_ctx = avformat_open_input_unique(pathname.c_str(), /*fmt=*/nullptr,
                        /*options=*/nullptr,
                        AVIOInterruptCB{ &FFmpegCapture::interrupt_cb_thunk, this });
                frame_timeout_valid = false;
        } else {
#ifdef HAVE_SRT
                // SRT socket, already opened.
                const AVInputFormat *mpegts_fmt = av_find_input_format("mpegts");
                format_ctx = avformat_open_input_unique(&FFmpegCapture::read_srt_thunk, this,
                        mpegts_fmt, /*options=*/nullptr,
                        AVIOInterruptCB{ &FFmpegCapture::interrupt_cb_thunk, this });
#else
                assert(false);
#endif
        }
        if (format_ctx == nullptr) {
                fprintf(stderr, "%s: Error opening file\n", pathname.c_str());
                return false;
        }

        if (avformat_find_stream_info(format_ctx.get(), nullptr) < 0) {
                fprintf(stderr, "%s: Error finding stream info\n", pathname.c_str());
                return false;
        }

        int video_stream_index = find_stream_index(format_ctx.get(), AVMEDIA_TYPE_VIDEO);
        if (video_stream_index == -1) {
                fprintf(stderr, "%s: No video stream found\n", pathname.c_str());
                return false;
        }

        int audio_stream_index = find_stream_index(format_ctx.get(), AVMEDIA_TYPE_AUDIO);
        int subtitle_stream_index = find_stream_index(format_ctx.get(), AVMEDIA_TYPE_SUBTITLE);
        has_last_subtitle = false;

        // Open video decoder.
        const AVCodecParameters *video_codecpar = format_ctx->streams[video_stream_index]->codecpar;
        const AVCodec *video_codec = avcodec_find_decoder(video_codecpar->codec_id);

        video_timebase = format_ctx->streams[video_stream_index]->time_base;
        AVCodecContextWithDeleter video_codec_ctx = avcodec_alloc_context3_unique(nullptr);
        if (avcodec_parameters_to_context(video_codec_ctx.get(), video_codecpar) < 0) {
                fprintf(stderr, "%s: Cannot fill video codec parameters\n", pathname.c_str());
                return false;
        }
        if (video_codec == nullptr) {
                fprintf(stderr, "%s: Cannot find video decoder\n", pathname.c_str());
                return false;
        }

        // Seemingly, it's not too easy to make something that just initializes
        // “whatever goes”, so we don't get CUDA or VULKAN or whatever here
        // without enumerating through several different types.
        // VA-API and VDPAU will do for now. We prioritize VDPAU for the
        // simple reason that there's a VA-API-via-VDPAU emulation for NVidia
        // cards that seems to work, but just hangs when trying to transfer the frame.
        //
        // Note that we don't actually check codec support beforehand,
        // so if you have a low-end VDPAU device but a high-end VA-API device,
        // you lose out on the extra codec support from the latter.
        AVBufferRef *hw_device_ctx = nullptr;
        if (av_hwdevice_ctx_create(&hw_device_ctx, AV_HWDEVICE_TYPE_VDPAU, nullptr, nullptr, 0) >= 0) {
                video_codec_ctx->hw_device_ctx = av_buffer_ref(hw_device_ctx);
                video_codec_ctx->get_format = get_hw_format<AV_HWDEVICE_TYPE_VDPAU>;
        } else if (av_hwdevice_ctx_create(&hw_device_ctx, AV_HWDEVICE_TYPE_VAAPI, nullptr, nullptr, 0) >= 0) {
                video_codec_ctx->hw_device_ctx = av_buffer_ref(hw_device_ctx);
                video_codec_ctx->get_format = get_hw_format<AV_HWDEVICE_TYPE_VAAPI>;
        } else {
                fprintf(stderr, "Failed to initialize VA-API or VDPAU for FFmpeg acceleration. Decoding video in software.\n");
        }

        if (avcodec_open2(video_codec_ctx.get(), video_codec, nullptr) < 0) {
                fprintf(stderr, "%s: Cannot open video decoder\n", pathname.c_str());
                return false;
        }
        unique_ptr<AVCodecContext, decltype(avcodec_close)*> video_codec_ctx_cleanup(
                video_codec_ctx.get(), avcodec_close);

        // Used in decode_ycbcr_format().
        is_mjpeg = video_codecpar->codec_id == AV_CODEC_ID_MJPEG;

        // Open audio decoder, if we have audio.
        AVCodecContextWithDeleter audio_codec_ctx;
        if (audio_stream_index != -1) {
                audio_codec_ctx = avcodec_alloc_context3_unique(nullptr);
                const AVCodecParameters *audio_codecpar = format_ctx->streams[audio_stream_index]->codecpar;
                audio_timebase = format_ctx->streams[audio_stream_index]->time_base;
                if (avcodec_parameters_to_context(audio_codec_ctx.get(), audio_codecpar) < 0) {
                        fprintf(stderr, "%s: Cannot fill audio codec parameters\n", pathname.c_str());
                        return false;
                }
                const AVCodec *audio_codec = avcodec_find_decoder(audio_codecpar->codec_id);
                if (audio_codec == nullptr) {
                        fprintf(stderr, "%s: Cannot find audio decoder\n", pathname.c_str());
                        return false;
                }
                if (avcodec_open2(audio_codec_ctx.get(), audio_codec, nullptr) < 0) {
                        fprintf(stderr, "%s: Cannot open audio decoder\n", pathname.c_str());
                        return false;
                }
        }
        unique_ptr<AVCodecContext, decltype(avcodec_close)*> audio_codec_ctx_cleanup(
                audio_codec_ctx.get(), avcodec_close);

        internal_rewind();

        // Main loop.
        bool first_frame = true;
        int consecutive_errors = 0;
        while (!producer_thread_should_quit.should_quit()) {
                if (process_queued_commands(format_ctx.get(), pathname, last_modified, /*rewound=*/nullptr)) {
                        return true;
                }
                if (should_interrupt.load()) {
                        // Check as a failsafe, so that we don't need to rely on avio if we don't have to.
                        return false;
                }
                UniqueFrame audio_frame = audio_frame_allocator->alloc_frame();
                AudioFormat audio_format;

                int64_t audio_pts;
                bool error;
                frame_timeout_started = steady_clock::now();
                frame_timeout_valid = true;
                AVFrameWithDeleter frame = decode_frame(format_ctx.get(), video_codec_ctx.get(), audio_codec_ctx.get(),
                        pathname, video_stream_index, audio_stream_index, subtitle_stream_index, audio_frame.get(), &audio_format, &audio_pts, &error);
                frame_timeout_valid = false;
                if (should_interrupt.load()) {
                        // Abort no matter whether we got a frame or not.
                        return false;
                }
                if (error) {
                        if (++consecutive_errors >= 100) {
                                fprintf(stderr, "More than 100 consecutive error video frames, aborting playback.\n");
                                return false;
                        } else {
                                continue;
                        }
                } else {
                        consecutive_errors = 0;
                }
                if (frame == nullptr) {
                        // EOF. Loop back to the start if we can.
                        if (format_ctx->pb != nullptr && format_ctx->pb->seekable == 0) {
                                // Not seekable (but seemingly, sometimes av_seek_frame() would return 0 anyway,
                                // so don't try).
                                return true;
                        }
                        if (av_seek_frame(format_ctx.get(), /*stream_index=*/-1, /*timestamp=*/0, /*flags=*/0) < 0) {
                                fprintf(stderr, "%s: Rewind failed, not looping.\n", pathname.c_str());
                                return true;
                        }
                        if (video_codec_ctx != nullptr) {
                                avcodec_flush_buffers(video_codec_ctx.get());
                        }
                        if (audio_codec_ctx != nullptr) {
                                avcodec_flush_buffers(audio_codec_ctx.get());
                        }
                        // If the file has changed since last time, return to get it reloaded.
                        // Note that depending on how you move the file into place, you might
                        // end up corrupting the one you're already playing, so this path
                        // might not trigger.
                        if (changed_since(pathname, last_modified)) {
                                return true;
                        }
                        internal_rewind();
                        continue;
                }

                VideoFormat video_format = construct_video_format(frame.get(), video_timebase);
                if (video_format.frame_rate_nom == 0 || video_format.frame_rate_den == 0) {
                        // Invalid frame rate; try constructing it from the previous frame length.
                        // (This is especially important if we are the master card, for SRT,
                        // since it affects audio. Not all senders have good timebases
                        // (e.g., Larix rounds first to timebase 1000 and then multiplies by
                        // 90 from there, it seems), but it's much better to have an oscillating
                        // value than just locking at 60.)
                        if (last_pts != 0 && frame->pts > last_pts) {
                                int64_t pts_diff = frame->pts - last_pts;
                                video_format.frame_rate_nom = video_timebase.den;
                                video_format.frame_rate_den = video_timebase.num * pts_diff;
                        } else {
                                video_format.frame_rate_nom = 60;
                                video_format.frame_rate_den = 1;
                        }
                }
                UniqueFrame video_frame = make_video_frame(frame.get(), pathname, &error);
                if (error) {
                        return false;
                }

                for ( ;; ) {
                        if (last_pts == 0 && pts_origin == 0) {
                                pts_origin = frame->pts;
                        }
                        steady_clock::time_point now = steady_clock::now();
                        if (play_as_fast_as_possible) {
                                video_frame->received_timestamp = now;
                                audio_frame->received_timestamp = now;
                                next_frame_start = now;
                        } else {
                                next_frame_start = compute_frame_start(frame->pts, pts_origin, video_timebase, start, rate);
                                if (first_frame && last_frame_was_connected) {
                                        // If reconnect took more than one second, this is probably a live feed,
                                        // and we should reset the resampler. (Or the rate is really, really low,
                                        // in which case a reset on the first frame is fine anyway.)
                                        if (duration<double>(next_frame_start - last_frame).count() >= 1.0) {
                                                last_frame_was_connected = false;
                                        }
                                }
                                video_frame->received_timestamp = next_frame_start;

                                // The easiest way to get all the rate conversions etc. right is to move the
                                // audio PTS into the video PTS timebase and go from there. (We'll get some
                                // rounding issues, but they should not be a big problem.)
                                int64_t audio_pts_as_video_pts = av_rescale_q(audio_pts, audio_timebase, video_timebase);
                                audio_frame->received_timestamp = compute_frame_start(audio_pts_as_video_pts, pts_origin, video_timebase, start, rate);

                                if (audio_frame->len != 0) {
                                        // The received timestamps in Nageru are measured after we've just received the frame.
                                        // However, pts (especially audio pts) is at the _beginning_ of the frame.
                                        // If we have locked audio, the distinction doesn't really matter, as pts is
                                        // on a relative scale and a fixed offset is fine. But if we don't, we will have
                                        // a different number of samples each time, which will cause huge audio jitter
                                        // and throw off the resampler.
                                        //
                                        // In a sense, we should have compensated by adding the frame and audio lengths
                                        // to video_frame->received_timestamp and audio_frame->received_timestamp respectively,
                                        // but that would mean extra waiting in sleep_until(). All we need is that they
                                        // are correct relative to each other, though (and to the other frames we send),
                                        // so just align the end of the audio frame, and we're fine.
                                        size_t num_samples = (audio_frame->len * 8) / audio_format.bits_per_sample / audio_format.num_channels;
                                        double offset = double(num_samples) / OUTPUT_FREQUENCY -
                                                double(video_format.frame_rate_den) / video_format.frame_rate_nom;
                                        audio_frame->received_timestamp += duration_cast<steady_clock::duration>(duration<double>(offset));
                                }

                                if (duration<double>(now - next_frame_start).count() >= 0.1) {
                                        // If we don't have enough CPU to keep up, or if we have a live stream
                                        // where the initial origin was somehow wrong, we could be behind indefinitely.
                                        // In particular, this will give the audio resampler problems as it tries
                                        // to speed up to reduce the delay, hitting the low end of the buffer every time.
                                        fprintf(stderr, "%s: Playback %.0f ms behind, resetting time scale\n",
                                                pathname.c_str(),
                                                1e3 * duration<double>(now - next_frame_start).count());
                                        pts_origin = frame->pts;
                                        start = next_frame_start = now;
                                        timecode += TYPICAL_FPS * 2 + 1;
                                }
                        }
                        bool finished_wakeup;
                        if (play_as_fast_as_possible) {
                                finished_wakeup = !producer_thread_should_quit.should_quit();
                        } else {
                                finished_wakeup = producer_thread_should_quit.sleep_until(next_frame_start);
                        }
                        if (finished_wakeup) {
                                if (audio_frame->len > 0) {
                                        assert(audio_pts != -1);
                                }
                                if (!last_frame_was_connected) {
                                        // We're recovering from an error (or really slow load, see above).
                                        // Make sure to get the audio resampler reset. (This is a hack;
                                        // ideally, the frame callback should just accept a way to signal
                                        // audio discontinuity.)
                                        timecode += TYPICAL_FPS * 2 + 1;
                                }
                                last_neutral_color = get_neutral_color(frame->metadata);
                                if (frame_callback != nullptr) {
                                        frame_callback(frame->pts, video_timebase, audio_pts, audio_timebase, timecode++,
                                                video_frame.get_and_release(), 0, video_format,
                                                audio_frame.get_and_release(), 0, audio_format);
                                }
                                first_frame = false;
                                last_frame = steady_clock::now();
                                last_frame_was_connected = true;
                                break;
                        } else {
                                if (producer_thread_should_quit.should_quit()) break;

                                bool rewound = false;
                                if (process_queued_commands(format_ctx.get(), pathname, last_modified, &rewound)) {
                                        return true;
                                }
                                // If we just rewound, drop this frame on the floor and be done.
                                if (rewound) {
                                        break;
                                }
                                // OK, we didn't, so probably a rate change. Recalculate next_frame_start,
                                // but if it's now in the past, we'll reset the origin, so that we don't
                                // generate a huge backlog of frames that we need to run through quickly.
                                next_frame_start = compute_frame_start(frame->pts, pts_origin, video_timebase, start, rate);
                                steady_clock::time_point now = steady_clock::now();
                                if (next_frame_start < now) {
                                        pts_origin = frame->pts;
                                        start = next_frame_start = now;
                                }
                        }
                }
                last_pts = frame->pts;
        }
        return true;
}

void FFmpegCapture::internal_rewind()
{
        pts_origin = last_pts = 0;
        start = next_frame_start = steady_clock::now();
}

bool FFmpegCapture::process_queued_commands(AVFormatContext *format_ctx, const std::string &pathname, timespec last_modified, bool *rewound)
{
        // Process any queued commands from other threads.
        vector<QueuedCommand> commands;
        {
                lock_guard<mutex> lock(queue_mu);
                swap(commands, command_queue);
        }
        for (const QueuedCommand &cmd : commands) {
                switch (cmd.command) {
                case QueuedCommand::REWIND:
                        if (av_seek_frame(format_ctx, /*stream_index=*/-1, /*timestamp=*/0, /*flags=*/0) < 0) {
                                fprintf(stderr, "%s: Rewind failed.\n", pathname.c_str());
                        }
                        // If the file has changed since last time, return to get it reloaded.
                        // Note that depending on how you move the file into place, you might
                        // end up corrupting the one you're already playing, so this path
                        // might not trigger.
                        if (changed_since(pathname, last_modified)) {
                                return true;
                        }
                        internal_rewind();
                        if (rewound != nullptr) {
                                *rewound = true;
                        }
                        break;

                case QueuedCommand::CHANGE_RATE:
                        // Change the origin to the last played frame.
                        start = compute_frame_start(last_pts, pts_origin, video_timebase, start, rate);
                        pts_origin = last_pts;
                        rate = cmd.new_rate;
                        play_as_fast_as_possible = (rate >= 10.0);
                        break;
                }
        }
        return false;
}

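// Read and demux packets until a full video frame has been decoded (or EOF).
// Audio packets encountered along the way are decoded and resampled into
// *audio_frame as we go; *audio_pts is set from the first audio frame seen.
// Returns nullptr with *error == false on EOF, or with *error == true on
// decode errors.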
AVFrameWithDeleter FFmpegCapture::decode_frame(AVFormatContext *format_ctx, AVCodecContext *video_codec_ctx, AVCodecContext *audio_codec_ctx,
        const std::string &pathname, int video_stream_index, int audio_stream_index, int subtitle_stream_index,
        FrameAllocator::Frame *audio_frame, AudioFormat *audio_format, int64_t *audio_pts, bool *error)
{
        *error = false;

        // Read packets until we have a frame or there are none left.
        bool frame_finished = false;
        AVFrameWithDeleter audio_avframe = av_frame_alloc_unique();
        AVFrameWithDeleter video_avframe = av_frame_alloc_unique();
        bool eof = false;
        *audio_pts = -1;
        bool has_audio = false;
        do {
                AVPacketWithDeleter pkt = av_packet_alloc_unique();
                pkt->data = nullptr;
                pkt->size = 0;
                if (av_read_frame(format_ctx, pkt.get()) == 0) {
                        if (pkt->stream_index == audio_stream_index && audio_callback != nullptr) {
                                audio_callback(pkt.get(), format_ctx->streams[audio_stream_index]->time_base);
                        }
                        if (pkt->stream_index == video_stream_index && video_callback != nullptr) {
                                video_callback(pkt.get(), format_ctx->streams[video_stream_index]->time_base);
                        }
                        if (pkt->stream_index == video_stream_index && global_flags.transcode_video) {
                                if (avcodec_send_packet(video_codec_ctx, pkt.get()) < 0) {
                                        fprintf(stderr, "%s: Cannot send packet to video codec.\n", pathname.c_str());
                                        *error = true;
                                        return AVFrameWithDeleter(nullptr);
                                }
                        } else if (pkt->stream_index == audio_stream_index && global_flags.transcode_audio) {
                                has_audio = true;
                                if (avcodec_send_packet(audio_codec_ctx, pkt.get()) < 0) {
                                        fprintf(stderr, "%s: Cannot send packet to audio codec.\n", pathname.c_str());
                                        *error = true;
                                        return AVFrameWithDeleter(nullptr);
                                }
                        } else if (pkt->stream_index == subtitle_stream_index) {
                                last_subtitle = string(reinterpret_cast<const char *>(pkt->data), pkt->size);
                                has_last_subtitle = true;
                        }
                } else {
                        eof = true;  // Or error, but ignore that for the time being.
                }

                // Decode audio, if any.
                if (has_audio) {
                        for ( ;; ) {
                                int err = avcodec_receive_frame(audio_codec_ctx, audio_avframe.get());
                                if (err == 0) {
                                        if (*audio_pts == -1) {
                                                *audio_pts = audio_avframe->pts;
                                        }
                                        convert_audio(audio_avframe.get(), audio_frame, audio_format);
                                } else if (err == AVERROR(EAGAIN)) {
                                        break;
                                } else {
                                        fprintf(stderr, "%s: Cannot receive frame from audio codec.\n", pathname.c_str());
                                        *error = true;
                                        return AVFrameWithDeleter(nullptr);
                                }
                        }
                }

                // Decode video, if we have a frame.
                int err = avcodec_receive_frame(video_codec_ctx, video_avframe.get());
                if (err == 0) {
                        if (video_avframe->format == AV_PIX_FMT_VAAPI ||
                            video_avframe->format == AV_PIX_FMT_VDPAU) {
                                // Get the frame down to the CPU. (TODO: See if we can keep it
                                // on the GPU all the way, since it will be going up again later.
                                // However, this only works if the OpenGL GPU is the same one.)
                                AVFrameWithDeleter sw_frame = av_frame_alloc_unique();
                                int err = av_hwframe_transfer_data(sw_frame.get(), video_avframe.get(), 0);
                                if (err != 0) {
                                        fprintf(stderr, "%s: Cannot transfer hardware video frame to software.\n", pathname.c_str());
                                        *error = true;
                                        return AVFrameWithDeleter(nullptr);
                                }
                                sw_frame->pts = video_avframe->pts;
                                sw_frame->pkt_duration = video_avframe->pkt_duration;
                                video_avframe = move(sw_frame);
                        }
                        frame_finished = true;
                        break;
                } else if (err != AVERROR(EAGAIN)) {
                        fprintf(stderr, "%s: Cannot receive frame from video codec.\n", pathname.c_str());
                        *error = true;
                        return AVFrameWithDeleter(nullptr);
                }
        } while (!eof);

        if (frame_finished)
                return video_avframe;
        else
                return AVFrameWithDeleter(nullptr);
}

void FFmpegCapture::convert_audio(const AVFrame *audio_avframe, FrameAllocator::Frame *audio_frame, AudioFormat *audio_format)
{
        // Decide on a format. If there already is one in this audio frame,
        // we're pretty much forced to use it. If not, we try to find an exact match.
        // If that still doesn't work, we default to 32-bit signed chunked
        // (float would be nice, but there's really no way to signal that yet).
        AVSampleFormat dst_format;
        if (audio_format->bits_per_sample == 0) {
                switch (audio_avframe->format) {
                case AV_SAMPLE_FMT_S16:
                case AV_SAMPLE_FMT_S16P:
                        audio_format->bits_per_sample = 16;
                        dst_format = AV_SAMPLE_FMT_S16;
                        break;
                case AV_SAMPLE_FMT_S32:
                case AV_SAMPLE_FMT_S32P:
                default:
                        audio_format->bits_per_sample = 32;
                        dst_format = AV_SAMPLE_FMT_S32;
                        break;
                }
        } else if (audio_format->bits_per_sample == 16) {
                dst_format = AV_SAMPLE_FMT_S16;
        } else if (audio_format->bits_per_sample == 32) {
                dst_format = AV_SAMPLE_FMT_S32;
        } else {
                assert(false);
        }
        audio_format->num_channels = 2;

        AVChannelLayout channel_layout = audio_avframe->ch_layout;
        if (!av_channel_layout_check(&channel_layout) ||
            channel_layout.order == AV_CHANNEL_ORDER_UNSPEC) {
                av_channel_layout_default(&channel_layout, audio_avframe->ch_layout.nb_channels);
        }

        if (resampler == nullptr ||
            audio_avframe->format != last_src_format ||
            dst_format != last_dst_format ||
            av_channel_layout_compare(&channel_layout, &last_channel_layout) != 0 ||
            audio_avframe->sample_rate != last_sample_rate) {
                // TODO: When we get C++20, use AV_CHANNEL_LAYOUT_STEREO_DOWNMIX.
                AVChannelLayout stereo_downmix;
                stereo_downmix.order = AV_CHANNEL_ORDER_NATIVE;
                stereo_downmix.nb_channels = 2;
                stereo_downmix.u.mask = AV_CH_LAYOUT_STEREO_DOWNMIX;

                swr_free(&resampler);
                resampler = nullptr;
                int err = swr_alloc_set_opts2(&resampler,
                                              /*out_ch_layout=*/&stereo_downmix,
                                              /*out_sample_fmt=*/dst_format,
                                              /*out_sample_rate=*/OUTPUT_FREQUENCY,
                                              /*in_ch_layout=*/&channel_layout,
                                              /*in_sample_fmt=*/AVSampleFormat(audio_avframe->format),
                                              /*in_sample_rate=*/audio_avframe->sample_rate,
                                              /*log_offset=*/0,
                                              /*log_ctx=*/nullptr);

                if (err != 0 || resampler == nullptr) {
                        fprintf(stderr, "Allocating resampler failed.\n");
                        abort();
                }

                if (swr_init(resampler) < 0) {
                        fprintf(stderr, "Could not open resample context.\n");
                        abort();
                }

                last_src_format = AVSampleFormat(audio_avframe->format);
                last_dst_format = dst_format;
                last_channel_layout = channel_layout;
                last_sample_rate = audio_avframe->sample_rate;
        }

        size_t bytes_per_sample = (audio_format->bits_per_sample / 8) * 2;
        size_t num_samples_room = (audio_frame->size - audio_frame->len) / bytes_per_sample;

        uint8_t *data = audio_frame->data + audio_frame->len;
        int out_samples = swr_convert(resampler, &data, num_samples_room,
                const_cast<const uint8_t **>(audio_avframe->data), audio_avframe->nb_samples);
        if (out_samples < 0) {
                fprintf(stderr, "Audio conversion failed.\n");
                abort();
        }

        audio_frame->len += out_samples * bytes_per_sample;
}

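// Fill in a bmusb VideoFormat for the given frame. The frame rate is derived
// from the frame's duration: a frame lasting pkt_duration ticks of
// video_timebase gives fps = timebase.den / (pkt_duration * timebase.num).
// (The caller patches up the case where this comes out as zero.)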
VideoFormat FFmpegCapture::construct_video_format(const AVFrame *frame, AVRational video_timebase)
{
        VideoFormat video_format;
        video_format.width = frame_width(frame);
        video_format.height = frame_height(frame);
        if (pixel_format == bmusb::PixelFormat_8BitBGRA) {
                video_format.stride = frame_width(frame) * 4;
        } else if (pixel_format == FFmpegCapture::PixelFormat_NV12) {
                video_format.stride = frame_width(frame);
        } else {
                assert(pixel_format == bmusb::PixelFormat_8BitYCbCrPlanar);
                video_format.stride = frame_width(frame);
        }
        video_format.frame_rate_nom = video_timebase.den;
        video_format.frame_rate_den = frame->pkt_duration * video_timebase.num;
        video_format.has_signal = true;
        video_format.is_connected = true;
        return video_format;
}

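// Convert the decoded AVFrame into a frame from our own allocator, scaling
// and/or converting pixel format with swscale as needed. The sws context is
// cached and only rebuilt when the input dimensions or format change.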
UniqueFrame FFmpegCapture::make_video_frame(const AVFrame *frame, const string &pathname, bool *error)
{
        *error = false;

        UniqueFrame video_frame(video_frame_allocator->alloc_frame());
        if (video_frame->data == nullptr) {
                return video_frame;
        }

        if (sws_ctx == nullptr ||
            sws_last_width != frame->width ||
            sws_last_height != frame->height ||
            sws_last_src_format != frame->format) {
                sws_dst_format = decide_dst_format(AVPixelFormat(frame->format), pixel_format);
                sws_ctx.reset(
                        sws_getContext(frame->width, frame->height, AVPixelFormat(frame->format),
                                frame_width(frame), frame_height(frame), sws_dst_format,
                                SWS_BICUBIC, nullptr, nullptr, nullptr));
                sws_last_width = frame->width;
                sws_last_height = frame->height;
                sws_last_src_format = frame->format;
        }
        if (sws_ctx == nullptr) {
                fprintf(stderr, "%s: Could not create scaler context\n", pathname.c_str());
                *error = true;
                return video_frame;
        }

        uint8_t *pic_data[4] = { nullptr, nullptr, nullptr, nullptr };
        int linesizes[4] = { 0, 0, 0, 0 };
        if (pixel_format == bmusb::PixelFormat_8BitBGRA) {
                pic_data[0] = video_frame->data;
                linesizes[0] = frame_width(frame) * 4;
                video_frame->len = (frame_width(frame) * 4) * frame_height(frame);
        } else if (pixel_format == PixelFormat_NV12) {
                pic_data[0] = video_frame->data;
                linesizes[0] = frame_width(frame);

                pic_data[1] = pic_data[0] + frame_width(frame) * frame_height(frame);
                linesizes[1] = frame_width(frame);

                video_frame->len = (frame_width(frame) * 2) * frame_height(frame);

                const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(sws_dst_format);
                current_frame_ycbcr_format = decode_ycbcr_format(desc, frame, is_mjpeg, &last_colorspace, &last_chroma_location);
        } else {
                assert(pixel_format == bmusb::PixelFormat_8BitYCbCrPlanar);
                const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(sws_dst_format);

                int chroma_width = AV_CEIL_RSHIFT(int(frame_width(frame)), desc->log2_chroma_w);
                int chroma_height = AV_CEIL_RSHIFT(int(frame_height(frame)), desc->log2_chroma_h);

                pic_data[0] = video_frame->data;
                linesizes[0] = frame_width(frame);

                pic_data[1] = pic_data[0] + frame_width(frame) * frame_height(frame);
                linesizes[1] = chroma_width;

                pic_data[2] = pic_data[1] + chroma_width * chroma_height;
                linesizes[2] = chroma_width;

                video_frame->len = frame_width(frame) * frame_height(frame) + 2 * chroma_width * chroma_height;

                current_frame_ycbcr_format = decode_ycbcr_format(desc, frame, is_mjpeg, &last_colorspace, &last_chroma_location);
        }

        // FIXME: Currently, if the video is too high-res for one of the allocated
        // frames, we simply refuse to scale it here to avoid crashes. It would be better
        // if we could somehow signal getting larger frames, especially as 4K is a thing now.
        if (video_frame->len > FRAME_SIZE) {
                fprintf(stderr, "%s: Decoded frame would be larger than supported FRAME_SIZE (%zu > %u), not decoding.\n", pathname.c_str(), video_frame->len, FRAME_SIZE);
                *error = true;
                return video_frame;
        }

        sws_scale(sws_ctx.get(), frame->data, frame->linesize, 0, frame->height, pic_data, linesizes);

        return video_frame;
}

int FFmpegCapture::interrupt_cb_thunk(void *opaque)
{
        return reinterpret_cast<FFmpegCapture *>(opaque)->interrupt_cb();
}

int FFmpegCapture::interrupt_cb()
{
        // If ten seconds go by without anything happening, we assume that
        // we are in a network stream that died and FFmpeg just didn't
        // pick it up (or perhaps it just hung, keeping the connection open).
        // Called back approximately every 100 ms if something is hanging,
        // so we get more than enough accuracy for our purposes.
        if (!should_interrupt && frame_timeout_valid &&
            duration<double>(steady_clock::now() - frame_timeout_started).count() >= 10.0) {
                string filename_copy;
                {
                        lock_guard<mutex> lock(filename_mu);
                        filename_copy = filename;
                }
                fprintf(stderr, "%s: No frame for more than 10 seconds, restarting stream.\n", filename_copy.c_str());
                should_interrupt = true;
        }
        return should_interrupt.load();
}

unsigned FFmpegCapture::frame_width(const AVFrame *frame) const
{
        if (width == 0) {
                return frame->width;
        } else {
                return width;
        }
}

unsigned FFmpegCapture::frame_height(const AVFrame *frame) const
{
        if (height == 0) {
                return frame->height;
        } else {
                return height;
        }
}

#ifdef HAVE_SRT
int FFmpegCapture::read_srt_thunk(void *opaque, uint8_t *buf, int buf_size)
{
        return reinterpret_cast<FFmpegCapture *>(opaque)->read_srt(buf, buf_size);
}

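// AVIO read callback: pull the next message off the SRT socket into FFmpeg's
// buffer. Returns the number of bytes read, or a negative value (SRT_ERROR)
// on error, which FFmpeg treats as a read error on the "file".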
int FFmpegCapture::read_srt(uint8_t *buf, int buf_size)
{
        SRT_MSGCTRL mc = srt_msgctrl_default;
        return srt_recvmsg2(srt_sock, reinterpret_cast<char *>(buf), buf_size, &mc);
}
#endif