#include "ffmpeg_capture.h"

#include <assert.h>
#include <pthread.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/stat.h>
#include <unistd.h>

extern "C" {
#include <libavcodec/avcodec.h>
#include <libavformat/avformat.h>
#include <libavutil/avutil.h>
#include <libavutil/error.h>
#include <libavutil/frame.h>
#include <libavutil/imgutils.h>
#include <libavutil/mem.h>
#include <libavutil/pixfmt.h>
#include <libavutil/opt.h>
#include <libswscale/swscale.h>
}

#include <chrono>
#include <cstdint>
#include <utility>
#include <vector>
#include <unordered_set>

#include <Eigen/Core>
#include <Eigen/LU>
#include <movit/colorspace_conversion_effect.h>

#include "bmusb/bmusb.h"
#include "shared/ffmpeg_raii.h"
#include "ffmpeg_util.h"
#include "flags.h"
#include "image_input.h"
#include "ref_counted_frame.h"
#include "shared/timebase.h"

#ifdef HAVE_SRT
#include <srt/srt.h>
#endif

using namespace std;
using namespace std::chrono;
using namespace bmusb;
using namespace movit;
using namespace Eigen;

namespace {

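// Compute the wall-clock time at which a frame with the given PTS should be
// presented: the PTS delta from <pts_origin> is converted to seconds through
// the stream's timebase, divided by the playback rate, and added to <origin>.
// E.g., with a 1/90000 timebase and rate 1.0, a PTS 3003 ticks past the
// origin lands 3003/90000 ≈ 33.4 ms after <origin>.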
steady_clock::time_point compute_frame_start(int64_t frame_pts, int64_t pts_origin, const AVRational &video_timebase, const steady_clock::time_point &origin, double rate)
{
	const duration<double> pts((frame_pts - pts_origin) * double(video_timebase.num) / double(video_timebase.den));
	return origin + duration_cast<steady_clock::duration>(pts / rate);
}

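// Whether the file at <pathname> has been modified since the timestamp <ts>
// (as captured by an earlier stat()). A negative tv_sec means “don't know”,
// e.g. for a non-file protocol, and always compares as unchanged.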
bool changed_since(const std::string &pathname, const timespec &ts)
{
	if (ts.tv_sec < 0) {
		return false;
	}
	struct stat buf;
	if (stat(pathname.c_str(), &buf) != 0) {
		fprintf(stderr, "%s: Couldn't check for new version, leaving the old in place.\n", pathname.c_str());
		return false;
	}
	return (buf.st_mtim.tv_sec != ts.tv_sec || buf.st_mtim.tv_nsec != ts.tv_nsec);
}

bool is_full_range(const AVPixFmtDescriptor *desc)
{
	// This is horrible, but there's no better way that I know of.
	return (strchr(desc->name, 'j') != nullptr);
}

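// Pick the pixel format we ask swscale to convert into, given the source
// format and the bmusb pixel format the rest of the pipeline wants.
// For BGRA and NV12 the answer is fixed; for planar Y'CbCr, we search for
// the cheapest 8-bit planar format that doesn't lose chroma resolution.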
AVPixelFormat decide_dst_format(AVPixelFormat src_format, bmusb::PixelFormat dst_format_type)
{
	if (dst_format_type == bmusb::PixelFormat_8BitBGRA) {
		return AV_PIX_FMT_BGRA;
	}
	if (dst_format_type == FFmpegCapture::PixelFormat_NV12) {
		return AV_PIX_FMT_NV12;
	}

	assert(dst_format_type == bmusb::PixelFormat_8BitYCbCrPlanar);

	// If this is a non-Y'CbCr format, just convert to 4:4:4 Y'CbCr
	// and be done with it. It's too strange to spend a lot of time on.
	// (Let's hope there's no alpha.)
	const AVPixFmtDescriptor *src_desc = av_pix_fmt_desc_get(src_format);
	if (src_desc == nullptr ||
	    src_desc->nb_components != 3 ||
	    (src_desc->flags & AV_PIX_FMT_FLAG_RGB)) {
		return AV_PIX_FMT_YUV444P;
	}

	// The best for us would be Cb and Cr interleaved, but FFmpeg doesn't
	// support that except in the special case of NV12, which was already
	// handled above; so here, we go fully planar even if the source is NV12.
	// Thus, look for the closest (but no worse) 8-bit planar Y'CbCr format
	// that matches in color range. (This will also include the case of
	// the source format already being acceptable.)
	bool src_full_range = is_full_range(src_desc);
	const char *best_format = "yuv444p";
	unsigned best_score = numeric_limits<unsigned>::max();
	for (const AVPixFmtDescriptor *desc = av_pix_fmt_desc_next(nullptr);
	     desc;
	     desc = av_pix_fmt_desc_next(desc)) {
		// Find planar Y'CbCr formats only.
		if (desc->nb_components != 3) continue;
		if (desc->flags & AV_PIX_FMT_FLAG_RGB) continue;
		if (!(desc->flags & AV_PIX_FMT_FLAG_PLANAR)) continue;
		if (desc->comp[0].plane != 0 ||
		    desc->comp[1].plane != 1 ||
		    desc->comp[2].plane != 2) continue;

		// 8-bit formats only.
		if (desc->flags & AV_PIX_FMT_FLAG_BE) continue;
		if (desc->comp[0].depth != 8) continue;

		// Same or better chroma resolution only.
		int chroma_w_diff = src_desc->log2_chroma_w - desc->log2_chroma_w;
		int chroma_h_diff = src_desc->log2_chroma_h - desc->log2_chroma_h;
		if (chroma_w_diff < 0 || chroma_h_diff < 0)
			continue;

		// Matching full/limited range only.
		if (is_full_range(desc) != src_full_range)
			continue;

		// Pick something with as little excess chroma resolution as possible.
		unsigned score = (1 << (chroma_w_diff)) << chroma_h_diff;
		if (score < best_score) {
			best_score = score;
			best_format = desc->name;
		}
	}
	return av_get_pix_fmt(best_format);
}

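// Translate FFmpeg's per-frame colorspace and chroma-location metadata into
// Movit's YCbCrFormat. Unknown enums are warned about only when they change
// from the previous frame (tracked through <last_colorspace> and
// <last_chroma_location>), so a stream with odd metadata doesn't spam stderr.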
YCbCrFormat decode_ycbcr_format(const AVPixFmtDescriptor *desc, const AVFrame *frame, bool is_mjpeg, AVColorSpace *last_colorspace, AVChromaLocation *last_chroma_location)
{
	YCbCrFormat format;
	AVColorSpace colorspace = frame->colorspace;
	switch (colorspace) {
	case AVCOL_SPC_BT709:
		format.luma_coefficients = YCBCR_REC_709;
		break;
	case AVCOL_SPC_BT470BG:
	case AVCOL_SPC_SMPTE170M:
	case AVCOL_SPC_SMPTE240M:
		format.luma_coefficients = YCBCR_REC_601;
		break;
	case AVCOL_SPC_BT2020_NCL:
		format.luma_coefficients = YCBCR_REC_2020;
		break;
	case AVCOL_SPC_UNSPECIFIED:
		format.luma_coefficients = (frame->height >= 720 ? YCBCR_REC_709 : YCBCR_REC_601);
		break;
	default:
		if (colorspace != *last_colorspace) {
			fprintf(stderr, "Unknown Y'CbCr coefficient enum %d from FFmpeg; choosing Rec. 709.\n",
				colorspace);
		}
		format.luma_coefficients = YCBCR_REC_709;
		break;
	}
	*last_colorspace = colorspace;

	format.full_range = is_full_range(desc);
	format.num_levels = 1 << desc->comp[0].depth;
	format.chroma_subsampling_x = 1 << desc->log2_chroma_w;
	format.chroma_subsampling_y = 1 << desc->log2_chroma_h;

	switch (frame->chroma_location) {
	case AVCHROMA_LOC_LEFT:
		format.cb_x_position = 0.0;
		format.cb_y_position = 0.5;
		break;
	case AVCHROMA_LOC_CENTER:
		format.cb_x_position = 0.5;
		format.cb_y_position = 0.5;
		break;
	case AVCHROMA_LOC_TOPLEFT:
		format.cb_x_position = 0.0;
		format.cb_y_position = 0.0;
		break;
	case AVCHROMA_LOC_TOP:
		format.cb_x_position = 0.5;
		format.cb_y_position = 0.0;
		break;
	case AVCHROMA_LOC_BOTTOMLEFT:
		format.cb_x_position = 0.0;
		format.cb_y_position = 1.0;
		break;
	case AVCHROMA_LOC_BOTTOM:
		format.cb_x_position = 0.5;
		format.cb_y_position = 1.0;
		break;
	default:
		if (frame->chroma_location != *last_chroma_location) {
			fprintf(stderr, "Unknown chroma location coefficient enum %d from FFmpeg; choosing center.\n",
				frame->chroma_location);
		}
		format.cb_x_position = 0.5;
		format.cb_y_position = 0.5;
		break;
	}
	*last_chroma_location = frame->chroma_location;

	if (is_mjpeg && !format.full_range) {
		// Limited-range MJPEG is only detected by FFmpeg whenever a special
		// JPEG comment is set, which means that in practice, the stream is
		// almost certainly generated by Futatabi. Override FFmpeg's forced
		// MJPEG defaults (it disregards the values set in the mux) with what
		// Futatabi sets.
		format.luma_coefficients = YCBCR_REC_709;
		format.cb_x_position = 0.0;
		format.cb_y_position = 0.5;
	}

	format.cr_x_position = format.cb_x_position;
	format.cr_y_position = format.cb_y_position;
	return format;
}

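// Parse the "WhitePoint" metadata entry (written as "x_nom:x_den,y_nom:y_den"
// chromaticity coordinates), setting z = 1 - x - y and converting through the
// inverse sRGB RGB-to-XYZ matrix to get an RGB neutral-color triplet.
// Returns pure white (no correction) if the entry is missing or malformed.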
RGBTriplet get_neutral_color(AVDictionary *metadata)
{
	if (metadata == nullptr) {
		return RGBTriplet(1.0f, 1.0f, 1.0f);
	}
	AVDictionaryEntry *entry = av_dict_get(metadata, "WhitePoint", nullptr, 0);
	if (entry == nullptr) {
		return RGBTriplet(1.0f, 1.0f, 1.0f);
	}

	unsigned x_nom, x_den, y_nom, y_den;
	if (sscanf(entry->value, " %u:%u , %u:%u", &x_nom, &x_den, &y_nom, &y_den) != 4) {
		fprintf(stderr, "WARNING: Unable to parse white point '%s', using default white point\n", entry->value);
		return RGBTriplet(1.0f, 1.0f, 1.0f);
	}

	double x = double(x_nom) / x_den;
	double y = double(y_nom) / y_den;
	double z = 1.0 - x - y;

	Matrix3d rgb_to_xyz_matrix = movit::ColorspaceConversionEffect::get_xyz_matrix(COLORSPACE_sRGB);
	Vector3d rgb = rgb_to_xyz_matrix.inverse() * Vector3d(x, y, z);

	return RGBTriplet(rgb[0], rgb[1], rgb[2]);
}

}  // namespace

FFmpegCapture::FFmpegCapture(const string &filename, unsigned width, unsigned height)
	: filename(filename), width(width), height(height), video_timebase{1, 1}
{
	description = "Video: " + filename;

	last_frame = steady_clock::now();

	avformat_network_init();  // In case someone wants this.
}

#ifdef HAVE_SRT
FFmpegCapture::FFmpegCapture(int srt_sock, const string &stream_id)
	: srt_sock(srt_sock),
	  width(0),  // Don't resize; SRT streams typically have stable resolution, and should behave much like regular cards in general.
	  height(0),
	  pixel_format(bmusb::PixelFormat_8BitYCbCrPlanar),
	  video_timebase{1, 1}
{
	if (stream_id.empty()) {
		description = "SRT stream";
	} else {
		description = stream_id;
	}
	play_as_fast_as_possible = true;
	play_once = true;
	last_frame = steady_clock::now();
}
#endif

FFmpegCapture::~FFmpegCapture()
{
	if (has_dequeue_callbacks) {
		dequeue_cleanup_callback();
	}
	swr_free(&resampler);
#ifdef HAVE_SRT
	if (srt_sock != -1) {
		srt_close(srt_sock);
	}
#endif
}

void FFmpegCapture::configure_card()
{
	if (video_frame_allocator == nullptr) {
		owned_video_frame_allocator.reset(new MallocFrameAllocator(FRAME_SIZE, NUM_QUEUED_VIDEO_FRAMES));
		set_video_frame_allocator(owned_video_frame_allocator.get());
	}
	if (audio_frame_allocator == nullptr) {
		// Audio can come out in pretty large chunks, so increase from the default 1 MB.
		owned_audio_frame_allocator.reset(new MallocFrameAllocator(1 << 20, NUM_QUEUED_AUDIO_FRAMES));
		set_audio_frame_allocator(owned_audio_frame_allocator.get());
	}
}

void FFmpegCapture::start_bm_capture()
{
	if (running) {
		return;
	}
	running = true;
	producer_thread_should_quit.unquit();
	producer_thread = thread(&FFmpegCapture::producer_thread_func, this);
}

void FFmpegCapture::stop_dequeue_thread()
{
	if (!running) {
		return;
	}
	running = false;
	producer_thread_should_quit.quit();
	producer_thread.join();
}

std::map<uint32_t, VideoMode> FFmpegCapture::get_available_video_modes() const
{
	// Note: This will never really be shown in the UI.
	VideoMode mode;

	char buf[256];
	snprintf(buf, sizeof(buf), "%ux%u", sws_last_width, sws_last_height);
	mode.name = buf;

	mode.autodetect = false;
	mode.width = sws_last_width;
	mode.height = sws_last_height;
	mode.frame_rate_num = 60;
	mode.frame_rate_den = 1;
	mode.interlaced = false;

	return {{ 0, mode }};
}

void FFmpegCapture::producer_thread_func()
{
	char thread_name[16];
	snprintf(thread_name, sizeof(thread_name), "FFmpeg_C_%d", card_index);
	pthread_setname_np(pthread_self(), thread_name);

	while (!producer_thread_should_quit.should_quit()) {
		string filename_copy;
		{
			lock_guard<mutex> lock(filename_mu);
			filename_copy = filename;
		}

		string pathname;
		if (srt_sock == -1) {
			pathname = search_for_file(filename_copy);
		} else {
			pathname = description;
		}
		if (pathname.empty()) {
			send_disconnected_frame();
			if (play_once) {
				break;
			}
			producer_thread_should_quit.sleep_for(seconds(1));
			fprintf(stderr, "%s not found, sleeping one second and trying again...\n", filename_copy.c_str());
			continue;
		}
		should_interrupt = false;
		if (!play_video(pathname)) {
			// Error.
			send_disconnected_frame();
			if (play_once) {
				break;
			}
			fprintf(stderr, "Error when playing %s, sleeping one second and trying again...\n", pathname.c_str());
			producer_thread_should_quit.sleep_for(seconds(1));
			continue;
		}

		if (play_once) {
			send_disconnected_frame();
			break;
		}

		// Probably just EOF, will exit the loop above on next test.
	}

	if (has_dequeue_callbacks) {
		dequeue_cleanup_callback();
		has_dequeue_callbacks = false;
	}
}

void FFmpegCapture::send_disconnected_frame()
{
	// Send an empty frame to signal that we have no signal anymore.
	FrameAllocator::Frame video_frame = video_frame_allocator->alloc_frame();
	size_t frame_width = width == 0 ? global_flags.width : width;
	size_t frame_height = height == 0 ? global_flags.height : height;
	if (video_frame.data) {
		VideoFormat video_format;
		video_format.width = frame_width;
		video_format.height = frame_height;
		video_format.frame_rate_nom = 60;
		video_format.frame_rate_den = 1;
		video_format.is_connected = false;
		if (pixel_format == bmusb::PixelFormat_8BitBGRA) {
			video_format.stride = frame_width * 4;
			video_frame.len = frame_width * frame_height * 4;
			memset(video_frame.data, 0, video_frame.len);
		} else {
			video_format.stride = frame_width;
			current_frame_ycbcr_format.luma_coefficients = YCBCR_REC_709;
			current_frame_ycbcr_format.full_range = true;
			current_frame_ycbcr_format.num_levels = 256;
			current_frame_ycbcr_format.chroma_subsampling_x = 2;
			current_frame_ycbcr_format.chroma_subsampling_y = 2;
			current_frame_ycbcr_format.cb_x_position = 0.0f;
			current_frame_ycbcr_format.cb_y_position = 0.0f;
			current_frame_ycbcr_format.cr_x_position = 0.0f;
			current_frame_ycbcr_format.cr_y_position = 0.0f;
			video_frame.len = frame_width * frame_height * 2;
			memset(video_frame.data, 0, frame_width * frame_height);
			memset(video_frame.data + frame_width * frame_height, 128, frame_width * frame_height);  // Valid for both NV12 and planar.
		}

		if (frame_callback != nullptr) {
			frame_callback(-1, AVRational{1, TIMEBASE}, -1, AVRational{1, TIMEBASE}, timecode++,
				video_frame, /*video_offset=*/0, video_format,
				FrameAllocator::Frame(), /*audio_offset=*/0, AudioFormat());
		}
		last_frame_was_connected = false;
	}

	if (play_once) {
		disconnected = true;
		if (card_disconnected_callback != nullptr) {
			card_disconnected_callback();
		}
	}
}

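// get_format callback for the video codec context: given the list of pixel
// formats the decoder offers, pick the one belonging to the hardware device
// type <type> if the decoder actually supports it, logging diagnostics along
// the way; otherwise, fall back to the first software format in the list.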
template<AVHWDeviceType type>
AVPixelFormat get_hw_format(AVCodecContext *ctx, const AVPixelFormat *fmt)
{
	bool found_config_of_right_type = false;
	for (int i = 0;; ++i) {  // Termination condition inside loop.
		const AVCodecHWConfig *config = avcodec_get_hw_config(ctx->codec, i);
		if (config == nullptr) {  // End of list.
			break;
		}
		if (!(config->methods & AV_CODEC_HW_CONFIG_METHOD_HW_DEVICE_CTX) ||
		    config->device_type != type) {
			// Not interesting for us.
			continue;
		}

		// We have a config of the right type, but does it actually support
		// the pixel format we want? (Seemingly, FFmpeg's way of signaling errors
		// is to just replace the pixel format with a software-decoded one,
		// such as yuv420p.)
		found_config_of_right_type = true;
		for (const AVPixelFormat *fmt_ptr = fmt; *fmt_ptr != -1; ++fmt_ptr) {
			if (config->pix_fmt == *fmt_ptr) {
				fprintf(stderr, "Initialized '%s' hardware decoding for codec '%s'.\n",
					av_hwdevice_get_type_name(type), ctx->codec->name);
				if (ctx->profile == FF_PROFILE_H264_BASELINE) {
					fprintf(stderr, "WARNING: Stream claims to be H.264 Baseline, which is generally poorly supported in hardware decoders.\n");
					fprintf(stderr, "         Consider encoding it as Constrained Baseline, Main or High instead.\n");
					fprintf(stderr, "         Decoding might fail and fall back to software.\n");
				}
				return config->pix_fmt;
			}
		}
		fprintf(stderr, "Decoder '%s' supports only these pixel formats:", ctx->codec->name);
		unordered_set<AVPixelFormat> seen;
		for (const AVPixelFormat *fmt_ptr = fmt; *fmt_ptr != -1; ++fmt_ptr) {
			if (!seen.count(*fmt_ptr)) {
				fprintf(stderr, " %s", av_get_pix_fmt_name(*fmt_ptr));
				seen.insert(*fmt_ptr);
			}
		}
		fprintf(stderr, " (wanted %s for hardware acceleration)\n", av_get_pix_fmt_name(config->pix_fmt));
	}

	if (!found_config_of_right_type) {
		fprintf(stderr, "Decoder '%s' does not support device type '%s'.\n", ctx->codec->name, av_hwdevice_get_type_name(type));
	}

	// We found no acceptable hardware formats, so take the first software format.
	for (const AVPixelFormat *fmt_ptr = fmt; *fmt_ptr != -1; ++fmt_ptr) {
		if ((av_pix_fmt_desc_get(*fmt_ptr)->flags & AV_PIX_FMT_FLAG_HWACCEL) == 0) {
			fprintf(stderr, "Falling back to software format %s.\n", av_get_pix_fmt_name(*fmt_ptr));
			return *fmt_ptr;
		}
	}

	// Fallback: Just return anything. (Should never really happen.)
	return fmt[0];
}

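// Open <pathname> (or the already-connected SRT socket), set up video and
// audio decoders (with VDPAU/VA-API acceleration if available), and run the
// decode/present loop until EOF, error, or a quit/rewind command. Returns
// true on a clean exit (EOF, file changed on disk, or an external command)
// and false on error, in which case the caller will retry after a second.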
bool FFmpegCapture::play_video(const string &pathname)
{
	// Note: Call before open, not after; otherwise, there's a race.
	// (There is now, too, but it tips the correct way. We could use fstat()
	// if we had the file descriptor.)
	timespec last_modified;
	struct stat buf;
	if (stat(pathname.c_str(), &buf) != 0) {
		// Probably some sort of protocol, so can't stat.
		last_modified.tv_sec = -1;
	} else {
		last_modified = buf.st_mtim;
	}
	last_colorspace = static_cast<AVColorSpace>(-1);
	last_chroma_location = static_cast<AVChromaLocation>(-1);

	AVFormatContextWithCloser format_ctx;
	if (srt_sock == -1) {
		// Regular file.
		format_ctx = avformat_open_input_unique(pathname.c_str(), /*fmt=*/nullptr,
			/*options=*/nullptr,
			AVIOInterruptCB{ &FFmpegCapture::interrupt_cb_thunk, this });
	} else {
#ifdef HAVE_SRT
		// SRT socket, already opened.
		const AVInputFormat *mpegts_fmt = av_find_input_format("mpegts");
		format_ctx = avformat_open_input_unique(&FFmpegCapture::read_srt_thunk, this,
			mpegts_fmt, /*options=*/nullptr,
			AVIOInterruptCB{ &FFmpegCapture::interrupt_cb_thunk, this });
#else
		assert(false);
#endif
	}
	if (format_ctx == nullptr) {
		fprintf(stderr, "%s: Error opening file\n", pathname.c_str());
		return false;
	}

	if (avformat_find_stream_info(format_ctx.get(), nullptr) < 0) {
		fprintf(stderr, "%s: Error finding stream info\n", pathname.c_str());
		return false;
	}

	int video_stream_index = find_stream_index(format_ctx.get(), AVMEDIA_TYPE_VIDEO);
	if (video_stream_index == -1) {
		fprintf(stderr, "%s: No video stream found\n", pathname.c_str());
		return false;
	}

	int audio_stream_index = find_stream_index(format_ctx.get(), AVMEDIA_TYPE_AUDIO);
	int subtitle_stream_index = find_stream_index(format_ctx.get(), AVMEDIA_TYPE_SUBTITLE);
	has_last_subtitle = false;

	// Open video decoder.
	const AVCodecParameters *video_codecpar = format_ctx->streams[video_stream_index]->codecpar;
	const AVCodec *video_codec = avcodec_find_decoder(video_codecpar->codec_id);

	video_timebase = format_ctx->streams[video_stream_index]->time_base;
	AVCodecContextWithDeleter video_codec_ctx = avcodec_alloc_context3_unique(nullptr);
	if (avcodec_parameters_to_context(video_codec_ctx.get(), video_codecpar) < 0) {
		fprintf(stderr, "%s: Cannot fill video codec parameters\n", pathname.c_str());
		return false;
	}
	if (video_codec == nullptr) {
		fprintf(stderr, "%s: Cannot find video decoder\n", pathname.c_str());
		return false;
	}

	// Seemingly, it's not too easy to make something that just initializes
	// “whatever goes”, so we don't get CUDA or VULKAN or whatever here
	// without enumerating through several different types.
	// VA-API and VDPAU will do for now. We prioritize VDPAU for the
	// simple reason that there's a VA-API-via-VDPAU emulation for NVidia
	// cards that seems to work, but just hangs when trying to transfer the frame.
	//
	// Note that we don't actually check codec support beforehand,
	// so if you have a low-end VDPAU device but a high-end VA-API device,
	// you lose out on the extra codec support from the latter.
	AVBufferRef *hw_device_ctx = nullptr;
	if (av_hwdevice_ctx_create(&hw_device_ctx, AV_HWDEVICE_TYPE_VDPAU, nullptr, nullptr, 0) >= 0) {
		video_codec_ctx->hw_device_ctx = av_buffer_ref(hw_device_ctx);
		video_codec_ctx->get_format = get_hw_format<AV_HWDEVICE_TYPE_VDPAU>;
	} else if (av_hwdevice_ctx_create(&hw_device_ctx, AV_HWDEVICE_TYPE_VAAPI, nullptr, nullptr, 0) >= 0) {
		video_codec_ctx->hw_device_ctx = av_buffer_ref(hw_device_ctx);
		video_codec_ctx->get_format = get_hw_format<AV_HWDEVICE_TYPE_VAAPI>;
	} else {
		fprintf(stderr, "Failed to initialize VA-API or VDPAU for FFmpeg acceleration. Decoding video in software.\n");
	}

	if (avcodec_open2(video_codec_ctx.get(), video_codec, nullptr) < 0) {
		fprintf(stderr, "%s: Cannot open video decoder\n", pathname.c_str());
		return false;
	}
	unique_ptr<AVCodecContext, decltype(avcodec_close)*> video_codec_ctx_cleanup(
		video_codec_ctx.get(), avcodec_close);

	// Used in decode_ycbcr_format().
	is_mjpeg = video_codecpar->codec_id == AV_CODEC_ID_MJPEG;

	// Open audio decoder, if we have audio.
	AVCodecContextWithDeleter audio_codec_ctx;
	if (audio_stream_index != -1) {
		audio_codec_ctx = avcodec_alloc_context3_unique(nullptr);
		const AVCodecParameters *audio_codecpar = format_ctx->streams[audio_stream_index]->codecpar;
		audio_timebase = format_ctx->streams[audio_stream_index]->time_base;
		if (avcodec_parameters_to_context(audio_codec_ctx.get(), audio_codecpar) < 0) {
			fprintf(stderr, "%s: Cannot fill audio codec parameters\n", pathname.c_str());
			return false;
		}
		const AVCodec *audio_codec = avcodec_find_decoder(audio_codecpar->codec_id);
		if (audio_codec == nullptr) {
			fprintf(stderr, "%s: Cannot find audio decoder\n", pathname.c_str());
			return false;
		}
		if (avcodec_open2(audio_codec_ctx.get(), audio_codec, nullptr) < 0) {
			fprintf(stderr, "%s: Cannot open audio decoder\n", pathname.c_str());
			return false;
		}
	}
	unique_ptr<AVCodecContext, decltype(avcodec_close)*> audio_codec_ctx_cleanup(
		audio_codec_ctx.get(), avcodec_close);

	internal_rewind();

	// Main loop.
	bool first_frame = true;
	int consecutive_errors = 0;
	while (!producer_thread_should_quit.should_quit()) {
		if (process_queued_commands(format_ctx.get(), pathname, last_modified, /*rewound=*/nullptr)) {
			return true;
		}
		if (should_interrupt.load()) {
			// Check as a failsafe, so that we don't need to rely on avio if we don't have to.
			return false;
		}
		UniqueFrame audio_frame = audio_frame_allocator->alloc_frame();
		AudioFormat audio_format;

		int64_t audio_pts;
		bool error;
		AVFrameWithDeleter frame = decode_frame(format_ctx.get(), video_codec_ctx.get(), audio_codec_ctx.get(),
			pathname, video_stream_index, audio_stream_index, subtitle_stream_index, audio_frame.get(), &audio_format, &audio_pts, &error);
		if (error) {
			if (++consecutive_errors >= 100) {
				fprintf(stderr, "More than 100 consecutive error video frames, aborting playback.\n");
				return false;
			} else {
				continue;
			}
		} else {
			consecutive_errors = 0;
		}
		if (frame == nullptr) {
			// EOF. Loop back to the start if we can.
			if (format_ctx->pb != nullptr && format_ctx->pb->seekable == 0) {
				// Not seekable (but seemingly, sometimes av_seek_frame() would return 0 anyway,
				// so don't try).
				return true;
			}
			if (av_seek_frame(format_ctx.get(), /*stream_index=*/-1, /*timestamp=*/0, /*flags=*/0) < 0) {
				fprintf(stderr, "%s: Rewind failed, not looping.\n", pathname.c_str());
				return true;
			}
			if (video_codec_ctx != nullptr) {
				avcodec_flush_buffers(video_codec_ctx.get());
			}
			if (audio_codec_ctx != nullptr) {
				avcodec_flush_buffers(audio_codec_ctx.get());
			}
			// If the file has changed since last time, return to get it reloaded.
			// Note that depending on how you move the file into place, you might
			// end up corrupting the one you're already playing, so this path
			// might not trigger.
			if (changed_since(pathname, last_modified)) {
				return true;
			}
			internal_rewind();
			continue;
		}

		VideoFormat video_format = construct_video_format(frame.get(), video_timebase);
		if (video_format.frame_rate_nom == 0 || video_format.frame_rate_den == 0) {
			// Invalid frame rate; try constructing it from the previous frame length.
			// (This is especially important if we are the master card, for SRT,
			// since it affects audio. Not all senders have good timebases
			// (e.g., Larix rounds first to timebase 1000 and then multiplies by
			// 90 from there, it seems), but it's much better to have an oscillating
			// value than just locking at 60.)
			if (last_pts != 0 && frame->pts > last_pts) {
				int64_t pts_diff = frame->pts - last_pts;
				video_format.frame_rate_nom = video_timebase.den;
				video_format.frame_rate_den = video_timebase.num * pts_diff;
			} else {
				video_format.frame_rate_nom = 60;
				video_format.frame_rate_den = 1;
			}
		}
		UniqueFrame video_frame = make_video_frame(frame.get(), pathname, &error);
		if (error) {
			return false;
		}

		for ( ;; ) {
			if (last_pts == 0 && pts_origin == 0) {
				pts_origin = frame->pts;
			}
			steady_clock::time_point now = steady_clock::now();
			if (play_as_fast_as_possible) {
				video_frame->received_timestamp = now;
				audio_frame->received_timestamp = now;
				next_frame_start = now;
			} else {
				next_frame_start = compute_frame_start(frame->pts, pts_origin, video_timebase, start, rate);
				if (first_frame && last_frame_was_connected) {
					// If reconnect took more than one second, this is probably a live feed,
					// and we should reset the resampler. (Or the rate is really, really low,
					// in which case a reset on the first frame is fine anyway.)
					if (duration<double>(next_frame_start - last_frame).count() >= 1.0) {
						last_frame_was_connected = false;
					}
				}
				video_frame->received_timestamp = next_frame_start;

				// The easiest way to get all the rate conversions etc. right is to move the
				// audio PTS into the video PTS timebase and go from there. (We'll get some
				// rounding issues, but they should not be a big problem.)
				int64_t audio_pts_as_video_pts = av_rescale_q(audio_pts, audio_timebase, video_timebase);
				audio_frame->received_timestamp = compute_frame_start(audio_pts_as_video_pts, pts_origin, video_timebase, start, rate);

				if (audio_frame->len != 0) {
					// The received timestamps in Nageru are measured after we've just received the frame.
					// However, pts (especially audio pts) is at the _beginning_ of the frame.
					// If we have locked audio, the distinction doesn't really matter, as pts is
					// on a relative scale and a fixed offset is fine. But if we don't, we will have
					// a different number of samples each time, which will cause huge audio jitter
					// and throw off the resampler.
					//
					// In a sense, we should have compensated by adding the frame and audio lengths
					// to video_frame->received_timestamp and audio_frame->received_timestamp respectively,
					// but that would mean extra waiting in sleep_until(). All we need is that they
					// are correct relative to each other, though (and to the other frames we send),
					// so just align the end of the audio frame, and we're fine.
					size_t num_samples = (audio_frame->len * 8) / audio_format.bits_per_sample / audio_format.num_channels;
					double offset = double(num_samples) / OUTPUT_FREQUENCY -
						double(video_format.frame_rate_den) / video_format.frame_rate_nom;
					audio_frame->received_timestamp += duration_cast<steady_clock::duration>(duration<double>(offset));
				}

				if (duration<double>(now - next_frame_start).count() >= 0.1) {
					// If we don't have enough CPU to keep up, or if we have a live stream
					// where the initial origin was somehow wrong, we could be behind indefinitely.
					// In particular, this will give the audio resampler problems as it tries
					// to speed up to reduce the delay, hitting the low end of the buffer every time.
					fprintf(stderr, "%s: Playback %.0f ms behind, resetting time scale\n",
						pathname.c_str(),
						1e3 * duration<double>(now - next_frame_start).count());
					pts_origin = frame->pts;
					start = next_frame_start = now;
					timecode += TYPICAL_FPS * 2 + 1;
				}
			}
			bool finished_wakeup;
			if (play_as_fast_as_possible) {
				finished_wakeup = !producer_thread_should_quit.should_quit();
			} else {
				finished_wakeup = producer_thread_should_quit.sleep_until(next_frame_start);
			}
			if (finished_wakeup) {
				if (audio_frame->len > 0) {
					assert(audio_pts != -1);
				}
				if (!last_frame_was_connected) {
					// We're recovering from an error (or really slow load, see above).
					// Make sure to get the audio resampler reset. (This is a hack;
					// ideally, the frame callback should just accept a way to signal
					// audio discontinuity.)
					timecode += TYPICAL_FPS * 2 + 1;
				}
				last_neutral_color = get_neutral_color(frame->metadata);
				if (frame_callback != nullptr) {
					frame_callback(frame->pts, video_timebase, audio_pts, audio_timebase, timecode++,
						video_frame.get_and_release(), 0, video_format,
						audio_frame.get_and_release(), 0, audio_format);
				}
				first_frame = false;
				last_frame = steady_clock::now();
				last_frame_was_connected = true;
				break;
			} else {
				if (producer_thread_should_quit.should_quit()) break;

				bool rewound = false;
				if (process_queued_commands(format_ctx.get(), pathname, last_modified, &rewound)) {
					return true;
				}
				// If we just rewound, drop this frame on the floor and be done.
				if (rewound) {
					break;
				}
				// OK, we didn't, so probably a rate change. Recalculate next_frame_start,
				// but if it's now in the past, we'll reset the origin, so that we don't
				// generate a huge backlog of frames that we need to run through quickly.
				next_frame_start = compute_frame_start(frame->pts, pts_origin, video_timebase, start, rate);
				steady_clock::time_point now = steady_clock::now();
				if (next_frame_start < now) {
					pts_origin = frame->pts;
					start = next_frame_start = now;
				}
			}
		}
		last_pts = frame->pts;
	}
	return true;
}

void FFmpegCapture::internal_rewind()
{
	pts_origin = last_pts = 0;
	start = next_frame_start = steady_clock::now();
}

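// Drain and execute any commands queued from other threads (rewind, rate
// change). Returns true if the caller should return and reopen the file
// (the underlying file changed while rewinding); <rewound> is set if a
// rewind actually happened, so the caller can drop the in-flight frame.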
bool FFmpegCapture::process_queued_commands(AVFormatContext *format_ctx, const std::string &pathname, timespec last_modified, bool *rewound)
{
	// Process any queued commands from other threads.
	vector<QueuedCommand> commands;
	{
		lock_guard<mutex> lock(queue_mu);
		swap(commands, command_queue);
	}
	for (const QueuedCommand &cmd : commands) {
		switch (cmd.command) {
		case QueuedCommand::REWIND:
			if (av_seek_frame(format_ctx, /*stream_index=*/-1, /*timestamp=*/0, /*flags=*/0) < 0) {
				fprintf(stderr, "%s: Rewind failed, stopping play.\n", pathname.c_str());
			}
			// If the file has changed since last time, return to get it reloaded.
			// Note that depending on how you move the file into place, you might
			// end up corrupting the one you're already playing, so this path
			// might not trigger.
			if (changed_since(pathname, last_modified)) {
				return true;
			}
			internal_rewind();
			if (rewound != nullptr) {
				*rewound = true;
			}
			break;

		case QueuedCommand::CHANGE_RATE:
			// Change the origin to the last played frame.
			start = compute_frame_start(last_pts, pts_origin, video_timebase, start, rate);
			pts_origin = last_pts;
			rate = cmd.new_rate;
			play_as_fast_as_possible = (rate >= 10.0);
			break;
		}
	}
	return false;
}

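// Read and demux packets until one video frame has been decoded (or EOF).
// Audio packets encountered along the way are decoded and appended into
// <audio_frame>, with the first audio PTS recorded in *audio_pts; subtitle
// packets are stashed in last_subtitle. Returns nullptr on EOF, or on error
// with *error set.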
AVFrameWithDeleter FFmpegCapture::decode_frame(AVFormatContext *format_ctx, AVCodecContext *video_codec_ctx, AVCodecContext *audio_codec_ctx,
	const std::string &pathname, int video_stream_index, int audio_stream_index, int subtitle_stream_index,
	FrameAllocator::Frame *audio_frame, AudioFormat *audio_format, int64_t *audio_pts, bool *error)
{
	*error = false;

	// Read packets until we have a frame or there are none left.
	bool frame_finished = false;
	AVFrameWithDeleter audio_avframe = av_frame_alloc_unique();
	AVFrameWithDeleter video_avframe = av_frame_alloc_unique();
	bool eof = false;
	*audio_pts = -1;
	bool has_audio = false;
	do {
		AVPacket pkt;
		unique_ptr<AVPacket, decltype(av_packet_unref)*> pkt_cleanup(
			&pkt, av_packet_unref);
		av_init_packet(&pkt);
		pkt.data = nullptr;
		pkt.size = 0;
		if (av_read_frame(format_ctx, &pkt) == 0) {
			if (pkt.stream_index == audio_stream_index && audio_callback != nullptr) {
				audio_callback(&pkt, format_ctx->streams[audio_stream_index]->time_base);
			}
			if (pkt.stream_index == video_stream_index && video_callback != nullptr) {
				video_callback(&pkt, format_ctx->streams[video_stream_index]->time_base);
			}
			if (pkt.stream_index == video_stream_index && global_flags.transcode_video) {
				if (avcodec_send_packet(video_codec_ctx, &pkt) < 0) {
					fprintf(stderr, "%s: Cannot send packet to video codec.\n", pathname.c_str());
					*error = true;
					return AVFrameWithDeleter(nullptr);
				}
			} else if (pkt.stream_index == audio_stream_index && global_flags.transcode_audio) {
				has_audio = true;
				if (avcodec_send_packet(audio_codec_ctx, &pkt) < 0) {
					fprintf(stderr, "%s: Cannot send packet to audio codec.\n", pathname.c_str());
					*error = true;
					return AVFrameWithDeleter(nullptr);
				}
			} else if (pkt.stream_index == subtitle_stream_index) {
				last_subtitle = string(reinterpret_cast<const char *>(pkt.data), pkt.size);
				has_last_subtitle = true;
			}
		} else {
			eof = true;  // Or error, but ignore that for the time being.
		}

		// Decode audio, if any.
		if (has_audio) {
			for ( ;; ) {
				int err = avcodec_receive_frame(audio_codec_ctx, audio_avframe.get());
				if (err == 0) {
					if (*audio_pts == -1) {
						*audio_pts = audio_avframe->pts;
					}
					convert_audio(audio_avframe.get(), audio_frame, audio_format);
				} else if (err == AVERROR(EAGAIN)) {
					break;
				} else {
					fprintf(stderr, "%s: Cannot receive frame from audio codec.\n", pathname.c_str());
					*error = true;
					return AVFrameWithDeleter(nullptr);
				}
			}
		}

		// Decode video, if we have a frame.
		int err = avcodec_receive_frame(video_codec_ctx, video_avframe.get());
		if (err == 0) {
			if (video_avframe->format == AV_PIX_FMT_VAAPI ||
			    video_avframe->format == AV_PIX_FMT_VDPAU) {
				// Get the frame down to the CPU. (TODO: See if we can keep it
				// on the GPU all the way, since it will be going up again later.
				// However, this only works if the OpenGL GPU is the same one.)
				AVFrameWithDeleter sw_frame = av_frame_alloc_unique();
				int err = av_hwframe_transfer_data(sw_frame.get(), video_avframe.get(), 0);
				if (err != 0) {
					fprintf(stderr, "%s: Cannot transfer hardware video frame to software.\n", pathname.c_str());
					*error = true;
					return AVFrameWithDeleter(nullptr);
				}
				sw_frame->pts = video_avframe->pts;
				sw_frame->pkt_duration = video_avframe->pkt_duration;
				video_avframe = move(sw_frame);
			}
			frame_finished = true;
			break;
		} else if (err != AVERROR(EAGAIN)) {
			fprintf(stderr, "%s: Cannot receive frame from video codec.\n", pathname.c_str());
			*error = true;
			return AVFrameWithDeleter(nullptr);
		}
	} while (!eof);

	if (frame_finished)
		return video_avframe;
	else
		return AVFrameWithDeleter(nullptr);
}

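// Resample the decoded audio frame to OUTPUT_FREQUENCY stereo and append the
// result to <audio_frame>, filling in <audio_format> on first use. The
// swresample context is cached and only rebuilt when the source format,
// channel layout or sample rate changes.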
void FFmpegCapture::convert_audio(const AVFrame *audio_avframe, FrameAllocator::Frame *audio_frame, AudioFormat *audio_format)
{
	// Decide on a format. If there already is one in this audio frame,
	// we're pretty much forced to use it. If not, we try to find an exact match.
	// If that still doesn't work, we default to 32-bit signed chunked
	// (float would be nice, but there's really no way to signal that yet).
	AVSampleFormat dst_format;
	if (audio_format->bits_per_sample == 0) {
		switch (audio_avframe->format) {
		case AV_SAMPLE_FMT_S16:
		case AV_SAMPLE_FMT_S16P:
			audio_format->bits_per_sample = 16;
			dst_format = AV_SAMPLE_FMT_S16;
			break;
		case AV_SAMPLE_FMT_S32:
		case AV_SAMPLE_FMT_S32P:
		default:
			audio_format->bits_per_sample = 32;
			dst_format = AV_SAMPLE_FMT_S32;
			break;
		}
	} else if (audio_format->bits_per_sample == 16) {
		dst_format = AV_SAMPLE_FMT_S16;
	} else if (audio_format->bits_per_sample == 32) {
		dst_format = AV_SAMPLE_FMT_S32;
	} else {
		assert(false);
	}
	audio_format->num_channels = 2;

	int64_t channel_layout = audio_avframe->channel_layout;
	if (channel_layout == 0) {
		channel_layout = av_get_default_channel_layout(audio_avframe->channels);
	}

	if (resampler == nullptr ||
	    audio_avframe->format != last_src_format ||
	    dst_format != last_dst_format ||
	    channel_layout != last_channel_layout ||
	    audio_avframe->sample_rate != last_sample_rate) {
		swr_free(&resampler);
		resampler = swr_alloc_set_opts(nullptr,
		                               /*out_ch_layout=*/AV_CH_LAYOUT_STEREO_DOWNMIX,
		                               /*out_sample_fmt=*/dst_format,
		                               /*out_sample_rate=*/OUTPUT_FREQUENCY,
		                               /*in_ch_layout=*/channel_layout,
		                               /*in_sample_fmt=*/AVSampleFormat(audio_avframe->format),
		                               /*in_sample_rate=*/audio_avframe->sample_rate,
		                               /*log_offset=*/0,
		                               /*log_ctx=*/nullptr);

		if (resampler == nullptr) {
			fprintf(stderr, "Allocating resampler failed.\n");
			abort();
		}

		if (swr_init(resampler) < 0) {
			fprintf(stderr, "Could not open resample context.\n");
			abort();
		}

		last_src_format = AVSampleFormat(audio_avframe->format);
		last_dst_format = dst_format;
		last_channel_layout = channel_layout;
		last_sample_rate = audio_avframe->sample_rate;
	}

	size_t bytes_per_sample = (audio_format->bits_per_sample / 8) * 2;
	size_t num_samples_room = (audio_frame->size - audio_frame->len) / bytes_per_sample;

	uint8_t *data = audio_frame->data + audio_frame->len;
	int out_samples = swr_convert(resampler, &data, num_samples_room,
		const_cast<const uint8_t **>(audio_avframe->data), audio_avframe->nb_samples);
	if (out_samples < 0) {
		fprintf(stderr, "Audio conversion failed.\n");
		abort();
	}

	audio_frame->len += out_samples * bytes_per_sample;
}

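// Fill in a VideoFormat from the decoded frame. The frame rate is derived
// from the packet duration in the stream timebase, i.e.,
// fps = timebase.den / (pkt_duration * timebase.num); e.g., a 1/90000
// timebase with pkt_duration 3003 gives 90000/3003 ≈ 29.97 fps. A zero
// duration yields an invalid rate, which play_video() detects and
// reconstructs from the previous frame's PTS instead.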
VideoFormat FFmpegCapture::construct_video_format(const AVFrame *frame, AVRational video_timebase)
{
	VideoFormat video_format;
	video_format.width = frame_width(frame);
	video_format.height = frame_height(frame);
	if (pixel_format == bmusb::PixelFormat_8BitBGRA) {
		video_format.stride = frame_width(frame) * 4;
	} else if (pixel_format == FFmpegCapture::PixelFormat_NV12) {
		video_format.stride = frame_width(frame);
	} else {
		assert(pixel_format == bmusb::PixelFormat_8BitYCbCrPlanar);
		video_format.stride = frame_width(frame);
	}
	video_format.frame_rate_nom = video_timebase.den;
	video_format.frame_rate_den = frame->pkt_duration * video_timebase.num;
	video_format.has_signal = true;
	video_format.is_connected = true;
	return video_format;
}

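// Convert the decoded AVFrame into a bmusb-style frame in the pixel format
// the pipeline expects, scaling with swscale if the size or format differs.
// The sws context is cached between calls and rebuilt only when the source
// geometry or format changes.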
UniqueFrame FFmpegCapture::make_video_frame(const AVFrame *frame, const string &pathname, bool *error)
{
	*error = false;

	UniqueFrame video_frame(video_frame_allocator->alloc_frame());
	if (video_frame->data == nullptr) {
		return video_frame;
	}

	if (sws_ctx == nullptr ||
	    sws_last_width != frame->width ||
	    sws_last_height != frame->height ||
	    sws_last_src_format != frame->format) {
		sws_dst_format = decide_dst_format(AVPixelFormat(frame->format), pixel_format);
		sws_ctx.reset(
			sws_getContext(frame->width, frame->height, AVPixelFormat(frame->format),
				frame_width(frame), frame_height(frame), sws_dst_format,
				SWS_BICUBIC, nullptr, nullptr, nullptr));
		sws_last_width = frame->width;
		sws_last_height = frame->height;
		sws_last_src_format = frame->format;
	}
	if (sws_ctx == nullptr) {
		fprintf(stderr, "%s: Could not create scaler context\n", pathname.c_str());
		*error = true;
		return video_frame;
	}

	uint8_t *pic_data[4] = { nullptr, nullptr, nullptr, nullptr };
	int linesizes[4] = { 0, 0, 0, 0 };
	if (pixel_format == bmusb::PixelFormat_8BitBGRA) {
		pic_data[0] = video_frame->data;
		linesizes[0] = frame_width(frame) * 4;
		video_frame->len = (frame_width(frame) * 4) * frame_height(frame);
	} else if (pixel_format == PixelFormat_NV12) {
		pic_data[0] = video_frame->data;
		linesizes[0] = frame_width(frame);

		pic_data[1] = pic_data[0] + frame_width(frame) * frame_height(frame);
		linesizes[1] = frame_width(frame);

		video_frame->len = (frame_width(frame) * 2) * frame_height(frame);

		const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(sws_dst_format);
		current_frame_ycbcr_format = decode_ycbcr_format(desc, frame, is_mjpeg, &last_colorspace, &last_chroma_location);
	} else {
		assert(pixel_format == bmusb::PixelFormat_8BitYCbCrPlanar);
		const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(sws_dst_format);

		int chroma_width = AV_CEIL_RSHIFT(int(frame_width(frame)), desc->log2_chroma_w);
		int chroma_height = AV_CEIL_RSHIFT(int(frame_height(frame)), desc->log2_chroma_h);

		pic_data[0] = video_frame->data;
		linesizes[0] = frame_width(frame);

		pic_data[1] = pic_data[0] + frame_width(frame) * frame_height(frame);
		linesizes[1] = chroma_width;

		pic_data[2] = pic_data[1] + chroma_width * chroma_height;
		linesizes[2] = chroma_width;

		video_frame->len = frame_width(frame) * frame_height(frame) + 2 * chroma_width * chroma_height;

		current_frame_ycbcr_format = decode_ycbcr_format(desc, frame, is_mjpeg, &last_colorspace, &last_chroma_location);
	}

	// FIXME: Currently, if the video is too high-res for one of the allocated
	// frames, we simply refuse to scale it here to avoid crashes. It would be better
	// if we could somehow signal getting larger frames, especially as 4K is a thing now.
	if (video_frame->len > FRAME_SIZE) {
		fprintf(stderr, "%s: Decoded frame would be larger than supported FRAME_SIZE (%zu > %u), not decoding.\n", pathname.c_str(), video_frame->len, FRAME_SIZE);
		*error = true;
		return video_frame;
	}

	sws_scale(sws_ctx.get(), frame->data, frame->linesize, 0, frame->height, pic_data, linesizes);

	return video_frame;
}

int FFmpegCapture::interrupt_cb_thunk(void *opaque)
{
	return reinterpret_cast<FFmpegCapture *>(opaque)->interrupt_cb();
}

int FFmpegCapture::interrupt_cb()
{
	return should_interrupt.load();
}

unsigned FFmpegCapture::frame_width(const AVFrame *frame) const
{
	if (width == 0) {
		return frame->width;
	} else {
		return width;
	}
}

unsigned FFmpegCapture::frame_height(const AVFrame *frame) const
{
	if (height == 0) {
		return frame->height;
	} else {
		return height;
	}
}

#ifdef HAVE_SRT
int FFmpegCapture::read_srt_thunk(void *opaque, uint8_t *buf, int buf_size)
{
	return reinterpret_cast<FFmpegCapture *>(opaque)->read_srt(buf, buf_size);
}

int FFmpegCapture::read_srt(uint8_t *buf, int buf_size)
{
	SRT_MSGCTRL mc = srt_msgctrl_default;
	return srt_recvmsg2(srt_sock, reinterpret_cast<char *>(buf), buf_size, &mc);
}
#endif