1 #include "ffmpeg_capture.h"
2
3 #include <assert.h>
4 #include <pthread.h>
5 #include <stdint.h>
6 #include <stdio.h>
7 #include <stdlib.h>
8 #include <string.h>
9 #include <sys/stat.h>
10 #include <unistd.h>
11
12 extern "C" {
13 #include <libavcodec/avcodec.h>
14 #include <libavformat/avformat.h>
15 #include <libavutil/avutil.h>
16 #include <libavutil/error.h>
17 #include <libavutil/frame.h>
18 #include <libavutil/imgutils.h>
19 #include <libavutil/mem.h>
20 #include <libavutil/pixfmt.h>
21 #include <libavutil/opt.h>
22 #include <libswscale/swscale.h>
23 }
24
25 #include <chrono>
26 #include <cstdint>
27 #include <utility>
28 #include <vector>
29 #include <unordered_set>
30
31 #include <Eigen/Core>
32 #include <Eigen/LU>
33 #include <movit/colorspace_conversion_effect.h>
34
35 #include "bmusb/bmusb.h"
36 #include "shared/ffmpeg_raii.h"
37 #include "ffmpeg_util.h"
38 #include "flags.h"
39 #include "image_input.h"
40 #include "ref_counted_frame.h"
41 #include "shared/timebase.h"
42
43 #ifdef HAVE_SRT
44 #include <srt/srt.h>
45 #endif
46
47 #define FRAME_SIZE (8 << 20)  // 8 MB.
48
49 using namespace std;
50 using namespace std::chrono;
51 using namespace bmusb;
52 using namespace movit;
53 using namespace Eigen;
54
55 namespace {
56
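// Given a frame's PTS and the PTS origin (both in the video stream's timebase),
// compute the wall-clock time at which the frame should be presented,
// relative to <origin> and scaled by the playback rate.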
57 steady_clock::time_point compute_frame_start(int64_t frame_pts, int64_t pts_origin, const AVRational &video_timebase, const steady_clock::time_point &origin, double rate)
58 {
59         const duration<double> pts((frame_pts - pts_origin) * double(video_timebase.num) / double(video_timebase.den));
60         return origin + duration_cast<steady_clock::duration>(pts / rate);
61 }
62
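// Returns true if the file at <pathname> has a different modification time than <ts>,
// i.e., it has most likely been replaced since we last stat-ed it.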
63 bool changed_since(const std::string &pathname, const timespec &ts)
64 {
65         if (ts.tv_sec < 0) {
66                 return false;
67         }
68         struct stat buf;
69         if (stat(pathname.c_str(), &buf) != 0) {
70                 fprintf(stderr, "%s: Couldn't check for new version, leaving the old in place.\n", pathname.c_str());
71                 return false;
72         }
73         return (buf.st_mtim.tv_sec != ts.tv_sec || buf.st_mtim.tv_nsec != ts.tv_nsec);
74 }
75
76 bool is_full_range(const AVPixFmtDescriptor *desc)
77 {
78         // This is horrible, but there's no better way that I know of.
79         return (strchr(desc->name, 'j') != nullptr);
80 }
81
82 AVPixelFormat decide_dst_format(AVPixelFormat src_format, bmusb::PixelFormat dst_format_type)
83 {
84         if (dst_format_type == bmusb::PixelFormat_8BitBGRA) {
85                 return AV_PIX_FMT_BGRA;
86         }
87         if (dst_format_type == FFmpegCapture::PixelFormat_NV12) {
88                 return AV_PIX_FMT_NV12;
89         }
90
91         assert(dst_format_type == bmusb::PixelFormat_8BitYCbCrPlanar);
92
93         // If this is a non-Y'CbCr format, just convert to 4:4:4 Y'CbCr
94         // and be done with it. It's too strange to spend a lot of time on.
95         // (Let's hope there's no alpha.)
96         const AVPixFmtDescriptor *src_desc = av_pix_fmt_desc_get(src_format);
97         if (src_desc == nullptr ||
98             src_desc->nb_components != 3 ||
99             (src_desc->flags & AV_PIX_FMT_FLAG_RGB)) {
100                 return AV_PIX_FMT_YUV444P;
101         }
102
103         // The best for us would be Cb and Cr together if possible,
104         // but FFmpeg doesn't support that except in the special case of
105         // NV12, so we need to go to planar even for the case of NV12.
106         // Thus, look for the closest (but no worse) 8-bit planar Y'CbCr format
107         // that matches in color range. (This will also include the case of
108         // the source format already being acceptable.)
109         bool src_full_range = is_full_range(src_desc);
110         const char *best_format = "yuv444p";
111         unsigned best_score = numeric_limits<unsigned>::max();
112         for (const AVPixFmtDescriptor *desc = av_pix_fmt_desc_next(nullptr);
113              desc;
114              desc = av_pix_fmt_desc_next(desc)) {
115                 // Find planar Y'CbCr formats only.
116                 if (desc->nb_components != 3) continue;
117                 if (desc->flags & AV_PIX_FMT_FLAG_RGB) continue;
118                 if (!(desc->flags & AV_PIX_FMT_FLAG_PLANAR)) continue;
119                 if (desc->comp[0].plane != 0 ||
120                     desc->comp[1].plane != 1 ||
121                     desc->comp[2].plane != 2) continue;
122
123                 // 8-bit formats only.
124                 if (desc->flags & AV_PIX_FMT_FLAG_BE) continue;
125                 if (desc->comp[0].depth != 8) continue;
126
127                 // Same or better chroma resolution only.
128                 int chroma_w_diff = desc->log2_chroma_w - src_desc->log2_chroma_w;
129                 int chroma_h_diff = desc->log2_chroma_h - src_desc->log2_chroma_h;
130                 if (chroma_w_diff < 0 || chroma_h_diff < 0)
131                         continue;
132
133                 // Matching full/limited range only.
134                 if (is_full_range(desc) != src_full_range)
135                         continue;
136
137                 // Pick something with as little excess chroma resolution as possible.
138                 unsigned score = (1 << (chroma_w_diff)) << chroma_h_diff;
139                 if (score < best_score) {
140                         best_score = score;
141                         best_format = desc->name;
142                 }
143         }
144         return av_get_pix_fmt(best_format);
145 }
146
147 YCbCrFormat decode_ycbcr_format(const AVPixFmtDescriptor *desc, const AVFrame *frame, bool is_mjpeg, AVColorSpace *last_colorspace, AVChromaLocation *last_chroma_location)
148 {
149         YCbCrFormat format;
150         AVColorSpace colorspace = frame->colorspace;
151         switch (colorspace) {
152         case AVCOL_SPC_BT709:
153                 format.luma_coefficients = YCBCR_REC_709;
154                 break;
155         case AVCOL_SPC_BT470BG:
156         case AVCOL_SPC_SMPTE170M:
157         case AVCOL_SPC_SMPTE240M:
158                 format.luma_coefficients = YCBCR_REC_601;
159                 break;
160         case AVCOL_SPC_BT2020_NCL:
161                 format.luma_coefficients = YCBCR_REC_2020;
162                 break;
163         case AVCOL_SPC_UNSPECIFIED:
164                 format.luma_coefficients = (frame->height >= 720 ? YCBCR_REC_709 : YCBCR_REC_601);
165                 break;
166         default:
167                 if (colorspace != *last_colorspace) {
168                         fprintf(stderr, "Unknown Y'CbCr coefficient enum %d from FFmpeg; choosing Rec. 709.\n",
169                                 colorspace);
170                 }
171                 format.luma_coefficients = YCBCR_REC_709;
172                 break;
173         }
174         *last_colorspace = colorspace;
175
176         format.full_range = is_full_range(desc);
177         format.num_levels = 1 << desc->comp[0].depth;
178         format.chroma_subsampling_x = 1 << desc->log2_chroma_w;
179         format.chroma_subsampling_y = 1 << desc->log2_chroma_h;
180
181         switch (frame->chroma_location) {
182         case AVCHROMA_LOC_LEFT:
183                 format.cb_x_position = 0.0;
184                 format.cb_y_position = 0.5;
185                 break;
186         case AVCHROMA_LOC_CENTER:
187                 format.cb_x_position = 0.5;
188                 format.cb_y_position = 0.5;
189                 break;
190         case AVCHROMA_LOC_TOPLEFT:
191                 format.cb_x_position = 0.0;
192                 format.cb_y_position = 0.0;
193                 break;
194         case AVCHROMA_LOC_TOP:
195                 format.cb_x_position = 0.5;
196                 format.cb_y_position = 0.0;
197                 break;
198         case AVCHROMA_LOC_BOTTOMLEFT:
199                 format.cb_x_position = 0.0;
200                 format.cb_y_position = 1.0;
201                 break;
202         case AVCHROMA_LOC_BOTTOM:
203                 format.cb_x_position = 0.5;
204                 format.cb_y_position = 1.0;
205                 break;
206         default:
207                 if (frame->chroma_location != *last_chroma_location) {
208                         fprintf(stderr, "Unknown chroma location enum %d from FFmpeg; choosing center.\n",
209                                 frame->chroma_location);
210                 }
211                 format.cb_x_position = 0.5;
212                 format.cb_y_position = 0.5;
213                 break;
214         }
215         *last_chroma_location = frame->chroma_location;
216
217         if (is_mjpeg && !format.full_range) {
218                 // Limited-range MJPEG is only detected by FFmpeg whenever a special
219                 // JPEG comment is set, which means that in practice, the stream is
220                 // almost certainly generated by Futatabi. Override FFmpeg's forced
221                 // MJPEG defaults (it disregards the values set in the mux) with what
222                 // Futatabi sets.
223                 format.luma_coefficients = YCBCR_REC_709;
224                 format.cb_x_position = 0.0;
225                 format.cb_y_position = 0.5;
226         }
227
228         format.cr_x_position = format.cb_x_position;
229         format.cr_y_position = format.cb_y_position;
230         return format;
231 }
232
233 RGBTriplet get_neutral_color(AVDictionary *metadata)
234 {
235         if (metadata == nullptr) {
236                 return RGBTriplet(1.0f, 1.0f, 1.0f);
237         }
238         AVDictionaryEntry *entry = av_dict_get(metadata, "WhitePoint", nullptr, 0);
239         if (entry == nullptr) {
240                 return RGBTriplet(1.0f, 1.0f, 1.0f);
241         }
242
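        // The white point is given as a CIE xy chromaticity on the form
        // "x_num:x_den , y_num:y_den" (e.g. "3127:10000 , 3290:10000" for D65).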
243         unsigned x_nom, x_den, y_nom, y_den;
244         if (sscanf(entry->value, " %u:%u , %u:%u", &x_nom, &x_den, &y_nom, &y_den) != 4) {
245                 fprintf(stderr, "WARNING: Unable to parse white point '%s', using default white point\n", entry->value);
246                 return RGBTriplet(1.0f, 1.0f, 1.0f);
247         }
248
249         double x = double(x_nom) / x_den;
250         double y = double(y_nom) / y_den;
251         double z = 1.0 - x - y;
252
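        // Treat (x, y, 1 - x - y) as an XYZ triple (normalized so that X + Y + Z = 1),
        // and map it back through the sRGB primaries to find the relative R, G and B gains.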
253         Matrix3d rgb_to_xyz_matrix = movit::ColorspaceConversionEffect::get_xyz_matrix(COLORSPACE_sRGB);
254         Vector3d rgb = rgb_to_xyz_matrix.inverse() * Vector3d(x, y, z);
255
256         return RGBTriplet(rgb[0], rgb[1], rgb[2]);
257 }
258
259 }  // namespace
260
261 FFmpegCapture::FFmpegCapture(const string &filename, unsigned width, unsigned height)
262         : filename(filename), width(width), height(height), video_timebase{1, 1}
263 {
264         description = "Video: " + filename;
265
266         last_frame = steady_clock::now();
267
268         avformat_network_init();  // In case someone wants this.
269 }
270
271 #ifdef HAVE_SRT
272 FFmpegCapture::FFmpegCapture(int srt_sock, const string &stream_id)
273         : srt_sock(srt_sock),
274           width(0),  // Don't resize; SRT streams typically have stable resolution, and should behave much like regular cards in general.
275           height(0),
276           pixel_format(bmusb::PixelFormat_8BitYCbCrPlanar),
277           video_timebase{1, 1}
278 {
279         if (stream_id.empty()) {
280                 description = "SRT stream";
281         } else {
282                 description = stream_id;
283         }
284         play_as_fast_as_possible = true;
285         play_once = true;
286         last_frame = steady_clock::now();
287 }
288 #endif
289
290 FFmpegCapture::~FFmpegCapture()
291 {
292         if (has_dequeue_callbacks) {
293                 dequeue_cleanup_callback();
294         }
295         swr_free(&resampler);
296 #ifdef HAVE_SRT
297         if (srt_sock != -1) {
298                 srt_close(srt_sock);
299         }
300 #endif
301 }
302
303 void FFmpegCapture::configure_card()
304 {
305         if (video_frame_allocator == nullptr) {
306                 owned_video_frame_allocator.reset(new MallocFrameAllocator(FRAME_SIZE, NUM_QUEUED_VIDEO_FRAMES));
307                 set_video_frame_allocator(owned_video_frame_allocator.get());
308         }
309         if (audio_frame_allocator == nullptr) {
310                 // Audio can come out in pretty large chunks, so increase from the default 1 MB.
311                 owned_audio_frame_allocator.reset(new MallocFrameAllocator(1 << 20, NUM_QUEUED_AUDIO_FRAMES));
312                 set_audio_frame_allocator(owned_audio_frame_allocator.get());
313         }
314 }
315
316 void FFmpegCapture::start_bm_capture()
317 {
318         if (running) {
319                 return;
320         }
321         running = true;
322         producer_thread_should_quit.unquit();
323         producer_thread = thread(&FFmpegCapture::producer_thread_func, this);
324 }
325
326 void FFmpegCapture::stop_dequeue_thread()
327 {
328         if (!running) {
329                 return;
330         }
331         running = false;
332         producer_thread_should_quit.quit();
333         producer_thread.join();
334 }
335
336 std::map<uint32_t, VideoMode> FFmpegCapture::get_available_video_modes() const
337 {
338         // Note: This will never really be shown in the UI.
339         VideoMode mode;
340
341         char buf[256];
342         snprintf(buf, sizeof(buf), "%ux%u", sws_last_width, sws_last_height);
343         mode.name = buf;
344         
345         mode.autodetect = false;
346         mode.width = sws_last_width;
347         mode.height = sws_last_height;
348         mode.frame_rate_num = 60;
349         mode.frame_rate_den = 1;
350         mode.interlaced = false;
351
352         return {{ 0, mode }};
353 }
354
355 void FFmpegCapture::producer_thread_func()
356 {
357         char thread_name[16];
358         snprintf(thread_name, sizeof(thread_name), "FFmpeg_C_%d", card_index);
359         pthread_setname_np(pthread_self(), thread_name);
360
361         while (!producer_thread_should_quit.should_quit()) {
362                 string filename_copy;
363                 {
364                         lock_guard<mutex> lock(filename_mu);
365                         filename_copy = filename;
366                 }
367
368                 string pathname;
369                 if (srt_sock == -1) {
370                         pathname = search_for_file(filename_copy);
371                 } else {
372                         pathname = description;
373                 }
374                 if (pathname.empty()) {
375                         send_disconnected_frame();
376                         if (play_once) {
377                                 break;
378                         }
379                         producer_thread_should_quit.sleep_for(seconds(1));
380                         fprintf(stderr, "%s not found, sleeping one second and trying again...\n", filename_copy.c_str());
381                         continue;
382                 }
383                 should_interrupt = false;
384                 if (!play_video(pathname)) {
385                         // Error.
386                         send_disconnected_frame();
387                         if (play_once) {
388                                 break;
389                         }
390                         fprintf(stderr, "Error when playing %s, sleeping one second and trying again...\n", pathname.c_str());
391                         producer_thread_should_quit.sleep_for(seconds(1));
392                         continue;
393                 }
394
395                 if (play_once) {
396                         send_disconnected_frame();
397                         break;
398                 }
399
400                 // Probably just EOF, will exit the loop above on next test.
401         }
402
403         if (has_dequeue_callbacks) {
404                 dequeue_cleanup_callback();
405                 has_dequeue_callbacks = false;
406         }
407 }
408
409 void FFmpegCapture::send_disconnected_frame()
410 {
411         // Send an empty frame to signal that we have no signal anymore.
412         FrameAllocator::Frame video_frame = video_frame_allocator->alloc_frame();
413         size_t frame_width = width == 0 ? global_flags.width : width;
414         size_t frame_height = height == 0 ? global_flags.height : height;
415         if (video_frame.data) {
416                 VideoFormat video_format;
417                 video_format.width = frame_width;
418                 video_format.height = frame_height;
419                 video_format.frame_rate_nom = 60;
420                 video_format.frame_rate_den = 1;
421                 video_format.is_connected = false;
422                 if (pixel_format == bmusb::PixelFormat_8BitBGRA) {
423                         video_format.stride = frame_width * 4;
424                         video_frame.len = frame_width * frame_height * 4;
425                         memset(video_frame.data, 0, video_frame.len);
426                 } else {
427                         video_format.stride = frame_width;
428                         current_frame_ycbcr_format.luma_coefficients = YCBCR_REC_709;
429                         current_frame_ycbcr_format.full_range = true;
430                         current_frame_ycbcr_format.num_levels = 256;
431                         current_frame_ycbcr_format.chroma_subsampling_x = 2;
432                         current_frame_ycbcr_format.chroma_subsampling_y = 2;
433                         current_frame_ycbcr_format.cb_x_position = 0.0f;
434                         current_frame_ycbcr_format.cb_y_position = 0.0f;
435                         current_frame_ycbcr_format.cr_x_position = 0.0f;
436                         current_frame_ycbcr_format.cr_y_position = 0.0f;
437                         video_frame.len = frame_width * frame_height * 2;
438                         memset(video_frame.data, 0, frame_width * frame_height);
439                         memset(video_frame.data + frame_width * frame_height, 128, frame_width * frame_height);  // Valid for both NV12 and planar.
440                 }
441
442                 if (frame_callback != nullptr) {
443                         frame_callback(-1, AVRational{1, TIMEBASE}, -1, AVRational{1, TIMEBASE}, timecode++,
444                                 video_frame, /*video_offset=*/0, video_format,
445                                 FrameAllocator::Frame(), /*audio_offset=*/0, AudioFormat());
446                 }
447                 last_frame_was_connected = false;
448         }
449
450         if (play_once) {
451                 disconnected = true;
452                 if (card_disconnected_callback != nullptr) {
453                         card_disconnected_callback();
454                 }
455         }
456 }
457
458 template<AVHWDeviceType type>
459 AVPixelFormat get_hw_format(AVCodecContext *ctx, const AVPixelFormat *fmt)
460 {
461         bool found_config_of_right_type = false;
462         for (int i = 0;; ++i) {  // Termination condition inside loop.
463                 const AVCodecHWConfig *config = avcodec_get_hw_config(ctx->codec, i);
464                 if (config == nullptr) {  // End of list.
465                         break;
466                 }
467                 if (!(config->methods & AV_CODEC_HW_CONFIG_METHOD_HW_DEVICE_CTX) ||
468                     config->device_type != type) {
469                         // Not interesting for us.
470                         continue;
471                 }
472
473                 // We have a config of the right type, but does it actually support
474                 // the pixel format we want? (Seemingly, FFmpeg's way of signaling errors
475                 // is to just replace the pixel format with a software-decoded one,
476                 // such as yuv420p.)
477                 found_config_of_right_type = true;
478                 for (const AVPixelFormat *fmt_ptr = fmt; *fmt_ptr != -1; ++fmt_ptr) {
479                         if (config->pix_fmt == *fmt_ptr) {
480                                 fprintf(stderr, "Initialized '%s' hardware decoding for codec '%s'.\n",
481                                         av_hwdevice_get_type_name(type), ctx->codec->name);
482                                 if (ctx->profile == FF_PROFILE_H264_BASELINE) {
483                                         fprintf(stderr, "WARNING: Stream claims to be H.264 Baseline, which is generally poorly supported in hardware decoders.\n");
484                                         fprintf(stderr, "         Consider encoding it as Constrained Baseline, Main or High instead.\n");
485                                         fprintf(stderr, "         Decoding might fail and fall back to software.\n");
486                                 }
487                                 return config->pix_fmt;
488                         }
489                 }
490                 fprintf(stderr, "Decoder '%s' supports only these pixel formats:", ctx->codec->name);
491                 unordered_set<AVPixelFormat> seen;
492                 for (const AVPixelFormat *fmt_ptr = fmt; *fmt_ptr != -1; ++fmt_ptr) {
493                         if (!seen.count(*fmt_ptr)) {
494                                 fprintf(stderr, " %s", av_get_pix_fmt_name(*fmt_ptr));
495                                 seen.insert(*fmt_ptr);
496                         }
497                 }
498                 fprintf(stderr, " (wanted %s for hardware acceleration)\n", av_get_pix_fmt_name(config->pix_fmt));
499
500         }
501
502         if (!found_config_of_right_type) {
503                 fprintf(stderr, "Decoder '%s' does not support device type '%s'.\n", ctx->codec->name, av_hwdevice_get_type_name(type));
504         }
505
506         // We found no usable hardware-accelerated format, so take the first software format.
507         for (const AVPixelFormat *fmt_ptr = fmt; *fmt_ptr != -1; ++fmt_ptr) {
508                 if ((av_pix_fmt_desc_get(*fmt_ptr)->flags & AV_PIX_FMT_FLAG_HWACCEL) == 0) {
509                         fprintf(stderr, "Falling back to software format %s.\n", av_get_pix_fmt_name(*fmt_ptr));
510                         return *fmt_ptr;
511                 }
512         }
513
514         // Fallback: Just return anything. (Should never really happen.)
515         return fmt[0];
516 }
517
518 bool FFmpegCapture::play_video(const string &pathname)
519 {
520         // Note: Call before open, not after; otherwise, there's a race.
521         // (There is now, too, but it tips the correct way. We could use fstat()
522         // if we had the file descriptor.)
523         timespec last_modified;
524         struct stat buf;
525         if (stat(pathname.c_str(), &buf) != 0) {
526                 // Probably some sort of protocol, so can't stat.
527                 last_modified.tv_sec = -1;
528         } else {
529                 last_modified = buf.st_mtim;
530         }
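        // Invalidate the cached values, so that decode_ycbcr_format() will print
        // its warnings (if any) again for the first frame of this file.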
531         last_colorspace = static_cast<AVColorSpace>(-1);
532         last_chroma_location = static_cast<AVChromaLocation>(-1);
533
534         AVFormatContextWithCloser format_ctx;
535         if (srt_sock == -1) {
536                 // Regular file.
537                 format_ctx = avformat_open_input_unique(pathname.c_str(), /*fmt=*/nullptr,
538                         /*options=*/nullptr,
539                         AVIOInterruptCB{ &FFmpegCapture::interrupt_cb_thunk, this });
540         } else {
541 #ifdef HAVE_SRT
542                 // SRT socket, already opened.
543                 const AVInputFormat *mpegts_fmt = av_find_input_format("mpegts");
544                 format_ctx = avformat_open_input_unique(&FFmpegCapture::read_srt_thunk, this,
545                         mpegts_fmt, /*options=*/nullptr,
546                         AVIOInterruptCB{ &FFmpegCapture::interrupt_cb_thunk, this });
547 #else
548                 assert(false);
549 #endif
550         }
551         if (format_ctx == nullptr) {
552                 fprintf(stderr, "%s: Error opening file\n", pathname.c_str());
553                 return false;
554         }
555
556         if (avformat_find_stream_info(format_ctx.get(), nullptr) < 0) {
557                 fprintf(stderr, "%s: Error finding stream info\n", pathname.c_str());
558                 return false;
559         }
560
561         int video_stream_index = find_stream_index(format_ctx.get(), AVMEDIA_TYPE_VIDEO);
562         if (video_stream_index == -1) {
563                 fprintf(stderr, "%s: No video stream found\n", pathname.c_str());
564                 return false;
565         }
566
567         int audio_stream_index = find_stream_index(format_ctx.get(), AVMEDIA_TYPE_AUDIO);
568         int subtitle_stream_index = find_stream_index(format_ctx.get(), AVMEDIA_TYPE_SUBTITLE);
569         has_last_subtitle = false;
570
571         // Open video decoder.
572         const AVCodecParameters *video_codecpar = format_ctx->streams[video_stream_index]->codecpar;
573         const AVCodec *video_codec = avcodec_find_decoder(video_codecpar->codec_id);
574
575         video_timebase = format_ctx->streams[video_stream_index]->time_base;
576         AVCodecContextWithDeleter video_codec_ctx = avcodec_alloc_context3_unique(nullptr);
577         if (avcodec_parameters_to_context(video_codec_ctx.get(), video_codecpar) < 0) {
578                 fprintf(stderr, "%s: Cannot fill video codec parameters\n", pathname.c_str());
579                 return false;
580         }
581         if (video_codec == nullptr) {
582                 fprintf(stderr, "%s: Cannot find video decoder\n", pathname.c_str());
583                 return false;
584         }
585
586         // Seemingly, it's not too easy to make something that just initializes
587         // “whatever goes”, so we don't get CUDA or VULKAN or whatever here
588         // without enumerating through several different types.
589         // VA-API and VDPAU will do for now. We prioritize VDPAU for the
590         // simple reason that there's a VA-API-via-VDPAU emulation for NVidia
591         // cards that seems to work, but just hangs when trying to transfer the frame.
592         //
593         // Note that we don't actually check codec support beforehand,
594         // so if you have a low-end VDPAU device but a high-end VA-API device,
595         // you lose out on the extra codec support from the latter.
596         AVBufferRef *hw_device_ctx = nullptr;
597         if (av_hwdevice_ctx_create(&hw_device_ctx, AV_HWDEVICE_TYPE_VDPAU, nullptr, nullptr, 0) >= 0) {
598                 video_codec_ctx->hw_device_ctx = av_buffer_ref(hw_device_ctx);
599                 video_codec_ctx->get_format = get_hw_format<AV_HWDEVICE_TYPE_VDPAU>;
600         } else if (av_hwdevice_ctx_create(&hw_device_ctx, AV_HWDEVICE_TYPE_VAAPI, nullptr, nullptr, 0) >= 0) {
601                 video_codec_ctx->hw_device_ctx = av_buffer_ref(hw_device_ctx);
602                 video_codec_ctx->get_format = get_hw_format<AV_HWDEVICE_TYPE_VAAPI>;
603         } else {
604                 fprintf(stderr, "Failed to initialize VA-API or VDPAU for FFmpeg acceleration. Decoding video in software.\n");
605         }
606
607         if (avcodec_open2(video_codec_ctx.get(), video_codec, nullptr) < 0) {
608                 fprintf(stderr, "%s: Cannot open video decoder\n", pathname.c_str());
609                 return false;
610         }
611         unique_ptr<AVCodecContext, decltype(avcodec_close)*> video_codec_ctx_cleanup(
612                 video_codec_ctx.get(), avcodec_close);
613
614         // Used in decode_ycbcr_format().
615         is_mjpeg = video_codecpar->codec_id == AV_CODEC_ID_MJPEG;
616
617         // Open audio decoder, if we have audio.
618         AVCodecContextWithDeleter audio_codec_ctx;
619         if (audio_stream_index != -1) {
620                 audio_codec_ctx = avcodec_alloc_context3_unique(nullptr);
621                 const AVCodecParameters *audio_codecpar = format_ctx->streams[audio_stream_index]->codecpar;
622                 audio_timebase = format_ctx->streams[audio_stream_index]->time_base;
623                 if (avcodec_parameters_to_context(audio_codec_ctx.get(), audio_codecpar) < 0) {
624                         fprintf(stderr, "%s: Cannot fill audio codec parameters\n", pathname.c_str());
625                         return false;
626                 }
627                 const AVCodec *audio_codec = avcodec_find_decoder(audio_codecpar->codec_id);
628                 if (audio_codec == nullptr) {
629                         fprintf(stderr, "%s: Cannot find audio decoder\n", pathname.c_str());
630                         return false;
631                 }
632                 if (avcodec_open2(audio_codec_ctx.get(), audio_codec, nullptr) < 0) {
633                         fprintf(stderr, "%s: Cannot open audio decoder\n", pathname.c_str());
634                         return false;
635                 }
636         }
637         unique_ptr<AVCodecContext, decltype(avcodec_close)*> audio_codec_ctx_cleanup(
638                 audio_codec_ctx.get(), avcodec_close);
639
640         internal_rewind();
641
642         // Main loop.
643         bool first_frame = true;
644         int consecutive_errors = 0;
645         while (!producer_thread_should_quit.should_quit()) {
646                 if (process_queued_commands(format_ctx.get(), pathname, last_modified, /*rewound=*/nullptr)) {
647                         return true;
648                 }
649                 if (should_interrupt.load()) {
650                         // Check as a failsafe, so that we don't need to rely on avio if we don't have to.
651                         return false;
652                 }
653                 UniqueFrame audio_frame = audio_frame_allocator->alloc_frame();
654                 AudioFormat audio_format;
655
656                 int64_t audio_pts;
657                 bool error;
658                 AVFrameWithDeleter frame = decode_frame(format_ctx.get(), video_codec_ctx.get(), audio_codec_ctx.get(),
659                         pathname, video_stream_index, audio_stream_index, subtitle_stream_index, audio_frame.get(), &audio_format, &audio_pts, &error);
660                 if (error) {
661                         if (++consecutive_errors >= 100) {
662                                 fprintf(stderr, "More than 100 consecutive error video frames, aborting playback.\n");
663                                 return false;
664                         } else {
665                                 continue;
666                         }
667                 } else {
668                         consecutive_errors = 0;
669                 }
670                 if (frame == nullptr) {
671                         // EOF. Loop back to the start if we can.
672                         if (format_ctx->pb != nullptr && format_ctx->pb->seekable == 0) {
673                                 // Not seekable (but seemingly, sometimes av_seek_frame() would return 0 anyway,
674                                 // so don't try).
675                                 return true;
676                         }
677                         if (av_seek_frame(format_ctx.get(), /*stream_index=*/-1, /*timestamp=*/0, /*flags=*/0) < 0) {
678                                 fprintf(stderr, "%s: Rewind failed, not looping.\n", pathname.c_str());
679                                 return true;
680                         }
681                         if (video_codec_ctx != nullptr) {
682                                 avcodec_flush_buffers(video_codec_ctx.get());
683                         }
684                         if (audio_codec_ctx != nullptr) {
685                                 avcodec_flush_buffers(audio_codec_ctx.get());
686                         }
687                         // If the file has changed since last time, return to get it reloaded.
688                         // Note that depending on how you move the file into place, you might
689                         // end up corrupting the one you're already playing, so this path
690                         // might not trigger.
691                         if (changed_since(pathname, last_modified)) {
692                                 return true;
693                         }
694                         internal_rewind();
695                         continue;
696                 }
697
698                 VideoFormat video_format = construct_video_format(frame.get(), video_timebase);
699                 if (video_format.frame_rate_nom == 0 || video_format.frame_rate_den == 0) {
700                         // Invalid frame rate; try constructing it from the previous frame length.
701                         // (This is especially important if we are the master card, for SRT,
702                         // since it affects audio. Not all senders have good timebases
703                         // (e.g., Larix rounds first to timebase 1000 and then multiplies by
704                         // 90 from there, it seems), but it's much better to have an oscillating
705                         // value than just locking at 60.)
706                         if (last_pts != 0 && frame->pts > last_pts) {
707                                 int64_t pts_diff = frame->pts - last_pts;
708                                 video_format.frame_rate_nom = video_timebase.den;
709                                 video_format.frame_rate_den = video_timebase.num * pts_diff;
710                         } else {
711                                 video_format.frame_rate_nom = 60;
712                                 video_format.frame_rate_den = 1;
713                         }
714                 }
715                 UniqueFrame video_frame = make_video_frame(frame.get(), pathname, &error);
716                 if (error) {
717                         return false;
718                 }
719
720                 for ( ;; ) {
721                         if (last_pts == 0 && pts_origin == 0) {
722                                 pts_origin = frame->pts;        
723                         }
724                         steady_clock::time_point now = steady_clock::now();
725                         if (play_as_fast_as_possible) {
726                                 video_frame->received_timestamp = now;
727                                 audio_frame->received_timestamp = now;
728                                 next_frame_start = now;
729                         } else {
730                                 next_frame_start = compute_frame_start(frame->pts, pts_origin, video_timebase, start, rate);
731                                 if (first_frame && last_frame_was_connected) {
732                                         // If reconnect took more than one second, this is probably a live feed,
733                                         // and we should reset the resampler. (Or the rate is really, really low,
734                                         // in which case a reset on the first frame is fine anyway.)
735                                         if (duration<double>(next_frame_start - last_frame).count() >= 1.0) {
736                                                 last_frame_was_connected = false;
737                                         }
738                                 }
739                                 video_frame->received_timestamp = next_frame_start;
740
741                                 // The easiest way to get all the rate conversions etc. right is to move the
742                                 // audio PTS into the video PTS timebase and go from there. (We'll get some
743                                 // rounding issues, but they should not be a big problem.)
744                                 int64_t audio_pts_as_video_pts = av_rescale_q(audio_pts, audio_timebase, video_timebase);
745                                 audio_frame->received_timestamp = compute_frame_start(audio_pts_as_video_pts, pts_origin, video_timebase, start, rate);
746
747                                 if (audio_frame->len != 0) {
748                                         // The received timestamps in Nageru are measured after we've just received the frame.
749                                         // However, pts (especially audio pts) is at the _beginning_ of the frame.
750                                         // If we have locked audio, the distinction doesn't really matter, as pts is
751                                         // on a relative scale and a fixed offset is fine. But if we don't, we will have
752                                         // a different number of samples each time, which will cause huge audio jitter
753                                         // and throw off the resampler.
754                                         //
755                                         // In a sense, we should have compensated by adding the frame and audio lengths
756                                         // to video_frame->received_timestamp and audio_frame->received_timestamp respectively,
757                                         // but that would mean extra waiting in sleep_until(). All we need is that they
758                                         // are correct relative to each other, though (and to the other frames we send),
759                                         // so just align the end of the audio frame, and we're fine.
760                                         size_t num_samples = (audio_frame->len * 8) / audio_format.bits_per_sample / audio_format.num_channels;
761                                         double offset = double(num_samples) / OUTPUT_FREQUENCY -
762                                                 double(video_format.frame_rate_den) / video_format.frame_rate_nom;
763                                         audio_frame->received_timestamp += duration_cast<steady_clock::duration>(duration<double>(offset));
764                                 }
765
766                                 if (duration<double>(now - next_frame_start).count() >= 0.1) {
767                                         // If we don't have enough CPU to keep up, or if we have a live stream
768                                         // where the initial origin was somehow wrong, we could be behind indefinitely.
769                                         // In particular, this will give the audio resampler problems as it tries
770                                         // to speed up to reduce the delay, hitting the low end of the buffer every time.
771                                         fprintf(stderr, "%s: Playback %.0f ms behind, resetting time scale\n",
772                                                 pathname.c_str(),
773                                                 1e3 * duration<double>(now - next_frame_start).count());
774                                         pts_origin = frame->pts;
775                                         start = next_frame_start = now;
776                                         timecode += TYPICAL_FPS * 2 + 1;
777                                 }
778                         }
779                         bool finished_wakeup;
780                         if (play_as_fast_as_possible) {
781                                 finished_wakeup = !producer_thread_should_quit.should_quit();
782                         } else {
783                                 finished_wakeup = producer_thread_should_quit.sleep_until(next_frame_start);
784                         }
785                         if (finished_wakeup) {
786                                 if (audio_frame->len > 0) {
787                                         assert(audio_pts != -1);
788                                 }
789                                 if (!last_frame_was_connected) {
790                                         // We're recovering from an error (or really slow load, see above).
791                                         // Make sure to get the audio resampler reset. (This is a hack;
792                                         // ideally, the frame callback should just accept a way to signal
793                                         // audio discontinuity.)
794                                         timecode += TYPICAL_FPS * 2 + 1;
795                                 }
796                                 last_neutral_color = get_neutral_color(frame->metadata);
797                                 if (frame_callback != nullptr) {
798                                         frame_callback(frame->pts, video_timebase, audio_pts, audio_timebase, timecode++,
799                                                 video_frame.get_and_release(), 0, video_format,
800                                                 audio_frame.get_and_release(), 0, audio_format);
801                                 }
802                                 first_frame = false;
803                                 last_frame = steady_clock::now();
804                                 last_frame_was_connected = true;
805                                 break;
806                         } else {
807                                 if (producer_thread_should_quit.should_quit()) break;
808
809                                 bool rewound = false;
810                                 if (process_queued_commands(format_ctx.get(), pathname, last_modified, &rewound)) {
811                                         return true;
812                                 }
813                                 // If we just rewound, drop this frame on the floor and be done.
814                                 if (rewound) {
815                                         break;
816                                 }
817                                 // OK, we didn't, so probably a rate change. Recalculate next_frame_start,
818                                 // but if it's now in the past, we'll reset the origin, so that we don't
819                                 // generate a huge backlog of frames that we need to run through quickly.
820                                 next_frame_start = compute_frame_start(frame->pts, pts_origin, video_timebase, start, rate);
821                                 steady_clock::time_point now = steady_clock::now();
822                                 if (next_frame_start < now) {
823                                         pts_origin = frame->pts;
824                                         start = next_frame_start = now;
825                                 }
826                         }
827                 }
828                 last_pts = frame->pts;
829         }
830         return true;
831 }
832
833 void FFmpegCapture::internal_rewind()
834 {                               
835         pts_origin = last_pts = 0;
836         start = next_frame_start = steady_clock::now();
837 }
838
839 bool FFmpegCapture::process_queued_commands(AVFormatContext *format_ctx, const std::string &pathname, timespec last_modified, bool *rewound)
840 {
841         // Process any queued commands from other threads.
842         vector<QueuedCommand> commands;
843         {
844                 lock_guard<mutex> lock(queue_mu);
845                 swap(commands, command_queue);
846         }
847         for (const QueuedCommand &cmd : commands) {
848                 switch (cmd.command) {
849                 case QueuedCommand::REWIND:
850                         if (av_seek_frame(format_ctx, /*stream_index=*/-1, /*timestamp=*/0, /*flags=*/0) < 0) {
851                                 fprintf(stderr, "%s: Rewind failed, stopping play.\n", pathname.c_str());
852                         }
853                         // If the file has changed since last time, return to get it reloaded.
854                         // Note that depending on how you move the file into place, you might
855                         // end up corrupting the one you're already playing, so this path
856                         // might not trigger.
857                         if (changed_since(pathname, last_modified)) {
858                                 return true;
859                         }
860                         internal_rewind();
861                         if (rewound != nullptr) {
862                                 *rewound = true;
863                         }
864                         break;
865
866                 case QueuedCommand::CHANGE_RATE:
867                         // Change the origin to the last played frame.
868                         start = compute_frame_start(last_pts, pts_origin, video_timebase, start, rate);
869                         pts_origin = last_pts;
870                         rate = cmd.new_rate;
871                         play_as_fast_as_possible = (rate >= 10.0);
872                         break;
873                 }
874         }
875         return false;
876 }
881
882 AVFrameWithDeleter FFmpegCapture::decode_frame(AVFormatContext *format_ctx, AVCodecContext *video_codec_ctx, AVCodecContext *audio_codec_ctx,
883         const std::string &pathname, int video_stream_index, int audio_stream_index, int subtitle_stream_index,
884         FrameAllocator::Frame *audio_frame, AudioFormat *audio_format, int64_t *audio_pts, bool *error)
885 {
886         *error = false;
887
888         // Read packets until we have a frame or there are none left.
889         bool frame_finished = false;
890         AVFrameWithDeleter audio_avframe = av_frame_alloc_unique();
891         AVFrameWithDeleter video_avframe = av_frame_alloc_unique();
892         bool eof = false;
893         *audio_pts = -1;
894         bool has_audio = false;
895         do {
896                 AVPacket pkt;
897                 unique_ptr<AVPacket, decltype(av_packet_unref)*> pkt_cleanup(
898                         &pkt, av_packet_unref);
899                 av_init_packet(&pkt);
900                 pkt.data = nullptr;
901                 pkt.size = 0;
902                 if (av_read_frame(format_ctx, &pkt) == 0) {
903                         if (pkt.stream_index == audio_stream_index && audio_callback != nullptr) {
904                                 audio_callback(&pkt, format_ctx->streams[audio_stream_index]->time_base);
905                         }
906                         if (pkt.stream_index == video_stream_index && video_callback != nullptr) {
907                                 video_callback(&pkt, format_ctx->streams[video_stream_index]->time_base);
908                         }
909                         if (pkt.stream_index == video_stream_index && global_flags.transcode_video) {
910                                 if (avcodec_send_packet(video_codec_ctx, &pkt) < 0) {
911                                         fprintf(stderr, "%s: Cannot send packet to video codec.\n", pathname.c_str());
912                                         *error = true;
913                                         return AVFrameWithDeleter(nullptr);
914                                 }
915                         } else if (pkt.stream_index == audio_stream_index && global_flags.transcode_audio) {
916                                 has_audio = true;
917                                 if (avcodec_send_packet(audio_codec_ctx, &pkt) < 0) {
918                                         fprintf(stderr, "%s: Cannot send packet to audio codec.\n", pathname.c_str());
919                                         *error = true;
920                                         return AVFrameWithDeleter(nullptr);
921                                 }
922                         } else if (pkt.stream_index == subtitle_stream_index) {
923                                 last_subtitle = string(reinterpret_cast<const char *>(pkt.data), pkt.size);
924                                 has_last_subtitle = true;
925                         }
926                 } else {
927                         eof = true;  // Or error, but ignore that for the time being.
928                 }
929
930                 // Decode audio, if any.
931                 if (has_audio) {
932                         for ( ;; ) {
933                                 int err = avcodec_receive_frame(audio_codec_ctx, audio_avframe.get());
934                                 if (err == 0) {
935                                         if (*audio_pts == -1) {
936                                                 *audio_pts = audio_avframe->pts;
937                                         }
938                                         convert_audio(audio_avframe.get(), audio_frame, audio_format);
939                                 } else if (err == AVERROR(EAGAIN)) {
940                                         break;
941                                 } else {
942                                         fprintf(stderr, "%s: Cannot receive frame from audio codec.\n", pathname.c_str());
943                                         *error = true;
944                                         return AVFrameWithDeleter(nullptr);
945                                 }
946                         }
947                 }
948
949                 // Decode video, if we have a frame.
950                 int err = avcodec_receive_frame(video_codec_ctx, video_avframe.get());
951                 if (err == 0) {
952                         if (video_avframe->format == AV_PIX_FMT_VAAPI ||
953                             video_avframe->format == AV_PIX_FMT_VDPAU) {
954                                 // Get the frame down to the CPU. (TODO: See if we can keep it
955                                 // on the GPU all the way, since it will be going up again later.
956                                 // However, this only works if the OpenGL GPU is the same one.)
957                                 AVFrameWithDeleter sw_frame = av_frame_alloc_unique();
958                                 int err = av_hwframe_transfer_data(sw_frame.get(), video_avframe.get(), 0);
959                                 if (err != 0) {
960                                         fprintf(stderr, "%s: Cannot transfer hardware video frame to software.\n", pathname.c_str());
961                                         *error = true;
962                                         return AVFrameWithDeleter(nullptr);
963                                 }
964                                 sw_frame->pts = video_avframe->pts;
965                                 sw_frame->pkt_duration = video_avframe->pkt_duration;
966                                 video_avframe = move(sw_frame);
967                         }
968                         frame_finished = true;
969                         break;
970                 } else if (err != AVERROR(EAGAIN)) {
971                         fprintf(stderr, "%s: Cannot receive frame from video codec.\n", pathname.c_str());
972                         *error = true;
973                         return AVFrameWithDeleter(nullptr);
974                 }
975         } while (!eof);
976
977         if (frame_finished)
978                 return video_avframe;
979         else
980                 return AVFrameWithDeleter(nullptr);
981 }
982
983 void FFmpegCapture::convert_audio(const AVFrame *audio_avframe, FrameAllocator::Frame *audio_frame, AudioFormat *audio_format)
984 {
985         // Decide on a format. If there already is one in this audio frame,
986         // we're pretty much forced to use it. If not, we try to find an exact match.
987         // If that still doesn't work, we default to 32-bit signed chunked
988         // (float would be nice, but there's really no way to signal that yet).
989         AVSampleFormat dst_format;
990         if (audio_format->bits_per_sample == 0) {
991                 switch (audio_avframe->format) {
992                 case AV_SAMPLE_FMT_S16:
993                 case AV_SAMPLE_FMT_S16P:
994                         audio_format->bits_per_sample = 16;
995                         dst_format = AV_SAMPLE_FMT_S16;
996                         break;
997                 case AV_SAMPLE_FMT_S32:
998                 case AV_SAMPLE_FMT_S32P:
999                 default:
1000                         audio_format->bits_per_sample = 32;
1001                         dst_format = AV_SAMPLE_FMT_S32;
1002                         break;
1003                 }
1004         } else if (audio_format->bits_per_sample == 16) {
1005                 dst_format = AV_SAMPLE_FMT_S16;
1006         } else if (audio_format->bits_per_sample == 32) {
1007                 dst_format = AV_SAMPLE_FMT_S32;
1008         } else {
1009                 assert(false);
1010         }
1011         audio_format->num_channels = 2;
1012
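        // Some decoders hand us frames without a channel layout set;
        // if so, guess a default layout from the channel count.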
1013         int64_t channel_layout = audio_avframe->channel_layout;
1014         if (channel_layout == 0) {
1015                 channel_layout = av_get_default_channel_layout(audio_avframe->channels);
1016         }
1017
1018         if (resampler == nullptr ||
1019             audio_avframe->format != last_src_format ||
1020             dst_format != last_dst_format ||
1021             channel_layout != last_channel_layout ||
1022             audio_avframe->sample_rate != last_sample_rate) {
1023                 swr_free(&resampler);
1024                 resampler = swr_alloc_set_opts(nullptr,
1025                                                /*out_ch_layout=*/AV_CH_LAYOUT_STEREO_DOWNMIX,
1026                                                /*out_sample_fmt=*/dst_format,
1027                                                /*out_sample_rate=*/OUTPUT_FREQUENCY,
1028                                                /*in_ch_layout=*/channel_layout,
1029                                                /*in_sample_fmt=*/AVSampleFormat(audio_avframe->format),
1030                                                /*in_sample_rate=*/audio_avframe->sample_rate,
1031                                                /*log_offset=*/0,
1032                                                /*log_ctx=*/nullptr);
1033
1034                 if (resampler == nullptr) {
1035                         fprintf(stderr, "Allocating resampler failed.\n");
1036                         abort();
1037                 }
1038
1039                 if (swr_init(resampler) < 0) {
1040                         fprintf(stderr, "Could not open resample context.\n");
1041                         abort();
1042                 }
1043
1044                 last_src_format = AVSampleFormat(audio_avframe->format);
1045                 last_dst_format = dst_format;
1046                 last_channel_layout = channel_layout;
1047                 last_sample_rate = audio_avframe->sample_rate;
1048         }
1049
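        // The resampler always outputs interleaved stereo, so each output
        // sample frame is two channels wide.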
1050         size_t bytes_per_sample = (audio_format->bits_per_sample / 8) * 2;
1051         size_t num_samples_room = (audio_frame->size - audio_frame->len) / bytes_per_sample;
1052
1053         uint8_t *data = audio_frame->data + audio_frame->len;
1054         int out_samples = swr_convert(resampler, &data, num_samples_room,
1055                 const_cast<const uint8_t **>(audio_avframe->data), audio_avframe->nb_samples);
1056         if (out_samples < 0) {
1057                 fprintf(stderr, "Audio conversion failed.\n");
1058                 abort();
1059         }
1060
1061         audio_frame->len += out_samples * bytes_per_sample;
1062 }
1063
1064 VideoFormat FFmpegCapture::construct_video_format(const AVFrame *frame, AVRational video_timebase)
1065 {
1066         VideoFormat video_format;
1067         video_format.width = frame_width(frame);
1068         video_format.height = frame_height(frame);
1069         if (pixel_format == bmusb::PixelFormat_8BitBGRA) {
1070                 video_format.stride = frame_width(frame) * 4;
1071         } else if (pixel_format == FFmpegCapture::PixelFormat_NV12) {
1072                 video_format.stride = frame_width(frame);
1073         } else {
1074                 assert(pixel_format == bmusb::PixelFormat_8BitYCbCrPlanar);
1075                 video_format.stride = frame_width(frame);
1076         }
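        // pkt_duration is in units of the stream timebase, so the length of the frame
        // in seconds is pkt_duration * num / den; the frame rate is the inverse of that.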
1077         video_format.frame_rate_nom = video_timebase.den;
1078         video_format.frame_rate_den = frame->pkt_duration * video_timebase.num;
1079         video_format.has_signal = true;
1080         video_format.is_connected = true;
1081         return video_format;
1082 }
1083
1084 UniqueFrame FFmpegCapture::make_video_frame(const AVFrame *frame, const string &pathname, bool *error)
1085 {
1086         *error = false;
1087
1088         UniqueFrame video_frame(video_frame_allocator->alloc_frame());
1089         if (video_frame->data == nullptr) {
1090                 return video_frame;
1091         }
1092
1093         if (sws_ctx == nullptr ||
1094             sws_last_width != frame->width ||
1095             sws_last_height != frame->height ||
1096             sws_last_src_format != frame->format) {
1097                 sws_dst_format = decide_dst_format(AVPixelFormat(frame->format), pixel_format);
1098                 sws_ctx.reset(
1099                         sws_getContext(frame->width, frame->height, AVPixelFormat(frame->format),
1100                                 frame_width(frame), frame_height(frame), sws_dst_format,
1101                                 SWS_BICUBIC, nullptr, nullptr, nullptr));
1102                 sws_last_width = frame->width;
1103                 sws_last_height = frame->height;
1104                 sws_last_src_format = frame->format;
1105         }
1106         if (sws_ctx == nullptr) {
1107                 fprintf(stderr, "%s: Could not create scaler context\n", pathname.c_str());
1108                 *error = true;
1109                 return video_frame;
1110         }
1111
1112         uint8_t *pic_data[4] = { nullptr, nullptr, nullptr, nullptr };
1113         int linesizes[4] = { 0, 0, 0, 0 };
1114         if (pixel_format == bmusb::PixelFormat_8BitBGRA) {
1115                 pic_data[0] = video_frame->data;
1116                 linesizes[0] = frame_width(frame) * 4;
1117                 video_frame->len = (frame_width(frame) * 4) * frame_height(frame);
1118         } else if (pixel_format == PixelFormat_NV12) {
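                // NV12: one full-resolution luma plane, followed by a single plane of
                // interleaved, 2x2-subsampled Cb and Cr.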
1119                 pic_data[0] = video_frame->data;
1120                 linesizes[0] = frame_width(frame);
1121
1122                 pic_data[1] = pic_data[0] + frame_width(frame) * frame_height(frame);
1123                 linesizes[1] = frame_width(frame);
1124
1125                 video_frame->len = (frame_width(frame) * 2) * frame_height(frame);
1126
1127                 const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(sws_dst_format);
1128                 current_frame_ycbcr_format = decode_ycbcr_format(desc, frame, is_mjpeg, &last_colorspace, &last_chroma_location);
1129         } else {
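                // Planar Y'CbCr: a full-resolution luma plane, then separate Cb and Cr planes
                // at whatever chroma resolution decide_dst_format() picked for us.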
1130                 assert(pixel_format == bmusb::PixelFormat_8BitYCbCrPlanar);
1131                 const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(sws_dst_format);
1132
1133                 int chroma_width = AV_CEIL_RSHIFT(int(frame_width(frame)), desc->log2_chroma_w);
1134                 int chroma_height = AV_CEIL_RSHIFT(int(frame_height(frame)), desc->log2_chroma_h);
1135
1136                 pic_data[0] = video_frame->data;
1137                 linesizes[0] = frame_width(frame);
1138
1139                 pic_data[1] = pic_data[0] + frame_width(frame) * frame_height(frame);
1140                 linesizes[1] = chroma_width;
1141
1142                 pic_data[2] = pic_data[1] + chroma_width * chroma_height;
1143                 linesizes[2] = chroma_width;
1144
1145                 video_frame->len = frame_width(frame) * frame_height(frame) + 2 * chroma_width * chroma_height;
1146
1147                 current_frame_ycbcr_format = decode_ycbcr_format(desc, frame, is_mjpeg, &last_colorspace, &last_chroma_location);
1148         }
1149         sws_scale(sws_ctx.get(), frame->data, frame->linesize, 0, frame->height, pic_data, linesizes);
1150
1151         return video_frame;
1152 }
1153
1154 int FFmpegCapture::interrupt_cb_thunk(void *opaque)
1155 {
1156         return reinterpret_cast<FFmpegCapture *>(opaque)->interrupt_cb();
1157 }
1158
1159 int FFmpegCapture::interrupt_cb()
1160 {
1161         return should_interrupt.load();
1162 }
1163
1164 unsigned FFmpegCapture::frame_width(const AVFrame *frame) const
1165 {
1166         if (width == 0) {
1167                 return frame->width;
1168         } else {
1169                 return width;
1170         }
1171 }
1172
1173 unsigned FFmpegCapture::frame_height(const AVFrame *frame) const
1174 {
1175         if (height == 0) {
1176                 return frame->height;
1177         } else {
1178                 return height;
1179         }
1180 }
1181
1182 #ifdef HAVE_SRT
1183 int FFmpegCapture::read_srt_thunk(void *opaque, uint8_t *buf, int buf_size)
1184 {
1185         return reinterpret_cast<FFmpegCapture *>(opaque)->read_srt(buf, buf_size);
1186 }
1187
1188 int FFmpegCapture::read_srt(uint8_t *buf, int buf_size)
1189 {
1190         SRT_MSGCTRL mc = srt_msgctrl_default;
1191         return srt_recvmsg2(srt_sock, reinterpret_cast<char *>(buf), buf_size, &mc);
1192 }
1193 #endif