#include "ffmpeg_capture.h"

#include "shared/shared_defs.h"

#include <movit/effect.h>
#include <movit/image_format.h>
#include <movit/ycbcr.h>

#include <assert.h>
#include <pthread.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/stat.h>
#include <unistd.h>

extern "C" {
#include <libavcodec/avcodec.h>
#include <libavcodec/codec.h>
#include <libavcodec/codec_id.h>
#include <libavcodec/codec_par.h>
#include <libavformat/avformat.h>
#include <libavutil/avutil.h>
#include <libavutil/buffer.h>
#include <libavutil/channel_layout.h>
#include <libavutil/common.h>
#include <libavutil/dict.h>
#include <libavutil/error.h>
#include <libavutil/frame.h>
#include <libavutil/hwcontext.h>
#include <libavutil/mathematics.h>
#include <libavutil/pixdesc.h>
#include <libavutil/pixfmt.h>
#include <libavutil/rational.h>
#include <libavutil/samplefmt.h>
#include <libavutil/version.h>
#include <libswresample/swresample.h>
#include <libswscale/swscale.h>
}

#include <chrono>
#include <limits>
#include <map>
#include <memory>
#include <mutex>
#include <string>
#include <thread>
#include <unordered_set>
#include <utility>
#include <vector>

#include <Eigen/Core>
#include <Eigen/LU>
#include <epoxy/egl.h>
#include <movit/colorspace_conversion_effect.h>

#ifdef HAVE_SRT
#include <srt/srt.h>
#endif

#include "bmusb/bmusb.h"
#include "shared/context.h"
#include "shared/ffmpeg_raii.h"
#include "ffmpeg_util.h"
#include "flags.h"
#include "ref_counted_frame.h"
#include "shared/timebase.h"
using namespace std;
using namespace std::chrono;
using namespace bmusb;
using namespace movit;
using namespace Eigen;

// Avoid deprecation warnings, but we don't want to drop FFmpeg 5.1 support just yet.
#if LIBAVUTIL_VERSION_INT >= AV_VERSION_INT(57, 30, 100)
#define pkt_duration duration
#endif
namespace {

steady_clock::time_point compute_frame_start(int64_t frame_pts, int64_t pts_origin, const AVRational &video_timebase, const steady_clock::time_point &origin, double rate)
{
	const duration<double> pts((frame_pts - pts_origin) * double(video_timebase.num) / double(video_timebase.den));
	return origin + duration_cast<steady_clock::duration>(pts / rate);
}
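
// A quick sanity check of the math above (hypothetical numbers, not from
// any real stream): with video_timebase = 1/90000 (typical for MPEG-TS),
// pts_origin = 0 and rate = 1.0, a frame with pts = 3000 starts
// 3000 * 1/90000 s = 33.3 ms after <origin>; with rate = 2.0, the same
// frame starts at 16.7 ms, i.e., playback runs twice as fast.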
bool changed_since(const std::string &pathname, const timespec &ts)
{
	struct stat buf;
	if (stat(pathname.c_str(), &buf) != 0) {
		fprintf(stderr, "%s: Couldn't check for new version, leaving the old in place.\n", pathname.c_str());
		return false;
	}
	return (buf.st_mtim.tv_sec != ts.tv_sec || buf.st_mtim.tv_nsec != ts.tv_nsec);
}
bool is_full_range(const AVPixFmtDescriptor *desc)
{
	// This is horrible, but there's no better way that I know of.
	return (strchr(desc->name, 'j') != nullptr);
}
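
// For example, FFmpeg names the full-range variants of its Y'CbCr formats
// with a "j" (for JPEG), so "yuvj420p" is detected as full range here,
// while plain "yuv420p" (limited range) is not.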
AVPixelFormat decide_dst_format(AVPixelFormat src_format, bmusb::PixelFormat dst_format_type)
{
	if (dst_format_type == bmusb::PixelFormat_8BitBGRA) {
		return AV_PIX_FMT_BGRA;
	}
	if (dst_format_type == FFmpegCapture::PixelFormat_NV12) {
		return AV_PIX_FMT_NV12;
	}

	assert(dst_format_type == bmusb::PixelFormat_8BitYCbCrPlanar);

	// If this is a non-Y'CbCr format, just convert to 4:4:4 Y'CbCr
	// and be done with it. It's too strange to spend a lot of time on.
	// (Let's hope there's no alpha.)
	const AVPixFmtDescriptor *src_desc = av_pix_fmt_desc_get(src_format);
	if (src_desc == nullptr ||
	    src_desc->nb_components != 3 ||
	    (src_desc->flags & AV_PIX_FMT_FLAG_RGB)) {
		return AV_PIX_FMT_YUV444P;
	}

	// The best for us would be Cb and Cr together if possible,
	// but FFmpeg doesn't support that except in the special case of
	// NV12, so we need to go to planar even for the case of NV12.
	// Thus, look for the closest (but no worse) 8-bit planar Y'CbCr format
	// that matches in color range. (This will also include the case of
	// the source format already being acceptable.)
	bool src_full_range = is_full_range(src_desc);
	const char *best_format = "yuv444p";
	unsigned best_score = numeric_limits<unsigned>::max();
	for (const AVPixFmtDescriptor *desc = av_pix_fmt_desc_next(nullptr);
	     desc;
	     desc = av_pix_fmt_desc_next(desc)) {
		// Find planar Y'CbCr formats only.
		if (desc->nb_components != 3) continue;
		if (desc->flags & AV_PIX_FMT_FLAG_RGB) continue;
		if (!(desc->flags & AV_PIX_FMT_FLAG_PLANAR)) continue;
		if (desc->comp[0].plane != 0 ||
		    desc->comp[1].plane != 1 ||
		    desc->comp[2].plane != 2) continue;

		// 8-bit formats only.
		if (desc->flags & AV_PIX_FMT_FLAG_BE) continue;
		if (desc->comp[0].depth != 8) continue;

		// Same or better chroma resolution only.
		int chroma_w_diff = src_desc->log2_chroma_w - desc->log2_chroma_w;
		int chroma_h_diff = src_desc->log2_chroma_h - desc->log2_chroma_h;
		if (chroma_w_diff < 0 || chroma_h_diff < 0)
			continue;

		// Matching full/limited range only.
		if (is_full_range(desc) != src_full_range)
			continue;

		// Pick something with as little excess chroma resolution as possible.
		unsigned score = (1 << (chroma_w_diff)) << chroma_h_diff;
		if (score < best_score) {
			best_score = score;
			best_format = desc->name;
		}
	}
	return av_get_pix_fmt(best_format);
}
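
// A few examples of what the search above ends up picking (assuming the
// standard FFmpeg pixel format names; the score is (1 << chroma_w_diff)
// << chroma_h_diff, so an exact match scores 1):
//
//   decide_dst_format(AV_PIX_FMT_YUV420P, PixelFormat_8BitYCbCrPlanar)
//     => AV_PIX_FMT_YUV420P (the source itself matches, score 1)
//   decide_dst_format(AV_PIX_FMT_YUVJ422P, PixelFormat_8BitYCbCrPlanar)
//     => AV_PIX_FMT_YUVJ422P (the full-range 4:2:2 match)
//   decide_dst_format(AV_PIX_FMT_RGB24, PixelFormat_8BitYCbCrPlanar)
//     => AV_PIX_FMT_YUV444P (the RGB early-out above)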
YCbCrFormat decode_ycbcr_format(const AVPixFmtDescriptor *desc, const AVFrame *frame, bool is_mjpeg, AVColorSpace *last_colorspace, AVChromaLocation *last_chroma_location)
{
	YCbCrFormat format;
	AVColorSpace colorspace = frame->colorspace;
	switch (colorspace) {
	case AVCOL_SPC_BT709:
		format.luma_coefficients = YCBCR_REC_709;
		break;
	case AVCOL_SPC_BT470BG:
	case AVCOL_SPC_SMPTE170M:
	case AVCOL_SPC_SMPTE240M:
		format.luma_coefficients = YCBCR_REC_601;
		break;
	case AVCOL_SPC_BT2020_NCL:
		format.luma_coefficients = YCBCR_REC_2020;
		break;
	case AVCOL_SPC_UNSPECIFIED:
		format.luma_coefficients = (frame->height >= 720 ? YCBCR_REC_709 : YCBCR_REC_601);
		break;
	default:
		if (colorspace != *last_colorspace) {
			fprintf(stderr, "Unknown Y'CbCr coefficient enum %d from FFmpeg; choosing Rec. 709.\n",
				colorspace);
		}
		format.luma_coefficients = YCBCR_REC_709;
		break;
	}
	*last_colorspace = colorspace;

	format.full_range = is_full_range(desc);
	format.num_levels = 1 << desc->comp[0].depth;
	format.chroma_subsampling_x = 1 << desc->log2_chroma_w;
	format.chroma_subsampling_y = 1 << desc->log2_chroma_h;

	switch (frame->chroma_location) {
	case AVCHROMA_LOC_LEFT:
		format.cb_x_position = 0.0;
		format.cb_y_position = 0.5;
		break;
	case AVCHROMA_LOC_CENTER:
		format.cb_x_position = 0.5;
		format.cb_y_position = 0.5;
		break;
	case AVCHROMA_LOC_TOPLEFT:
		format.cb_x_position = 0.0;
		format.cb_y_position = 0.0;
		break;
	case AVCHROMA_LOC_TOP:
		format.cb_x_position = 0.5;
		format.cb_y_position = 0.0;
		break;
	case AVCHROMA_LOC_BOTTOMLEFT:
		format.cb_x_position = 0.0;
		format.cb_y_position = 1.0;
		break;
	case AVCHROMA_LOC_BOTTOM:
		format.cb_x_position = 0.5;
		format.cb_y_position = 1.0;
		break;
	default:
		if (frame->chroma_location != *last_chroma_location) {
			fprintf(stderr, "Unknown chroma location coefficient enum %d from FFmpeg; choosing center.\n",
				frame->chroma_location);
		}
		format.cb_x_position = 0.5;
		format.cb_y_position = 0.5;
		break;
	}
	*last_chroma_location = frame->chroma_location;

	if (is_mjpeg && !format.full_range) {
		// Limited-range MJPEG is only detected by FFmpeg whenever a special
		// JPEG comment is set, which means that in practice, the stream is
		// almost certainly generated by Futatabi. Override FFmpeg's forced
		// MJPEG defaults (it disregards the values set in the mux) with what
		// Futatabi sets.
		format.luma_coefficients = YCBCR_REC_709;
		format.cb_x_position = 0.0;
		format.cb_y_position = 0.5;
	}

	format.cr_x_position = format.cb_x_position;
	format.cr_y_position = format.cb_y_position;
	return format;
}
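
// For reference, the two sitings one sees most in practice (assuming 4:2:0
// material): AVCHROMA_LOC_LEFT (cb_x = 0.0, cb_y = 0.5) is what MPEG-2 and
// H.264 typically use, while AVCHROMA_LOC_CENTER (cb_x = 0.5, cb_y = 0.5)
// is JPEG/MJPEG siting.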
RGBTriplet get_neutral_color(AVDictionary *metadata)
{
	if (metadata == nullptr) {
		return RGBTriplet(1.0f, 1.0f, 1.0f);
	}
	AVDictionaryEntry *entry = av_dict_get(metadata, "WhitePoint", nullptr, 0);
	if (entry == nullptr) {
		return RGBTriplet(1.0f, 1.0f, 1.0f);
	}

	unsigned x_nom, x_den, y_nom, y_den;
	if (sscanf(entry->value, " %u:%u , %u:%u", &x_nom, &x_den, &y_nom, &y_den) != 4) {
		fprintf(stderr, "WARNING: Unable to parse white point '%s', using default white point\n", entry->value);
		return RGBTriplet(1.0f, 1.0f, 1.0f);
	}

	double x = double(x_nom) / x_den;
	double y = double(y_nom) / y_den;
	double z = 1.0 - x - y;

	Matrix3d rgb_to_xyz_matrix = movit::ColorspaceConversionEffect::get_xyz_matrix(COLORSPACE_sRGB);
	Vector3d rgb = rgb_to_xyz_matrix.inverse() * Vector3d(x, y, z);

	return RGBTriplet(rgb[0], rgb[1], rgb[2]);
}
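
// Example (hypothetical metadata values, in the format Futatabi writes):
// "WhitePoint" = "3127:10000,3290:10000" is x = 0.3127, y = 0.3290, i.e.,
// D65. Since D65 is also the sRGB white point, the matrix math above then
// yields (approximately) equal R, G and B, i.e., no white-balance correction.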
}  // namespace

FFmpegCapture::FFmpegCapture(const string &filename, unsigned width, unsigned height, QSurface *surface)
	: filename(filename), width(width), height(height), video_timebase{1, 1}, surface(surface)
{
	description = "Video: " + filename;

	last_frame = steady_clock::now();

	avformat_network_init();  // In case someone wants this.
}
#ifdef HAVE_SRT
FFmpegCapture::FFmpegCapture(int srt_sock, const string &stream_id, QSurface *surface)
	: srt_sock(srt_sock),
	  width(0),  // Don't resize; SRT streams typically have stable resolution, and should behave much like regular cards in general.
	  height(0),
	  pixel_format(bmusb::PixelFormat_8BitYCbCrPlanar),
	  video_timebase{1, 1},
	  surface(surface)
{
	if (stream_id.empty()) {
		description = "SRT stream";
	} else {
		description = stream_id;
	}
	play_as_fast_as_possible = true;

	last_frame = steady_clock::now();
}
#endif
FFmpegCapture::~FFmpegCapture()
{
	if (has_dequeue_callbacks) {
		dequeue_cleanup_callback();
	}
	swr_free(&resampler);
#ifdef HAVE_SRT
	if (srt_sock != -1) {
		srt_close(srt_sock);
	}
#endif
}
void FFmpegCapture::configure_card()
{
	if (video_frame_allocator == nullptr) {
		owned_video_frame_allocator.reset(new MallocFrameAllocator(FRAME_SIZE, NUM_QUEUED_VIDEO_FRAMES));
		set_video_frame_allocator(owned_video_frame_allocator.get());
	}
	if (audio_frame_allocator == nullptr) {
		// Audio can come out in pretty large chunks, so increase from the default 1 MB.
		owned_audio_frame_allocator.reset(new MallocFrameAllocator(1 << 20, NUM_QUEUED_AUDIO_FRAMES));
		set_audio_frame_allocator(owned_audio_frame_allocator.get());
	}
}
void FFmpegCapture::start_bm_capture()
{
	if (running) {
		return;
	}
	running = true;
	producer_thread_should_quit.unquit();
	producer_thread = thread(&FFmpegCapture::producer_thread_func, this);
}

void FFmpegCapture::stop_dequeue_thread()
{
	if (!running) {
		return;
	}
	running = false;
	producer_thread_should_quit.quit();
	producer_thread.join();
}
std::map<uint32_t, VideoMode> FFmpegCapture::get_available_video_modes() const
{
	// Note: This will never really be shown in the UI.
	VideoMode mode;

	char buf[256];
	snprintf(buf, sizeof(buf), "%ux%u", sws_last_width, sws_last_height);
	mode.name = buf;

	mode.autodetect = false;
	mode.width = sws_last_width;
	mode.height = sws_last_height;
	mode.frame_rate_num = 60;
	mode.frame_rate_den = 1;
	mode.interlaced = false;

	return {{ 0, mode }};
}
void FFmpegCapture::producer_thread_func()
{
	char thread_name[16];
	snprintf(thread_name, sizeof(thread_name), "FFmpeg_C_%d", card_index);
	pthread_setname_np(pthread_self(), thread_name);

	// We need a context in case create_frame() needs to reallocate something.
	// (If none is given, we are probably in Kaeru, which uses MallocFrameAllocator
	// anyway, which doesn't reallocate currently and definitely doesn't need
	// an active OpenGL context to do so.)
	QOpenGLContext *context = nullptr;
	if (surface != nullptr) {
		context = create_context(this->surface);
		eglBindAPI(EGL_OPENGL_API);
		if (!make_current(context, this->surface)) {
			printf("display=%p surface=%p context=%p curr=%p err=%d\n", eglGetCurrentDisplay(), this->surface, context, eglGetCurrentContext(),
				eglGetError());
			abort();
		}
	}
	while (!producer_thread_should_quit.should_quit()) {
		string filename_copy;
		{
			lock_guard<mutex> lock(filename_mu);
			filename_copy = filename;
		}

		string pathname;
		if (srt_sock == -1) {
			pathname = search_for_file(filename_copy);
		} else {
			pathname = description;
		}
		if (pathname.empty()) {
			send_disconnected_frame();
			if (play_once) {
				break;
			}
			producer_thread_should_quit.sleep_for(seconds(1));
			fprintf(stderr, "%s not found, sleeping one second and trying again...\n", filename_copy.c_str());
			continue;
		}
		should_interrupt = false;
		if (!play_video(pathname)) {
			// Error.
			send_disconnected_frame();
			if (play_once) {
				break;
			}
			fprintf(stderr, "Error when playing %s, sleeping one second and trying again...\n", pathname.c_str());
			producer_thread_should_quit.sleep_for(seconds(1));
			continue;
		}

		if (play_once) {
			send_disconnected_frame();
			break;
		}

		// Probably just EOF, will exit the loop above on next test.
	}

	if (has_dequeue_callbacks) {
		dequeue_cleanup_callback();
		has_dequeue_callbacks = false;
	}

	delete_context(context);
}
void FFmpegCapture::send_disconnected_frame()
{
	// Send an empty frame to signal that we have no signal anymore.
	FrameAllocator::Frame video_frame = video_frame_allocator->alloc_frame();
	size_t frame_width = width == 0 ? global_flags.width : width;
	size_t frame_height = height == 0 ? global_flags.height : height;
	if (video_frame.data) {
		VideoFormat video_format;
		video_format.width = frame_width;
		video_format.height = frame_height;
		video_format.frame_rate_nom = 60;
		video_format.frame_rate_den = 1;
		video_format.is_connected = false;
		if (pixel_format == bmusb::PixelFormat_8BitBGRA) {
			video_format.stride = frame_width * 4;
			video_frame.len = frame_width * frame_height * 4;
			memset(video_frame.data, 0, video_frame.len);
		} else {
			video_format.stride = frame_width;
			current_frame_ycbcr_format.luma_coefficients = YCBCR_REC_709;
			current_frame_ycbcr_format.full_range = true;
			current_frame_ycbcr_format.num_levels = 256;
			current_frame_ycbcr_format.chroma_subsampling_x = 2;
			current_frame_ycbcr_format.chroma_subsampling_y = 2;
			current_frame_ycbcr_format.cb_x_position = 0.0f;
			current_frame_ycbcr_format.cb_y_position = 0.0f;
			current_frame_ycbcr_format.cr_x_position = 0.0f;
			current_frame_ycbcr_format.cr_y_position = 0.0f;
			video_frame.len = frame_width * frame_height * 2;
			memset(video_frame.data, 0, frame_width * frame_height);
			memset(video_frame.data + frame_width * frame_height, 128, frame_width * frame_height);  // Valid for both NV12 and planar.
		}

		if (frame_callback != nullptr) {
			frame_callback(-1, AVRational{1, TIMEBASE}, -1, AVRational{1, TIMEBASE}, timecode++,
				video_frame, /*video_offset=*/0, video_format,
				FrameAllocator::Frame(), /*audio_offset=*/0, AudioFormat());
		}
		last_frame_was_connected = false;
	}

	if (card_disconnected_callback != nullptr) {
		card_disconnected_callback();
	}
}
template<AVHWDeviceType type>
AVPixelFormat get_hw_format(AVCodecContext *ctx, const AVPixelFormat *fmt)
{
	bool found_config_of_right_type = false;
	for (int i = 0;; ++i) {  // Termination condition inside loop.
		const AVCodecHWConfig *config = avcodec_get_hw_config(ctx->codec, i);
		if (config == nullptr) {  // End of list.
			break;
		}
		if (!(config->methods & AV_CODEC_HW_CONFIG_METHOD_HW_DEVICE_CTX) ||
		    config->device_type != type) {
			// Not interesting for us.
			continue;
		}

		// We have a config of the right type, but does it actually support
		// the pixel format we want? (Seemingly, FFmpeg's way of signaling errors
		// is to just replace the pixel format with a software-decoded one,
		// such as yuv420p.)
		found_config_of_right_type = true;
		for (const AVPixelFormat *fmt_ptr = fmt; *fmt_ptr != -1; ++fmt_ptr) {
			if (config->pix_fmt == *fmt_ptr) {
				fprintf(stderr, "Initialized '%s' hardware decoding for codec '%s'.\n",
					av_hwdevice_get_type_name(type), ctx->codec->name);
				if (ctx->profile == FF_PROFILE_H264_BASELINE) {
					fprintf(stderr, "WARNING: Stream claims to be H.264 Baseline, which is generally poorly supported in hardware decoders.\n");
					fprintf(stderr, "         Consider encoding it as Constrained Baseline, Main or High instead.\n");
					fprintf(stderr, "         Decoding might fail and fall back to software.\n");
				}
				return config->pix_fmt;
			}
		}
		fprintf(stderr, "Decoder '%s' supports only these pixel formats:", ctx->codec->name);
		unordered_set<AVPixelFormat> seen;
		for (const AVPixelFormat *fmt_ptr = fmt; *fmt_ptr != -1; ++fmt_ptr) {
			if (!seen.count(*fmt_ptr)) {
				fprintf(stderr, " %s", av_get_pix_fmt_name(*fmt_ptr));
				seen.insert(*fmt_ptr);
			}
		}
		fprintf(stderr, " (wanted %s for hardware acceleration)\n", av_get_pix_fmt_name(config->pix_fmt));
	}

	if (!found_config_of_right_type) {
		fprintf(stderr, "Decoder '%s' does not support device type '%s'.\n", ctx->codec->name, av_hwdevice_get_type_name(type));
	}

	// We found no VA-API formats, so take the first software format.
	for (const AVPixelFormat *fmt_ptr = fmt; *fmt_ptr != -1; ++fmt_ptr) {
		if ((av_pix_fmt_desc_get(*fmt_ptr)->flags & AV_PIX_FMT_FLAG_HWACCEL) == 0) {
			fprintf(stderr, "Falling back to software format %s.\n", av_get_pix_fmt_name(*fmt_ptr));
			return *fmt_ptr;
		}
	}

	// Fallback: Just return anything. (Should never really happen.)
	return fmt[0];
}
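
// This template is instantiated per device type and installed as the
// get_format callback in play_video() below, e.g.
// video_codec_ctx->get_format = get_hw_format<AV_HWDEVICE_TYPE_VDPAU>;
// FFmpeg then calls it with the candidate pixel formats whenever it
// (re)configures the decoder.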
bool FFmpegCapture::play_video(const string &pathname)
{
	// Note: Call before open, not after; otherwise, there's a race.
	// (There is now, too, but it tips the correct way. We could use fstat()
	// if we had the file descriptor.)
	timespec last_modified;
	struct stat buf;
	if (stat(pathname.c_str(), &buf) != 0) {
		// Probably some sort of protocol, so can't stat.
		last_modified.tv_sec = -1;
	} else {
		last_modified = buf.st_mtim;
	}
	last_colorspace = static_cast<AVColorSpace>(-1);
	last_chroma_location = static_cast<AVChromaLocation>(-1);

	AVFormatContextWithCloser format_ctx;
	if (srt_sock == -1) {
		// Regular file (or stream).
		frame_timeout_started = steady_clock::now();
		frame_timeout_valid = true;
		format_ctx = avformat_open_input_unique(pathname.c_str(), /*fmt=*/nullptr,
			/*options=*/nullptr,
			AVIOInterruptCB{ &FFmpegCapture::interrupt_cb_thunk, this });
		frame_timeout_valid = false;
	} else {
#ifdef HAVE_SRT
		// SRT socket, already opened.
		const AVInputFormat *mpegts_fmt = av_find_input_format("mpegts");
		format_ctx = avformat_open_input_unique(&FFmpegCapture::read_srt_thunk, this,
			mpegts_fmt, /*options=*/nullptr,
			AVIOInterruptCB{ &FFmpegCapture::interrupt_cb_thunk, this });
#else
		assert(false);
#endif
	}
	if (format_ctx == nullptr) {
		fprintf(stderr, "%s: Error opening file\n", pathname.c_str());
		return false;
	}

	if (avformat_find_stream_info(format_ctx.get(), nullptr) < 0) {
		fprintf(stderr, "%s: Error finding stream info\n", pathname.c_str());
		return false;
	}

	int video_stream_index = find_stream_index(format_ctx.get(), AVMEDIA_TYPE_VIDEO);
	if (video_stream_index == -1) {
		fprintf(stderr, "%s: No video stream found\n", pathname.c_str());
		return false;
	}

	int audio_stream_index = find_stream_index(format_ctx.get(), AVMEDIA_TYPE_AUDIO);
	int subtitle_stream_index = find_stream_index(format_ctx.get(), AVMEDIA_TYPE_SUBTITLE);
	has_last_subtitle = false;
	// Open video decoder.
	const AVCodecParameters *video_codecpar = format_ctx->streams[video_stream_index]->codecpar;
	const AVCodec *video_codec = avcodec_find_decoder(video_codecpar->codec_id);

	video_timebase = format_ctx->streams[video_stream_index]->time_base;
	AVCodecContextWithDeleter video_codec_ctx = avcodec_alloc_context3_unique(nullptr);
	if (avcodec_parameters_to_context(video_codec_ctx.get(), video_codecpar) < 0) {
		fprintf(stderr, "%s: Cannot fill video codec parameters\n", pathname.c_str());
		return false;
	}
	if (video_codec == nullptr) {
		fprintf(stderr, "%s: Cannot find video decoder\n", pathname.c_str());
		return false;
	}

	// Seemingly, it's not too easy to make something that just initializes
	// “whatever goes”, so we don't get CUDA or VULKAN or whatever here
	// without enumerating through several different types.
	// VA-API and VDPAU will do for now. We prioritize VDPAU for the
	// simple reason that there's a VA-API-via-VDPAU emulation for NVidia
	// cards that seems to work, but just hangs when trying to transfer the frame.
	//
	// Note that we don't actually check codec support beforehand,
	// so if you have a low-end VDPAU device but a high-end VA-API device,
	// you lose out on the extra codec support from the latter.
	AVBufferRef *hw_device_ctx = nullptr;
	if (av_hwdevice_ctx_create(&hw_device_ctx, AV_HWDEVICE_TYPE_VDPAU, nullptr, nullptr, 0) >= 0) {
		video_codec_ctx->hw_device_ctx = av_buffer_ref(hw_device_ctx);
		video_codec_ctx->get_format = get_hw_format<AV_HWDEVICE_TYPE_VDPAU>;
	} else if (av_hwdevice_ctx_create(&hw_device_ctx, AV_HWDEVICE_TYPE_VAAPI, nullptr, nullptr, 0) >= 0) {
		video_codec_ctx->hw_device_ctx = av_buffer_ref(hw_device_ctx);
		video_codec_ctx->get_format = get_hw_format<AV_HWDEVICE_TYPE_VAAPI>;
	} else {
		fprintf(stderr, "Failed to initialize VA-API or VDPAU for FFmpeg acceleration. Decoding video in software.\n");
	}

	if (avcodec_open2(video_codec_ctx.get(), video_codec, nullptr) < 0) {
		fprintf(stderr, "%s: Cannot open video decoder\n", pathname.c_str());
		return false;
	}
	unique_ptr<AVCodecContext, decltype(avcodec_close)*> video_codec_ctx_cleanup(
		video_codec_ctx.get(), avcodec_close);

	// Used in decode_ycbcr_format().
	is_mjpeg = video_codecpar->codec_id == AV_CODEC_ID_MJPEG;
	// Open audio decoder, if we have audio.
	AVCodecContextWithDeleter audio_codec_ctx;
	if (audio_stream_index != -1) {
		audio_codec_ctx = avcodec_alloc_context3_unique(nullptr);
		const AVCodecParameters *audio_codecpar = format_ctx->streams[audio_stream_index]->codecpar;
		audio_timebase = format_ctx->streams[audio_stream_index]->time_base;
		if (avcodec_parameters_to_context(audio_codec_ctx.get(), audio_codecpar) < 0) {
			fprintf(stderr, "%s: Cannot fill audio codec parameters\n", pathname.c_str());
			return false;
		}
		const AVCodec *audio_codec = avcodec_find_decoder(audio_codecpar->codec_id);
		if (audio_codec == nullptr) {
			fprintf(stderr, "%s: Cannot find audio decoder\n", pathname.c_str());
			return false;
		}
		if (avcodec_open2(audio_codec_ctx.get(), audio_codec, nullptr) < 0) {
			fprintf(stderr, "%s: Cannot open audio decoder\n", pathname.c_str());
			return false;
		}
	}
	unique_ptr<AVCodecContext, decltype(avcodec_close)*> audio_codec_ctx_cleanup(
		audio_codec_ctx.get(), avcodec_close);
	internal_rewind();

	// Main loop.
	bool first_frame = true;
	int consecutive_errors = 0;
	while (!producer_thread_should_quit.should_quit()) {
		if (process_queued_commands(format_ctx.get(), pathname, last_modified, /*rewound=*/nullptr)) {
			return true;
		}
		if (should_interrupt.load()) {
			// Check as a failsafe, so that we don't need to rely on avio if we don't have to.
			return false;
		}
		UniqueFrame audio_frame = audio_frame_allocator->alloc_frame();
		AudioFormat audio_format;

		int64_t audio_pts;
		bool error;
		frame_timeout_started = steady_clock::now();
		frame_timeout_valid = true;
		AVFrameWithDeleter frame = decode_frame(format_ctx.get(), video_codec_ctx.get(), audio_codec_ctx.get(),
			pathname, video_stream_index, audio_stream_index, subtitle_stream_index, audio_frame.get(), &audio_format, &audio_pts, &error);
		frame_timeout_valid = false;
		if (should_interrupt.load()) {
			// Abort no matter whether we got a frame or not.
			return false;
		}
		if (error) {
			if (++consecutive_errors >= 100) {
				fprintf(stderr, "More than 100 consecutive error video frames, aborting playback.\n");
				return false;
			} else {
				continue;
			}
		} else {
			consecutive_errors = 0;
		}
		if (frame == nullptr) {
			// EOF. Loop back to the start if we can.
			if (format_ctx->pb != nullptr && format_ctx->pb->seekable == 0) {
				// Not seekable (but seemingly, sometimes av_seek_frame() would return 0 anyway,
				// so don't even try).
				return true;
			}
			if (av_seek_frame(format_ctx.get(), /*stream_index=*/-1, /*timestamp=*/0, /*flags=*/0) < 0) {
				fprintf(stderr, "%s: Rewind failed, not looping.\n", pathname.c_str());
				return true;
			}
			if (video_codec_ctx != nullptr) {
				avcodec_flush_buffers(video_codec_ctx.get());
			}
			if (audio_codec_ctx != nullptr) {
				avcodec_flush_buffers(audio_codec_ctx.get());
			}
			// If the file has changed since last time, return to get it reloaded.
			// Note that depending on how you move the file into place, you might
			// end up corrupting the one you're already playing, so this path
			// might not trigger.
			if (changed_since(pathname, last_modified)) {
				return true;
			}
			internal_rewind();
			continue;
		}
		VideoFormat video_format = construct_video_format(frame.get(), video_timebase);
		if (video_format.frame_rate_nom == 0 || video_format.frame_rate_den == 0) {
			// Invalid frame rate; try constructing it from the previous frame length.
			// (This is especially important if we are the master card, for SRT,
			// since it affects audio. Not all senders have good timebases
			// (e.g., Larix rounds first to timebase 1000 and then multiplies by
			// 90 from there, it seems), but it's much better to have an oscillating
			// value than just locking at 60.)
			if (last_pts != 0 && frame->pts > last_pts) {
				int64_t pts_diff = frame->pts - last_pts;
				video_format.frame_rate_nom = video_timebase.den;
				video_format.frame_rate_den = video_timebase.num * pts_diff;
			} else {
				video_format.frame_rate_nom = 60;
				video_format.frame_rate_den = 1;
			}
		}
		UniqueFrame video_frame = make_video_frame(frame.get(), pathname, &error);
		if (error) {
			return false;
		}

		for ( ;; ) {  // Sleep and send the frame; retry if a queued command wakes us up early.
			if (last_pts == 0 && pts_origin == 0) {
				pts_origin = frame->pts;
			}
			steady_clock::time_point now = steady_clock::now();
			if (play_as_fast_as_possible) {
				video_frame->received_timestamp = now;
				audio_frame->received_timestamp = now;
				next_frame_start = now;
			} else {
				next_frame_start = compute_frame_start(frame->pts, pts_origin, video_timebase, start, rate);
				if (first_frame && last_frame_was_connected) {
					// If reconnect took more than one second, this is probably a live feed,
					// and we should reset the resampler. (Or the rate is really, really low,
					// in which case a reset on the first frame is fine anyway.)
					if (duration<double>(next_frame_start - last_frame).count() >= 1.0) {
						last_frame_was_connected = false;
					}
				}
				video_frame->received_timestamp = next_frame_start;

				// The easiest way to get all the rate conversions etc. right is to move the
				// audio PTS into the video PTS timebase and go from there. (We'll get some
				// rounding issues, but they should not be a big problem.)
				int64_t audio_pts_as_video_pts = av_rescale_q(audio_pts, audio_timebase, video_timebase);
				audio_frame->received_timestamp = compute_frame_start(audio_pts_as_video_pts, pts_origin, video_timebase, start, rate);

				if (audio_frame->len != 0) {
					// The received timestamps in Nageru are measured after we've just received the frame.
					// However, pts (especially audio pts) is at the _beginning_ of the frame.
					// If we have locked audio, the distinction doesn't really matter, as pts is
					// on a relative scale and a fixed offset is fine. But if we don't, we will have
					// a different number of samples each time, which will cause huge audio jitter
					// and throw off the resampler.
					//
					// In a sense, we should have compensated by adding the frame and audio lengths
					// to video_frame->received_timestamp and audio_frame->received_timestamp respectively,
					// but that would mean extra waiting in sleep_until(). All we need is that they
					// are correct relative to each other, though (and to the other frames we send),
					// so just align the end of the audio frame, and we're fine.
					size_t num_samples = (audio_frame->len * 8) / audio_format.bits_per_sample / audio_format.num_channels;
					double offset = double(num_samples) / OUTPUT_FREQUENCY -
						double(video_format.frame_rate_den) / video_format.frame_rate_nom;
					audio_frame->received_timestamp += duration_cast<steady_clock::duration>(duration<double>(offset));
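
					// A worked example (hypothetical numbers): a 1024-sample
					// audio frame at a 48 kHz OUTPUT_FREQUENCY covers 21.3 ms;
					// against a 20 ms video frame (50 fps), offset = +1.3 ms,
					// so the audio timestamp is nudged 1.3 ms later and both
					// frames end at the same instant.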
				}

				if (duration<double>(now - next_frame_start).count() >= 0.1) {
					// If we don't have enough CPU to keep up, or if we have a live stream
					// where the initial origin was somehow wrong, we could be behind indefinitely.
					// In particular, this will give the audio resampler problems as it tries
					// to speed up to reduce the delay, hitting the low end of the buffer every time.
					fprintf(stderr, "%s: Playback %.0f ms behind, resetting time scale\n",
						pathname.c_str(),
						1e3 * duration<double>(now - next_frame_start).count());
					pts_origin = frame->pts;
					start = next_frame_start = now;
					timecode += TYPICAL_FPS * 2 + 1;
				}
			}
			bool finished_wakeup;
			if (play_as_fast_as_possible) {
				finished_wakeup = !producer_thread_should_quit.should_quit();
			} else {
				finished_wakeup = producer_thread_should_quit.sleep_until(next_frame_start);
			}
			if (finished_wakeup) {
				if (audio_frame->len > 0) {
					assert(audio_pts != -1);
				}
				if (!last_frame_was_connected) {
					// We're recovering from an error (or really slow load, see above).
					// Make sure to get the audio resampler reset. (This is a hack;
					// ideally, the frame callback should just accept a way to signal
					// audio discontinuity.)
					timecode += TYPICAL_FPS * 2 + 1;
				}
				last_neutral_color = get_neutral_color(frame->metadata);
				if (frame_callback != nullptr) {
					frame_callback(frame->pts, video_timebase, audio_pts, audio_timebase, timecode++,
						video_frame.get_and_release(), 0, video_format,
						audio_frame.get_and_release(), 0, audio_format);
				}
				first_frame = false;
				last_frame = steady_clock::now();
				last_frame_was_connected = true;
				break;
			} else {
				if (producer_thread_should_quit.should_quit()) break;

				bool rewound = false;
				if (process_queued_commands(format_ctx.get(), pathname, last_modified, &rewound)) {
					return true;
				}
				// If we just rewound, drop this frame on the floor and be done.
				if (rewound) {
					break;
				}
				// OK, we didn't, so probably a rate change. Recalculate next_frame_start,
				// but if it's now in the past, we'll reset the origin, so that we don't
				// generate a huge backlog of frames that we need to run through quickly.
				next_frame_start = compute_frame_start(frame->pts, pts_origin, video_timebase, start, rate);
				steady_clock::time_point now = steady_clock::now();
				if (next_frame_start < now) {
					pts_origin = frame->pts;
					start = next_frame_start = now;
				}
			}
		}

		last_pts = frame->pts;
	}
	return true;
}
void FFmpegCapture::internal_rewind()
{
	pts_origin = last_pts = 0;
	start = next_frame_start = steady_clock::now();
}
bool FFmpegCapture::process_queued_commands(AVFormatContext *format_ctx, const std::string &pathname, timespec last_modified, bool *rewound)
{
	// Process any queued commands from other threads.
	vector<QueuedCommand> commands;
	{
		lock_guard<mutex> lock(queue_mu);
		swap(commands, command_queue);
	}

	for (const QueuedCommand &cmd : commands) {
		switch (cmd.command) {
		case QueuedCommand::REWIND:
			if (av_seek_frame(format_ctx, /*stream_index=*/-1, /*timestamp=*/0, /*flags=*/0) < 0) {
				fprintf(stderr, "%s: Rewind failed, stopping play.\n", pathname.c_str());
			}
			// If the file has changed since last time, return to get it reloaded.
			// Note that depending on how you move the file into place, you might
			// end up corrupting the one you're already playing, so this path
			// might not trigger.
			if (changed_since(pathname, last_modified)) {
				return true;
			}
			internal_rewind();
			if (rewound != nullptr) {
				*rewound = true;
			}
			break;

		case QueuedCommand::CHANGE_RATE:
			// Change the origin to the last played frame.
			start = compute_frame_start(last_pts, pts_origin, video_timebase, start, rate);
			pts_origin = last_pts;
			rate = cmd.new_rate;
			play_as_fast_as_possible = (rate >= 10.0);
			break;
		}
	}
	return false;
}
AVFrameWithDeleter FFmpegCapture::decode_frame(AVFormatContext *format_ctx, AVCodecContext *video_codec_ctx, AVCodecContext *audio_codec_ctx,
	const std::string &pathname, int video_stream_index, int audio_stream_index, int subtitle_stream_index,
	FrameAllocator::Frame *audio_frame, AudioFormat *audio_format, int64_t *audio_pts, bool *error)
{
	*error = false;

	// Read packets until we have a frame or there are none left.
	bool frame_finished = false;
	AVFrameWithDeleter audio_avframe = av_frame_alloc_unique();
	AVFrameWithDeleter video_avframe = av_frame_alloc_unique();
	bool eof = false;
	*audio_pts = -1;
	bool has_audio = false;
	do {
		AVPacketWithDeleter pkt = av_packet_alloc_unique();
		pkt->data = nullptr;
		pkt->size = 0;
		if (av_read_frame(format_ctx, pkt.get()) == 0) {
			if (pkt->stream_index == audio_stream_index && audio_callback != nullptr) {
				audio_callback(pkt.get(), format_ctx->streams[audio_stream_index]->time_base);
			}
			if (pkt->stream_index == video_stream_index && video_callback != nullptr) {
				video_callback(pkt.get(), format_ctx->streams[video_stream_index]->time_base);
			}
			if (pkt->stream_index == video_stream_index && global_flags.transcode_video) {
				if (avcodec_send_packet(video_codec_ctx, pkt.get()) < 0) {
					fprintf(stderr, "%s: Cannot send packet to video codec.\n", pathname.c_str());
					*error = true;
					return AVFrameWithDeleter(nullptr);
				}
			} else if (pkt->stream_index == audio_stream_index && global_flags.transcode_audio) {
				has_audio = true;
				if (avcodec_send_packet(audio_codec_ctx, pkt.get()) < 0) {
					fprintf(stderr, "%s: Cannot send packet to audio codec.\n", pathname.c_str());
					*error = true;
					return AVFrameWithDeleter(nullptr);
				}
			} else if (pkt->stream_index == subtitle_stream_index) {
				last_subtitle = string(reinterpret_cast<const char *>(pkt->data), pkt->size);
				has_last_subtitle = true;
			}
		} else {
			eof = true;  // Or error, but ignore that for the time being.
		}

		// Decode audio, if any.
		if (has_audio) {
			for ( ;; ) {
				int err = avcodec_receive_frame(audio_codec_ctx, audio_avframe.get());
				if (err == 0) {
					if (*audio_pts == -1) {
						*audio_pts = audio_avframe->pts;
					}
					convert_audio(audio_avframe.get(), audio_frame, audio_format);
				} else if (err == AVERROR(EAGAIN)) {
					break;
				} else {
					fprintf(stderr, "%s: Cannot receive frame from audio codec.\n", pathname.c_str());
					*error = true;
					return AVFrameWithDeleter(nullptr);
				}
			}
		}

		// Decode video, if we have a frame.
		int err = avcodec_receive_frame(video_codec_ctx, video_avframe.get());
		if (err == 0) {
			if (video_avframe->format == AV_PIX_FMT_VAAPI ||
			    video_avframe->format == AV_PIX_FMT_VDPAU) {
				// Get the frame down to the CPU. (TODO: See if we can keep it
				// on the GPU all the way, since it will be going up again later.
				// However, this only works if the OpenGL GPU is the same one.)
				AVFrameWithDeleter sw_frame = av_frame_alloc_unique();
				int err = av_hwframe_transfer_data(sw_frame.get(), video_avframe.get(), 0);
				if (err != 0) {
					fprintf(stderr, "%s: Cannot transfer hardware video frame to software.\n", pathname.c_str());
					*error = true;
					return AVFrameWithDeleter(nullptr);
				}
				sw_frame->pts = video_avframe->pts;
				sw_frame->pkt_duration = video_avframe->pkt_duration;
				video_avframe = move(sw_frame);
			}
			frame_finished = true;
			break;
		} else if (err != AVERROR(EAGAIN)) {
			fprintf(stderr, "%s: Cannot receive frame from video codec.\n", pathname.c_str());
			*error = true;
			return AVFrameWithDeleter(nullptr);
		}
	} while (!eof);

	if (frame_finished)
		return video_avframe;
	else
		return AVFrameWithDeleter(nullptr);
}
void FFmpegCapture::convert_audio(const AVFrame *audio_avframe, FrameAllocator::Frame *audio_frame, AudioFormat *audio_format)
{
	// Decide on a format. If there already is one in this audio frame,
	// we're pretty much forced to use it. If not, we try to find an exact match.
	// If that still doesn't work, we default to 32-bit signed chunked
	// (float would be nice, but there's really no way to signal that yet).
	AVSampleFormat dst_format;
	if (audio_format->bits_per_sample == 0) {
		switch (audio_avframe->format) {
		case AV_SAMPLE_FMT_S16:
		case AV_SAMPLE_FMT_S16P:
			audio_format->bits_per_sample = 16;
			dst_format = AV_SAMPLE_FMT_S16;
			break;
		case AV_SAMPLE_FMT_S32:
		case AV_SAMPLE_FMT_S32P:
		default:
			audio_format->bits_per_sample = 32;
			dst_format = AV_SAMPLE_FMT_S32;
			break;
		}
	} else if (audio_format->bits_per_sample == 16) {
		dst_format = AV_SAMPLE_FMT_S16;
	} else if (audio_format->bits_per_sample == 32) {
		dst_format = AV_SAMPLE_FMT_S32;
	} else {
		assert(false);
	}
	audio_format->num_channels = 2;

	AVChannelLayout channel_layout = audio_avframe->ch_layout;
	if (!av_channel_layout_check(&channel_layout) ||
	    channel_layout.order == AV_CHANNEL_ORDER_UNSPEC) {
		av_channel_layout_default(&channel_layout, audio_avframe->ch_layout.nb_channels);
	}

	if (resampler == nullptr ||
	    audio_avframe->format != last_src_format ||
	    dst_format != last_dst_format ||
	    av_channel_layout_compare(&channel_layout, &last_channel_layout) != 0 ||
	    audio_avframe->sample_rate != last_sample_rate) {
		// TODO: When we get C++20, use AV_CHANNEL_LAYOUT_STEREO_DOWNMIX.
		AVChannelLayout stereo_downmix;
		stereo_downmix.order = AV_CHANNEL_ORDER_NATIVE;
		stereo_downmix.nb_channels = 2;
		stereo_downmix.u.mask = AV_CH_LAYOUT_STEREO_DOWNMIX;

		swr_free(&resampler);
		resampler = nullptr;
		int err = swr_alloc_set_opts2(&resampler,
			/*out_ch_layout=*/&stereo_downmix,
			/*out_sample_fmt=*/dst_format,
			/*out_sample_rate=*/OUTPUT_FREQUENCY,
			/*in_ch_layout=*/&channel_layout,
			/*in_sample_fmt=*/AVSampleFormat(audio_avframe->format),
			/*in_sample_rate=*/audio_avframe->sample_rate,
			/*log_offset=*/0,
			/*log_ctx=*/nullptr);

		if (err != 0 || resampler == nullptr) {
			fprintf(stderr, "Allocating resampler failed.\n");
			abort();
		}

		if (swr_init(resampler) < 0) {
			fprintf(stderr, "Could not open resample context.\n");
			abort();
		}

		last_src_format = AVSampleFormat(audio_avframe->format);
		last_dst_format = dst_format;
		last_channel_layout = channel_layout;
		last_sample_rate = audio_avframe->sample_rate;
	}

	size_t bytes_per_sample = (audio_format->bits_per_sample / 8) * 2;
	size_t num_samples_room = (audio_frame->size - audio_frame->len) / bytes_per_sample;

	uint8_t *data = audio_frame->data + audio_frame->len;
	int out_samples = swr_convert(resampler, &data, num_samples_room,
		const_cast<const uint8_t **>(audio_avframe->data), audio_avframe->nb_samples);
	if (out_samples < 0) {
		fprintf(stderr, "Audio conversion failed.\n");
		abort();
	}

	audio_frame->len += out_samples * bytes_per_sample;
}
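
// A note on sizing (a sketch, not a measurement): with 16-bit stereo
// output, bytes_per_sample above is 4, so the 1 MB audio frames allocated
// in configure_card() hold about 262k samples, or roughly 5.5 seconds
// assuming a 48 kHz OUTPUT_FREQUENCY; convert_audio() can thus absorb
// quite large audio chunks without overflowing num_samples_room.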
VideoFormat FFmpegCapture::construct_video_format(const AVFrame *frame, AVRational video_timebase)
{
	VideoFormat video_format;
	video_format.width = frame_width(frame);
	video_format.height = frame_height(frame);
	if (pixel_format == bmusb::PixelFormat_8BitBGRA) {
		video_format.stride = frame_width(frame) * 4;
	} else if (pixel_format == FFmpegCapture::PixelFormat_NV12) {
		video_format.stride = frame_width(frame);
	} else {
		assert(pixel_format == bmusb::PixelFormat_8BitYCbCrPlanar);
		video_format.stride = frame_width(frame);
	}
	video_format.frame_rate_nom = video_timebase.den;
	video_format.frame_rate_den = frame->pkt_duration * video_timebase.num;
	video_format.has_signal = true;
	video_format.is_connected = true;
	return video_format;
}
UniqueFrame FFmpegCapture::make_video_frame(const AVFrame *frame, const string &pathname, bool *error)
{
	*error = false;

	UniqueFrame video_frame(video_frame_allocator->create_frame(frame->width, frame->height, frame->width));
	if (video_frame->data == nullptr) {
		return video_frame;
	}

	if (sws_ctx == nullptr ||
	    sws_last_width != frame->width ||
	    sws_last_height != frame->height ||
	    sws_last_src_format != frame->format) {
		sws_dst_format = decide_dst_format(AVPixelFormat(frame->format), pixel_format);
		sws_ctx.reset(
			sws_getContext(frame->width, frame->height, AVPixelFormat(frame->format),
				frame_width(frame), frame_height(frame), sws_dst_format,
				SWS_BICUBIC, nullptr, nullptr, nullptr));
		sws_last_width = frame->width;
		sws_last_height = frame->height;
		sws_last_src_format = frame->format;
	}
	if (sws_ctx == nullptr) {
		fprintf(stderr, "%s: Could not create scaler context\n", pathname.c_str());
		*error = true;
		return video_frame;
	}

	uint8_t *pic_data[4] = { nullptr, nullptr, nullptr, nullptr };
	int linesizes[4] = { 0, 0, 0, 0 };
	if (pixel_format == bmusb::PixelFormat_8BitBGRA) {
		pic_data[0] = video_frame->data;
		linesizes[0] = frame_width(frame) * 4;
		video_frame->len = (frame_width(frame) * 4) * frame_height(frame);
	} else if (pixel_format == PixelFormat_NV12) {
		pic_data[0] = video_frame->data;
		linesizes[0] = frame_width(frame);

		pic_data[1] = pic_data[0] + frame_width(frame) * frame_height(frame);
		linesizes[1] = frame_width(frame);

		video_frame->len = (frame_width(frame) * 2) * frame_height(frame);

		const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(sws_dst_format);
		current_frame_ycbcr_format = decode_ycbcr_format(desc, frame, is_mjpeg, &last_colorspace, &last_chroma_location);
	} else {
		assert(pixel_format == bmusb::PixelFormat_8BitYCbCrPlanar);
		const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(sws_dst_format);

		int chroma_width = AV_CEIL_RSHIFT(int(frame_width(frame)), desc->log2_chroma_w);
		int chroma_height = AV_CEIL_RSHIFT(int(frame_height(frame)), desc->log2_chroma_h);

		pic_data[0] = video_frame->data;
		linesizes[0] = frame_width(frame);

		pic_data[1] = pic_data[0] + frame_width(frame) * frame_height(frame);
		linesizes[1] = chroma_width;

		pic_data[2] = pic_data[1] + chroma_width * chroma_height;
		linesizes[2] = chroma_width;

		video_frame->len = frame_width(frame) * frame_height(frame) + 2 * chroma_width * chroma_height;

		current_frame_ycbcr_format = decode_ycbcr_format(desc, frame, is_mjpeg, &last_colorspace, &last_chroma_location);
	}

	// FIXME: Currently, if the video is too high-res for one of the allocated
	// frames, we simply refuse to scale it here to avoid crashes. It would be better
	// if we could somehow signal getting larger frames, especially as 4K is a thing now.
	if (video_frame->len > video_frame->size) {
		fprintf(stderr, "%s: Decoded frame would be larger than supported frame size (%zu > %zu), not decoding.\n", pathname.c_str(), video_frame->len, video_frame->size);
		*error = true;
		return video_frame;
	}

	sws_scale(sws_ctx.get(), frame->data, frame->linesize, 0, frame->height, pic_data, linesizes);

	return video_frame;
}
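
// Plane layout sketch for the planar case above (hypothetical 1280x720
// frame converted to yuv420p, so chroma_width = 640, chroma_height = 360):
//
//   pic_data[0]: 1280 * 720 bytes of Y'
//   pic_data[1]:  640 * 360 bytes of Cb
//   pic_data[2]:  640 * 360 bytes of Cr
//
// for a total video_frame->len of 1280 * 720 * 1.5 = 1,382,400 bytes.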
int FFmpegCapture::interrupt_cb_thunk(void *opaque)
{
	return reinterpret_cast<FFmpegCapture *>(opaque)->interrupt_cb();
}

int FFmpegCapture::interrupt_cb()
{
	// If ten seconds is gone without anything happening, we assume that
	// we are in a network stream that died and FFmpeg just didn't
	// pick it up (or perhaps it just hung, keeping the connection open).
	// Called back approximately every 100 ms if something is hanging,
	// so we get more than enough accuracy for our purposes.
	if (!should_interrupt && frame_timeout_valid &&
	    duration<double>(steady_clock::now() - frame_timeout_started).count() >= 10.0) {
		string filename_copy;
		{
			lock_guard<mutex> lock(filename_mu);
			filename_copy = filename;
		}
		fprintf(stderr, "%s: No frame for more than 10 seconds, restarting stream.\n", filename_copy.c_str());
		should_interrupt = true;
	}
	return should_interrupt.load();
}
unsigned FFmpegCapture::frame_width(const AVFrame *frame) const
{
	if (width == 0) {
		return frame->width;
	} else {
		return width;
	}
}

unsigned FFmpegCapture::frame_height(const AVFrame *frame) const
{
	if (height == 0) {
		return frame->height;
	} else {
		return height;
	}
}
#ifdef HAVE_SRT
int FFmpegCapture::read_srt_thunk(void *opaque, uint8_t *buf, int buf_size)
{
	return reinterpret_cast<FFmpegCapture *>(opaque)->read_srt(buf, buf_size);
}

int FFmpegCapture::read_srt(uint8_t *buf, int buf_size)
{
	SRT_MSGCTRL mc = srt_msgctrl_default;
	return srt_recvmsg2(srt_sock, reinterpret_cast<char *>(buf), buf_size, &mc);
}
#endif