#include "ffmpeg_capture.h"

#include <assert.h>
#include <pthread.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/stat.h>
#include <unistd.h>

extern "C" {
#include <libavcodec/avcodec.h>
#include <libavformat/avformat.h>
#include <libavutil/avutil.h>
#include <libavutil/error.h>
#include <libavutil/frame.h>
#include <libavutil/imgutils.h>
#include <libavutil/mem.h>
#include <libavutil/pixfmt.h>
#include <libavutil/opt.h>
#include <libswresample/swresample.h>
#include <libswscale/swscale.h>
}

#include <chrono>
#include <cstdint>
#include <limits>
#include <map>
#include <memory>
#include <mutex>
#include <string>
#include <thread>
#include <utility>
#include <vector>

#include <Eigen/Core>
#include <Eigen/LU>
#include <movit/colorspace_conversion_effect.h>

#include <srt/srt.h>

#include "bmusb/bmusb.h"
#include "shared/ffmpeg_raii.h"
#include "ffmpeg_util.h"
#include "flags.h"
#include "image_input.h"
#include "ref_counted_frame.h"
#include "shared/timebase.h"
#define FRAME_SIZE (8 << 20)  // 8 MB.

using namespace std;
using namespace std::chrono;
using namespace bmusb;
using namespace movit;
using namespace Eigen;

namespace {
steady_clock::time_point compute_frame_start(int64_t frame_pts, int64_t pts_origin, const AVRational &video_timebase, const steady_clock::time_point &origin, double rate)
{
	const duration<double> pts((frame_pts - pts_origin) * double(video_timebase.num) / double(video_timebase.den));
	return origin + duration_cast<steady_clock::duration>(pts / rate);
}
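
// Worked example for compute_frame_start(): with the common MPEG-TS
// timebase {1, 90000}, a frame whose pts lies 90000 ticks past pts_origin
// starts 1.0 second after <origin> at rate 1.0, or 0.5 seconds after it
// at rate 2.0.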

bool changed_since(const std::string &pathname, const timespec &ts)
{
	// A negative tv_sec means we never managed to stat the file in the
	// first place (e.g., a network stream), so there is nothing to compare.
	if (ts.tv_sec < 0) {
		return false;
	}
	struct stat buf;
	if (stat(pathname.c_str(), &buf) != 0) {
		fprintf(stderr, "%s: Couldn't check for new version, leaving the old in place.\n", pathname.c_str());
		return false;
	}
	return (buf.st_mtim.tv_sec != ts.tv_sec || buf.st_mtim.tv_nsec != ts.tv_nsec);
}

bool is_full_range(const AVPixFmtDescriptor *desc)
{
	// This is horrible, but there's no better way that I know of
	// (e.g., yuvj420p is the full-range variant of yuv420p).
	return (strchr(desc->name, 'j') != nullptr);
}

AVPixelFormat decide_dst_format(AVPixelFormat src_format, bmusb::PixelFormat dst_format_type)
{
	if (dst_format_type == bmusb::PixelFormat_8BitBGRA) {
		return AV_PIX_FMT_BGRA;
	}
	if (dst_format_type == FFmpegCapture::PixelFormat_NV12) {
		return AV_PIX_FMT_NV12;
	}

	assert(dst_format_type == bmusb::PixelFormat_8BitYCbCrPlanar);

	// If this is a non-Y'CbCr format, just convert to 4:4:4 Y'CbCr
	// and be done with it. It's too strange to spend a lot of time on.
	// (Let's hope there's no alpha.)
	const AVPixFmtDescriptor *src_desc = av_pix_fmt_desc_get(src_format);
	if (src_desc == nullptr ||
	    src_desc->nb_components != 3 ||
	    (src_desc->flags & AV_PIX_FMT_FLAG_RGB)) {
		return AV_PIX_FMT_YUV444P;
	}

	// The best for us would be Cb and Cr together if possible,
	// but FFmpeg doesn't support that except in the special case of
	// NV12, so we need to go to planar even for the case of NV12.
	// Thus, look for the closest (but no worse) 8-bit planar Y'CbCr format
	// that matches in color range. (This will also include the case of
	// the source format already being acceptable.)
	bool src_full_range = is_full_range(src_desc);
	const char *best_format = "yuv444p";
	unsigned best_score = numeric_limits<unsigned>::max();
	for (const AVPixFmtDescriptor *desc = av_pix_fmt_desc_next(nullptr);
	     desc != nullptr;
	     desc = av_pix_fmt_desc_next(desc)) {
		// Find planar Y'CbCr formats only.
		if (desc->nb_components != 3) continue;
		if (desc->flags & AV_PIX_FMT_FLAG_RGB) continue;
		if (!(desc->flags & AV_PIX_FMT_FLAG_PLANAR)) continue;
		if (desc->comp[0].plane != 0 ||
		    desc->comp[1].plane != 1 ||
		    desc->comp[2].plane != 2) continue;

		// 8-bit formats only.
		if (desc->flags & AV_PIX_FMT_FLAG_BE) continue;
		if (desc->comp[0].depth != 8) continue;

		// Same or better chroma resolution only. (A positive diff means the
		// candidate is _less_ subsampled, i.e., has excess resolution.)
		int chroma_w_diff = src_desc->log2_chroma_w - desc->log2_chroma_w;
		int chroma_h_diff = src_desc->log2_chroma_h - desc->log2_chroma_h;
		if (chroma_w_diff < 0 || chroma_h_diff < 0)
			continue;

		// Matching full/limited range only.
		if (is_full_range(desc) != src_full_range)
			continue;

		// Pick something with as little excess chroma resolution as possible.
		unsigned score = (1 << chroma_w_diff) << chroma_h_diff;
		if (score < best_score) {
			best_score = score;
			best_format = desc->name;
		}
	}
	return av_get_pix_fmt(best_format);
}
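
#if 0
// Illustrative sketch only, compiled out on purpose: what decide_dst_format()
// would pick for a few common sources, assuming the usual pixel formats are
// compiled into FFmpeg. The function name is hypothetical test scaffolding.
static void decide_dst_format_examples()
{
	// BGRA and NV12 requests short-circuit before the search.
	assert(decide_dst_format(AV_PIX_FMT_YUV420P, bmusb::PixelFormat_8BitBGRA) == AV_PIX_FMT_BGRA);

	// An already-acceptable planar source scores 1 (exact match) and maps to itself.
	assert(decide_dst_format(AV_PIX_FMT_YUV420P, bmusb::PixelFormat_8BitYCbCrPlanar) == AV_PIX_FMT_YUV420P);

	// RGB sources take the 4:4:4 fallback without entering the search loop.
	assert(decide_dst_format(AV_PIX_FMT_RGB24, bmusb::PixelFormat_8BitYCbCrPlanar) == AV_PIX_FMT_YUV444P);
}
#endif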

YCbCrFormat decode_ycbcr_format(const AVPixFmtDescriptor *desc, const AVFrame *frame, bool is_mjpeg)
{
	YCbCrFormat format;
	AVColorSpace colorspace = frame->colorspace;
	switch (colorspace) {
	case AVCOL_SPC_BT709:
		format.luma_coefficients = YCBCR_REC_709;
		break;
	case AVCOL_SPC_BT470BG:
	case AVCOL_SPC_SMPTE170M:
	case AVCOL_SPC_SMPTE240M:
		format.luma_coefficients = YCBCR_REC_601;
		break;
	case AVCOL_SPC_BT2020_NCL:
		format.luma_coefficients = YCBCR_REC_2020;
		break;
	case AVCOL_SPC_UNSPECIFIED:
		format.luma_coefficients = (frame->height >= 720 ? YCBCR_REC_709 : YCBCR_REC_601);
		break;
	default:
		fprintf(stderr, "Unknown Y'CbCr coefficient enum %d from FFmpeg; choosing Rec. 709.\n",
			colorspace);
		format.luma_coefficients = YCBCR_REC_709;
		break;
	}

	format.full_range = is_full_range(desc);
	format.num_levels = 1 << desc->comp[0].depth;
	format.chroma_subsampling_x = 1 << desc->log2_chroma_w;
	format.chroma_subsampling_y = 1 << desc->log2_chroma_h;

	switch (frame->chroma_location) {
	case AVCHROMA_LOC_LEFT:
		format.cb_x_position = 0.0;
		format.cb_y_position = 0.5;
		break;
	case AVCHROMA_LOC_CENTER:
		format.cb_x_position = 0.5;
		format.cb_y_position = 0.5;
		break;
	case AVCHROMA_LOC_TOPLEFT:
		format.cb_x_position = 0.0;
		format.cb_y_position = 0.0;
		break;
	case AVCHROMA_LOC_TOP:
		format.cb_x_position = 0.5;
		format.cb_y_position = 0.0;
		break;
	case AVCHROMA_LOC_BOTTOMLEFT:
		format.cb_x_position = 0.0;
		format.cb_y_position = 1.0;
		break;
	case AVCHROMA_LOC_BOTTOM:
		format.cb_x_position = 0.5;
		format.cb_y_position = 1.0;
		break;
	default:
		fprintf(stderr, "Unknown chroma location coefficient enum %d from FFmpeg; choosing center.\n",
			frame->chroma_location);
		format.cb_x_position = 0.5;
		format.cb_y_position = 0.5;
		break;
	}

	if (is_mjpeg && !format.full_range) {
		// Limited-range MJPEG is only detected by FFmpeg whenever a special
		// JPEG comment is set, which means that in practice, the stream is
		// almost certainly generated by Futatabi. Override FFmpeg's forced
		// MJPEG defaults (it disregards the values set in the mux) with what
		// Futatabi sets.
		format.luma_coefficients = YCBCR_REC_709;
		format.cb_x_position = 0.0;
		format.cb_y_position = 0.5;
	}

	format.cr_x_position = format.cb_x_position;
	format.cr_y_position = format.cb_y_position;
	return format;
}
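
// Example: a 576p yuv420p stream with unspecified colorspace and left-sited
// chroma comes out as Rec. 601 limited range, 2x2 subsampling, num_levels 256,
// and Cb/Cr sited at (0.0, 0.5).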

RGBTriplet get_neutral_color(AVDictionary *metadata)
{
	if (metadata == nullptr) {
		return RGBTriplet(1.0f, 1.0f, 1.0f);
	}
	AVDictionaryEntry *entry = av_dict_get(metadata, "WhitePoint", nullptr, 0);
	if (entry == nullptr) {
		return RGBTriplet(1.0f, 1.0f, 1.0f);
	}

	unsigned x_nom, x_den, y_nom, y_den;
	if (sscanf(entry->value, " %u:%u , %u:%u", &x_nom, &x_den, &y_nom, &y_den) != 4) {
		fprintf(stderr, "WARNING: Unable to parse white point '%s', using default white point\n", entry->value);
		return RGBTriplet(1.0f, 1.0f, 1.0f);
	}

	double x = double(x_nom) / x_den;
	double y = double(y_nom) / y_den;
	double z = 1.0 - x - y;

	const Matrix3d rgb_to_xyz_matrix = movit::ColorspaceConversionEffect::get_xyz_matrix(COLORSPACE_sRGB);
	const Vector3d rgb = rgb_to_xyz_matrix.inverse() * Vector3d(x, y, z);

	return RGBTriplet(rgb[0], rgb[1], rgb[2]);
}
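
// Sanity check: for a D65 white point ("WhitePoint: 3127:10000, 3290:10000"),
// the chromaticity vector (x, y, z) is proportional to sRGB's own white,
// so the inverse matrix returns (nearly) equal R, G and B, i.e. effectively
// no white-balance correction.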

}  // namespace

FFmpegCapture::FFmpegCapture(const string &filename, unsigned width, unsigned height)
	: filename(filename), width(width), height(height), video_timebase{1, 1}
{
	description = "Video: " + filename;

	last_frame = steady_clock::now();

	avformat_network_init();  // In case someone wants this.
}

FFmpegCapture::FFmpegCapture(int srt_sock, const string &stream_id)
	: srt_sock(srt_sock),
	  width(global_flags.width),
	  height(global_flags.height),
	  pixel_format(bmusb::PixelFormat_8BitYCbCrPlanar),
	  video_timebase{1, 1}
{
	if (stream_id.empty()) {
		description = "SRT stream";
	} else {
		description = stream_id;
	}
	play_as_fast_as_possible = true;

	last_frame = steady_clock::now();
}

FFmpegCapture::~FFmpegCapture()
{
	if (has_dequeue_callbacks) {
		dequeue_cleanup_callback();
	}
	swr_free(&resampler);
}

void FFmpegCapture::configure_card()
{
	if (video_frame_allocator == nullptr) {
		owned_video_frame_allocator.reset(new MallocFrameAllocator(FRAME_SIZE, NUM_QUEUED_VIDEO_FRAMES));
		set_video_frame_allocator(owned_video_frame_allocator.get());
	}
	if (audio_frame_allocator == nullptr) {
		// Audio can come out in pretty large chunks, so increase from the default 1 MB.
		owned_audio_frame_allocator.reset(new MallocFrameAllocator(1 << 20, NUM_QUEUED_AUDIO_FRAMES));
		set_audio_frame_allocator(owned_audio_frame_allocator.get());
	}
}

void FFmpegCapture::start_bm_capture()
{
	producer_thread_should_quit.unquit();
	producer_thread = thread(&FFmpegCapture::producer_thread_func, this);
}

void FFmpegCapture::stop_dequeue_thread()
{
	producer_thread_should_quit.quit();
	producer_thread.join();
}

std::map<uint32_t, VideoMode> FFmpegCapture::get_available_video_modes() const
{
	// Note: This will never really be shown in the UI.
	VideoMode mode;

	char buf[256];
	snprintf(buf, sizeof(buf), "%ux%u", width, height);
	mode.name = buf;

	mode.autodetect = false;
	mode.width = width;
	mode.height = height;
	mode.frame_rate_num = 60;
	mode.frame_rate_den = 1;
	mode.interlaced = false;

	return {{ 0, mode }};
}

void FFmpegCapture::producer_thread_func()
{
	char thread_name[16];
	snprintf(thread_name, sizeof(thread_name), "FFmpeg_C_%d", card_index);
	pthread_setname_np(pthread_self(), thread_name);

	while (!producer_thread_should_quit.should_quit()) {
		string filename_copy;
		{
			lock_guard<mutex> lock(filename_mu);
			filename_copy = filename;
		}

		string pathname;
		if (srt_sock == -1) {
			pathname = search_for_file(filename_copy);
		} else {
			pathname = description;
		}
		if (pathname.empty()) {
			send_disconnected_frame();
			if (play_once) {
				break;
			}
			producer_thread_should_quit.sleep_for(seconds(1));
			fprintf(stderr, "%s not found, sleeping one second and trying again...\n", filename_copy.c_str());
			continue;
		}
		should_interrupt = false;
		if (!play_video(pathname)) {
			// Error.
			send_disconnected_frame();
			if (play_once) {
				break;
			}
			fprintf(stderr, "Error when playing %s, sleeping one second and trying again...\n", pathname.c_str());
			producer_thread_should_quit.sleep_for(seconds(1));
			continue;
		}

		if (play_once) {
			send_disconnected_frame();
			break;
		}

		// Probably just EOF, will exit the loop above on next test.
	}

	if (has_dequeue_callbacks) {
		dequeue_cleanup_callback();
		has_dequeue_callbacks = false;
	}
}

void FFmpegCapture::send_disconnected_frame()
{
	// Send an empty frame to signal that we have no signal anymore.
	FrameAllocator::Frame video_frame = video_frame_allocator->alloc_frame();
	if (video_frame.data) {
		VideoFormat video_format;
		video_format.width = width;
		video_format.height = height;
		video_format.frame_rate_nom = 60;
		video_format.frame_rate_den = 1;
		video_format.is_connected = false;
		if (pixel_format == bmusb::PixelFormat_8BitBGRA) {
			video_format.stride = width * 4;
			video_frame.len = width * height * 4;
			memset(video_frame.data, 0, video_frame.len);
		} else {
			video_format.stride = width;
			current_frame_ycbcr_format.luma_coefficients = YCBCR_REC_709;
			current_frame_ycbcr_format.full_range = true;
			current_frame_ycbcr_format.num_levels = 256;
			current_frame_ycbcr_format.chroma_subsampling_x = 2;
			current_frame_ycbcr_format.chroma_subsampling_y = 2;
			current_frame_ycbcr_format.cb_x_position = 0.0f;
			current_frame_ycbcr_format.cb_y_position = 0.0f;
			current_frame_ycbcr_format.cr_x_position = 0.0f;
			current_frame_ycbcr_format.cr_y_position = 0.0f;
			video_frame.len = width * height * 2;
			memset(video_frame.data, 0, width * height);
			memset(video_frame.data + width * height, 128, width * height);  // Valid for both NV12 and planar.
		}

		frame_callback(-1, AVRational{1, TIMEBASE}, -1, AVRational{1, TIMEBASE}, timecode++,
			video_frame, /*video_offset=*/0, video_format,
			FrameAllocator::Frame(), /*audio_offset=*/0, AudioFormat());
		last_frame_was_connected = false;
	}

	if (card_disconnected_callback != nullptr) {
		card_disconnected_callback();
	}
}

bool FFmpegCapture::play_video(const string &pathname)
{
	// Note: Call before open, not after; otherwise, there's a race.
	// (There is now, too, but it tips the correct way. We could use fstat()
	// if we had the file descriptor.)
	timespec last_modified;
	struct stat buf;
	if (stat(pathname.c_str(), &buf) != 0) {
		// Probably some sort of protocol, so can't stat.
		last_modified.tv_sec = -1;
	} else {
		last_modified = buf.st_mtim;
	}

	AVFormatContextWithCloser format_ctx;
	if (srt_sock == -1) {
		// Regular file.
		format_ctx = avformat_open_input_unique(pathname.c_str(), /*fmt=*/nullptr,
			/*options=*/nullptr,
			AVIOInterruptCB{ &FFmpegCapture::interrupt_cb_thunk, this });
	} else {
		// SRT socket, already opened.
		const AVInputFormat *mpegts_fmt = av_find_input_format("mpegts");
		format_ctx = avformat_open_input_unique(&FFmpegCapture::read_srt_thunk, this,
			mpegts_fmt, /*options=*/nullptr,
			AVIOInterruptCB{ &FFmpegCapture::interrupt_cb_thunk, this });
	}
	if (format_ctx == nullptr) {
		fprintf(stderr, "%s: Error opening file\n", pathname.c_str());
		return false;
	}

	if (avformat_find_stream_info(format_ctx.get(), nullptr) < 0) {
		fprintf(stderr, "%s: Error finding stream info\n", pathname.c_str());
		return false;
	}

	int video_stream_index = find_stream_index(format_ctx.get(), AVMEDIA_TYPE_VIDEO);
	if (video_stream_index == -1) {
		fprintf(stderr, "%s: No video stream found\n", pathname.c_str());
		return false;
	}

	int audio_stream_index = find_stream_index(format_ctx.get(), AVMEDIA_TYPE_AUDIO);
	int subtitle_stream_index = find_stream_index(format_ctx.get(), AVMEDIA_TYPE_SUBTITLE);
	has_last_subtitle = false;

	// Open video decoder.
	const AVCodecParameters *video_codecpar = format_ctx->streams[video_stream_index]->codecpar;
	const AVCodec *video_codec = avcodec_find_decoder(video_codecpar->codec_id);
	video_timebase = format_ctx->streams[video_stream_index]->time_base;
	AVCodecContextWithDeleter video_codec_ctx = avcodec_alloc_context3_unique(nullptr);
	if (avcodec_parameters_to_context(video_codec_ctx.get(), video_codecpar) < 0) {
		fprintf(stderr, "%s: Cannot fill video codec parameters\n", pathname.c_str());
		return false;
	}
	if (video_codec == nullptr) {
		fprintf(stderr, "%s: Cannot find video decoder\n", pathname.c_str());
		return false;
	}
	if (avcodec_open2(video_codec_ctx.get(), video_codec, nullptr) < 0) {
		fprintf(stderr, "%s: Cannot open video decoder\n", pathname.c_str());
		return false;
	}
	unique_ptr<AVCodecContext, decltype(avcodec_close)*> video_codec_ctx_cleanup(
		video_codec_ctx.get(), avcodec_close);

	// Used in decode_ycbcr_format().
	is_mjpeg = video_codecpar->codec_id == AV_CODEC_ID_MJPEG;

	// Open audio decoder, if we have audio.
	AVCodecContextWithDeleter audio_codec_ctx;
	if (audio_stream_index != -1) {
		audio_codec_ctx = avcodec_alloc_context3_unique(nullptr);
		const AVCodecParameters *audio_codecpar = format_ctx->streams[audio_stream_index]->codecpar;
		audio_timebase = format_ctx->streams[audio_stream_index]->time_base;
		if (avcodec_parameters_to_context(audio_codec_ctx.get(), audio_codecpar) < 0) {
			fprintf(stderr, "%s: Cannot fill audio codec parameters\n", pathname.c_str());
			return false;
		}
		const AVCodec *audio_codec = avcodec_find_decoder(audio_codecpar->codec_id);
		if (audio_codec == nullptr) {
			fprintf(stderr, "%s: Cannot find audio decoder\n", pathname.c_str());
			return false;
		}
		if (avcodec_open2(audio_codec_ctx.get(), audio_codec, nullptr) < 0) {
			fprintf(stderr, "%s: Cannot open audio decoder\n", pathname.c_str());
			return false;
		}
	}
	unique_ptr<AVCodecContext, decltype(avcodec_close)*> audio_codec_ctx_cleanup(
		audio_codec_ctx.get(), avcodec_close);

	internal_rewind();

	// Main loop.
	bool first_frame = true;
	while (!producer_thread_should_quit.should_quit()) {
		if (process_queued_commands(format_ctx.get(), pathname, last_modified, /*rewound=*/nullptr)) {
			return true;
		}
		if (should_interrupt.load()) {
			// Check as a failsafe, so that we don't need to rely on avio if we don't have to.
			return false;
		}
		UniqueFrame audio_frame = audio_frame_allocator->alloc_frame();
		AudioFormat audio_format;

		int64_t audio_pts;
		bool error;
		AVFrameWithDeleter frame = decode_frame(format_ctx.get(), video_codec_ctx.get(), audio_codec_ctx.get(),
			pathname, video_stream_index, audio_stream_index, subtitle_stream_index, audio_frame.get(), &audio_format, &audio_pts, &error);
		if (error) {
			return false;
		}
		if (frame == nullptr) {
			// EOF. Loop back to the start if we can.
			if (format_ctx->pb != nullptr && format_ctx->pb->seekable == 0) {
				// Not seekable (but seemingly, sometimes av_seek_frame() would return 0 anyway,
				// so don't try).
				return true;
			}
			if (av_seek_frame(format_ctx.get(), /*stream_index=*/-1, /*timestamp=*/0, /*flags=*/0) < 0) {
				fprintf(stderr, "%s: Rewind failed, not looping.\n", pathname.c_str());
				return true;
			}
			if (video_codec_ctx != nullptr) {
				avcodec_flush_buffers(video_codec_ctx.get());
			}
			if (audio_codec_ctx != nullptr) {
				avcodec_flush_buffers(audio_codec_ctx.get());
			}
			// If the file has changed since last time, return to get it reloaded.
			// Note that depending on how you move the file into place, you might
			// end up corrupting the one you're already playing, so this path
			// might not trigger.
			if (changed_since(pathname, last_modified)) {
				return true;
			}
			internal_rewind();
			continue;
		}

		VideoFormat video_format = construct_video_format(frame.get(), video_timebase);
		UniqueFrame video_frame = make_video_frame(frame.get(), pathname, &error);
		if (error) {
			return false;
		}

		for ( ;; ) {
			if (last_pts == 0 && pts_origin == 0) {
				pts_origin = frame->pts;
			}
			steady_clock::time_point now = steady_clock::now();
			if (play_as_fast_as_possible) {
				video_frame->received_timestamp = now;
				audio_frame->received_timestamp = now;
				next_frame_start = now;
			} else {
				next_frame_start = compute_frame_start(frame->pts, pts_origin, video_timebase, start, rate);
				if (first_frame && last_frame_was_connected) {
					// If reconnect took more than one second, this is probably a live feed,
					// and we should reset the resampler. (Or the rate is really, really low,
					// in which case a reset on the first frame is fine anyway.)
					if (duration<double>(next_frame_start - last_frame).count() >= 1.0) {
						last_frame_was_connected = false;
					}
				}
				video_frame->received_timestamp = next_frame_start;

				// The easiest way to get all the rate conversions etc. right is to move the
				// audio PTS into the video PTS timebase and go from there. (We'll get some
				// rounding issues, but they should not be a big problem.)
				int64_t audio_pts_as_video_pts = av_rescale_q(audio_pts, audio_timebase, video_timebase);
				audio_frame->received_timestamp = compute_frame_start(audio_pts_as_video_pts, pts_origin, video_timebase, start, rate);

				if (audio_frame->len != 0) {
					// The received timestamps in Nageru are measured after we've just received the frame.
					// However, pts (especially audio pts) is at the _beginning_ of the frame.
					// If we have locked audio, the distinction doesn't really matter, as pts is
					// on a relative scale and a fixed offset is fine. But if we don't, we will have
					// a different number of samples each time, which will cause huge audio jitter
					// and throw off the resampler.
					//
					// In a sense, we should have compensated by adding the frame and audio lengths
					// to video_frame->received_timestamp and audio_frame->received_timestamp respectively,
					// but that would mean extra waiting in sleep_until(). All we need is that they
					// are correct relative to each other, though (and to the other frames we send),
					// so just align the end of the audio frame, and we're fine.
					size_t num_samples = (audio_frame->len * 8) / audio_format.bits_per_sample / audio_format.num_channels;
					double offset = double(num_samples) / OUTPUT_FREQUENCY -
						double(video_format.frame_rate_den) / video_format.frame_rate_nom;
					audio_frame->received_timestamp += duration_cast<steady_clock::duration>(duration<double>(offset));
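					// Worked example (assuming OUTPUT_FREQUENCY is 48000): a 1152-sample
					// stereo chunk against a 60 fps video frame gives
					// offset = 1152/48000 - 1/60 = 24.0 ms - 16.7 ms = +7.3 ms,
					// nudging the audio timestamp from the start of the chunk to its end.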
				}

				if (duration<double>(now - next_frame_start).count() >= 0.1) {
					// If we don't have enough CPU to keep up, or if we have a live stream
					// where the initial origin was somehow wrong, we could be behind indefinitely.
					// In particular, this will give the audio resampler problems as it tries
					// to speed up to reduce the delay, hitting the low end of the buffer every time.
					fprintf(stderr, "%s: Playback %.0f ms behind, resetting time scale\n",
						pathname.c_str(),
						1e3 * duration<double>(now - next_frame_start).count());
					pts_origin = frame->pts;
					start = next_frame_start = now;
					timecode += MAX_FPS * 2 + 1;
				}
			}
			bool finished_wakeup;
			if (play_as_fast_as_possible) {
				finished_wakeup = !producer_thread_should_quit.should_quit();
			} else {
				finished_wakeup = producer_thread_should_quit.sleep_until(next_frame_start);
			}
			if (finished_wakeup) {
				if (audio_frame->len > 0) {
					assert(audio_pts != -1);
				}
				if (!last_frame_was_connected) {
					// We're recovering from an error (or really slow load, see above).
					// Make sure to get the audio resampler reset. (This is a hack;
					// ideally, the frame callback should just accept a way to signal
					// audio discontinuity.)
					timecode += MAX_FPS * 2 + 1;
				}
				last_neutral_color = get_neutral_color(frame->metadata);
				frame_callback(frame->pts, video_timebase, audio_pts, audio_timebase, timecode++,
					video_frame.get_and_release(), 0, video_format,
					audio_frame.get_and_release(), 0, audio_format);
				first_frame = false;
				last_frame = steady_clock::now();
				last_frame_was_connected = true;
				break;
			} else {
				if (producer_thread_should_quit.should_quit()) break;

				bool rewound = false;
				if (process_queued_commands(format_ctx.get(), pathname, last_modified, &rewound)) {
					return true;
				}
				// If we just rewound, drop this frame on the floor and be done.
				if (rewound) {
					break;
				}
				// OK, we didn't, so probably a rate change. Recalculate next_frame_start,
				// but if it's now in the past, we'll reset the origin, so that we don't
				// generate a huge backlog of frames that we need to run through quickly.
				next_frame_start = compute_frame_start(frame->pts, pts_origin, video_timebase, start, rate);
				steady_clock::time_point now = steady_clock::now();
				if (next_frame_start < now) {
					pts_origin = frame->pts;
					start = next_frame_start = now;
				}
			}
		}
		last_pts = frame->pts;
	}
	return true;
}

void FFmpegCapture::internal_rewind()
{
	pts_origin = last_pts = 0;
	start = next_frame_start = steady_clock::now();
}

bool FFmpegCapture::process_queued_commands(AVFormatContext *format_ctx, const std::string &pathname, timespec last_modified, bool *rewound)
{
	// Process any queued commands from other threads.
	vector<QueuedCommand> commands;
	{
		lock_guard<mutex> lock(queue_mu);
		swap(commands, command_queue);
	}
	for (const QueuedCommand &cmd : commands) {
		switch (cmd.command) {
		case QueuedCommand::REWIND:
			if (av_seek_frame(format_ctx, /*stream_index=*/-1, /*timestamp=*/0, /*flags=*/0) < 0) {
				fprintf(stderr, "%s: Rewind failed, stopping play.\n", pathname.c_str());
			}
			// If the file has changed since last time, return to get it reloaded.
			// Note that depending on how you move the file into place, you might
			// end up corrupting the one you're already playing, so this path
			// might not trigger.
			if (changed_since(pathname, last_modified)) {
				return true;
			}
			internal_rewind();
			if (rewound != nullptr) {
				*rewound = true;
			}
			break;

		case QueuedCommand::CHANGE_RATE:
			// Change the origin to the last played frame.
			start = compute_frame_start(last_pts, pts_origin, video_timebase, start, rate);
			pts_origin = last_pts;
			rate = cmd.new_rate;
			play_as_fast_as_possible = (rate >= 10.0);
			break;
		}
	}
	return false;
}
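
// Worked example for CHANGE_RATE: if playback began at wall-clock time T
// at rate 1.0 and the last played frame sits 10 s into the file, switching
// to rate 2.0 sets start = T + 10 s and pts_origin = last_pts, so frames
// from here on are scheduled at twice speed relative to that point instead
// of being recomputed (and thus jumped) from the original origin.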

AVFrameWithDeleter FFmpegCapture::decode_frame(AVFormatContext *format_ctx, AVCodecContext *video_codec_ctx, AVCodecContext *audio_codec_ctx,
	const std::string &pathname, int video_stream_index, int audio_stream_index, int subtitle_stream_index,
	FrameAllocator::Frame *audio_frame, AudioFormat *audio_format, int64_t *audio_pts, bool *error)
{
	*error = false;

	// Read packets until we have a frame or there are none left.
	bool frame_finished = false;
	AVFrameWithDeleter audio_avframe = av_frame_alloc_unique();
	AVFrameWithDeleter video_avframe = av_frame_alloc_unique();
	bool eof = false;
	*audio_pts = -1;
	bool has_audio = false;
	do {
		AVPacket pkt;
		unique_ptr<AVPacket, decltype(av_packet_unref)*> pkt_cleanup(
			&pkt, av_packet_unref);
		av_init_packet(&pkt);
		pkt.data = nullptr;
		pkt.size = 0;
		if (av_read_frame(format_ctx, &pkt) == 0) {
			if (pkt.stream_index == audio_stream_index && audio_callback != nullptr) {
				audio_callback(&pkt, format_ctx->streams[audio_stream_index]->time_base);
			}
			if (pkt.stream_index == video_stream_index) {
				if (avcodec_send_packet(video_codec_ctx, &pkt) < 0) {
					fprintf(stderr, "%s: Cannot send packet to video codec.\n", pathname.c_str());
					*error = true;
					return AVFrameWithDeleter(nullptr);
				}
			} else if (pkt.stream_index == audio_stream_index) {
				has_audio = true;
				if (avcodec_send_packet(audio_codec_ctx, &pkt) < 0) {
					fprintf(stderr, "%s: Cannot send packet to audio codec.\n", pathname.c_str());
					*error = true;
					return AVFrameWithDeleter(nullptr);
				}
			} else if (pkt.stream_index == subtitle_stream_index) {
				last_subtitle = string(reinterpret_cast<const char *>(pkt.data), pkt.size);
				has_last_subtitle = true;
			}
		} else {
			eof = true;  // Or error, but ignore that for the time being.
		}

		// Decode audio, if any.
		while (has_audio || eof) {
			int err = avcodec_receive_frame(audio_codec_ctx, audio_avframe.get());
			if (err == 0) {
				if (*audio_pts == -1) {
					*audio_pts = audio_avframe->pts;
				}
				convert_audio(audio_avframe.get(), audio_frame, audio_format);
			} else if (err == AVERROR(EAGAIN)) {
				break;
			} else {
				fprintf(stderr, "%s: Cannot receive frame from audio codec.\n", pathname.c_str());
				*error = true;
				return AVFrameWithDeleter(nullptr);
			}
		}

		// Decode video, if we have a frame.
		int err = avcodec_receive_frame(video_codec_ctx, video_avframe.get());
		if (err == 0) {
			frame_finished = true;
			break;
		} else if (err != AVERROR(EAGAIN)) {
			fprintf(stderr, "%s: Cannot receive frame from video codec.\n", pathname.c_str());
			*error = true;
			return AVFrameWithDeleter(nullptr);
		}
	} while (!eof);

	if (frame_finished)
		return video_avframe;
	else
		return AVFrameWithDeleter(nullptr);
}

void FFmpegCapture::convert_audio(const AVFrame *audio_avframe, FrameAllocator::Frame *audio_frame, AudioFormat *audio_format)
{
	// Decide on a format. If there already is one in this audio frame,
	// we're pretty much forced to use it. If not, we try to find an exact match.
	// If that still doesn't work, we default to 32-bit signed chunked
	// (float would be nice, but there's really no way to signal that yet).
	AVSampleFormat dst_format;
	if (audio_format->bits_per_sample == 0) {
		switch (audio_avframe->format) {
		case AV_SAMPLE_FMT_S16:
		case AV_SAMPLE_FMT_S16P:
			audio_format->bits_per_sample = 16;
			dst_format = AV_SAMPLE_FMT_S16;
			break;
		case AV_SAMPLE_FMT_S32:
		case AV_SAMPLE_FMT_S32P:
		default:
			audio_format->bits_per_sample = 32;
			dst_format = AV_SAMPLE_FMT_S32;
			break;
		}
	} else if (audio_format->bits_per_sample == 16) {
		dst_format = AV_SAMPLE_FMT_S16;
	} else if (audio_format->bits_per_sample == 32) {
		dst_format = AV_SAMPLE_FMT_S32;
	} else {
		assert(false);
	}
	audio_format->num_channels = 2;

	int64_t channel_layout = audio_avframe->channel_layout;
	if (channel_layout == 0) {
		channel_layout = av_get_default_channel_layout(audio_avframe->channels);
	}

	if (resampler == nullptr ||
	    audio_avframe->format != last_src_format ||
	    dst_format != last_dst_format ||
	    channel_layout != last_channel_layout ||
	    audio_avframe->sample_rate != last_sample_rate) {
		swr_free(&resampler);
		resampler = swr_alloc_set_opts(nullptr,
		                               /*out_ch_layout=*/AV_CH_LAYOUT_STEREO_DOWNMIX,
		                               /*out_sample_fmt=*/dst_format,
		                               /*out_sample_rate=*/OUTPUT_FREQUENCY,
		                               /*in_ch_layout=*/channel_layout,
		                               /*in_sample_fmt=*/AVSampleFormat(audio_avframe->format),
		                               /*in_sample_rate=*/audio_avframe->sample_rate,
		                               /*log_offset=*/0,
		                               /*log_ctx=*/nullptr);

		if (resampler == nullptr) {
			fprintf(stderr, "Allocating resampler failed.\n");
			abort();
		}

		if (swr_init(resampler) < 0) {
			fprintf(stderr, "Could not open resample context.\n");
			abort();
		}

		last_src_format = AVSampleFormat(audio_avframe->format);
		last_dst_format = dst_format;
		last_channel_layout = channel_layout;
		last_sample_rate = audio_avframe->sample_rate;
	}

	size_t bytes_per_sample = (audio_format->bits_per_sample / 8) * 2;
	size_t num_samples_room = (audio_frame->size - audio_frame->len) / bytes_per_sample;

	uint8_t *data = audio_frame->data + audio_frame->len;
	int out_samples = swr_convert(resampler, &data, num_samples_room,
		const_cast<const uint8_t **>(audio_avframe->data), audio_avframe->nb_samples);
	if (out_samples < 0) {
		fprintf(stderr, "Audio conversion failed.\n");
		abort();
	}

	audio_frame->len += out_samples * bytes_per_sample;
}
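
// Worked example: a 1024-sample 44.1 kHz stereo frame converted to 16-bit
// stereo at OUTPUT_FREQUENCY = 48000 (the value assumed here) yields about
// 1114 output samples; bytes_per_sample is 4, so len grows by roughly 4456 bytes.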

VideoFormat FFmpegCapture::construct_video_format(const AVFrame *frame, AVRational video_timebase)
{
	VideoFormat video_format;
	video_format.width = width;
	video_format.height = height;
	if (pixel_format == bmusb::PixelFormat_8BitBGRA) {
		video_format.stride = width * 4;
	} else if (pixel_format == FFmpegCapture::PixelFormat_NV12) {
		video_format.stride = width;
	} else {
		assert(pixel_format == bmusb::PixelFormat_8BitYCbCrPlanar);
		video_format.stride = width;
	}
	video_format.frame_rate_nom = video_timebase.den;
	video_format.frame_rate_den = frame->pkt_duration * video_timebase.num;
	if (video_format.frame_rate_nom == 0 || video_format.frame_rate_den == 0) {
		// Invalid frame rate; just default to 60 fps.
		video_format.frame_rate_nom = 60;
		video_format.frame_rate_den = 1;
	}
	video_format.has_signal = true;
	video_format.is_connected = true;
	return video_format;
}
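
// Worked example: a 25 fps stream in the 1/90000 MPEG-TS timebase carries
// pkt_duration 3600, so the frame rate comes out as 90000/3600 = 25/1.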

UniqueFrame FFmpegCapture::make_video_frame(const AVFrame *frame, const string &pathname, bool *error)
{
	*error = false;

	UniqueFrame video_frame(video_frame_allocator->alloc_frame());
	if (video_frame->data == nullptr) {
		return video_frame;
	}

	if (sws_ctx == nullptr ||
	    sws_last_width != frame->width ||
	    sws_last_height != frame->height ||
	    sws_last_src_format != frame->format) {
		sws_dst_format = decide_dst_format(AVPixelFormat(frame->format), pixel_format);
		sws_ctx.reset(
			sws_getContext(frame->width, frame->height, AVPixelFormat(frame->format),
				width, height, sws_dst_format,
				SWS_BICUBIC, nullptr, nullptr, nullptr));
		sws_last_width = frame->width;
		sws_last_height = frame->height;
		sws_last_src_format = frame->format;
	}
	if (sws_ctx == nullptr) {
		fprintf(stderr, "%s: Could not create scaler context\n", pathname.c_str());
		*error = true;
		return video_frame;
	}

	uint8_t *pic_data[4] = { nullptr, nullptr, nullptr, nullptr };
	int linesizes[4] = { 0, 0, 0, 0 };
	if (pixel_format == bmusb::PixelFormat_8BitBGRA) {
		pic_data[0] = video_frame->data;
		linesizes[0] = width * 4;
		video_frame->len = (width * 4) * height;
	} else if (pixel_format == PixelFormat_NV12) {
		pic_data[0] = video_frame->data;
		linesizes[0] = width;

		pic_data[1] = pic_data[0] + width * height;
		linesizes[1] = width;

		video_frame->len = (width * 2) * height;

		const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(sws_dst_format);
		current_frame_ycbcr_format = decode_ycbcr_format(desc, frame, is_mjpeg);
	} else {
		assert(pixel_format == bmusb::PixelFormat_8BitYCbCrPlanar);
		const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(sws_dst_format);

		int chroma_width = AV_CEIL_RSHIFT(int(width), desc->log2_chroma_w);
		int chroma_height = AV_CEIL_RSHIFT(int(height), desc->log2_chroma_h);

		pic_data[0] = video_frame->data;
		linesizes[0] = width;

		pic_data[1] = pic_data[0] + width * height;
		linesizes[1] = chroma_width;

		pic_data[2] = pic_data[1] + chroma_width * chroma_height;
		linesizes[2] = chroma_width;

		video_frame->len = width * height + 2 * chroma_width * chroma_height;

		current_frame_ycbcr_format = decode_ycbcr_format(desc, frame, is_mjpeg);
	}
	sws_scale(sws_ctx.get(), frame->data, frame->linesize, 0, frame->height, pic_data, linesizes);

	return video_frame;
}
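
// Memory layout example: at 1280x720 with a 4:2:0 planar destination,
// chroma_width x chroma_height is 640x360, so the frame is 921600 bytes of Y
// followed by two 230400-byte chroma planes, 1382400 bytes in total.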

int FFmpegCapture::interrupt_cb_thunk(void *opaque)
{
	return reinterpret_cast<FFmpegCapture *>(opaque)->interrupt_cb();
}

int FFmpegCapture::interrupt_cb()
{
	return should_interrupt.load();
}

int FFmpegCapture::read_srt_thunk(void *opaque, uint8_t *buf, int buf_size)
{
	return reinterpret_cast<FFmpegCapture *>(opaque)->read_srt(buf, buf_size);
}

int FFmpegCapture::read_srt(uint8_t *buf, int buf_size)
{
	SRT_MSGCTRL mc = srt_msgctrl_default;
	return srt_recvmsg2(srt_sock, reinterpret_cast<char *>(buf), buf_size, &mc);
}
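
#if 0
// Illustrative sketch only, compiled out on purpose: roughly how read_srt_thunk()
// gets attached as a custom AVIO read callback. The real wiring lives inside
// avformat_open_input_unique() in shared/ffmpeg_raii.h; the function name and
// the 4 kB buffer size here are arbitrary choices for the example.
AVFormatContext *open_srt_as_mpegts_sketch(FFmpegCapture *capture)
{
	constexpr int buf_size = 4096;
	unsigned char *buf = static_cast<unsigned char *>(av_malloc(buf_size));
	AVIOContext *avio = avio_alloc_context(buf, buf_size, /*write_flag=*/0, capture,
		&FFmpegCapture::read_srt_thunk, /*write_packet=*/nullptr, /*seek=*/nullptr);
	AVFormatContext *ctx = avformat_alloc_context();
	ctx->pb = avio;
	avformat_open_input(&ctx, /*url=*/nullptr, av_find_input_format("mpegts"), /*options=*/nullptr);
	return ctx;
}
#endif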