+#include <libavresample/avresample.h>
+#include <libavutil/channel_layout.h>
#include <libavutil/imgutils.h>
#include <libavutil/mem.h>
+#include <libavutil/opt.h>
#include <libavutil/pixfmt.h>
#include <libswscale/swscale.h>
}
#include "ffmpeg_util.h"
#include "flags.h"
#include "image_input.h"
+#include "ref_counted_frame.h"
+#include "timebase.h"
#define FRAME_SIZE (8 << 20) // 8 MB.
if (dst_format_type == bmusb::PixelFormat_8BitBGRA) {
return AV_PIX_FMT_BGRA;
}
+ if (dst_format_type == FFmpegCapture::PixelFormat_NV12) {
+ return AV_PIX_FMT_NV12;
+ }
assert(dst_format_type == bmusb::PixelFormat_8BitYCbCrPlanar);
} // namespace
FFmpegCapture::FFmpegCapture(const string &filename, unsigned width, unsigned height)
- : filename(filename), width(width), height(height)
+ : filename(filename), width(width), height(height), video_timebase{1, 1}
{
- // Not really used for anything.
description = "Video: " + filename;
+ last_frame = steady_clock::now();
+
avformat_network_init(); // In case someone wants this.
}
if (has_dequeue_callbacks) {
dequeue_cleanup_callback();
}
+ avresample_free(&resampler);
}
void FFmpegCapture::configure_card()
set_video_frame_allocator(owned_video_frame_allocator.get());
}
if (audio_frame_allocator == nullptr) {
- owned_audio_frame_allocator.reset(new MallocFrameAllocator(65536, NUM_QUEUED_AUDIO_FRAMES));
+ // Audio can come out in pretty large chunks, so increase the buffer from 64 kB to 1 MB.
+ owned_audio_frame_allocator.reset(new MallocFrameAllocator(1 << 20, NUM_QUEUED_AUDIO_FRAMES));
set_audio_frame_allocator(owned_audio_frame_allocator.get());
}
}
pthread_setname_np(pthread_self(), thread_name);
while (!producer_thread_should_quit.should_quit()) {
- string pathname = search_for_file(filename);
- if (filename.empty()) {
- fprintf(stderr, "%s not found, sleeping one second and trying again...\n", filename.c_str());
+ string filename_copy;
+ {
+ lock_guard<mutex> lock(filename_mu);
+ filename_copy = filename;
+ }
+
+ string pathname = search_for_file(filename_copy);
+ if (pathname.empty()) {
+ fprintf(stderr, "%s not found, sleeping one second and trying again...\n", filename_copy.c_str());
send_disconnected_frame();
producer_thread_should_quit.sleep_for(seconds(1));
continue;
}
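+ // Reset the interrupt flag, so that a stale stop request cannot abort the new open.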
+ should_interrupt = false;
if (!play_video(pathname)) {
// Error.
fprintf(stderr, "Error when playing %s, sleeping one second and trying again...\n", pathname.c_str());
VideoFormat video_format;
video_format.width = width;
video_format.height = height;
- video_format.stride = width * 4;
video_format.frame_rate_nom = 60;
video_format.frame_rate_den = 1;
video_format.is_connected = false;
+ if (pixel_format == bmusb::PixelFormat_8BitBGRA) {
+ video_format.stride = width * 4;
+ video_frame.len = width * height * 4;
+ memset(video_frame.data, 0, video_frame.len);
+ } else {
+ video_format.stride = width;
+ current_frame_ycbcr_format.luma_coefficients = YCBCR_REC_709;
+ current_frame_ycbcr_format.full_range = true;
+ current_frame_ycbcr_format.num_levels = 256;
+ current_frame_ycbcr_format.chroma_subsampling_x = 2;
+ current_frame_ycbcr_format.chroma_subsampling_y = 2;
+ current_frame_ycbcr_format.cb_x_position = 0.0f;
+ current_frame_ycbcr_format.cb_y_position = 0.0f;
+ current_frame_ycbcr_format.cr_x_position = 0.0f;
+ current_frame_ycbcr_format.cr_y_position = 0.0f;
+ video_frame.len = width * height * 2;
+ memset(video_frame.data, 0, width * height);
+ memset(video_frame.data + width * height, 128, width * height); // Valid for both NV12 and planar.
+ }
- video_frame.len = width * height * 4;
- memset(video_frame.data, 0, video_frame.len);
-
- frame_callback(timecode++,
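+ // A synthesized "disconnected" frame carries no stream timestamps, so pass -1 PTS with dummy timebases.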
+ frame_callback(-1, AVRational{1, TIMEBASE}, -1, AVRational{1, TIMEBASE}, timecode++,
video_frame, /*video_offset=*/0, video_format,
FrameAllocator::Frame(), /*audio_offset=*/0, AudioFormat());
+ last_frame_was_connected = false;
}
}
last_modified = buf.st_mtim;
}
- auto format_ctx = avformat_open_input_unique(pathname.c_str(), nullptr, nullptr);
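+ // "nobuffer" reduces input-side buffering, cutting startup latency for live streams.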
+ AVDictionary *opts = nullptr;
+ av_dict_set(&opts, "fflags", "nobuffer", 0);
+
+ auto format_ctx = avformat_open_input_unique(pathname.c_str(), nullptr, &opts, AVIOInterruptCB{ &FFmpegCapture::interrupt_cb_thunk, this });
if (format_ctx == nullptr) {
fprintf(stderr, "%s: Error opening file\n", pathname.c_str());
return false;
return false;
}
- const AVCodecParameters *codecpar = format_ctx->streams[video_stream_index]->codecpar;
- AVRational video_timebase = format_ctx->streams[video_stream_index]->time_base;
- AVCodecContextWithDeleter codec_ctx = avcodec_alloc_context3_unique(nullptr);
- if (avcodec_parameters_to_context(codec_ctx.get(), codecpar) < 0) {
- fprintf(stderr, "%s: Cannot fill codec parameters\n", pathname.c_str());
+ int audio_stream_index = find_stream_index(format_ctx.get(), AVMEDIA_TYPE_AUDIO);
+
+ // Open video decoder.
+ const AVCodecParameters *video_codecpar = format_ctx->streams[video_stream_index]->codecpar;
+ AVCodec *video_codec = avcodec_find_decoder(video_codecpar->codec_id);
+ video_timebase = format_ctx->streams[video_stream_index]->time_base;
+ AVCodecContextWithDeleter video_codec_ctx = avcodec_alloc_context3_unique(nullptr);
+ if (avcodec_parameters_to_context(video_codec_ctx.get(), video_codecpar) < 0) {
+ fprintf(stderr, "%s: Cannot fill video codec parameters\n", pathname.c_str());
return false;
}
- AVCodec *codec = avcodec_find_decoder(codecpar->codec_id);
- if (codec == nullptr) {
- fprintf(stderr, "%s: Cannot find decoder\n", pathname.c_str());
+ if (video_codec == nullptr) {
+ fprintf(stderr, "%s: Cannot find video decoder\n", pathname.c_str());
return false;
}
- if (avcodec_open2(codec_ctx.get(), codec, nullptr) < 0) {
- fprintf(stderr, "%s: Cannot open decoder\n", pathname.c_str());
+ if (avcodec_open2(video_codec_ctx.get(), video_codec, nullptr) < 0) {
+ fprintf(stderr, "%s: Cannot open video decoder\n", pathname.c_str());
return false;
}
- unique_ptr<AVCodecContext, decltype(avcodec_close)*> codec_ctx_cleanup(
- codec_ctx.get(), avcodec_close);
+ unique_ptr<AVCodecContext, decltype(avcodec_close)*> video_codec_ctx_cleanup(
+ video_codec_ctx.get(), avcodec_close);
+
+ // Open audio decoder, if we have audio.
+ AVCodecContextWithDeleter audio_codec_ctx;
+ if (audio_stream_index != -1) {
+ audio_codec_ctx = avcodec_alloc_context3_unique(nullptr);
+ const AVCodecParameters *audio_codecpar = format_ctx->streams[audio_stream_index]->codecpar;
+ audio_timebase = format_ctx->streams[audio_stream_index]->time_base;
+ if (avcodec_parameters_to_context(audio_codec_ctx.get(), audio_codecpar) < 0) {
+ fprintf(stderr, "%s: Cannot fill audio codec parameters\n", pathname.c_str());
+ return false;
+ }
+ AVCodec *audio_codec = avcodec_find_decoder(audio_codecpar->codec_id);
+ if (audio_codec == nullptr) {
+ fprintf(stderr, "%s: Cannot find audio decoder\n", pathname.c_str());
+ return false;
+ }
+ if (avcodec_open2(audio_codec_ctx.get(), audio_codec, nullptr) < 0) {
+ fprintf(stderr, "%s: Cannot open audio decoder\n", pathname.c_str());
+ return false;
+ }
+ }
+ unique_ptr<AVCodecContext, decltype(avcodec_close)*> audio_codec_ctx_cleanup(
+ audio_codec_ctx.get(), avcodec_close);
internal_rewind();
- unique_ptr<SwsContext, decltype(sws_freeContext)*> sws_ctx(nullptr, sws_freeContext);
- int sws_last_width = -1, sws_last_height = -1, sws_last_src_format = -1;
- AVPixelFormat sws_dst_format = AVPixelFormat(-1); // In practice, always initialized.
-
// Main loop.
+ bool first_frame = true;
while (!producer_thread_should_quit.should_quit()) {
- if (process_queued_commands(format_ctx.get(), pathname, last_modified)) {
+ if (process_queued_commands(format_ctx.get(), pathname, last_modified, /*rewound=*/nullptr)) {
return true;
}
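+ // decode_frame() will append any decoded audio into this frame.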
+ UniqueFrame audio_frame = audio_frame_allocator->alloc_frame();
+ AudioFormat audio_format;
+ int64_t audio_pts;
bool error;
- AVFrameWithDeleter frame = decode_frame(format_ctx.get(), codec_ctx.get(), pathname, video_stream_index, &error);
+ AVFrameWithDeleter frame = decode_frame(format_ctx.get(), video_codec_ctx.get(), audio_codec_ctx.get(),
+ pathname, video_stream_index, audio_stream_index, audio_frame.get(), &audio_format, &audio_pts, &error);
if (error) {
return false;
}
fprintf(stderr, "%s: Rewind failed, not looping.\n", pathname.c_str());
return true;
}
+ if (video_codec_ctx != nullptr) {
+ avcodec_flush_buffers(video_codec_ctx.get());
+ }
+ if (audio_codec_ctx != nullptr) {
+ avcodec_flush_buffers(audio_codec_ctx.get());
+ }
// If the file has changed since last time, return to get it reloaded.
// Note that depending on how you move the file into place, you might
// end up corrupting the one you're already playing, so this path
continue;
}
- if (sws_ctx == nullptr ||
- sws_last_width != frame->width ||
- sws_last_height != frame->height ||
- sws_last_src_format != frame->format) {
- sws_dst_format = decide_dst_format(AVPixelFormat(frame->format), pixel_format);
- sws_ctx.reset(
- sws_getContext(frame->width, frame->height, AVPixelFormat(frame->format),
- width, height, sws_dst_format,
- SWS_BICUBIC, nullptr, nullptr, nullptr));
- sws_last_width = frame->width;
- sws_last_height = frame->height;
- sws_last_src_format = frame->format;
- }
- if (sws_ctx == nullptr) {
- fprintf(stderr, "%s: Could not create scaler context\n", pathname.c_str());
+ VideoFormat video_format = construct_video_format(frame.get(), video_timebase);
+ UniqueFrame video_frame = make_video_frame(frame.get(), pathname, &error);
+ if (error) {
return false;
}
- VideoFormat video_format;
- video_format.width = width;
- video_format.height = height;
- if (pixel_format == bmusb::PixelFormat_8BitBGRA) {
- video_format.stride = width * 4;
- } else {
- assert(pixel_format == bmusb::PixelFormat_8BitYCbCrPlanar);
- video_format.stride = width;
- }
- video_format.frame_rate_nom = video_timebase.den;
- video_format.frame_rate_den = av_frame_get_pkt_duration(frame.get()) * video_timebase.num;
- if (video_format.frame_rate_nom == 0 || video_format.frame_rate_den == 0) {
- // Invalid frame rate.
- video_format.frame_rate_nom = 60;
- video_format.frame_rate_den = 1;
- }
- video_format.has_signal = true;
- video_format.is_connected = true;
-
- next_frame_start = compute_frame_start(frame->pts, pts_origin, video_timebase, start, rate);
- last_pts = frame->pts;
+ for ( ;; ) {
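+ // The first frame we see establishes the PTS origin.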
+ if (last_pts == 0 && pts_origin == 0) {
+ pts_origin = frame->pts;
+ }
+ next_frame_start = compute_frame_start(frame->pts, pts_origin, video_timebase, start, rate);
+ if (first_frame && last_frame_was_connected) {
+ // If reconnect took more than one second, this is probably a live feed,
+ // and we should reset the resampler. (Or the rate is really, really low,
+ // in which case a reset on the first frame is fine anyway.)
+ if (duration<double>(next_frame_start - last_frame).count() >= 1.0) {
+ last_frame_was_connected = false;
+ }
+ }
+ video_frame->received_timestamp = next_frame_start;
+
+ // The easiest way to get all the rate conversions etc. right is to move the
+ // audio PTS into the video PTS timebase and go from there. (We'll get some
+ // rounding issues, but they should not be a big problem.)
+ int64_t audio_pts_as_video_pts = av_rescale_q(audio_pts, audio_timebase, video_timebase);
+ audio_frame->received_timestamp = compute_frame_start(audio_pts_as_video_pts, pts_origin, video_timebase, start, rate);
+
+ if (audio_frame->len != 0) {
+ // The received timestamps in Nageru are measured after we've just received the frame.
+ // However, pts (especially audio pts) is at the _beginning_ of the frame.
+ // If we have locked audio, the distinction doesn't really matter, as pts is
+ // on a relative scale and a fixed offset is fine. But if we don't, we will have
+ // a different number of samples each time, which will cause huge audio jitter
+ // and throw off the resampler.
+ //
+ // In a sense, we should have compensated by adding the frame and audio lengths
+ // to video_frame->received_timestamp and audio_frame->received_timestamp respectively,
+ // but that would mean extra waiting in sleep_until(). All we need is that they
+ // are correct relative to each other, though (and to the other frames we send),
+ // so just align the end of the audio frame, and we're fine.
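+ // Worked example (assuming OUTPUT_FREQUENCY is 48000): a 1024-sample chunk
+ // against 1/60 s video frames gives offset = 1024/48000 - 1/60 ≈ +4.7 ms.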
+ size_t num_samples = (audio_frame->len * 8) / audio_format.bits_per_sample / audio_format.num_channels;
+ double offset = double(num_samples) / OUTPUT_FREQUENCY -
+ double(video_format.frame_rate_den) / video_format.frame_rate_nom;
+ audio_frame->received_timestamp += duration_cast<steady_clock::duration>(duration<double>(offset));
+ }
- FrameAllocator::Frame video_frame = video_frame_allocator->alloc_frame();
- if (video_frame.data != nullptr) {
- uint8_t *pic_data[4] = { nullptr, nullptr, nullptr, nullptr };
- int linesizes[4] = { 0, 0, 0, 0 };
- if (pixel_format == bmusb::PixelFormat_8BitBGRA) {
- pic_data[0] = video_frame.data;
- linesizes[0] = video_format.stride;
- video_frame.len = video_format.stride * height;
+ steady_clock::time_point now = steady_clock::now();
+ if (duration<double>(now - next_frame_start).count() >= 0.1) {
+ // If we don't have enough CPU to keep up, or if we have a live stream
+ // where the initial origin was somehow wrong, we could be behind indefinitely.
+ // In particular, this will give the audio resampler problems as it tries
+ // to speed up to reduce the delay, hitting the low end of the buffer every time.
+ fprintf(stderr, "%s: Playback %.0f ms behind, resetting time scale\n",
+ pathname.c_str(),
+ 1e3 * duration<double>(now - next_frame_start).count());
+ pts_origin = frame->pts;
+ start = next_frame_start = now;
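+ // Jumping the timecode signals a discontinuity, which resets the audio resampler (see below).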
+ timecode += MAX_FPS * 2 + 1;
+ }
+ bool finished_wakeup = producer_thread_should_quit.sleep_until(next_frame_start);
+ if (finished_wakeup) {
+ if (audio_frame->len > 0) {
+ assert(audio_pts != -1);
+ }
+ if (!last_frame_was_connected) {
+ // We're recovering from an error (or really slow load, see above).
+ // Make sure to get the audio resampler reset. (This is a hack;
+ // ideally, the frame callback should just accept a way to signal
+ // audio discontinuity.)
+ timecode += MAX_FPS * 2 + 1;
+ }
+ frame_callback(frame->pts, video_timebase, audio_pts, audio_timebase, timecode++,
+ video_frame.get_and_release(), 0, video_format,
+ audio_frame.get_and_release(), 0, audio_format);
+ first_frame = false;
+ last_frame = steady_clock::now();
+ last_frame_was_connected = true;
+ break;
} else {
- assert(pixel_format == bmusb::PixelFormat_8BitYCbCrPlanar);
- const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(sws_dst_format);
-
- int chroma_width = AV_CEIL_RSHIFT(int(width), desc->log2_chroma_w);
- int chroma_height = AV_CEIL_RSHIFT(int(height), desc->log2_chroma_h);
-
- pic_data[0] = video_frame.data;
- linesizes[0] = width;
-
- pic_data[1] = pic_data[0] + width * height;
- linesizes[1] = chroma_width;
-
- pic_data[2] = pic_data[1] + chroma_width * chroma_height;
- linesizes[2] = chroma_width;
-
- video_frame.len = width * height + 2 * chroma_width * chroma_height;
-
- current_frame_ycbcr_format = decode_ycbcr_format(desc, frame.get());
+ if (producer_thread_should_quit.should_quit()) break;
+
+ bool rewound = false;
+ if (process_queued_commands(format_ctx.get(), pathname, last_modified, &rewound)) {
+ return true;
+ }
+ // If we just rewound, drop this frame on the floor and be done.
+ if (rewound) {
+ break;
+ }
+ // OK, we didn't, so probably a rate change. Recalculate next_frame_start,
+ // but if it's now in the past, we'll reset the origin, so that we don't
+ // generate a huge backlog of frames that we need to run through quickly.
+ next_frame_start = compute_frame_start(frame->pts, pts_origin, video_timebase, start, rate);
+ steady_clock::time_point now = steady_clock::now();
+ if (next_frame_start < now) {
+ pts_origin = frame->pts;
+ start = next_frame_start = now;
+ }
}
- sws_scale(sws_ctx.get(), frame->data, frame->linesize, 0, frame->height, pic_data, linesizes);
- video_frame.received_timestamp = next_frame_start;
}
-
- FrameAllocator::Frame audio_frame;
- AudioFormat audio_format;
- audio_format.bits_per_sample = 32;
- audio_format.num_channels = 8;
-
- producer_thread_should_quit.sleep_until(next_frame_start);
- frame_callback(timecode++,
- video_frame, 0, video_format,
- audio_frame, 0, audio_format);
+ last_pts = frame->pts;
}
return true;
}
start = next_frame_start = steady_clock::now();
}
-bool FFmpegCapture::process_queued_commands(AVFormatContext *format_ctx, const std::string &pathname, timespec last_modified)
+bool FFmpegCapture::process_queued_commands(AVFormatContext *format_ctx, const std::string &pathname, timespec last_modified, bool *rewound)
{
// Process any queued commands from other threads.
vector<QueuedCommand> commands;
return true;
}
internal_rewind();
+ if (rewound != nullptr) {
+ *rewound = true;
+ }
break;
case QueuedCommand::CHANGE_RATE:
- start = next_frame_start;
+ // Change the origin to the last played frame.
+ start = compute_frame_start(last_pts, pts_origin, video_timebase, start, rate);
pts_origin = last_pts;
rate = cmd.new_rate;
break;
return false;
}
-AVFrameWithDeleter FFmpegCapture::decode_frame(AVFormatContext *format_ctx, AVCodecContext *codec_ctx, const std::string &pathname, int video_stream_index, bool *error)
+AVFrameWithDeleter FFmpegCapture::decode_frame(AVFormatContext *format_ctx, AVCodecContext *video_codec_ctx, AVCodecContext *audio_codec_ctx,
+ const std::string &pathname, int video_stream_index, int audio_stream_index,
+ FrameAllocator::Frame *audio_frame, AudioFormat *audio_format, int64_t *audio_pts, bool *error)
{
*error = false;
// Read packets until we have a frame or there are none left.
bool frame_finished = false;
- AVFrameWithDeleter frame = av_frame_alloc_unique();
+ AVFrameWithDeleter audio_avframe = av_frame_alloc_unique();
+ AVFrameWithDeleter video_avframe = av_frame_alloc_unique();
bool eof = false;
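+ // -1 means no audio has been decoded yet.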
+ *audio_pts = -1;
+ bool has_audio = false;
do {
AVPacket pkt;
unique_ptr<AVPacket, decltype(av_packet_unref)*> pkt_cleanup(
pkt.data = nullptr;
pkt.size = 0;
if (av_read_frame(format_ctx, &pkt) == 0) {
- if (pkt.stream_index != video_stream_index) {
- // Ignore audio for now.
- continue;
+ if (pkt.stream_index == audio_stream_index && audio_callback != nullptr) {
+ audio_callback(&pkt, format_ctx->streams[audio_stream_index]->time_base);
}
- if (avcodec_send_packet(codec_ctx, &pkt) < 0) {
- fprintf(stderr, "%s: Cannot send packet to codec.\n", pathname.c_str());
- *error = true;
- return AVFrameWithDeleter(nullptr);
+ if (pkt.stream_index == video_stream_index) {
+ if (avcodec_send_packet(video_codec_ctx, &pkt) < 0) {
+ fprintf(stderr, "%s: Cannot send packet to video codec.\n", pathname.c_str());
+ *error = true;
+ return AVFrameWithDeleter(nullptr);
+ }
+ } else if (pkt.stream_index == audio_stream_index) {
+ has_audio = true;
+ if (avcodec_send_packet(audio_codec_ctx, &pkt) < 0) {
+ fprintf(stderr, "%s: Cannot send packet to audio codec.\n", pathname.c_str());
+ *error = true;
+ return AVFrameWithDeleter(nullptr);
+ }
}
} else {
eof = true; // Or error, but ignore that for the time being.
}
- int err = avcodec_receive_frame(codec_ctx, frame.get());
+ // Decode audio, if any.
+ if (has_audio) {
+ for ( ;; ) {
+ int err = avcodec_receive_frame(audio_codec_ctx, audio_avframe.get());
+ if (err == 0) {
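+ // Only the first audio frame's PTS is kept; later chunks are appended right after it.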
+ if (*audio_pts == -1) {
+ *audio_pts = audio_avframe->pts;
+ }
+ convert_audio(audio_avframe.get(), audio_frame, audio_format);
+ } else if (err == AVERROR(EAGAIN)) {
+ break;
+ } else {
+ fprintf(stderr, "%s: Cannot receive frame from audio codec.\n", pathname.c_str());
+ *error = true;
+ return AVFrameWithDeleter(nullptr);
+ }
+ }
+ }
+
+ // Decode video, if we have a frame.
+ int err = avcodec_receive_frame(video_codec_ctx, video_avframe.get());
if (err == 0) {
frame_finished = true;
break;
} else if (err != AVERROR(EAGAIN)) {
- fprintf(stderr, "%s: Cannot receive frame from codec.\n", pathname.c_str());
+ fprintf(stderr, "%s: Cannot receive frame from video codec.\n", pathname.c_str());
*error = true;
return AVFrameWithDeleter(nullptr);
}
} while (!eof);
if (frame_finished)
- return frame;
+ return video_avframe;
else
return AVFrameWithDeleter(nullptr);
}
+
+void FFmpegCapture::convert_audio(const AVFrame *audio_avframe, FrameAllocator::Frame *audio_frame, AudioFormat *audio_format)
+{
+ // Decide on a format. If there already is one in this audio frame,
+ // we're pretty much forced to use it. If not, we try to find an exact match.
+ // If that still doesn't work, we default to 32-bit signed chunked
+ // (float would be nice, but there's really no way to signal that yet).
+ AVSampleFormat dst_format;
+ if (audio_format->bits_per_sample == 0) {
+ switch (audio_avframe->format) {
+ case AV_SAMPLE_FMT_S16:
+ case AV_SAMPLE_FMT_S16P:
+ audio_format->bits_per_sample = 16;
+ dst_format = AV_SAMPLE_FMT_S16;
+ break;
+ case AV_SAMPLE_FMT_S32:
+ case AV_SAMPLE_FMT_S32P:
+ default:
+ audio_format->bits_per_sample = 32;
+ dst_format = AV_SAMPLE_FMT_S32;
+ break;
+ }
+ } else if (audio_format->bits_per_sample == 16) {
+ dst_format = AV_SAMPLE_FMT_S16;
+ } else if (audio_format->bits_per_sample == 32) {
+ dst_format = AV_SAMPLE_FMT_S32;
+ } else {
+ assert(false);
+ }
+ audio_format->num_channels = 2;
+
+ int64_t channel_layout = audio_avframe->channel_layout;
+ if (channel_layout == 0) {
+ channel_layout = av_get_default_channel_layout(audio_avframe->channels);
+ }
+
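+ // Recreate the resampler only when the input audio parameters change.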
+ if (resampler == nullptr ||
+ audio_avframe->format != last_src_format ||
+ dst_format != last_dst_format ||
+ channel_layout != last_channel_layout ||
+ av_frame_get_sample_rate(audio_avframe) != last_sample_rate) {
+ avresample_free(&resampler);
+ resampler = avresample_alloc_context();
+ if (resampler == nullptr) {
+ fprintf(stderr, "Allocating resampler failed.\n");
+ exit(1);
+ }
+
+ av_opt_set_int(resampler, "in_channel_layout", channel_layout, 0);
+ av_opt_set_int(resampler, "out_channel_layout", AV_CH_LAYOUT_STEREO_DOWNMIX, 0);
+ av_opt_set_int(resampler, "in_sample_rate", av_frame_get_sample_rate(audio_avframe), 0);
+ av_opt_set_int(resampler, "out_sample_rate", OUTPUT_FREQUENCY, 0);
+ av_opt_set_int(resampler, "in_sample_fmt", audio_avframe->format, 0);
+ av_opt_set_int(resampler, "out_sample_fmt", dst_format, 0);
+
+ if (avresample_open(resampler) < 0) {
+ fprintf(stderr, "Could not open resample context.\n");
+ exit(1);
+ }
+
+ last_src_format = AVSampleFormat(audio_avframe->format);
+ last_dst_format = dst_format;
+ last_channel_layout = channel_layout;
+ last_sample_rate = av_frame_get_sample_rate(audio_avframe);
+ }
+
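+ // Times two, since the output is always downmixed to stereo.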
+ size_t bytes_per_sample = (audio_format->bits_per_sample / 8) * 2;
+ size_t num_samples_room = (audio_frame->size - audio_frame->len) / bytes_per_sample;
+
+ uint8_t *data = audio_frame->data + audio_frame->len;
+ int out_samples = avresample_convert(resampler, &data, 0, num_samples_room,
+ const_cast<uint8_t **>(audio_avframe->data), audio_avframe->linesize[0], audio_avframe->nb_samples);
+ if (out_samples < 0) {
+ fprintf(stderr, "Audio conversion failed.\n");
+ exit(1);
+ }
+
+ audio_frame->len += out_samples * bytes_per_sample;
+}
+
+VideoFormat FFmpegCapture::construct_video_format(const AVFrame *frame, AVRational video_timebase)
+{
+ VideoFormat video_format;
+ video_format.width = width;
+ video_format.height = height;
+ if (pixel_format == bmusb::PixelFormat_8BitBGRA) {
+ video_format.stride = width * 4;
+ } else if (pixel_format == FFmpegCapture::PixelFormat_NV12) {
+ video_format.stride = width;
+ } else {
+ assert(pixel_format == bmusb::PixelFormat_8BitYCbCrPlanar);
+ video_format.stride = width;
+ }
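+ // Approximate the frame rate as the inverse of this frame's duration.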
+ video_format.frame_rate_nom = video_timebase.den;
+ video_format.frame_rate_den = av_frame_get_pkt_duration(frame) * video_timebase.num;
+ if (video_format.frame_rate_nom == 0 || video_format.frame_rate_den == 0) {
+ // Invalid frame rate.
+ video_format.frame_rate_nom = 60;
+ video_format.frame_rate_den = 1;
+ }
+ video_format.has_signal = true;
+ video_format.is_connected = true;
+ return video_format;
+}
+
+UniqueFrame FFmpegCapture::make_video_frame(const AVFrame *frame, const string &pathname, bool *error)
+{
+ *error = false;
+
+ UniqueFrame video_frame(video_frame_allocator->alloc_frame());
+ if (video_frame->data == nullptr) {
+ return video_frame;
+ }
+
+ if (sws_ctx == nullptr ||
+ sws_last_width != frame->width ||
+ sws_last_height != frame->height ||
+ sws_last_src_format != frame->format) {
+ sws_dst_format = decide_dst_format(AVPixelFormat(frame->format), pixel_format);
+ sws_ctx.reset(
+ sws_getContext(frame->width, frame->height, AVPixelFormat(frame->format),
+ width, height, sws_dst_format,
+ SWS_BICUBIC, nullptr, nullptr, nullptr));
+ sws_last_width = frame->width;
+ sws_last_height = frame->height;
+ sws_last_src_format = frame->format;
+ }
+ if (sws_ctx == nullptr) {
+ fprintf(stderr, "%s: Could not create scaler context\n", pathname.c_str());
+ *error = true;
+ return video_frame;
+ }
+
+ uint8_t *pic_data[4] = { nullptr, nullptr, nullptr, nullptr };
+ int linesizes[4] = { 0, 0, 0, 0 };
+ if (pixel_format == bmusb::PixelFormat_8BitBGRA) {
+ pic_data[0] = video_frame->data;
+ linesizes[0] = width * 4;
+ video_frame->len = (width * 4) * height;
+ } else if (pixel_format == PixelFormat_NV12) {
+ pic_data[0] = video_frame->data;
+ linesizes[0] = width;
+
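+ // NV12: an interleaved Cb/Cr plane follows directly after the full-size luma plane.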
+ pic_data[1] = pic_data[0] + width * height;
+ linesizes[1] = width;
+
+ video_frame->len = (width * 2) * height;
+
+ const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(sws_dst_format);
+ current_frame_ycbcr_format = decode_ycbcr_format(desc, frame);
+ } else {
+ assert(pixel_format == bmusb::PixelFormat_8BitYCbCrPlanar);
+ const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(sws_dst_format);
+
+ int chroma_width = AV_CEIL_RSHIFT(int(width), desc->log2_chroma_w);
+ int chroma_height = AV_CEIL_RSHIFT(int(height), desc->log2_chroma_h);
+
+ pic_data[0] = video_frame->data;
+ linesizes[0] = width;
+
+ pic_data[1] = pic_data[0] + width * height;
+ linesizes[1] = chroma_width;
+
+ pic_data[2] = pic_data[1] + chroma_width * chroma_height;
+ linesizes[2] = chroma_width;
+
+ video_frame->len = width * height + 2 * chroma_width * chroma_height;
+
+ current_frame_ycbcr_format = decode_ycbcr_format(desc, frame);
+ }
+ sws_scale(sws_ctx.get(), frame->data, frame->linesize, 0, frame->height, pic_data, linesizes);
+
+ return video_frame;
+}
+
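+// Thunk so that FFmpeg's C-style AVIOInterruptCB can call back into this object.
+// A nonzero return value aborts whatever blocking I/O operation is in progress.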
+int FFmpegCapture::interrupt_cb_thunk(void *opaque)
+{
+ return reinterpret_cast<FFmpegCapture *>(opaque)->interrupt_cb();
+}
+
+int FFmpegCapture::interrupt_cb()
+{
+ return should_interrupt.load();
+}