FFmpegCapture::FFmpegCapture(const string &filename, unsigned width, unsigned height)
: filename(filename), width(width), height(height), video_timebase{1, 1}
{
- // Not really used for anything.
description = "Video: " + filename;
last_frame = steady_clock::now();
pthread_setname_np(pthread_self(), thread_name);
while (!producer_thread_should_quit.should_quit()) {
- string pathname = search_for_file(filename);
- if (filename.empty()) {
- fprintf(stderr, "%s not found, sleeping one second and trying again...\n", filename.c_str());
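+ // The filename is shared with other threads (it is guarded by filename_mu),
+ // so take a consistent snapshot under the lock and use that copy for the
+ // rest of this iteration.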
+ string filename_copy;
+ {
+ lock_guard<mutex> lock(filename_mu);
+ filename_copy = filename;
+ }
+
+ string pathname = search_for_file(filename_copy);
+ if (pathname.empty()) {
+ fprintf(stderr, "%s not found, sleeping one second and trying again...\n", filename_copy.c_str());
send_disconnected_frame();
producer_thread_should_quit.sleep_for(seconds(1));
continue;
}
if (pixel_format == bmusb::PixelFormat_8BitBGRA) {
video_format.stride = width * 4;
video_frame.len = width * height * 4;
+ memset(video_frame.data, 0, video_frame.len);
} else {
video_format.stride = width;
current_frame_ycbcr_format.luma_coefficients = YCBCR_REC_709;
current_frame_ycbcr_format.cr_x_position = 0.0f;
current_frame_ycbcr_format.cr_y_position = 0.0f;
video_frame.len = width * height * 2;
+ memset(video_frame.data, 0, width * height);
+ memset(video_frame.data + width * height, 128, width * height); // Valid for both NV12 and planar.
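+ // (0 luma with 128 chroma decodes to black; 128 is the neutral chroma
+ // value, whereas all-zero chroma would render as green.)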
}
- memset(video_frame.data, 0, video_frame.len);
frame_callback(-1, AVRational{1, TIMEBASE}, -1, AVRational{1, TIMEBASE}, timecode++,
video_frame, /*video_offset=*/0, video_format,
FrameAllocator::Frame(), /*audio_offset=*/0, AudioFormat());
int64_t audio_pts_as_video_pts = av_rescale_q(audio_pts, audio_timebase, video_timebase);
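// (av_rescale_q() converts a timestamp between timebases without intermediate
// overflow; e.g., an audio pts of 4800 in a {1, 48000} timebase is 0.1 s,
// whatever the video timebase happens to be.)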
audio_frame->received_timestamp = compute_frame_start(audio_pts_as_video_pts, pts_origin, video_timebase, start, rate);
+ if (audio_frame->len != 0) {
+ // The received timestamps in Nageru are measured after we've just received the frame.
+ // However, pts (especially audio pts) is at the _beginning_ of the frame.
+ // If we have locked audio, the distinction doesn't really matter, as pts is
+ // on a relative scale and a fixed offset is fine. But if we don't, we will have
+ // a different number of samples each time, which will cause huge audio jitter
+ // and throw off the resampler.
+ //
+ // In a sense, we should have compensated by adding the frame and audio lengths
+ // to video_frame->received_timestamp and audio_frame->received_timestamp respectively,
+ // but that would mean extra waiting in sleep_until(). All we need is that they
+ // are correct relative to each other, though (and to the other frames we send),
+ // so just align the end of the audio frame, and we're fine.
+ size_t num_samples = (audio_frame->len * 8) / audio_format.bits_per_sample / audio_format.num_channels;
+ double offset = double(num_samples) / OUTPUT_FREQUENCY -
+ double(video_format.frame_rate_den) / video_format.frame_rate_nom;
+ audio_frame->received_timestamp += duration_cast<steady_clock::duration>(duration<double>(offset));
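+ // (Rough worked example, with made-up numbers: a 1024-sample frame at
+ // OUTPUT_FREQUENCY = 48000 against 50 fps video gives
+ // offset = 1024/48000 - 1/50 ≈ +1.3 ms, nudging the audio timestamp
+ // slightly later so that the frame ends in step with the video.)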
+ }
+
+ steady_clock::time_point now = steady_clock::now();
+ if (duration<double>(now - next_frame_start).count() >= 0.1) {
+ // If we don't have enough CPU to keep up, or if we have a live stream
+ // where the initial origin was somehow wrong, we could be behind indefinitely.
+ // In particular, this will give the audio resampler problems as it tries
+ // to speed up to reduce the delay, hitting the low end of the buffer every time.
+ fprintf(stderr, "%s: Playback %.0f ms behind, resetting time scale\n",
+ pathname.c_str(),
+ 1e3 * duration<double>(now - next_frame_start).count());
+ pts_origin = frame->pts;
+ start = next_frame_start = now;
+ timecode += MAX_FPS * 2 + 1;
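+ // (The skip of MAX_FPS * 2 + 1 makes the timecode gap too large to
+ // mistake for an ordinary dropped frame or two, presumably so that
+ // downstream consumers treat it as a real discontinuity.)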
+ }
bool finished_wakeup = producer_thread_should_quit.sleep_until(next_frame_start);
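// (sleep_until() returns true if it slept all the way to the deadline,
// and false if it was woken early because the thread was asked to quit.)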
if (finished_wakeup) {
if (audio_frame->len > 0) {
AVFrameWithDeleter video_avframe = av_frame_alloc_unique();
bool eof = false;
*audio_pts = -1;
+ bool has_audio = false;
do {
AVPacket pkt;
unique_ptr<AVPacket, decltype(av_packet_unref)*> pkt_cleanup(
&pkt, av_packet_unref);
return AVFrameWithDeleter(nullptr);
}
} else if (pkt.stream_index == audio_stream_index) {
- if (*audio_pts == -1) {
- *audio_pts = pkt.pts;
- }
+ has_audio = true;
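+ // Only note that this batch contains audio here; the pts itself is
+ // taken from the first decoded frame below, not from the packet.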
if (avcodec_send_packet(audio_codec_ctx, &pkt) < 0) {
fprintf(stderr, "%s: Cannot send packet to audio codec.\n", pathname.c_str());
*error = true;
return AVFrameWithDeleter(nullptr);
}
// Decode audio, if any.
- if (*audio_pts != -1) {
+ if (has_audio) {
for ( ;; ) {
int err = avcodec_receive_frame(audio_codec_ctx, audio_avframe.get());
if (err == 0) {
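+ // (Take the pts from the first decoded frame rather than from the first
+ // packet; with decoders that buffer internally, the two need not belong
+ // to the same frame, which would skew the audio alignment in the caller.)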
+ if (*audio_pts == -1) {
+ *audio_pts = audio_avframe->pts;
+ }
convert_audio(audio_avframe.get(), audio_frame, audio_format);
} else if (err == AVERROR(EAGAIN)) {
break;
}
av_opt_set_int(resampler, "in_channel_layout", channel_layout, 0);
- av_opt_set_int(resampler, "out_channel_layout", AV_CH_LAYOUT_STEREO, 0);
+ av_opt_set_int(resampler, "out_channel_layout", AV_CH_LAYOUT_STEREO_DOWNMIX, 0);
av_opt_set_int(resampler, "in_sample_rate", av_frame_get_sample_rate(audio_avframe), 0);
av_opt_set_int(resampler, "out_sample_rate", OUTPUT_FREQUENCY, 0);
av_opt_set_int(resampler, "in_sample_fmt", audio_avframe->format, 0);