void QueueLengthPolicy::update_policy(steady_clock::time_point now,
steady_clock::time_point expected_next_frame,
+ int64_t input_frame_duration,
int64_t master_frame_duration,
double max_input_card_jitter_seconds,
double max_master_card_jitter_seconds)
{
+ double input_frame_duration_seconds = input_frame_duration / double(TIMEBASE);
double master_frame_duration_seconds = master_frame_duration / double(TIMEBASE);
// Figure out when we can expect the next frame for this card. We account
// for this by looking at the situation five frames ahead, assuming
// everything else stays the same.
double frames_allowed;
- if (max_master_card_jitter_seconds < max_input_card_jitter_seconds) {
- frames_allowed = frames_needed + 5 * (max_input_card_jitter_seconds - max_master_card_jitter_seconds) / master_frame_duration_seconds;
+ if (master_frame_duration < input_frame_duration) {
+ frames_allowed = frames_needed + 5 * (input_frame_duration_seconds - master_frame_duration_seconds) / master_frame_duration_seconds;
} else {
frames_allowed = frames_needed;
}
} while (!success);
}
- audio_mixer.add_audio(device, audio_frame.data + audio_offset, num_samples, audio_format, frame_length, audio_frame.received_timestamp);
+ if (num_samples > 0) {
+ audio_mixer.add_audio(device, audio_frame.data + audio_offset, num_samples, audio_format, frame_length, audio_frame.received_timestamp);
+ }
// Done with the audio, so release it.
if (audio_frame.owner) {
card->last_timecode = timecode;
- // Calculate jitter for this card here. We do it on arrival so that we
- // make sure every frame counts, even the dropped ones -- and it will also
- // make sure the jitter number is as recent as possible, should it change.
- card->jitter_history.frame_arrived(video_frame.received_timestamp, frame_length, dropped_frames);
-
PBOFrameAllocator::Userdata *userdata = (PBOFrameAllocator::Userdata *)video_frame.userdata;
size_t cbcr_width, cbcr_height, cbcr_offset, y_offset;
new_frame.dropped_frames = dropped_frames;
new_frame.received_timestamp = video_frame.received_timestamp;
card->new_frames.push_back(move(new_frame));
- card->new_frames_changed.notify_all();
+ card->jitter_history.frame_arrived(video_frame.received_timestamp, frame_length, dropped_frames);
}
+ card->new_frames_changed.notify_all();
return;
}
new_frame.dropped_frames = dropped_frames;
new_frame.received_timestamp = video_frame.received_timestamp; // Ignore the audio timestamp.
card->new_frames.push_back(move(new_frame));
- card->new_frames_changed.notify_all();
+ card->jitter_history.frame_arrived(video_frame.received_timestamp, frame_length, dropped_frames);
}
+ card->new_frames_changed.notify_all();
}
}
card->queue_length_policy.update_policy(
output_frame_info.frame_timestamp,
card->jitter_history.get_expected_next_frame(),
+ new_frames[master_card_index].length,
output_frame_info.frame_duration,
card->jitter_history.estimate_max_jitter(),
output_jitter_history.estimate_max_jitter());
// The theme can't (or at least shouldn't!) call connect_signal() on
// each FFmpeg input, so we'll do it here.
for (const pair<LiveInputWrapper *, FFmpegCapture *> &conn : theme->get_signal_connections()) {
- conn.first->connect_signal_raw(conn.second->get_card_index());
+ conn.first->connect_signal_raw(conn.second->get_card_index(), input_state);
}
// If HDMI/SDI output is active and the user has requested auto mode,