class YCbCrInput;
} // namespace movit
+// A class to estimate the future jitter. Used in QueueLengthPolicy (see below).
+//
+// There are many ways to estimate jitter; I've tested a few ones (and also
+// some algorithms that don't explicitly model jitter) with different
+// parameters on some real-life data in experiments/queue_drop_policy.cpp.
+// This is one based on simple order statistics where I've added some margin in
+// the number of starvation events; I believe that about one every hour would
+// probably be acceptable, but this one typically goes lower than that, at the
+// cost of 2–3 ms extra latency. (If the queue is hard-limited to one frame, it's
+// possible to get ~10 ms further down, but this would mean framedrops every
+// second or so.) The general strategy is: Take the 99.9-percentile jitter over
+// last 5000 frames, multiply by two, and that's our worst-case jitter
+// estimate. The fact that we're not using the max value means that we could
+// actually even throw away very late frames immediately, which means we only
+// get one user-visible event instead of seeing something both when the frame
+// arrives late (duplicate frame) and then again when we drop.
+class JitterHistory {
+private:
+ static constexpr size_t history_length = 5000;
+ static constexpr double percentile = 0.999;
+ static constexpr double multiplier = 2.0;
+
+public:
+ void register_metrics(const std::vector<std::pair<std::string, std::string>> &labels);
+ void unregister_metrics(const std::vector<std::pair<std::string, std::string>> &labels);
+
+ // Forget all recorded samples and start estimation over.
+ void clear() {
+ history.clear();
+ orders.clear();
+ }
+ // Feed in one arrived frame; <dropped_frames> presumably accounts for
+ // frames known to be lost upstream so expected_timestamp stays in sync
+ // with the source — confirm in the implementation.
+ void frame_arrived(std::chrono::steady_clock::time_point now, int64_t frame_duration, size_t dropped_frames);
+ std::chrono::steady_clock::time_point get_expected_next_frame() const { return expected_timestamp; }
+ // Returns the current worst-case jitter estimate, in seconds
+ // (percentile * multiplier; see the class comment above).
+ double estimate_max_jitter() const;
+
+private:
+ // A simple O(k) based algorithm for getting the k-th largest or
+ // smallest element from our window; we simply keep the multiset
+ // ordered (insertions and deletions are O(log n) as always) and then
+ // iterate from one of the sides. If we had larger values of k,
+ // we could go for a more complicated setup with two sets or heaps
+ // (one increasing and one decreasing) that we keep balanced around
+ // the point, or it is possible to reimplement std::set with
+ // counts in each node. However, since k=5, we don't need this.
+ std::multiset<double> orders;
+ std::deque<std::multiset<double>::iterator> history;
+
+ std::chrono::steady_clock::time_point expected_timestamp = std::chrono::steady_clock::time_point::min();
+
+ // Metrics. There are no direct summaries for jitter, since we already have latency summaries.
+ std::atomic<int64_t> metric_input_underestimated_jitter_frames{0};
+ std::atomic<double> metric_input_estimated_max_jitter_seconds{0.0 / 0.0};
+};
+
// For any card that's not the master (where we pick out the frames as they
// come, as fast as we can process), there's going to be a queue. The question
// is when we should drop frames from that queue (apart from the obvious
// 2. We don't want to add more delay than is needed.
//
// Our general strategy is to drop as many frames as we can (helping for #2)
-// that we think is safe for #1 given jitter. To this end, we set a lower floor N,
-// where we assume that if we have N frames in the queue, we're always safe from
-// starvation. (Typically, N will be 0 or 1. It starts off at 0.) If we have
-// more than N frames in the queue after reading out the one we need, we head-drop
-// them to reduce the queue.
-//
-// N is reduced as follows: If the queue has had at least one spare frame for
-// at least 50 (master) frames (ie., it's been too conservative for a second),
-// we reduce N by 1 and reset the timers.
+// that we think is safe for #1 given jitter. To this end, we measure the
+// deviation from the expected arrival time for all cards, and use that for
+// continuous jitter estimation.
//
-// Whenever the queue is starved (we needed a frame but there was none),
-// and we've been at N since the last starvation, N was obviously too low,
-// so we increment it. We will never set N above 5, though.
+// We then drop everything from the queue that we're sure we won't need to
+// serve the output in the time before the next frame arrives. Typically,
+// this means the queue will contain 0 or 1 frames, although more is also
+// possible if the jitter is very high.
class QueueLengthPolicy {
public:
QueueLengthPolicy() {}
void reset(unsigned card_index) {
this->card_index = card_index;
- safe_queue_length = 1;
- frames_with_at_least_one = 0;
- been_at_safe_point_since_last_starvation = false;
}
void register_metrics(const std::vector<std::pair<std::string, std::string>> &labels);
- 
- void update_policy(unsigned queue_length); // Call before picking out a frame, so 0 means starvation.
+ void unregister_metrics(const std::vector<std::pair<std::string, std::string>> &labels);
+
+ // Call after picking out a frame, so 0 means starvation.
+ // Recomputes safe_queue_length from the current jitter estimates
+ // (see JitterHistory above) instead of from past starvation events.
+ void update_policy(std::chrono::steady_clock::time_point now,
+ std::chrono::steady_clock::time_point expected_next_frame,
+ int64_t input_frame_duration,
+ int64_t master_frame_duration,
+ double max_input_card_jitter_seconds,
+ double max_master_card_jitter_seconds);
unsigned get_safe_queue_length() const { return safe_queue_length; }
private:
- unsigned card_index; // For debugging only.
- unsigned safe_queue_length = 1; // Called N in the comments. Can never go below 1.
- unsigned frames_with_at_least_one = 0;
- bool been_at_safe_point_since_last_starvation = false;
+ unsigned card_index; // For debugging and metrics only.
+ unsigned safe_queue_length = 0; // Can never go below zero.
// Metrics.
- std::atomic<int64_t> metric_input_queue_length_frames{0};
+ // NOTE(review): initialized to 1 although safe_queue_length now starts
+ // at 0 — confirm this is intentional and not a leftover from the old policy.
std::atomic<int64_t> metric_input_queue_safe_length_frames{1};
- std::atomic<int64_t> metric_input_duped_frames{0};
};
class Mixer {
}
// Note: You can also get this through the global variable global_audio_mixer.
- AudioMixer *get_audio_mixer() { return &audio_mixer; }
- const AudioMixer *get_audio_mixer() const { return &audio_mixer; }
+ // audio_mixer is now held through a std::unique_ptr, so hand out the raw,
+ // non-owning pointer; callers must not take ownership or delete it.
+ AudioMixer *get_audio_mixer() { return audio_mixer.get(); }
+ const AudioMixer *get_audio_mixer() const { return audio_mixer.get(); }
void schedule_cut()
{
return cards[card_index].output != nullptr;
}
+ // Returns whether the given card index refers to an FFmpeg video input.
+ // Indices past num_cards denote video inputs, hence the wider assert bound.
+ bool card_is_ffmpeg(unsigned card_index) const {
+ assert(card_index < num_cards + num_video_inputs);
+ return cards[card_index].type == CardType::FFMPEG_INPUT;
+ }
+
std::map<uint32_t, bmusb::VideoMode> get_available_video_modes(unsigned card_index) const {
assert(card_index < num_cards);
return cards[card_index].capture->get_available_video_modes();
cards[card_index].capture->set_audio_input(input);
}
+ std::string get_ffmpeg_filename(unsigned card_index) const;
+
+ void set_ffmpeg_filename(unsigned card_index, const std::string &filename);
+
void change_x264_bitrate(unsigned rate_kbit) {
video_encoder->change_x264_bitrate(rate_kbit);
}
display_timecode_on_stdout = enable;
}
+ // Number of clients currently connected to the built-in HTTP server.
+ int64_t get_num_connected_clients() const {
+ return httpd.get_num_connected_clients();
+ }
+
+ // Theme menu plumbing: expose the theme-defined menu to the UI, and route
+ // clicks back into the theme by its Lua reference.
+ std::vector<Theme::MenuEntry> get_theme_menu() { return theme->get_theme_menu(); }
+
+ void theme_menu_entry_clicked(int lua_ref) { return theme->theme_menu_entry_clicked(lua_ref); }
+
+ void set_theme_menu_callback(std::function<void()> callback)
+ {
+ theme->set_theme_menu_callback(callback);
+ }
+
+ // Presumably blocks until frame_num advances (see frame_num_mutex /
+ // frame_num_updated below) — confirm against the implementation.
+ void wait_for_next_frame();
+
private:
struct CaptureCard;
enum class CardType {
LIVE_CARD,
FAKE_CAPTURE,
- FFMPEG_INPUT
+ FFMPEG_INPUT,
+ CEF_INPUT, // HTML input rendered via CEF (cf. num_html_inputs, is_cef_capture).
};
void configure_card(unsigned card_index, bmusb::CaptureInterface *capture, CardType card_type, DeckLinkOutput *output);
void set_output_card_internal(int card_index); // Should only be called from the mixer thread.
void audio_thread_func();
void release_display_frame(DisplayFrame *frame);
double pts() { return double(pts_int) / TIMEBASE; }
- // Call this _before_ trying to pull out a frame from a capture card;
- // it will update the policy and drop the right amount of frames for you.
- void trim_queue(CaptureCard *card, unsigned card_index);
+ void trim_queue(CaptureCard *card, size_t safe_queue_length);
+ std::pair<std::string, std::string> get_channels_json();
+ std::pair<std::string, std::string> get_channel_color_http(unsigned channel_idx);
HTTPD httpd;
- unsigned num_cards, num_video_inputs;
+ unsigned num_cards, num_video_inputs, num_html_inputs = 0;
QSurface *mixer_surface, *h264_encoder_surface, *decklink_output_surface;
std::unique_ptr<movit::ResourcePool> resource_pool;
movit::YCbCrInput *display_input;
int64_t pts_int = 0; // In TIMEBASE units.
- unsigned frame_num = 0;
+
+ mutable std::mutex frame_num_mutex;
+ std::condition_variable frame_num_updated;
+ unsigned frame_num = 0; // Under <frame_num_mutex>.
// Accumulated errors in number of 1/TIMEBASE audio samples. If OUTPUT_FREQUENCY divided by
// frame rate is integer, will always stay zero.
CardType type;
std::unique_ptr<DeckLinkOutput> output;
+ // CEF only delivers frames when it actually has a change.
+ // If we trim the queue for latency reasons, we could thus
+ // end up in a situation trimming a frame that was meant to
+ // be displayed for a long time, which is really suboptimal.
+ // Thus, if we drop the last frame we have, may_have_dropped_last_frame
+ // is set to true, and the next starvation event will trigger
+// us requesting a CEF repaint.
+ bool is_cef_capture, may_have_dropped_last_frame = false;
+
// If this card is used for output (ie., output_card_index points to it),
// it cannot simultaneously be used for capture, so <capture> gets replaced
// by a FakeCapture. However, since reconstructing the real capture object
std::chrono::steady_clock::time_point received_timestamp = std::chrono::steady_clock::time_point::min();
};
std::deque<NewFrame> new_frames;
- bool should_quit = false;
- std::condition_variable new_frames_changed; // Set whenever new_frames (or should_quit) is changed.
+ std::condition_variable new_frames_changed; // Set whenever new_frames is changed.
QueueLengthPolicy queue_length_policy; // Refers to the "new_frames" queue.
int last_timecode = -1; // Unwrapped.
+ JitterHistory jitter_history;
+
// Metrics.
std::vector<std::pair<std::string, std::string>> labels;
std::atomic<int64_t> metric_input_received_frames{0};
+ std::atomic<int64_t> metric_input_duped_frames{0};
std::atomic<int64_t> metric_input_dropped_frames_jitter{0};
std::atomic<int64_t> metric_input_dropped_frames_error{0};
std::atomic<int64_t> metric_input_resets{0};
+ std::atomic<int64_t> metric_input_queue_length_frames{0};
std::atomic<int64_t> metric_input_has_signal_bool{-1};
std::atomic<int64_t> metric_input_is_connected_bool{-1};
std::atomic<int64_t> metric_input_frame_rate_den{-1};
std::atomic<int64_t> metric_input_sample_rate_hz{-1};
};
+ JitterHistory output_jitter_history;
CaptureCard cards[MAX_VIDEO_CARDS]; // Protected by <card_mutex>.
YCbCrInterpretation ycbcr_interpretation[MAX_VIDEO_CARDS]; // Protected by <card_mutex>.
- AudioMixer audio_mixer; // Same as global_audio_mixer (see audio_mixer.h).
+ std::unique_ptr<AudioMixer> audio_mixer; // Same as global_audio_mixer (see audio_mixer.h).
bool input_card_is_master_clock(unsigned card_index, unsigned master_card_index) const;
struct OutputFrameInfo {
int dropped_frames; // Since last frame.
class OutputChannel {
public:
~OutputChannel();
- void output_frame(DisplayFrame frame);
+ void output_frame(DisplayFrame &&frame);
bool get_display_frame(DisplayFrame *frame);
void add_frame_ready_callback(void *key, new_frame_ready_callback_t callback);
void remove_frame_ready_callback(void *key);
std::vector<uint32_t> mode_scanlist[MAX_VIDEO_CARDS];
unsigned mode_scanlist_index[MAX_VIDEO_CARDS]{ 0 };
std::chrono::steady_clock::time_point last_mode_scan_change[MAX_VIDEO_CARDS];
-
- // Metrics.
- std::atomic<int64_t> metric_frames_output_total{0};
- std::atomic<int64_t> metric_frames_output_dropped{0};
- std::atomic<double> metric_start_time_seconds{0.0 / 0.0};
- std::atomic<int64_t> metrics_memory_used_bytes{0};
- std::atomic<double> metrics_memory_locked_limit_bytes{0.0 / 0.0};
};
extern Mixer *global_mixer;
-extern bool uses_mlock;
#endif // !defined(_MIXER_H)