4 // The actual video mixer, running in its own separate background thread.
15 #include <condition_variable>
26 #include <movit/effect.h>
27 #include <movit/image_format.h>
29 #include "audio_mixer.h"
30 #include "bmusb/bmusb.h"
32 #include "ffmpeg_capture.h"
33 #include "shared/httpd.h"
34 #include "input_state.h"
36 #include "pbo_frame_allocator.h"
37 #include "queue_length_policy.h"
38 #include "ref_counted_frame.h"
39 #include "shared/ref_counted_gl_sync.h"
40 #include "srt_metrics.h"
42 #include "shared/timebase.h"
43 #include "video_encoder.h"
44 #include "ycbcr_interpretation.h"
47 class ChromaSubsampler;
52 class TimecodeRenderer;
64 // The surface format is used for offscreen destinations for OpenGL contexts we need.
65 Mixer(const QSurfaceFormat &format);
// UI entry points; presumably invoked from the GUI thread — confirm in callers.
70 void transition_clicked(int transition_num);
71 void channel_clicked(int preview_num);
// Output enumeration: first the special outputs, then one entry per input channel.
76 OUTPUT_INPUT0, // 1, 2, 3, up to 15 follow numerically.
81 // The chain for rendering this frame. To render a display frame,
82 // first wait for <ready_fence>, then call <setup_chain>
83 // to wire up all the inputs, and then finally call
84 // chain->render_to_screen() or similar.
85 movit::EffectChain *chain;
86 std::function<void()> setup_chain;
88 // Asserted when all the inputs are ready; you cannot render the chain
90 RefCountedGLsync ready_fence;
92 // Holds on to all the input frames needed for this display frame,
93 // so they are not released while still rendering.
94 std::vector<RefCountedFrame> input_frames;
96 // Textures that should be released back to the resource pool
97 // when this frame disappears, if any.
98 // TODO: Refcount these as well?
99 std::vector<GLuint> temp_textures;
// Fetches the newest ready frame for <output> into *frame, delegating to the
// per-channel OutputChannel. Return value comes straight from
// OutputChannel::get_display_frame() (presumably true iff a frame was available).
101 // Implicitly frees the previous one if there's a new frame available.
102 bool get_display_frame(Output output, DisplayFrame *frame) {
103 return output_channel[output].get_display_frame(frame);
106 // NOTE: Callbacks will be called with a mutex held, so you should probably
107 // not do real work in them.
108 typedef std::function<void()> new_frame_ready_callback_t;
// Register a callback fired whenever a new frame is ready on <output>.
// <key> identifies this registration so it can be removed again below.
109 void add_frame_ready_callback(Output output, void *key, new_frame_ready_callback_t callback)
111 output_channel[output].add_frame_ready_callback(key, callback);
// Remove the callback previously registered under <key> for <output>.
114 void remove_frame_ready_callback(Output output, void *key)
116 output_channel[output].remove_frame_ready_callback(key);
119 // TODO: Should this really be per-channel? Shouldn't it just be called for e.g. the live output?
120 typedef std::function<void(const std::vector<std::string> &)> transition_names_updated_callback_t;
// Each setter below installs a single (replaceable) callback on the given
// output's OutputChannel; they are notification hooks for UI metadata updates.
121 void set_transition_names_updated_callback(Output output, transition_names_updated_callback_t callback)
123 output_channel[output].set_transition_names_updated_callback(callback);
126 typedef std::function<void(const std::string &)> name_updated_callback_t;
127 void set_name_updated_callback(Output output, name_updated_callback_t callback)
129 output_channel[output].set_name_updated_callback(callback);
132 typedef std::function<void(const std::string &)> color_updated_callback_t;
133 void set_color_updated_callback(Output output, color_updated_callback_t callback)
135 output_channel[output].set_color_updated_callback(callback);
// Thin forwarders to the Theme object (plus local master-clock state).
// These exist so UI code does not need to talk to Theme directly.
138 std::vector<std::string> get_transition_names()
140 return theme->get_transition_names(pts());
143 unsigned get_num_channels() const
145 return theme->get_num_channels();
148 std::string get_channel_name(unsigned channel) const
150 return theme->get_channel_name(channel);
153 std::string get_channel_color(unsigned channel) const
155 return theme->get_channel_color(channel);
158 int map_channel_to_signal(unsigned channel) const
160 return theme->map_channel_to_signal(channel);
163 int map_signal_to_card(int signal)
165 return theme->map_signal_to_card(signal);
// Which channel drives the output cadence (see master_clock_channel below).
168 unsigned get_master_clock() const
170 return master_clock_channel;
173 void set_master_clock(unsigned channel)
175 master_clock_channel = channel;
// Note: returning a void expression is legal C++ and intentional here.
178 void set_signal_mapping(int signal, int card)
180 return theme->set_signal_mapping(signal, card);
183 YCbCrInterpretation get_input_ycbcr_interpretation(unsigned card_index) const;
184 void set_input_ycbcr_interpretation(unsigned card_index, const YCbCrInterpretation &interpretation);
// White-balance support/adjustment, forwarded to the theme.
186 bool get_supports_set_wb(unsigned channel) const
188 return theme->get_supports_set_wb(channel);
191 void set_wb(unsigned channel, double r, double g, double b) const
193 theme->set_wb(channel, r, g, b);
// Lets the theme compose the status line shown in the UI.
196 std::string format_status_line(const std::string &disk_space_left_text, double file_length_seconds)
198 return theme->format_status_line(disk_space_left_text, file_length_seconds);
201 // Note: You can also get this through the global variable global_audio_mixer.
202 AudioMixer *get_audio_mixer() { return audio_mixer.get(); }
203 const AudioMixer *get_audio_mixer() const { return audio_mixer.get(); }
// Human-readable description of the capture device behind <card_index>.
// NOTE(review): does not take card_mutex — presumably safe because capture
// objects are only swapped on the mixer thread; confirm.
210 std::string get_card_description(unsigned card_index) const {
211 assert(card_index < MAX_VIDEO_CARDS);
212 return cards[card_index].capture->get_description();
215 // The difference between this and the previous function is that if a card
216 // is used as the current output, get_card_description() will return the
217 // fake card that's replacing it for input, whereas this function will return
218 // the card's actual name.
219 std::string get_output_card_description(unsigned card_index) const {
220 assert(card_can_be_used_as_output(card_index));
221 assert(card_index < MAX_VIDEO_CARDS);
// Prefer the parked (real) capture if the card is currently used for output.
222 if (cards[card_index].parked_capture) {
223 return cards[card_index].parked_capture->get_description();
225 return cards[card_index].capture->get_description();
// A card can drive output only if it has both an output object and a capture.
229 bool card_can_be_used_as_output(unsigned card_index) const {
230 assert(card_index < MAX_VIDEO_CARDS);
231 return cards[card_index].output != nullptr && cards[card_index].capture != nullptr;
234 bool card_is_cef(unsigned card_index) const {
235 assert(card_index < MAX_VIDEO_CARDS);
236 return cards[card_index].type == CardType::CEF_INPUT;
// True for FFmpeg inputs that are _not_ SRT-backed (see comment below).
239 bool card_is_ffmpeg(unsigned card_index) const {
240 assert(card_index < MAX_VIDEO_CARDS);
241 if (cards[card_index].type != CardType::FFMPEG_INPUT) {
245 // SRT inputs are more like regular inputs than FFmpeg inputs,
246 // so show them as such. (This allows the user to right-click
247 // to select a different input.)
248 return static_cast<FFmpegCapture *>(cards[card_index].capture.get())->get_srt_sock() == -1;
// Active means the card currently has a capture object (inactive cards are
// hidden in the UI). Unlike the predicates above, this one takes card_mutex.
254 bool card_is_active(unsigned card_index) const {
255 assert(card_index < MAX_VIDEO_CARDS);
256 std::lock_guard<std::mutex> lock(card_mutex);
257 return cards[card_index].capture != nullptr;
// Request that the card be (re)activated; only sets a flag —
260 void force_card_active(unsigned card_index)
262 // handle_hotplugged_cards() will pick this up.
263 std::lock_guard<std::mutex> lock(card_mutex);
264 cards[card_index].force_active = true;
// Per-card video mode and input selection; all forward to the card's
// bmusb::CaptureInterface. None of these take card_mutex — NOTE(review):
// presumably called only when the card is known to be active; confirm.
267 std::map<uint32_t, bmusb::VideoMode> get_available_video_modes(unsigned card_index) const {
268 assert(card_index < MAX_VIDEO_CARDS);
269 return cards[card_index].capture->get_available_video_modes();
272 uint32_t get_current_video_mode(unsigned card_index) const {
273 assert(card_index < MAX_VIDEO_CARDS);
274 return cards[card_index].capture->get_current_video_mode();
277 void set_video_mode(unsigned card_index, uint32_t mode) {
278 assert(card_index < MAX_VIDEO_CARDS);
279 cards[card_index].capture->set_video_mode(mode);
// Kicks off automatic scanning through the card's modes (see is_mode_scanning).
282 void start_mode_scanning(unsigned card_index);
284 std::map<uint32_t, std::string> get_available_video_inputs(unsigned card_index) const {
285 assert(card_index < MAX_VIDEO_CARDS);
286 return cards[card_index].capture->get_available_video_inputs();
289 uint32_t get_current_video_input(unsigned card_index) const {
290 assert(card_index < MAX_VIDEO_CARDS);
291 return cards[card_index].capture->get_current_video_input();
294 void set_video_input(unsigned card_index, uint32_t input) {
295 assert(card_index < MAX_VIDEO_CARDS);
296 cards[card_index].capture->set_video_input(input);
299 std::map<uint32_t, std::string> get_available_audio_inputs(unsigned card_index) const {
300 assert(card_index < MAX_VIDEO_CARDS);
301 return cards[card_index].capture->get_available_audio_inputs();
304 uint32_t get_current_audio_input(unsigned card_index) const {
305 assert(card_index < MAX_VIDEO_CARDS);
306 return cards[card_index].capture->get_current_audio_input();
309 void set_audio_input(unsigned card_index, uint32_t input) {
310 assert(card_index < MAX_VIDEO_CARDS);
311 cards[card_index].capture->set_audio_input(input);
// FFmpeg-input-specific: the file/URL being played. Defined out-of-line.
314 std::string get_ffmpeg_filename(unsigned card_index) const;
316 void set_ffmpeg_filename(unsigned card_index, const std::string &filename);
// Live-adjust the x264 encoder bitrate (kbit/sec).
318 void change_x264_bitrate(unsigned rate_kbit) {
319 video_encoder->change_x264_bitrate(rate_kbit);
// Note: getters below return the _desired_ values; the mixer thread applies
// them before the next frame (see desired_output_card_index below).
322 int get_output_card_index() const { // -1 = no output, just stream.
323 return desired_output_card_index;
326 void set_output_card(int card_index) { // -1 = no output, just stream.
327 desired_output_card_index = card_index;
330 bool get_output_card_is_master() const {
331 return output_card_is_master;
334 std::map<uint32_t, bmusb::VideoMode> get_available_output_video_modes() const;
336 uint32_t get_output_video_mode() const {
337 return desired_output_video_mode;
340 void set_output_video_mode(uint32_t mode) {
341 desired_output_video_mode = mode;
// Toggle rendering of the timecode into the stream / printing it to stdout.
344 void set_display_timecode_in_stream(bool enable) {
345 display_timecode_in_stream = enable;
348 void set_display_timecode_on_stdout(bool enable) {
349 display_timecode_on_stdout = enable;
// Number of clients currently connected to the built-in HTTP server.
352 int64_t get_num_connected_clients() const {
353 return httpd.get_num_connected_clients();
// Theme menu plumbing; all forwarded to the Theme object.
356 Theme::MenuEntry *get_theme_menu() { return theme->get_theme_menu(); }
358 void theme_menu_entry_clicked(int lua_ref) { return theme->theme_menu_entry_clicked(lua_ref); }
360 void set_theme_menu_callback(std::function<void()> callback)
362 theme->set_theme_menu_callback(callback);
// Blocks until the mixer has produced the next frame (see frame_num below).
365 void wait_for_next_frame();
// Internal plumbing; implementations live in the .cpp file.
370 void configure_card(unsigned card_index, bmusb::CaptureInterface *capture, CardType card_type, DeckLinkOutput *output, bool is_srt_card);
371 void set_output_card_internal(int card_index); // Should only be called from the mixer thread.
// Frame callback from bmusb: receives one video+audio frame from a card.
372 void bm_frame(unsigned card_index, uint16_t timecode,
373 bmusb::FrameAllocator::Frame video_frame, size_t video_offset, bmusb::VideoFormat video_format,
374 bmusb::FrameAllocator::Frame audio_frame, size_t audio_offset, bmusb::AudioFormat audio_format);
375 void upload_texture_for_frame(
376 int field, bmusb::VideoFormat video_format,
377 size_t y_offset, size_t cbcr_offset, size_t video_offset,
378 PBOFrameAllocator::Userdata *userdata);
// USB hotplug notifications (add by device, remove by card index).
379 void bm_hotplug_add(libusb_device *dev);
380 void bm_hotplug_remove(unsigned card_index);
381 void place_rectangle(movit::Effect *resample_effect, movit::Effect *padding_effect, float x0, float y0, float x1, float y1);
383 void handle_hotplugged_cards();
384 void schedule_audio_resampling_tasks(unsigned dropped_frames, int num_samples_per_frame, int length_per_frame, bool is_preroll, std::chrono::steady_clock::time_point frame_timestamp);
385 std::string get_timecode_text() const;
386 void render_one_frame(int64_t duration);
387 void audio_thread_func();
388 void release_display_frame(DisplayFrame *frame);
// Current presentation time in seconds, derived from pts_int (TIMEBASE units).
392 double pts() { return double(pts_int) / TIMEBASE; }
393 void trim_queue(CaptureCard *card, size_t safe_queue_length);
394 std::pair<std::string, std::string> get_channels_json();
395 std::pair<std::string, std::string> get_channel_color_http(unsigned channel_idx);
// Number of configured video inputs; HTML (CEF) inputs counted separately.
398 unsigned num_video_inputs, num_html_inputs = 0;
// Offscreen surfaces, one per OpenGL context we need.
400 QSurface *mixer_surface, *h264_encoder_surface, *decklink_output_surface, *image_update_surface;
401 std::unique_ptr<movit::ResourcePool> resource_pool;
402 std::unique_ptr<Theme> theme;
403 std::atomic<unsigned> audio_source_channel{0};
404 std::atomic<int> master_clock_channel{0}; // Gets overridden by <output_card_index> if output_card_is_master == true.
405 int output_card_index = -1; // -1 for none.
406 uint32_t output_video_mode = -1;
407 bool output_card_is_master = false; // Only relevant if output_card_index != -1.
409 // The mechanics of changing the output card and modes are so intricately connected
410 // with the work the mixer thread is doing. Thus, we don't change it directly,
411 // we just set this variable instead, which signals to the mixer thread that
412 // it should do the change before the next frame. This simplifies locking
413 // considerations immensely.
414 std::atomic<int> desired_output_card_index{-1};
415 std::atomic<uint32_t> desired_output_video_mode{0};
417 std::unique_ptr<movit::EffectChain> display_chain;
418 std::unique_ptr<ChromaSubsampler> chroma_subsampler;
419 std::unique_ptr<v210Converter> v210_converter;
420 std::unique_ptr<VideoEncoder> video_encoder;
421 std::unique_ptr<MJPEGEncoder> mjpeg_encoder;
423 std::unique_ptr<TimecodeRenderer> timecode_renderer;
424 std::atomic<bool> display_timecode_in_stream{false};
425 std::atomic<bool> display_timecode_on_stdout{false};
427 // Effects part of <display_chain>. Owned by <display_chain>.
428 movit::YCbCrInput *display_input;
430 int64_t pts_int = 0; // In TIMEBASE units.
// Frame counter used by wait_for_next_frame(); guarded by frame_num_mutex.
432 mutable std::mutex frame_num_mutex;
433 std::condition_variable frame_num_updated;
434 unsigned frame_num = 0; // Under <frame_num_mutex>.
436 // Accumulated errors in number of 1/TIMEBASE audio samples. If OUTPUT_FREQUENCY divided by
437 // frame rate is integer, will always stay zero.
438 unsigned fractional_samples = 0;
440 // Monotonic counter that lets us know which slot was last turned into
441 // a fake capture. Used for SRT re-plugging.
442 unsigned fake_capture_counter = 0;
// Guards the cards[] array and related per-card state.
444 mutable std::mutex card_mutex;
445 bool has_bmusb_thread = false;
447 // If nullptr, the card is inactive, and will be hidden in the UI.
448 // Only fake capture cards can be inactive.
449 std::unique_ptr<bmusb::CaptureInterface> capture;
450 // If true, card must always be active (typically because it's one of the
451 // first cards, or because the theme has explicitly asked for it).
452 bool force_active = false;
453 bool is_fake_capture;
454 // If is_fake_capture is true, contains a monotonic timer value for when
455 // it was last changed. Otherwise undefined. Used for SRT re-plugging.
// NOTE(review): shadows Mixer::fake_capture_counter (different type) — confirm intended.
456 int fake_capture_counter;
457 std::string last_srt_stream_id = "<default, matches nothing>"; // Used for SRT re-plugging.
459 std::unique_ptr<DeckLinkOutput> output;
461 // CEF only delivers frames when it actually has a change.
462 // If we trim the queue for latency reasons, we could thus
463 // end up in a situation trimming a frame that was meant to
464 // be displayed for a long time, which is really suboptimal.
465 // Thus, if we drop the last frame we have, may_have_dropped_last_frame
466 // is set to true, and the next starvation event will trigger
467 // us requesting a CEF repaint.
468 bool is_cef_capture, may_have_dropped_last_frame = false;
470 // If this card is used for output (ie., output_card_index points to it),
471 // it cannot simultaneously be used for capture, so <capture> gets replaced
472 // by a FakeCapture. However, since reconstructing the real capture object
473 // with all its state can be annoying, it is not being deleted, just stopped
475 std::unique_ptr<bmusb::CaptureInterface> parked_capture;
479 // Stuff for the OpenGL context (for texture uploading).
480 QSurface *surface = nullptr;
483 RefCountedFrame frame;
484 int64_t length; // In TIMEBASE units.
486 unsigned field; // Which field (0 or 1) of the frame to use. Always 0 for progressive.
487 bool texture_uploaded = false;
488 unsigned dropped_frames = 0; // Number of dropped frames before this one.
489 std::chrono::steady_clock::time_point received_timestamp = std::chrono::steady_clock::time_point::min();
490 movit::RGBTriplet neutral_color{1.0f, 1.0f, 1.0f};
492 // Used for MJPEG encoding, and texture upload.
493 // width=0 or height=0 means a broken frame, ie., do not upload.
494 bmusb::VideoFormat video_format;
495 size_t video_offset, y_offset, cbcr_offset;
497 std::deque<NewFrame> new_frames;
498 std::condition_variable new_frames_changed; // Set whenever new_frames is changed.
499 QueueLengthPolicy queue_length_policy; // Refers to the "new_frames" queue.
501 std::vector<int32_t> new_raw_audio;
503 int last_timecode = -1; // Unwrapped.
505 JitterHistory jitter_history;
508 std::vector<std::pair<std::string, std::string>> labels;
509 std::atomic<int64_t> metric_input_received_frames{0};
510 std::atomic<int64_t> metric_input_duped_frames{0};
511 std::atomic<int64_t> metric_input_dropped_frames_jitter{0};
512 std::atomic<int64_t> metric_input_dropped_frames_error{0};
513 std::atomic<int64_t> metric_input_resets{0};
514 std::atomic<int64_t> metric_input_queue_length_frames{0};
516 std::atomic<int64_t> metric_input_has_signal_bool{-1};
517 std::atomic<int64_t> metric_input_is_connected_bool{-1};
518 std::atomic<int64_t> metric_input_interlaced_bool{-1};
519 std::atomic<int64_t> metric_input_width_pixels{-1};
520 std::atomic<int64_t> metric_input_height_pixels{-1};
521 std::atomic<int64_t> metric_input_frame_rate_nom{-1};
522 std::atomic<int64_t> metric_input_frame_rate_den{-1};
523 std::atomic<int64_t> metric_input_sample_rate_hz{-1};
526 SRTMetrics srt_metrics;
529 JitterHistory output_jitter_history;
530 CaptureCard cards[MAX_VIDEO_CARDS]; // Protected by <card_mutex>.
531 YCbCrInterpretation ycbcr_interpretation[MAX_VIDEO_CARDS]; // Protected by <card_mutex>.
532 movit::RGBTriplet last_received_neutral_color[MAX_VIDEO_CARDS]; // Used by the mixer thread only. Constructor-initialiezd.
533 std::unique_ptr<AudioMixer> audio_mixer; // Same as global_audio_mixer (see audio_mixer.h).
534 bool input_card_is_master_clock(unsigned card_index, unsigned master_card_index) const;
535 struct OutputFrameInfo {
536 int dropped_frames; // Since last frame.
537 int num_samples; // Audio samples needed for this output frame.
538 int64_t frame_duration; // In TIMEBASE units.
540 std::chrono::steady_clock::time_point frame_timestamp;
542 OutputFrameInfo get_one_frame_from_each_card(unsigned master_card_index, bool master_card_is_output, CaptureCard::NewFrame new_frames[MAX_VIDEO_CARDS], bool has_new_frame[MAX_VIDEO_CARDS], std::vector<int32_t> raw_audio[MAX_VIDEO_CARDS]);
544 std::string description_for_card(unsigned card_index);
545 static bool is_srt_card(const CaptureCard *card);
547 InputState input_state;
549 // Cards we have been noticed about being hotplugged, but haven't tried adding yet.
550 // Protected by its own mutex.
551 std::mutex hotplug_mutex;
552 std::vector<libusb_device *> hotplugged_cards;
554 std::vector<int> hotplugged_srt_cards;
// One UI-visible output channel (live, preview, or an input preview).
// Buffers the latest rendered frame and notifies registered callbacks.
557 class OutputChannel {
560 void output_frame(DisplayFrame &&frame);
561 bool get_display_frame(DisplayFrame *frame);
562 void add_frame_ready_callback(void *key, new_frame_ready_callback_t callback);
563 void remove_frame_ready_callback(void *key);
564 void set_transition_names_updated_callback(transition_names_updated_callback_t callback);
565 void set_name_updated_callback(name_updated_callback_t callback);
566 void set_color_updated_callback(color_updated_callback_t callback);
572 Mixer *parent = nullptr; // Not owned.
573 std::mutex frame_mutex;
574 DisplayFrame current_frame, ready_frame; // protected by <frame_mutex>
575 bool has_current_frame = false, has_ready_frame = false; // protected by <frame_mutex>
576 std::map<void *, new_frame_ready_callback_t> new_frame_ready_callbacks; // protected by <frame_mutex>
577 transition_names_updated_callback_t transition_names_updated_callback;
578 name_updated_callback_t name_updated_callback;
579 color_updated_callback_t color_updated_callback;
// Cached values so callbacks only fire on actual changes — confirm in .cpp.
581 std::vector<std::string> last_transition_names;
582 std::string last_name, last_color;
584 OutputChannel output_channel[NUM_OUTPUTS];
// Background threads: video mixing, audio processing, and SRT listening.
586 std::thread mixer_thread;
587 std::thread audio_thread;
589 std::thread srt_thread;
591 std::atomic<bool> should_quit{false};
592 std::atomic<bool> should_cut{false};
594 std::unique_ptr<ALSAOutput> alsa;
// Work items handed from the mixer thread to the audio thread.
600 std::chrono::steady_clock::time_point frame_timestamp;
602 std::mutex audio_mutex;
603 std::condition_variable audio_task_queue_changed;
604 std::queue<AudioTask> audio_task_queue; // Under audio_mutex.
606 // For mode scanning.
607 bool is_mode_scanning[MAX_VIDEO_CARDS]{ false };
608 std::vector<uint32_t> mode_scanlist[MAX_VIDEO_CARDS];
609 unsigned mode_scanlist_index[MAX_VIDEO_CARDS]{ 0 };
610 std::chrono::steady_clock::time_point last_mode_scan_change[MAX_VIDEO_CARDS];
// The global mixer instance, created in main().
613 extern Mixer *global_mixer;
615 #endif // !defined(_MIXER_H)