X-Git-Url: https://git.sesse.net/?a=blobdiff_plain;f=mixer.h;h=c312ee3a756aed8e0ff35ccdc3b5c1d4947919ed;hb=12f9082b06c037b76dc3a653643bdaeaea89f2d2;hp=74d5cd7edd48412562b09c28136f632799477a73;hpb=08a7f1e36a4e59183b8b87f8a699f380cda12788;p=nageru

diff --git a/mixer.h b/mixer.h
index 74d5cd7..c312ee3 100644
--- a/mixer.h
+++ b/mixer.h
@@ -5,98 +5,211 @@
 #include 
 #undef Success
+#include 
+#include 
+
 #include 
+#include 
+#include 
+#include 
 #include 
-
-#include "bmusb.h"
+#include 
+#include 
+#include 
+#include 
+#include 
+
+#include "bmusb/bmusb.h"
+#include "ebu_r128_proc.h"
 #include "h264encode.h"
+#include "httpd.h"
 #include "pbo_frame_allocator.h"
+#include "ref_counted_frame.h"
 #include "ref_counted_gl_sync.h"
+#include "resampler.h"
+#include "theme.h"
+#include "timebase.h"
+#include "stereocompressor.h"
+#include "filter.h"
 
-#define NUM_CARDS 2
+class H264Encoder;
+class QSurface;
+
+namespace movit {
+class Effect;
+class EffectChain;
+class FlatInput;
+class ResourcePool;
+}  // namespace movit
 
 namespace movit {
 class YCbCrInput;
 }
 
 class QOpenGLContext;
-class QSurface;
+class QSurfaceFormat;
 
 class Mixer {
 public:
-	// The surfaces are used for offscreen destinations for OpenGL contexts we need.
-	// TODO: Figure out something slightly more generic.
-	Mixer(QSurface *surface1, QSurface *surface2, QSurface *surface3, QSurface *surface4);
+	// The surface format is used for offscreen destinations for OpenGL contexts we need.
+	Mixer(const QSurfaceFormat &format, unsigned num_cards);
 	~Mixer();
 	void start();
 	void quit();
 
-	enum Source {
-		SOURCE_INPUT1,
-		SOURCE_INPUT2,
-		SOURCE_SBS,
+	void transition_clicked(int transition_num);
+	void channel_clicked(int preview_num);
+
+	enum Output {
+		OUTPUT_LIVE = 0,
+		OUTPUT_PREVIEW,
+		OUTPUT_INPUT0,  // 1, 2, 3, up to 15 follow numerically.
+		NUM_OUTPUTS = 18
 	};
-	void cut(Source source);
 
 	struct DisplayFrame {
-		GLuint texnum;
-		RefCountedGLsync ready_fence;  // Asserted when the texture is done rendering.
+		// The chain for rendering this frame. To render a display frame,
+		// first wait for <ready_fence>, then call <setup_chain>
+		// to wire up all the inputs, and then finally call
+		// chain->render_to_screen() or similar.
+		movit::EffectChain *chain;
+		std::function<void()> setup_chain;
+
+		// Asserted when all the inputs are ready; you cannot render the chain
+		// before this.
+		RefCountedGLsync ready_fence;
+
+		// Holds on to all the input frames needed for this display frame,
+		// so they are not released while still rendering.
+		std::vector<RefCountedFrame> input_frames;
+
+		// Textures that should be released back to the resource pool
+		// when this frame disappears, if any.
+		// TODO: Refcount these as well?
+		std::vector<GLuint> temp_textures;
 	};
 
 	// Implicitly frees the previous one if there's a new frame available.
-	bool get_display_frame(DisplayFrame *frame);
+	bool get_display_frame(Output output, DisplayFrame *frame) {
+		return output_channel[output].get_display_frame(frame);
+	}
 
 	typedef std::function<void()> new_frame_ready_callback_t;
-	void set_frame_ready_fallback(new_frame_ready_callback_t callback);
+	void set_frame_ready_callback(Output output, new_frame_ready_callback_t callback)
+	{
+		output_channel[output].set_frame_ready_callback(callback);
+	}
+
+	typedef std::function audio_level_callback_t;
+	void set_audio_level_callback(audio_level_callback_t callback)
+	{
+		audio_level_callback = callback;
+	}
+
+	std::vector<std::string> get_transition_names()
+	{
+		return theme->get_transition_names(pts());
+	}
+
+	unsigned get_num_channels() const
+	{
+		return theme->get_num_channels();
+	}
+
+	std::string get_channel_name(unsigned channel) const
+	{
+		return theme->get_channel_name(channel);
+	}
+
+	bool get_supports_set_wb(unsigned channel) const
+	{
+		return theme->get_supports_set_wb(channel);
+	}
+
+	void set_wb(unsigned channel, double r, double g, double b) const
+	{
+		theme->set_wb(channel, r, g, b);
+	}
 
 private:
-	void bm_frame(int card_index, uint16_t timecode,
+	void bm_frame(unsigned card_index, uint16_t timecode,
 		FrameAllocator::Frame video_frame, size_t video_offset, uint16_t video_format,
 		FrameAllocator::Frame audio_frame, size_t audio_offset, uint16_t audio_format);
 	void place_rectangle(movit::Effect *resample_effect, movit::Effect *padding_effect, float x0, float y0, float x1, float y1);
 	void thread_func();
-
-	std::unique_ptr<PBOFrameAllocator> pbo_allocator1, pbo_allocator2;
-
-	QSurface *surface1, *surface2, *surface3, *surface4;
-	std::unique_ptr<movit::EffectChain> chain;
-	movit::ResourcePool *resource_pool;  // Owned by <chain>.
+	void process_audio_one_frame();
+	void subsample_chroma(GLuint src_tex, GLuint dst_dst);
+	void release_display_frame(DisplayFrame *frame);
+	double pts() { return double(pts_int) / TIMEBASE; }
+
+	HTTPD httpd;
+	unsigned num_cards;
+
+	QSurface *mixer_surface, *h264_encoder_surface;
+	std::unique_ptr<movit::ResourcePool> resource_pool;
+	std::unique_ptr<Theme> theme;
+	std::unique_ptr<movit::EffectChain> display_chain;
 	GLuint cbcr_program_num;  // Owned by <resource_pool>.
 	std::unique_ptr<H264Encoder> h264_encoder;
 
-	// Effects part of <chain>. Owned by <resource_pool>.
-	movit::YCbCrInput *input[NUM_CARDS];
-	movit::Effect *resample_effect, *resample2_effect;
-	movit::Effect *padding_effect, *padding2_effect;
-
-	Source current_source = SOURCE_INPUT1;
-	int frame = 0;
+	// Effects part of <display_chain>. Owned by <resource_pool>.
+	movit::FlatInput *display_input;
 
-	std::mutex display_frame_mutex;
-	DisplayFrame current_display_frame, ready_display_frame;  // protected by <display_frame_mutex>
-	bool has_current_display_frame = false, has_ready_display_frame = false;  // protected by <display_frame_mutex>
+	int64_t pts_int = 0;  // In TIMEBASE units.
 
 	std::mutex bmusb_mutex;
 	struct CaptureCard {
 		BMUSBCapture *usb;
+		std::unique_ptr<PBOFrameAllocator> frame_allocator;
 
-		// Threading stuff
-		bool thread_initialized = false;
+		// Stuff for the OpenGL context (for texture uploading).
 		QSurface *surface;
 		QOpenGLContext *context;
 
 		bool new_data_ready = false;  // Whether new_frame contains anything.
-		FrameAllocator::Frame new_frame;
+		bool should_quit = false;
+		RefCountedFrame new_frame;
 		GLsync new_data_ready_fence;  // Whether new_frame is ready for rendering.
 		std::condition_variable new_data_ready_changed;  // Set whenever new_data_ready is changed.
-	};
-	CaptureCard cards[NUM_CARDS];  // protected by <bmusb_mutex>
-
-	FrameAllocator::Frame bmusb_current_rendering_frame[NUM_CARDS];
+		unsigned dropped_frames = 0;  // Before new_frame.
 
-	new_frame_ready_callback_t new_frame_ready_callback;
-	bool has_new_frame_ready_callback = false;
+		std::mutex audio_mutex;
+		std::unique_ptr<Resampler> resampler;  // Under audio_mutex.
+		int last_timecode = -1;  // Unwrapped.
+	};
+	CaptureCard cards[MAX_CARDS];  // protected by <bmusb_mutex>
+
+	RefCountedFrame bmusb_current_rendering_frame[MAX_CARDS];
+
+	class OutputChannel {
+	public:
+		~OutputChannel();
+		void output_frame(DisplayFrame frame);
+		bool get_display_frame(DisplayFrame *frame);
+		void set_frame_ready_callback(new_frame_ready_callback_t callback);
+
+	private:
+		friend class Mixer;
+
+		Mixer *parent = nullptr;  // Not owned.
+		std::mutex frame_mutex;
+		DisplayFrame current_frame, ready_frame;  // protected by <frame_mutex>
+		bool has_current_frame = false, has_ready_frame = false;  // protected by <frame_mutex>
+		new_frame_ready_callback_t new_frame_ready_callback;
+		bool has_new_frame_ready_callback = false;
+	};
+	OutputChannel output_channel[NUM_OUTPUTS];
 
 	std::thread mixer_thread;
 	bool should_quit = false;
+
+	audio_level_callback_t audio_level_callback = nullptr;
+	Ebu_r128_proc r128;
+
+	// TODO: Implement oversampled peak detection.
+	float peak = 0.0f;
+
+	StereoFilter locut;  // Cutoff 150 Hz, 24 dB/oct.
+
+	// First compressor; takes us up to about -12 dBFS.
+	StereoCompressor level_compressor;
 };
 
 extern Mixer *global_mixer;
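
For context, a minimal consumer-side sketch of the new per-output API, based only on the declarations and comments in the header above. It assumes new_frame_ready_callback_t is a plain void() callback, that RefCountedGLsync exposes its underlying GLsync via get(), and that drawing happens with a suitable OpenGL context current; hook_up_preview and draw_preview are illustrative names, not part of Nageru.

#include <epoxy/gl.h>

#include "mixer.h"

// Ask the mixer to notify us whenever OUTPUT_PREVIEW has a new frame.
void hook_up_preview(Mixer *mixer)
{
	mixer->set_frame_ready_callback(Mixer::OUTPUT_PREVIEW, []{
		// Typically you would wake up the UI thread here (e.g. schedule
		// a repaint) rather than render directly from the callback.
	});
}

// Called on the UI thread with an OpenGL context current.
void draw_preview(Mixer *mixer)
{
	Mixer::DisplayFrame frame;
	if (!mixer->get_display_frame(Mixer::OUTPUT_PREVIEW, &frame)) {
		return;  // No frame ready yet.
	}

	// Per the DisplayFrame comment: wait for the inputs to be ready...
	// (assumes RefCountedGLsync::get() returns the underlying GLsync).
	glWaitSync(frame.ready_fence.get(), /*flags=*/0, GL_TIMEOUT_IGNORED);

	// ...then wire up the inputs, and finally render the chain.
	frame.setup_chain();
	frame.chain->render_to_screen();
}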