#include <epoxy/gl.h>
#undef Success
+#include <stdbool.h>
+#include <stdint.h>
+
#include <movit/effect_chain.h>
+#include <movit/flat_input.h>
+#include <condition_variable>
+#include <cstddef>
#include <functional>
-
-#include "bmusb.h"
+#include <memory>
+#include <mutex>
+#include <string>
+#include <thread>
+#include <vector>
+
+#include "bmusb/bmusb.h"
+#include "ebu_r128_proc.h"
#include "h264encode.h"
+#include "httpd.h"
#include "pbo_frame_allocator.h"
+#include "ref_counted_frame.h"
#include "ref_counted_gl_sync.h"
-
-#define NUM_CARDS 2
+#include "resampler.h"
+#include "theme.h"
+#include "timebase.h"
+#include "stereocompressor.h"
+#include "filter.h"
+
+class H264Encoder;
+class QSurface;
+namespace movit {
+class Effect;
+class EffectChain;
+class FlatInput;
+class ResourcePool;
+} // namespace movit
-namespace movit {
-class YCbCrInput;
-}
class Mixer {
public:
// The surface format is used for offscreen destinations for OpenGL contexts we need.
- Mixer(const QSurfaceFormat &format);
+ Mixer(const QSurfaceFormat &format, unsigned num_cards);
~Mixer();
void start();
void quit();
- enum Source {
- SOURCE_INPUT1,
- SOURCE_INPUT2,
- SOURCE_SBS,
- };
- void cut(Source source);
+ void transition_clicked(int transition_num);
+ void channel_clicked(int preview_num);
enum Output {
OUTPUT_LIVE = 0,
OUTPUT_PREVIEW,
- NUM_OUTPUTS
+ OUTPUT_INPUT0, // 1, 2, 3, up to 15 follow numerically.
+ NUM_OUTPUTS = 18
};
struct DisplayFrame {
- GLuint texnum;
- RefCountedGLsync ready_fence; // Asserted when the texture is done rendering.
+ // The chain for rendering this frame. To render a display frame,
+ // first wait for <ready_fence>, then call <setup_chain>
+ // to wire up all the inputs, and then finally call
+ // chain->render_to_screen() or similar.
+ movit::EffectChain *chain;
+ std::function<void()> setup_chain;
+
+ // Asserted when all the inputs are ready; you cannot render the chain
+ // before this.
+ RefCountedGLsync ready_fence;
+
+ // Holds on to all the input frames needed for this display frame,
+ // so they are not released while still rendering.
+ std::vector<RefCountedFrame> input_frames;
+
+ // Textures that should be released back to the resource pool
+ // when this frame disappears, if any.
+ // TODO: Refcount these as well?
+ std::vector<GLuint> temp_textures;
};
// Implicitly frees the previous one if there's a new frame available.
	bool get_display_frame(Output output, DisplayFrame *frame) {
		return output_channel[output].get_display_frame(frame);
	}

	typedef std::function<void()> new_frame_ready_callback_t;
	void set_frame_ready_callback(Output output, new_frame_ready_callback_t callback)
	{
		output_channel[output].set_frame_ready_callback(callback);
	}
- // Ignored for OUTPUT_LIVE.
- void set_preview_size(Output output, int width, int height)
+ typedef std::function<void(float, float, float, float, float)> audio_level_callback_t;
+ void set_audio_level_callback(audio_level_callback_t callback)
+ {
+ audio_level_callback = callback;
+ }
+
+ std::vector<std::string> get_transition_names()
+ {
+ return theme->get_transition_names(pts());
+ }
+
+ unsigned get_num_channels() const
+ {
+ return theme->get_num_channels();
+ }
+
+ std::string get_channel_name(unsigned channel) const
{
- output_channel[output].set_size(width, height);
+ return theme->get_channel_name(channel);
+ }
+
+ bool get_supports_set_wb(unsigned channel) const
+ {
+ return theme->get_supports_set_wb(channel);
+ }
+
+ void set_wb(unsigned channel, double r, double g, double b) const
+ {
+ theme->set_wb(channel, r, g, b);
}
private:
- void bm_frame(int card_index, uint16_t timecode,
+ void bm_frame(unsigned card_index, uint16_t timecode,
FrameAllocator::Frame video_frame, size_t video_offset, uint16_t video_format,
FrameAllocator::Frame audio_frame, size_t audio_offset, uint16_t audio_format);
void place_rectangle(movit::Effect *resample_effect, movit::Effect *padding_effect, float x0, float y0, float x1, float y1);
void thread_func();
+ void process_audio_one_frame();
void subsample_chroma(GLuint src_tex, GLuint dst_dst);
void release_display_frame(DisplayFrame *frame);
+ double pts() { return double(pts_int) / TIMEBASE; }
+
+ HTTPD httpd;
+ unsigned num_cards;
QSurface *mixer_surface, *h264_encoder_surface;
std::unique_ptr<movit::ResourcePool> resource_pool;
- std::unique_ptr<movit::EffectChain> chain;
- std::unique_ptr<movit::EffectChain> preview_chain;
+ std::unique_ptr<Theme> theme;
+ std::unique_ptr<movit::EffectChain> display_chain;
GLuint cbcr_program_num; // Owned by <resource_pool>.
std::unique_ptr<H264Encoder> h264_encoder;
- // Effects part of <chain>. Owned by <chain>.
- movit::YCbCrInput *input[NUM_CARDS];
- movit::Effect *resample_effect, *resample2_effect;
- movit::Effect *padding_effect, *padding2_effect;
+ // Effects part of <display_chain>. Owned by <display_chain>.
+ movit::FlatInput *display_input;
- // Effects part of <preview_chain>. Owned by <preview_chain>.
- movit::YCbCrInput *preview_input;
-
- Source current_source = SOURCE_INPUT1;
- int frame = 0;
+ int64_t pts_int = 0; // In TIMEBASE units.
std::mutex bmusb_mutex;
struct CaptureCard {
BMUSBCapture *usb;
std::unique_ptr<PBOFrameAllocator> frame_allocator;
- // Threading stuff
- bool thread_initialized = false;
+ // Stuff for the OpenGL context (for texture uploading).
QSurface *surface;
QOpenGLContext *context;
bool new_data_ready = false; // Whether new_frame contains anything.
- FrameAllocator::Frame new_frame;
+ bool should_quit = false;
+ RefCountedFrame new_frame;
GLsync new_data_ready_fence; // Whether new_frame is ready for rendering.
std::condition_variable new_data_ready_changed; // Set whenever new_data_ready is changed.
+ unsigned dropped_frames = 0; // Before new_frame.
+
+ std::mutex audio_mutex;
+ std::unique_ptr<Resampler> resampler; // Under audio_mutex.
+ int last_timecode = -1; // Unwrapped.
};
- CaptureCard cards[NUM_CARDS]; // protected by <bmusb_mutex>
+ CaptureCard cards[MAX_CARDS]; // protected by <bmusb_mutex>
- FrameAllocator::Frame bmusb_current_rendering_frame[NUM_CARDS];
+ RefCountedFrame bmusb_current_rendering_frame[MAX_CARDS];
class OutputChannel {
public:
- void output_frame(GLuint tex, RefCountedGLsync fence);
+ ~OutputChannel();
+ void output_frame(DisplayFrame frame);
bool get_display_frame(DisplayFrame *frame);
void set_frame_ready_callback(new_frame_ready_callback_t callback);
- void set_size(int width, int height); // Ignored for OUTPUT_LIVE.
private:
friend class Mixer;
bool has_current_frame = false, has_ready_frame = false; // protected by <frame_mutex>
new_frame_ready_callback_t new_frame_ready_callback;
bool has_new_frame_ready_callback = false;
-
- int width = 1280, height = 720;
};
OutputChannel output_channel[NUM_OUTPUTS];
std::thread mixer_thread;
bool should_quit = false;
+
+ audio_level_callback_t audio_level_callback = nullptr;
+ Ebu_r128_proc r128;
+
+ // TODO: Implement oversampled peak detection.
+ float peak = 0.0f;
+
+ StereoFilter locut; // Cutoff 150 Hz, 24 dB/oct.
+
+ // First compressor; takes us up to about -12 dBFS.
+ StereoCompressor level_compressor;
};
extern Mixer *global_mixer;