#include <epoxy/gl.h>
#undef Success
+#include <stdbool.h>
+#include <stdint.h>
+
#include <movit/effect_chain.h>
#include <movit/flat_input.h>
-#include <ebur128.h>
+#include <condition_variable>
+#include <cstddef>
#include <functional>
+#include <memory>
+#include <mutex>
+#include <string>
+#include <thread>
+#include <vector>
#include "bmusb/bmusb.h"
+#include "ebu_r128_proc.h"
#include "h264encode.h"
+#include "httpd.h"
#include "pbo_frame_allocator.h"
#include "ref_counted_frame.h"
#include "ref_counted_gl_sync.h"
-#include "theme.h"
#include "resampler.h"
+#include "theme.h"
#include "timebase.h"
-#include "httpd.h"
+#include "stereocompressor.h"
+#include "filter.h"
-#define NUM_CARDS 2
+class H264Encoder;
+class QSurface;
+namespace movit {
+class Effect;
+class EffectChain;
+class FlatInput;
+class ResourcePool;
+} // namespace movit
namespace movit {
class YCbCrInput;
class Mixer {
public:
// The surface format is used for offscreen destinations for OpenGL contexts we need.
- Mixer(const QSurfaceFormat &format);
+ Mixer(const QSurfaceFormat &format, unsigned num_cards);
~Mixer();
void start();
void quit();
enum Output {
OUTPUT_LIVE = 0,
OUTPUT_PREVIEW,
- OUTPUT_INPUT0,
- OUTPUT_INPUT1,
- OUTPUT_INPUT2,
- OUTPUT_INPUT3,
- NUM_OUTPUTS
+ OUTPUT_INPUT0, // 1, 2, 3, up to 15 follow numerically.
+ NUM_OUTPUTS = 18
};
struct DisplayFrame {
return theme->get_transition_names(pts());
}
+ unsigned get_num_channels() const
+ {
+ return theme->get_num_channels();
+ }
+
+ std::string get_channel_name(unsigned channel) const
+ {
+ return theme->get_channel_name(channel);
+ }
+
+ bool get_supports_set_wb(unsigned channel) const
+ {
+ return theme->get_supports_set_wb(channel);
+ }
+
+ void set_wb(unsigned channel, double r, double g, double b) const
+ {
+ theme->set_wb(channel, r, g, b);
+ }
+
private:
- void bm_frame(int card_index, uint16_t timecode,
+ void bm_frame(unsigned card_index, uint16_t timecode,
FrameAllocator::Frame video_frame, size_t video_offset, uint16_t video_format,
FrameAllocator::Frame audio_frame, size_t audio_offset, uint16_t audio_format);
void place_rectangle(movit::Effect *resample_effect, movit::Effect *padding_effect, float x0, float y0, float x1, float y1);
void thread_func();
+ void process_audio_one_frame();
void subsample_chroma(GLuint src_tex, GLuint dst_dst);
void release_display_frame(DisplayFrame *frame);
double pts() { return double(pts_int) / TIMEBASE; }
HTTPD httpd;
+ unsigned num_cards;
QSurface *mixer_surface, *h264_encoder_surface;
std::unique_ptr<movit::ResourcePool> resource_pool;
QSurface *surface;
QOpenGLContext *context;
- bool new_data_ready = false; // Whether new_frame and new_frame_audio contains anything.
+ bool new_data_ready = false; // Whether new_frame contains anything.
bool should_quit = false;
RefCountedFrame new_frame;
GLsync new_data_ready_fence;  // Fence that is signaled once new_frame is ready for rendering.
- std::vector<float> new_frame_audio;
std::condition_variable new_data_ready_changed; // Set whenever new_data_ready is changed.
unsigned dropped_frames = 0; // Before new_frame.
std::unique_ptr<Resampler> resampler; // Under audio_mutex.
int last_timecode = -1; // Unwrapped.
};
- CaptureCard cards[NUM_CARDS]; // protected by <bmusb_mutex>
+ CaptureCard cards[MAX_CARDS]; // protected by <bmusb_mutex>
- RefCountedFrame bmusb_current_rendering_frame[NUM_CARDS];
+ RefCountedFrame bmusb_current_rendering_frame[MAX_CARDS];
class OutputChannel {
public:
bool should_quit = false;
audio_level_callback_t audio_level_callback = nullptr;
- ebur128_state *r128_state = nullptr;
+ Ebu_r128_proc r128;
+
+ // TODO: Implement oversampled peak detection.
+ float peak = 0.0f;
+
+ StereoFilter locut; // Cutoff 150 Hz, 24 dB/oct.
+
+ // First compressor; takes us up to about -12 dBFS.
+ StereoCompressor level_compressor;
};
extern Mixer *global_mixer;