Re-run IWYU, again with lots of manual cleanup.
diff --git a/mixer.h b/mixer.h
index 7a0bfa3053fa6dc3bc4690ee0907fa533d0fbd9d..bf53c2482e23362e0cba1ff589cbf72d76d78dd0 100644
--- a/mixer.h
+++ b/mixer.h
@@ -5,22 +5,39 @@
 
 #include <epoxy/gl.h>
 #undef Success
+#include <stdbool.h>
+#include <stdint.h>
+
 #include <movit/effect_chain.h>
 #include <movit/flat_input.h>
-#include <ebur128.h>
+#include <condition_variable>
+#include <cstddef>
 #include <functional>
+#include <memory>
+#include <mutex>
+#include <string>
+#include <thread>
+#include <vector>
 
 #include "bmusb/bmusb.h"
+#include "ebu_r128_proc.h"
 #include "h264encode.h"
+#include "httpd.h"
 #include "pbo_frame_allocator.h"
 #include "ref_counted_frame.h"
 #include "ref_counted_gl_sync.h"
-#include "theme.h"
 #include "resampler.h"
+#include "theme.h"
 #include "timebase.h"
-#include "httpd.h"
 
-#define NUM_CARDS 2
+class H264Encoder;
+class QSurface;
+namespace movit {
+class Effect;
+class EffectChain;
+class FlatInput;
+class ResourcePool;
+}  // namespace movit
 
 namespace movit {
 class YCbCrInput;
@@ -31,7 +48,7 @@ class QSurfaceFormat;
 class Mixer {
 public:
        // The surface format is used for offscreen destinations for OpenGL contexts we need.
-       Mixer(const QSurfaceFormat &format);
+       Mixer(const QSurfaceFormat &format, unsigned num_cards);
        ~Mixer();
        void start();
        void quit();
@@ -81,7 +98,7 @@ public:
                output_channel[output].set_frame_ready_callback(callback);
        }
 
-       typedef std::function<void(float, float)> audio_level_callback_t;
+       typedef std::function<void(float, float, float, float, float)> audio_level_callback_t;
        void set_audio_level_callback(audio_level_callback_t callback)
        {
                audio_level_callback = callback;
@@ -93,7 +110,7 @@ public:
        }
 
 private:
-       void bm_frame(int card_index, uint16_t timecode,
+       void bm_frame(unsigned card_index, uint16_t timecode,
                FrameAllocator::Frame video_frame, size_t video_offset, uint16_t video_format,
                FrameAllocator::Frame audio_frame, size_t audio_offset, uint16_t audio_format);
        void place_rectangle(movit::Effect *resample_effect, movit::Effect *padding_effect, float x0, float y0, float x1, float y1);
@@ -103,6 +120,7 @@ private:
        double pts() { return double(pts_int) / TIMEBASE; }
 
        HTTPD httpd;
+       unsigned num_cards;
 
        QSurface *mixer_surface, *h264_encoder_surface;
        std::unique_ptr<movit::ResourcePool> resource_pool;
@@ -137,9 +155,9 @@ private:
                std::unique_ptr<Resampler> resampler;  // Under audio_mutex.
                int last_timecode = -1;  // Unwrapped.
        };
-       CaptureCard cards[NUM_CARDS];  // protected by <bmusb_mutex>
+       CaptureCard cards[MAX_CARDS];  // protected by <bmusb_mutex>
 
-       RefCountedFrame bmusb_current_rendering_frame[NUM_CARDS];
+       RefCountedFrame bmusb_current_rendering_frame[MAX_CARDS];
 
        class OutputChannel {
        public:
@@ -164,7 +182,10 @@ private:
        bool should_quit = false;
 
        audio_level_callback_t audio_level_callback = nullptr;
-       ebur128_state *r128_state = nullptr;
+       Ebu_r128_proc r128;
+
+       // TODO: Implement oversampled peak detection.
+       float peak = 0.0f;
 };
 
 extern Mixer *global_mixer;
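
For context, a minimal caller-side sketch of the changed Mixer API follows. It is not part of this commit: the card count (2), the wrapper function, and the lambda parameter names are illustrative assumptions, and what the five floats passed to the audio level callback represent is not spelled out in this header, so they are treated as opaque level measurements here.

#include <QSurfaceFormat>

#include "mixer.h"

void run_mixer_example(const QSurfaceFormat &format)
{
	// The number of capture cards is now a runtime constructor argument
	// instead of the old compile-time NUM_CARDS macro; the internal arrays
	// are sized by MAX_CARDS, so this value presumably has to stay within it.
	const unsigned num_cards = 2;

	Mixer mixer(format, num_cards);

	// The audio level callback was widened from two to five floats in this commit.
	mixer.set_audio_level_callback([](float a, float b, float c, float d, float e) {
		// Hand the measurements off to the UI thread for display.
	});

	mixer.start();
	// ... run the application's event loop ...
	mixer.quit();
}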