git.sesse.net Git - nageru/blob - mixer.h
Move the audio handling into its own function.
[nageru] / mixer.h
1 #ifndef _MIXER_H
2 #define _MIXER_H 1
3
4 // The actual video mixer, running in its own separate background thread.
5
6 #include <epoxy/gl.h>
7 #undef Success
8 #include <stdbool.h>
9 #include <stdint.h>
10
11 #include <movit/effect_chain.h>
12 #include <movit/flat_input.h>
13 #include <condition_variable>
14 #include <cstddef>
15 #include <functional>
16 #include <memory>
17 #include <mutex>
18 #include <string>
19 #include <thread>
20 #include <vector>
21
22 #include "bmusb/bmusb.h"
23 #include "ebu_r128_proc.h"
24 #include "h264encode.h"
25 #include "httpd.h"
26 #include "pbo_frame_allocator.h"
27 #include "ref_counted_frame.h"
28 #include "ref_counted_gl_sync.h"
29 #include "resampler.h"
30 #include "theme.h"
31 #include "timebase.h"
32
33 class H264Encoder;
34 class QSurface;
// Forward declarations of Movit types used only by pointer/reference here;
// the full definitions come from the <movit/...> headers where needed.
// (Previously the namespace was reopened a second time just to declare
// YCbCrInput; all forward declarations are merged into one block.)
namespace movit {
class Effect;
class EffectChain;
class FlatInput;
class ResourcePool;
class YCbCrInput;
}  // namespace movit
45 class QOpenGLContext;
46 class QSurfaceFormat;
47
48 class Mixer {
49 public:
50         // The surface format is used for offscreen destinations for OpenGL contexts we need.
51         Mixer(const QSurfaceFormat &format, unsigned num_cards);
52         ~Mixer();
53         void start();
54         void quit();
55
56         void transition_clicked(int transition_num);
57         void channel_clicked(int preview_num);
58
59         enum Output {
60                 OUTPUT_LIVE = 0,
61                 OUTPUT_PREVIEW,
62                 OUTPUT_INPUT0,  // 1, 2, 3, up to 15 follow numerically.
63                 NUM_OUTPUTS = 18
64         };
65
66         struct DisplayFrame {
67                 // The chain for rendering this frame. To render a display frame,
68                 // first wait for <ready_fence>, then call <setup_chain>
69                 // to wire up all the inputs, and then finally call
70                 // chain->render_to_screen() or similar.
71                 movit::EffectChain *chain;
72                 std::function<void()> setup_chain;
73
74                 // Asserted when all the inputs are ready; you cannot render the chain
75                 // before this.
76                 RefCountedGLsync ready_fence;
77
78                 // Holds on to all the input frames needed for this display frame,
79                 // so they are not released while still rendering.
80                 std::vector<RefCountedFrame> input_frames;
81
82                 // Textures that should be released back to the resource pool
83                 // when this frame disappears, if any.
84                 // TODO: Refcount these as well?
85                 std::vector<GLuint> temp_textures;
86         };
87         // Implicitly frees the previous one if there's a new frame available.
88         bool get_display_frame(Output output, DisplayFrame *frame) {
89                 return output_channel[output].get_display_frame(frame);
90         }
91
92         typedef std::function<void()> new_frame_ready_callback_t;
93         void set_frame_ready_callback(Output output, new_frame_ready_callback_t callback)
94         {
95                 output_channel[output].set_frame_ready_callback(callback);
96         }
97
98         typedef std::function<void(float, float, float, float, float)> audio_level_callback_t;
99         void set_audio_level_callback(audio_level_callback_t callback)
100         {
101                 audio_level_callback = callback;
102         }
103
104         std::vector<std::string> get_transition_names()
105         {
106                 return theme->get_transition_names(pts());
107         }
108
109         unsigned get_num_channels() const
110         {
111                 return theme->get_num_channels();
112         }
113
114         std::string get_channel_name(unsigned channel) const
115         {
116                 return theme->get_channel_name(channel);
117         }
118
119         bool get_supports_set_wb(unsigned channel) const
120         {
121                 return theme->get_supports_set_wb(channel);
122         }
123
124         void set_wb(unsigned channel, double r, double g, double b) const
125         {
126                 theme->set_wb(channel, r, g, b);
127         }
128
129 private:
130         void bm_frame(unsigned card_index, uint16_t timecode,
131                 FrameAllocator::Frame video_frame, size_t video_offset, uint16_t video_format,
132                 FrameAllocator::Frame audio_frame, size_t audio_offset, uint16_t audio_format);
133         void place_rectangle(movit::Effect *resample_effect, movit::Effect *padding_effect, float x0, float y0, float x1, float y1);
134         void thread_func();
135         void process_audio_one_frame();
136         void subsample_chroma(GLuint src_tex, GLuint dst_dst);
137         void release_display_frame(DisplayFrame *frame);
138         double pts() { return double(pts_int) / TIMEBASE; }
139
140         HTTPD httpd;
141         unsigned num_cards;
142
143         QSurface *mixer_surface, *h264_encoder_surface;
144         std::unique_ptr<movit::ResourcePool> resource_pool;
145         std::unique_ptr<Theme> theme;
146         std::unique_ptr<movit::EffectChain> display_chain;
147         GLuint cbcr_program_num;  // Owned by <resource_pool>.
148         std::unique_ptr<H264Encoder> h264_encoder;
149
150         // Effects part of <display_chain>. Owned by <display_chain>.
151         movit::FlatInput *display_input;
152
153         int64_t pts_int = 0;  // In TIMEBASE units.
154
155         std::mutex bmusb_mutex;
156         struct CaptureCard {
157                 BMUSBCapture *usb;
158                 std::unique_ptr<PBOFrameAllocator> frame_allocator;
159
160                 // Stuff for the OpenGL context (for texture uploading).
161                 QSurface *surface;
162                 QOpenGLContext *context;
163
164                 bool new_data_ready = false;  // Whether new_frame and new_frame_audio contains anything.
165                 bool should_quit = false;
166                 RefCountedFrame new_frame;
167                 GLsync new_data_ready_fence;  // Whether new_frame is ready for rendering.
168                 std::vector<float> new_frame_audio;
169                 std::condition_variable new_data_ready_changed;  // Set whenever new_data_ready is changed.
170                 unsigned dropped_frames = 0;  // Before new_frame.
171
172                 std::mutex audio_mutex;
173                 std::unique_ptr<Resampler> resampler;  // Under audio_mutex.
174                 int last_timecode = -1;  // Unwrapped.
175         };
176         CaptureCard cards[MAX_CARDS];  // protected by <bmusb_mutex>
177
178         RefCountedFrame bmusb_current_rendering_frame[MAX_CARDS];
179
180         class OutputChannel {
181         public:
182                 ~OutputChannel();
183                 void output_frame(DisplayFrame frame);
184                 bool get_display_frame(DisplayFrame *frame);
185                 void set_frame_ready_callback(new_frame_ready_callback_t callback);
186
187         private:
188                 friend class Mixer;
189
190                 Mixer *parent = nullptr;  // Not owned.
191                 std::mutex frame_mutex;
192                 DisplayFrame current_frame, ready_frame;  // protected by <frame_mutex>
193                 bool has_current_frame = false, has_ready_frame = false;  // protected by <frame_mutex>
194                 new_frame_ready_callback_t new_frame_ready_callback;
195                 bool has_new_frame_ready_callback = false;
196         };
197         OutputChannel output_channel[NUM_OUTPUTS];
198
199         std::thread mixer_thread;
200         bool should_quit = false;
201
202         audio_level_callback_t audio_level_callback = nullptr;
203         Ebu_r128_proc r128;
204
205         // TODO: Implement oversampled peak detection.
206         float peak = 0.0f;
207 };
208
209 extern Mixer *global_mixer;
210
211 #endif  // !defined(_MIXER_H)