git.sesse.net Git - nageru/blob - mixer.h
Re-run IWYU, again with lots of manual cleanup.
[nageru] / mixer.h
1 #ifndef _MIXER_H
2 #define _MIXER_H 1
3
4 // The actual video mixer, running in its own separate background thread.
5
6 #include <epoxy/gl.h>
7 #undef Success
8 #include <stdbool.h>
9 #include <stdint.h>
10
11 #include <movit/effect_chain.h>
12 #include <movit/flat_input.h>
13 #include <condition_variable>
14 #include <cstddef>
15 #include <functional>
16 #include <memory>
17 #include <mutex>
18 #include <string>
19 #include <thread>
20 #include <vector>
21
22 #include "bmusb/bmusb.h"
23 #include "ebu_r128_proc.h"
24 #include "h264encode.h"
25 #include "httpd.h"
26 #include "pbo_frame_allocator.h"
27 #include "ref_counted_frame.h"
28 #include "ref_counted_gl_sync.h"
29 #include "resampler.h"
30 #include "theme.h"
31 #include "timebase.h"
32
// Forward declarations, so this header does not have to pull in the
// corresponding headers for types we only hold pointers/references to.
class H264Encoder;
class QOpenGLContext;
class QSurface;
class QSurfaceFormat;

namespace movit {
class Effect;
class EffectChain;
class FlatInput;
class ResourcePool;
class YCbCrInput;
}  // namespace movit
47
48 class Mixer {
49 public:
50         // The surface format is used for offscreen destinations for OpenGL contexts we need.
51         Mixer(const QSurfaceFormat &format, unsigned num_cards);
52         ~Mixer();
53         void start();
54         void quit();
55
56         void transition_clicked(int transition_num);
57         void channel_clicked(int preview_num);
58
59         enum Output {
60                 OUTPUT_LIVE = 0,
61                 OUTPUT_PREVIEW,
62                 OUTPUT_INPUT0,
63                 OUTPUT_INPUT1,
64                 OUTPUT_INPUT2,
65                 OUTPUT_INPUT3,
66                 NUM_OUTPUTS
67         };
68
69         struct DisplayFrame {
70                 // The chain for rendering this frame. To render a display frame,
71                 // first wait for <ready_fence>, then call <setup_chain>
72                 // to wire up all the inputs, and then finally call
73                 // chain->render_to_screen() or similar.
74                 movit::EffectChain *chain;
75                 std::function<void()> setup_chain;
76
77                 // Asserted when all the inputs are ready; you cannot render the chain
78                 // before this.
79                 RefCountedGLsync ready_fence;
80
81                 // Holds on to all the input frames needed for this display frame,
82                 // so they are not released while still rendering.
83                 std::vector<RefCountedFrame> input_frames;
84
85                 // Textures that should be released back to the resource pool
86                 // when this frame disappears, if any.
87                 // TODO: Refcount these as well?
88                 std::vector<GLuint> temp_textures;
89         };
90         // Implicitly frees the previous one if there's a new frame available.
91         bool get_display_frame(Output output, DisplayFrame *frame) {
92                 return output_channel[output].get_display_frame(frame);
93         }
94
95         typedef std::function<void()> new_frame_ready_callback_t;
96         void set_frame_ready_callback(Output output, new_frame_ready_callback_t callback)
97         {
98                 output_channel[output].set_frame_ready_callback(callback);
99         }
100
101         typedef std::function<void(float, float, float, float, float)> audio_level_callback_t;
102         void set_audio_level_callback(audio_level_callback_t callback)
103         {
104                 audio_level_callback = callback;
105         }
106
107         std::vector<std::string> get_transition_names()
108         {
109                 return theme->get_transition_names(pts());
110         }
111
112 private:
113         void bm_frame(unsigned card_index, uint16_t timecode,
114                 FrameAllocator::Frame video_frame, size_t video_offset, uint16_t video_format,
115                 FrameAllocator::Frame audio_frame, size_t audio_offset, uint16_t audio_format);
116         void place_rectangle(movit::Effect *resample_effect, movit::Effect *padding_effect, float x0, float y0, float x1, float y1);
117         void thread_func();
118         void subsample_chroma(GLuint src_tex, GLuint dst_dst);
119         void release_display_frame(DisplayFrame *frame);
120         double pts() { return double(pts_int) / TIMEBASE; }
121
122         HTTPD httpd;
123         unsigned num_cards;
124
125         QSurface *mixer_surface, *h264_encoder_surface;
126         std::unique_ptr<movit::ResourcePool> resource_pool;
127         std::unique_ptr<Theme> theme;
128         std::unique_ptr<movit::EffectChain> display_chain;
129         GLuint cbcr_program_num;  // Owned by <resource_pool>.
130         std::unique_ptr<H264Encoder> h264_encoder;
131
132         // Effects part of <display_chain>. Owned by <display_chain>.
133         movit::FlatInput *display_input;
134
135         int64_t pts_int = 0;  // In TIMEBASE units.
136
137         std::mutex bmusb_mutex;
138         struct CaptureCard {
139                 BMUSBCapture *usb;
140                 std::unique_ptr<PBOFrameAllocator> frame_allocator;
141
142                 // Stuff for the OpenGL context (for texture uploading).
143                 QSurface *surface;
144                 QOpenGLContext *context;
145
146                 bool new_data_ready = false;  // Whether new_frame and new_frame_audio contains anything.
147                 bool should_quit = false;
148                 RefCountedFrame new_frame;
149                 GLsync new_data_ready_fence;  // Whether new_frame is ready for rendering.
150                 std::vector<float> new_frame_audio;
151                 std::condition_variable new_data_ready_changed;  // Set whenever new_data_ready is changed.
152                 unsigned dropped_frames = 0;  // Before new_frame.
153
154                 std::mutex audio_mutex;
155                 std::unique_ptr<Resampler> resampler;  // Under audio_mutex.
156                 int last_timecode = -1;  // Unwrapped.
157         };
158         CaptureCard cards[MAX_CARDS];  // protected by <bmusb_mutex>
159
160         RefCountedFrame bmusb_current_rendering_frame[MAX_CARDS];
161
162         class OutputChannel {
163         public:
164                 ~OutputChannel();
165                 void output_frame(DisplayFrame frame);
166                 bool get_display_frame(DisplayFrame *frame);
167                 void set_frame_ready_callback(new_frame_ready_callback_t callback);
168
169         private:
170                 friend class Mixer;
171
172                 Mixer *parent = nullptr;  // Not owned.
173                 std::mutex frame_mutex;
174                 DisplayFrame current_frame, ready_frame;  // protected by <frame_mutex>
175                 bool has_current_frame = false, has_ready_frame = false;  // protected by <frame_mutex>
176                 new_frame_ready_callback_t new_frame_ready_callback;
177                 bool has_new_frame_ready_callback = false;
178         };
179         OutputChannel output_channel[NUM_OUTPUTS];
180
181         std::thread mixer_thread;
182         bool should_quit = false;
183
184         audio_level_callback_t audio_level_callback = nullptr;
185         Ebu_r128_proc r128;
186
187         // TODO: Implement oversampled peak detection.
188         float peak = 0.0f;
189 };
190
191 extern Mixer *global_mixer;
192
193 #endif  // !defined(_MIXER_H)