// mixer.h -- from the nageru video mixer (git.sesse.net/nageru).
// Commit: "Create the previews dynamically, in a number determined by the theme."
1 #ifndef _MIXER_H
2 #define _MIXER_H 1
3
4 // The actual video mixer, running in its own separate background thread.
5
6 #include <epoxy/gl.h>
7 #undef Success
8 #include <stdbool.h>
9 #include <stdint.h>
10
11 #include <movit/effect_chain.h>
12 #include <movit/flat_input.h>
13 #include <condition_variable>
14 #include <cstddef>
15 #include <functional>
16 #include <memory>
17 #include <mutex>
18 #include <string>
19 #include <thread>
20 #include <vector>
21
22 #include "bmusb/bmusb.h"
23 #include "ebu_r128_proc.h"
24 #include "h264encode.h"
25 #include "httpd.h"
26 #include "pbo_frame_allocator.h"
27 #include "ref_counted_frame.h"
28 #include "ref_counted_gl_sync.h"
29 #include "resampler.h"
30 #include "theme.h"
31 #include "timebase.h"
32
// Forward declarations, so we do not have to pull in the full headers for
// types this header only refers to by pointer or reference.
class H264Encoder;
class QOpenGLContext;
class QSurface;
class QSurfaceFormat;

// Merged into a single namespace block (the original opened "namespace movit"
// twice in a row for no reason).
namespace movit {
class Effect;
class EffectChain;
class FlatInput;
class ResourcePool;
class YCbCrInput;
}  // namespace movit
47
48 class Mixer {
49 public:
50         // The surface format is used for offscreen destinations for OpenGL contexts we need.
51         Mixer(const QSurfaceFormat &format, unsigned num_cards);
52         ~Mixer();
53         void start();
54         void quit();
55
56         void transition_clicked(int transition_num);
57         void channel_clicked(int preview_num);
58
59         enum Output {
60                 OUTPUT_LIVE = 0,
61                 OUTPUT_PREVIEW,
62                 OUTPUT_INPUT0,
63                 OUTPUT_INPUT1,
64                 OUTPUT_INPUT2,
65                 OUTPUT_INPUT3,
66                 NUM_OUTPUTS
67         };
68
69         struct DisplayFrame {
70                 // The chain for rendering this frame. To render a display frame,
71                 // first wait for <ready_fence>, then call <setup_chain>
72                 // to wire up all the inputs, and then finally call
73                 // chain->render_to_screen() or similar.
74                 movit::EffectChain *chain;
75                 std::function<void()> setup_chain;
76
77                 // Asserted when all the inputs are ready; you cannot render the chain
78                 // before this.
79                 RefCountedGLsync ready_fence;
80
81                 // Holds on to all the input frames needed for this display frame,
82                 // so they are not released while still rendering.
83                 std::vector<RefCountedFrame> input_frames;
84
85                 // Textures that should be released back to the resource pool
86                 // when this frame disappears, if any.
87                 // TODO: Refcount these as well?
88                 std::vector<GLuint> temp_textures;
89         };
90         // Implicitly frees the previous one if there's a new frame available.
91         bool get_display_frame(Output output, DisplayFrame *frame) {
92                 return output_channel[output].get_display_frame(frame);
93         }
94
95         typedef std::function<void()> new_frame_ready_callback_t;
96         void set_frame_ready_callback(Output output, new_frame_ready_callback_t callback)
97         {
98                 output_channel[output].set_frame_ready_callback(callback);
99         }
100
101         typedef std::function<void(float, float, float, float, float)> audio_level_callback_t;
102         void set_audio_level_callback(audio_level_callback_t callback)
103         {
104                 audio_level_callback = callback;
105         }
106
107         std::vector<std::string> get_transition_names()
108         {
109                 return theme->get_transition_names(pts());
110         }
111
112         unsigned get_num_channels() const
113         {
114                 return theme->get_num_channels();
115         }
116
117 private:
118         void bm_frame(unsigned card_index, uint16_t timecode,
119                 FrameAllocator::Frame video_frame, size_t video_offset, uint16_t video_format,
120                 FrameAllocator::Frame audio_frame, size_t audio_offset, uint16_t audio_format);
121         void place_rectangle(movit::Effect *resample_effect, movit::Effect *padding_effect, float x0, float y0, float x1, float y1);
122         void thread_func();
123         void subsample_chroma(GLuint src_tex, GLuint dst_dst);
124         void release_display_frame(DisplayFrame *frame);
125         double pts() { return double(pts_int) / TIMEBASE; }
126
127         HTTPD httpd;
128         unsigned num_cards;
129
130         QSurface *mixer_surface, *h264_encoder_surface;
131         std::unique_ptr<movit::ResourcePool> resource_pool;
132         std::unique_ptr<Theme> theme;
133         std::unique_ptr<movit::EffectChain> display_chain;
134         GLuint cbcr_program_num;  // Owned by <resource_pool>.
135         std::unique_ptr<H264Encoder> h264_encoder;
136
137         // Effects part of <display_chain>. Owned by <display_chain>.
138         movit::FlatInput *display_input;
139
140         int64_t pts_int = 0;  // In TIMEBASE units.
141
142         std::mutex bmusb_mutex;
143         struct CaptureCard {
144                 BMUSBCapture *usb;
145                 std::unique_ptr<PBOFrameAllocator> frame_allocator;
146
147                 // Stuff for the OpenGL context (for texture uploading).
148                 QSurface *surface;
149                 QOpenGLContext *context;
150
151                 bool new_data_ready = false;  // Whether new_frame and new_frame_audio contains anything.
152                 bool should_quit = false;
153                 RefCountedFrame new_frame;
154                 GLsync new_data_ready_fence;  // Whether new_frame is ready for rendering.
155                 std::vector<float> new_frame_audio;
156                 std::condition_variable new_data_ready_changed;  // Set whenever new_data_ready is changed.
157                 unsigned dropped_frames = 0;  // Before new_frame.
158
159                 std::mutex audio_mutex;
160                 std::unique_ptr<Resampler> resampler;  // Under audio_mutex.
161                 int last_timecode = -1;  // Unwrapped.
162         };
163         CaptureCard cards[MAX_CARDS];  // protected by <bmusb_mutex>
164
165         RefCountedFrame bmusb_current_rendering_frame[MAX_CARDS];
166
167         class OutputChannel {
168         public:
169                 ~OutputChannel();
170                 void output_frame(DisplayFrame frame);
171                 bool get_display_frame(DisplayFrame *frame);
172                 void set_frame_ready_callback(new_frame_ready_callback_t callback);
173
174         private:
175                 friend class Mixer;
176
177                 Mixer *parent = nullptr;  // Not owned.
178                 std::mutex frame_mutex;
179                 DisplayFrame current_frame, ready_frame;  // protected by <frame_mutex>
180                 bool has_current_frame = false, has_ready_frame = false;  // protected by <frame_mutex>
181                 new_frame_ready_callback_t new_frame_ready_callback;
182                 bool has_new_frame_ready_callback = false;
183         };
184         OutputChannel output_channel[NUM_OUTPUTS];
185
186         std::thread mixer_thread;
187         bool should_quit = false;
188
189         audio_level_callback_t audio_level_callback = nullptr;
190         Ebu_r128_proc r128;
191
192         // TODO: Implement oversampled peak detection.
193         float peak = 0.0f;
194 };
195
196 extern Mixer *global_mixer;
197
198 #endif  // !defined(_MIXER_H)