#ifndef _MIXER_H
#define _MIXER_H 1

// The actual video mixer, running in its own separate background thread.

#include <epoxy/gl.h>
#undef Success  // X11's <X.h> (dragged in via epoxy) #defines Success, which collides with names used elsewhere.
#include <stdbool.h>
#include <stdint.h>

#include <movit/effect_chain.h>
#include <movit/flat_input.h>
#include <condition_variable>
#include <cstddef>
#include <functional>
#include <memory>
#include <mutex>
#include <string>
#include <thread>
#include <vector>

#include "bmusb/bmusb.h"
#include "ebu_r128_proc.h"
#include "h264encode.h"
#include "httpd.h"
#include "pbo_frame_allocator.h"
#include "ref_counted_frame.h"
#include "ref_counted_gl_sync.h"
#include "resampler.h"
#include "theme.h"
#include "timebase.h"

class H264Encoder;
class QSurface;
namespace movit {
class Effect;
class EffectChain;
class FlatInput;
class ResourcePool;
class YCbCrInput;
}  // namespace movit

class QOpenGLContext;
class QSurfaceFormat;

class Mixer {
public:
        // The surface format is used for the offscreen surfaces backing the OpenGL contexts we need.
        Mixer(const QSurfaceFormat &format, unsigned num_cards);
        ~Mixer();
        void start();
        void quit();

        void transition_clicked(int transition_num);
        void channel_clicked(int preview_num);

        enum Output {
                OUTPUT_LIVE = 0,
                OUTPUT_PREVIEW,
                OUTPUT_INPUT0,  // 1, 2, 3, up to 15 follow numerically.
                NUM_OUTPUTS = 18
        };
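
        // Mapping from a capture card index to its preview output is purely
        // arithmetic. A minimal sketch (<card_index> is an illustrative variable,
        // not part of this class):
        //
        //   unsigned card_index = 2;  // Third input card.
        //   Mixer::Output output = Mixer::Output(Mixer::OUTPUT_INPUT0 + card_index);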

        struct DisplayFrame {
                // The chain for rendering this frame. To render a display frame,
                // first wait for <ready_fence>, then call <setup_chain>
                // to wire up all the inputs, and then finally call
                // chain->render_to_screen() or similar.
                movit::EffectChain *chain;
                std::function<void()> setup_chain;

                // Asserted when all the inputs are ready; you cannot render the chain
                // before this.
                RefCountedGLsync ready_fence;

                // Holds on to all the input frames needed for this display frame,
                // so they are not released while still rendering.
                std::vector<RefCountedFrame> input_frames;

                // Textures that should be released back to the resource pool
                // when this frame disappears, if any.
                // TODO: Refcount these as well?
                std::vector<GLuint> temp_textures;
        };
        // Implicitly frees the previous one if there's a new frame available.
        bool get_display_frame(Output output, DisplayFrame *frame) {
                return output_channel[output].get_display_frame(frame);
        }
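
        // A minimal usage sketch, following the protocol described in <DisplayFrame>
        // (assumes the calling thread has a current OpenGL context; <fbo>, <width>
        // and <height> are placeholders, and the exact way to pull the raw GLsync
        // out of <ready_fence> may differ):
        //
        //   Mixer::DisplayFrame frame;
        //   if (global_mixer->get_display_frame(Mixer::OUTPUT_LIVE, &frame)) {
        //           glWaitSync(frame.ready_fence.get(), /*flags=*/0, GL_TIMEOUT_IGNORED);
        //           frame.setup_chain();
        //           frame.chain->render_to_fbo(fbo, width, height);
        //   }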

        typedef std::function<void()> new_frame_ready_callback_t;
        void set_frame_ready_callback(Output output, new_frame_ready_callback_t callback)
        {
                output_channel[output].set_frame_ready_callback(callback);
        }
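
        // The callback is called from the mixer thread every time the given output
        // has a new frame, so it should only schedule a redraw, not render directly.
        // A sketch assuming a Qt widget pointer <widget> (illustrative only):
        //
        //   global_mixer->set_frame_ready_callback(Mixer::OUTPUT_LIVE, [widget]{
        //           QMetaObject::invokeMethod(widget, "update", Qt::AutoConnection);
        //   });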

        typedef std::function<void(float, float, float, float, float)> audio_level_callback_t;
        void set_audio_level_callback(audio_level_callback_t callback)
        {
                audio_level_callback = callback;
        }
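
        // The five floats carry audio level measurements (cf. <r128> and <peak>
        // below); the header does not name them, so the parameter names in this
        // registration sketch are illustrative only:
        //
        //   global_mixer->set_audio_level_callback(
        //           [](float a, float b, float c, float d, float e) {
        //                   // Feed the UI's level meters here.
        //           });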

        std::vector<std::string> get_transition_names()
        {
                return theme->get_transition_names(pts());
        }
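
        // The returned names are intended for UI transition buttons; clicking
        // button <i> corresponds to calling transition_clicked(i). A sketch with
        // the UI plumbing left out:
        //
        //   std::vector<std::string> names = global_mixer->get_transition_names();
        //   for (unsigned i = 0; i < names.size(); ++i) {
        //           // Label button i with names[i]; on click, call
        //           // global_mixer->transition_clicked(i).
        //   }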

        unsigned get_num_channels() const
        {
                return theme->get_num_channels();
        }

        std::string get_channel_name(unsigned channel) const
        {
                return theme->get_channel_name(channel);
        }

        bool get_supports_set_wb(unsigned channel) const
        {
                return theme->get_supports_set_wb(channel);
        }

private:
        void bm_frame(unsigned card_index, uint16_t timecode,
                FrameAllocator::Frame video_frame, size_t video_offset, uint16_t video_format,
                FrameAllocator::Frame audio_frame, size_t audio_offset, uint16_t audio_format);
        void place_rectangle(movit::Effect *resample_effect, movit::Effect *padding_effect, float x0, float y0, float x1, float y1);
        void thread_func();
        void subsample_chroma(GLuint src_tex, GLuint dst_tex);
        void release_display_frame(DisplayFrame *frame);
        double pts() { return double(pts_int) / TIMEBASE; }

        HTTPD httpd;
        unsigned num_cards;

        QSurface *mixer_surface, *h264_encoder_surface;
        std::unique_ptr<movit::ResourcePool> resource_pool;
        std::unique_ptr<Theme> theme;
        std::unique_ptr<movit::EffectChain> display_chain;
        GLuint cbcr_program_num;  // Owned by <resource_pool>.
        std::unique_ptr<H264Encoder> h264_encoder;

        // Effects part of <display_chain>. Owned by <display_chain>.
        movit::FlatInput *display_input;

        int64_t pts_int = 0;  // In TIMEBASE units.

        std::mutex bmusb_mutex;
        struct CaptureCard {
                BMUSBCapture *usb;
                std::unique_ptr<PBOFrameAllocator> frame_allocator;

                // Stuff for the OpenGL context (for texture uploading).
                QSurface *surface;
                QOpenGLContext *context;

                bool new_data_ready = false;  // Whether new_frame and new_frame_audio contain anything.
                bool should_quit = false;
                RefCountedFrame new_frame;
                GLsync new_data_ready_fence;  // Signaled when new_frame is ready for rendering.
                std::vector<float> new_frame_audio;
                std::condition_variable new_data_ready_changed;  // Notified whenever new_data_ready changes.
                unsigned dropped_frames = 0;  // Before new_frame.

                std::mutex audio_mutex;
                std::unique_ptr<Resampler> resampler;  // Under audio_mutex.
                int last_timecode = -1;  // Unwrapped.
        };
        CaptureCard cards[MAX_CARDS];  // protected by <bmusb_mutex>

        RefCountedFrame bmusb_current_rendering_frame[MAX_CARDS];

        class OutputChannel {
        public:
                ~OutputChannel();
                void output_frame(DisplayFrame frame);
                bool get_display_frame(DisplayFrame *frame);
                void set_frame_ready_callback(new_frame_ready_callback_t callback);

        private:
                friend class Mixer;

                Mixer *parent = nullptr;  // Not owned.
                std::mutex frame_mutex;
                DisplayFrame current_frame, ready_frame;  // protected by <frame_mutex>
                bool has_current_frame = false, has_ready_frame = false;  // protected by <frame_mutex>
                new_frame_ready_callback_t new_frame_ready_callback;
                bool has_new_frame_ready_callback = false;
        };
        OutputChannel output_channel[NUM_OUTPUTS];

        std::thread mixer_thread;
        bool should_quit = false;

        audio_level_callback_t audio_level_callback = nullptr;
        Ebu_r128_proc r128;

        // TODO: Implement oversampled peak detection.
        float peak = 0.0f;
};

extern Mixer *global_mixer;

#endif  // !defined(_MIXER_H)