/*
* copyright (c) 2010 Sveriges Television AB <info@casparcg.com>
*
*  This file is part of CasparCG.
*
*    CasparCG is free software: you can redistribute it and/or modify
*    it under the terms of the GNU General Public License as published by
*    the Free Software Foundation, either version 3 of the License, or
*    (at your option) any later version.
*
*    CasparCG is distributed in the hope that it will be useful,
*    but WITHOUT ANY WARRANTY; without even the implied warranty of
*    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
*    GNU General Public License for more details.
*
*    You should have received a copy of the GNU General Public License
*    along with CasparCG.  If not, see <http://www.gnu.org/licenses/>.
*
*/
#include "../StdAfx.h"

#include "frame_mixer_device.h"

#include "gpu/gpu_read_frame.h"
#include "gpu/gpu_write_frame.h"

#include "audio/audio_mixer.h"
#include "image/image_mixer.h"

#include <common/exception/exceptions.h>
#include <common/concurrency/executor.h>
#include <common/diagnostics/graph.h>
#include <common/utility/tweener.h>

#include <core/consumer/frame/read_frame.h>
#include <core/producer/frame/write_frame.h>
#include <core/producer/frame/frame_factory.h>
#include <core/producer/frame/pixel_format.h>
#include <core/producer/frame/audio_transform.h>
#include <core/producer/frame/image_transform.h>

#include <core/video_format.h>

#include <boost/foreach.hpp> // BOOST_FOREACH
#include <boost/fusion/container/map.hpp>
#include <boost/fusion/include/at_key.hpp>
#include <boost/timer.hpp>

#include <algorithm> // std::min
#include <unordered_map>
namespace caspar { namespace mixer {

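// Interpolates between a source and a destination transform over a fixed
// number of frames using the named tweener (e.g. L"linear").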
template<typename T>
class tweened_transform
{
	T source_;
	T dest_;
	int duration_;
	int time_;
	tweener_t tweener_;
public:
	tweened_transform()
		: duration_(0)
		, time_(0)
		, tweener_(get_tweener(L"linear")){}
	tweened_transform(const T& source, const T& dest, int duration, const std::wstring& tween = L"linear")
		: source_(source)
		, dest_(dest)
		, duration_(duration)
		, time_(0)
		, tweener_(get_tweener(tween)){}

	T fetch()
	{
		return time_ == duration_ ? dest_ : tween(static_cast<double>(time_), source_, dest_, static_cast<double>(duration_), tweener_);
	}

	T fetch_and_tick(int num)
	{
		time_ = std::min(time_ + num, duration_);
		return fetch();
	}
};

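// Pimpl for frame_mixer_device. Mixes the image and audio of the frames
// received for each layer on a dedicated executor thread, applies the
// per-layer and root image/audio transforms, and publishes the resulting
// read frame through output_.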
struct frame_mixer_device::implementation : boost::noncopyable
{
	const core::video_format_desc format_desc_;

	safe_ptr<diagnostics::graph> diag_;
	boost::timer frame_timer_;
	boost::timer tick_timer_;

	audio_mixer audio_mixer_;
	image_mixer image_mixer_;

	output_t output_;

	typedef std::unordered_map<int, tweened_transform<core::image_transform>> image_transforms;
	typedef std::unordered_map<int, tweened_transform<core::audio_transform>> audio_transforms;

	boost::fusion::map<boost::fusion::pair<core::image_transform, image_transforms>,
	                   boost::fusion::pair<core::audio_transform, audio_transforms>> transforms_;

	boost::fusion::map<boost::fusion::pair<core::image_transform, tweened_transform<core::image_transform>>,
	                   boost::fusion::pair<core::audio_transform, tweened_transform<core::audio_transform>>> root_transforms_;

	executor executor_;
public:
	implementation(const core::video_format_desc& format_desc)
		: format_desc_(format_desc)
		, diag_(diagnostics::create_graph(narrow(print())))
		, image_mixer_(format_desc)
		, executor_(L"frame_mixer_device", true)
	{
		diag_->add_guide("frame-time", 0.5f);
		diag_->set_color("frame-time", diagnostics::color(1.0f, 0.0f, 0.0f));
		diag_->set_color("tick-time", diagnostics::color(0.1f, 0.7f, 0.8f));
		diag_->set_color("input-buffer", diagnostics::color(1.0f, 1.0f, 0.0f));
		executor_.set_capacity(1);
		executor_.begin_invoke([]
		{
			// SetThreadPriority expects a THREAD_PRIORITY_* value;
			// ABOVE_NORMAL_PRIORITY_CLASS is a process priority class.
			SetThreadPriority(GetCurrentThread(), THREAD_PRIORITY_ABOVE_NORMAL);
		});

		CASPAR_LOG(info) << print() << L" Successfully initialized.";
	}

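	// Subscribes a consumer slot to the mixed output frames.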
	boost::signals2::connection connect(const output_t::slot_type& subscriber)
	{
		return output_.connect(subscriber);
	}

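	// Renders all layer images for one output frame. For interlaced formats
	// each layer's transform is sampled twice (one field per tick); if the
	// two field transforms differ the fields are interlaced, otherwise a
	// single frame is rendered. Returns a future to the finished host buffer
	// so audio can be mixed while the image pass completes.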
	boost::unique_future<safe_ptr<const host_buffer>> mix_image(std::map<int, safe_ptr<core::basic_frame>> frames)
	{
		auto& root_image_transform = boost::fusion::at_key<core::image_transform>(root_transforms_);
		auto& image_transforms = boost::fusion::at_key<core::image_transform>(transforms_);

		auto image = image_mixer_.begin_pass();
		BOOST_FOREACH(auto& frame, frames)
		{
			if(format_desc_.mode != core::video_mode::progressive)
			{
				auto frame1 = make_safe<core::basic_frame>(frame.second);
				auto frame2 = make_safe<core::basic_frame>(frame.second);

				frame1->get_image_transform() = root_image_transform.fetch_and_tick(1)*image_transforms[frame.first].fetch_and_tick(1);
				frame2->get_image_transform() = root_image_transform.fetch_and_tick(1)*image_transforms[frame.first].fetch_and_tick(1);

				if(frame1->get_image_transform() != frame2->get_image_transform())
					core::basic_frame::interlace(frame1, frame2, format_desc_.mode)->accept(image_mixer_);
				else
					frame2->accept(image_mixer_);
			}
			else
			{
				auto frame1 = make_safe<core::basic_frame>(frame.second);
				frame1->get_image_transform() = root_image_transform.fetch_and_tick(1)*image_transforms[frame.first].fetch_and_tick(1);
				frame1->accept(image_mixer_);
			}
		}
		image_mixer_.end_pass();
		return std::move(image);
	}

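	// Mixes the audio of all layers for one output frame. Audio transforms
	// are ticked twice per frame for interlaced formats so they stay in step
	// with the per-field image transforms.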
	std::vector<short> mix_audio(const std::map<int, safe_ptr<core::basic_frame>>& frames)
	{
		auto& root_audio_transform = boost::fusion::at_key<core::audio_transform>(root_transforms_);
		auto& audio_transforms = boost::fusion::at_key<core::audio_transform>(transforms_);

		auto audio = audio_mixer_.begin_pass();
		BOOST_FOREACH(auto& frame, frames)
		{
			int num = format_desc_.mode == core::video_mode::progressive ? 1 : 2;

			auto frame1 = make_safe<core::basic_frame>(frame.second);
			frame1->get_audio_transform() = root_audio_transform.fetch_and_tick(num)*audio_transforms[frame.first].fetch_and_tick(num);
			frame1->accept(audio_mixer_);
		}
		audio_mixer_.end_pass();
		return audio;
	}

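	// Queues one set of layer frames for mixing. The work runs on the mixer
	// executor: the image pass is started first, the audio is mixed on the
	// CPU while the image result is awaited, and the combined read frame is
	// then pushed to all connected subscribers.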
	void send(const std::map<int, safe_ptr<core::basic_frame>>& frames)
	{
		executor_.begin_invoke([=]
		{
			diag_->set_value("input-buffer", static_cast<float>(executor_.size())/static_cast<float>(executor_.capacity()));
			frame_timer_.restart();

			auto image_future = mix_image(frames);
			auto audio = mix_audio(frames);
			auto image = image_future.get();

			diag_->update_value("frame-time", static_cast<float>(frame_timer_.elapsed()*format_desc_.fps*0.5));

			output_(make_safe<const gpu_read_frame>(std::move(image), std::move(audio)));

			diag_->update_value("tick-time", static_cast<float>(tick_timer_.elapsed()*format_desc_.fps*0.5));
			tick_timer_.restart();
		});
		diag_->set_value("input-buffer", static_cast<float>(executor_.size())/static_cast<float>(executor_.capacity()));
	}

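	// Allocates GPU-backed write buffers for a producer frame; the tag
	// pointer is only passed on as an integer frame identifier.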
	safe_ptr<core::write_frame> create_frame(void* tag, const core::pixel_format_desc& desc)
	{
		return make_safe<gpu_write_frame>(reinterpret_cast<int>(tag), desc, image_mixer_.create_buffers(desc));
	}

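	// set_transform starts a tween from the currently interpolated value to
	// the given target over mix_duration frames; the first overload targets
	// the root transform (all layers), the second a single layer index.
	// All transform state is touched only on the mixer executor thread.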
	template<typename T>
	void set_transform(const T& transform, int mix_duration, const std::wstring& tween)
	{
		executor_.invoke([&]
		{
			auto& root = boost::fusion::at_key<T>(root_transforms_);

			auto src = root.fetch();
			auto dst = transform;
			root = tweened_transform<T>(src, dst, mix_duration, tween);
		});
	}

	template<typename T>
	void set_transform(int index, const T& transform, int mix_duration, const std::wstring& tween)
	{
		executor_.invoke([&]
		{
			auto& transforms = boost::fusion::at_key<T>(transforms_);

			auto src = transforms[index].fetch();
			auto dst = transform;
			transforms[index] = tweened_transform<T>(src, dst, mix_duration, tween);
		});
	}

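	// apply_transform works like set_transform, but derives the tween target
	// by applying the given function to the current transform value.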
	template<typename T>
	void apply_transform(const std::function<T(const T&)>& transform, int mix_duration, const std::wstring& tween)
	{
		executor_.invoke([&]
		{
			auto& root = boost::fusion::at_key<T>(root_transforms_);

			auto src = root.fetch();
			auto dst = transform(src);
			root = tweened_transform<T>(src, dst, mix_duration, tween);
		});
	}

	template<typename T>
	void apply_transform(int index, const std::function<T(T)>& transform, int mix_duration, const std::wstring& tween)
	{
		executor_.invoke([&]
		{
			auto& transforms = boost::fusion::at_key<T>(transforms_);

			auto src = transforms[index].fetch();
			auto dst = transform(src);
			transforms[index] = tweened_transform<T>(src, dst, mix_duration, tween);
		});
	}

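	// reset_transform tweens transforms back to their default value: the
	// first overload resets every layer plus the root transform, the second
	// a single layer.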
	template<typename T>
	void reset_transform(int mix_duration, const std::wstring& tween)
	{
		executor_.invoke([&]
		{
			auto& transforms = boost::fusion::at_key<T>(transforms_);

			BOOST_FOREACH(auto& t, transforms)
				t.second = tweened_transform<T>(t.second.fetch(), T(), mix_duration, tween);
			set_transform(T(), mix_duration, tween);
		});
	}

	template<typename T>
	void reset_transform(int index, int mix_duration, const std::wstring& tween)
	{
		// Reset the transform for the given layer only; set_transform already
		// marshals the update onto the mixer executor.
		set_transform(index, T(), mix_duration, tween);
	}

	std::wstring print() const
	{
		return L"frame_mixer_device";
	}
};

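// frame_mixer_device forwards its public interface to the pimpl implementation.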
frame_mixer_device::frame_mixer_device(const core::video_format_desc& format_desc) : impl_(new implementation(format_desc)){}
frame_mixer_device::frame_mixer_device(frame_mixer_device&& other) : impl_(std::move(other.impl_)){}
boost::signals2::connection frame_mixer_device::connect(const output_t::slot_type& subscriber){return impl_->connect(subscriber);}
void frame_mixer_device::send(const std::map<int, safe_ptr<core::basic_frame>>& frames){impl_->send(frames);}
const core::video_format_desc& frame_mixer_device::get_video_format_desc() const { return impl_->format_desc_; }
safe_ptr<core::write_frame> frame_mixer_device::create_frame(void* tag, const core::pixel_format_desc& desc){ return impl_->create_frame(tag, desc); }
safe_ptr<core::write_frame> frame_mixer_device::create_frame(void* tag, size_t width, size_t height, core::pixel_format::type pix_fmt)
{
	// Create a single-plane, 4-channel frame (e.g. bgra) with the given dimensions.
	core::pixel_format_desc desc;
	desc.pix_fmt = pix_fmt;
	desc.planes.push_back(core::pixel_format_desc::plane(width, height, 4));
	return create_frame(tag, desc);
}

safe_ptr<core::write_frame> frame_mixer_device::create_frame(void* tag, core::pixel_format::type pix_fmt)
{
	// Create a single-plane, 4-channel frame (e.g. bgra) at the output resolution.
	core::pixel_format_desc desc;
	desc.pix_fmt = pix_fmt;
	desc.planes.push_back(core::pixel_format_desc::plane(get_video_format_desc().width, get_video_format_desc().height, 4));
	return create_frame(tag, desc);
}
void frame_mixer_device::set_image_transform(const core::image_transform& transform, int mix_duration, const std::wstring& tween){impl_->set_transform<core::image_transform>(transform, mix_duration, tween);}
void frame_mixer_device::set_image_transform(int index, const core::image_transform& transform, int mix_duration, const std::wstring& tween){impl_->set_transform<core::image_transform>(index, transform, mix_duration, tween);}
void frame_mixer_device::set_audio_transform(const core::audio_transform& transform, int mix_duration, const std::wstring& tween){impl_->set_transform<core::audio_transform>(transform, mix_duration, tween);}
void frame_mixer_device::set_audio_transform(int index, const core::audio_transform& transform, int mix_duration, const std::wstring& tween){impl_->set_transform<core::audio_transform>(index, transform, mix_duration, tween);}
void frame_mixer_device::apply_image_transform(const std::function<core::image_transform(core::image_transform)>& transform, int mix_duration, const std::wstring& tween){impl_->apply_transform<core::image_transform>(transform, mix_duration, tween);}
void frame_mixer_device::apply_image_transform(int index, const std::function<core::image_transform(core::image_transform)>& transform, int mix_duration, const std::wstring& tween){impl_->apply_transform<core::image_transform>(index, transform, mix_duration, tween);}
void frame_mixer_device::apply_audio_transform(const std::function<core::audio_transform(core::audio_transform)>& transform, int mix_duration, const std::wstring& tween){impl_->apply_transform<core::audio_transform>(transform, mix_duration, tween);}
void frame_mixer_device::apply_audio_transform(int index, const std::function<core::audio_transform(core::audio_transform)>& transform, int mix_duration, const std::wstring& tween){impl_->apply_transform<core::audio_transform>(index, transform, mix_duration, tween);}
void frame_mixer_device::reset_image_transform(int mix_duration, const std::wstring& tween){impl_->reset_transform<core::image_transform>(mix_duration, tween);}
void frame_mixer_device::reset_image_transform(int index, int mix_duration, const std::wstring& tween){impl_->reset_transform<core::image_transform>(index, mix_duration, tween);}
void frame_mixer_device::reset_audio_transform(int mix_duration, const std::wstring& tween){impl_->reset_transform<core::audio_transform>(mix_duration, tween);}
void frame_mixer_device::reset_audio_transform(int index, int mix_duration, const std::wstring& tween){impl_->reset_transform<core::audio_transform>(index, mix_duration, tween);}

}}