/*
* copyright (c) 2010 Sveriges Television AB <info@casparcg.com>
*
*  This file is part of CasparCG.
*
*    CasparCG is free software: you can redistribute it and/or modify
*    it under the terms of the GNU General Public License as published by
*    the Free Software Foundation, either version 3 of the License, or
*    (at your option) any later version.
*
*    CasparCG is distributed in the hope that it will be useful,
*    but WITHOUT ANY WARRANTY; without even the implied warranty of
*    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
*    GNU General Public License for more details.
*
*    You should have received a copy of the GNU General Public License
*    along with CasparCG.  If not, see <http://www.gnu.org/licenses/>.
*
*/
#include "../StdAfx.h"

#include "frame_mixer_device.h"

#include "read_frame.h"
#include "write_frame.h"

#include "audio/audio_mixer.h"
#include "image/image_mixer.h"

#include <common/exception/exceptions.h>
#include <common/concurrency/executor.h>
#include <common/diagnostics/graph.h>
#include <common/utility/tweener.h>

#include <core/mixer/read_frame.h>
#include <core/mixer/write_frame.h>
#include <core/producer/frame/basic_frame.h>
#include <core/producer/frame/frame_factory.h>
#include <core/producer/frame/pixel_format.h>
#include <core/producer/frame/audio_transform.h>
#include <core/producer/frame/image_transform.h>

#include <core/video_format.h>

#include <boost/foreach.hpp>
#include <boost/fusion/container/map.hpp>
#include <boost/fusion/include/at_key.hpp>
#include <boost/noncopyable.hpp>
#include <boost/timer.hpp>

#include <algorithm>
#include <unordered_map>

namespace caspar { namespace core {

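// Interpolates a transform of type T from a source to a destination value over
// a fixed number of ticks, using the tweener curve selected by name
// (e.g. L"linear"). fetch() evaluates the current in-between value and
// fetch_and_tick() advances the local time before evaluating it.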
template<typename T>
class tweened_transform
{
    T source_;
    T dest_;
    int duration_;
    int time_;
    tweener_t tweener_;
public:
    tweened_transform()
        : duration_(0)
        , time_(0)
        , tweener_(get_tweener(L"linear")){}
    tweened_transform(const T& source, const T& dest, int duration, const std::wstring& tween = L"linear")
        : source_(source)
        , dest_(dest)
        , duration_(duration)
        , time_(0)
        , tweener_(get_tweener(tween)){}

    T fetch()
    {
        return time_ == duration_ ? dest_ : tween(static_cast<double>(time_), source_, dest_, static_cast<double>(duration_), tweener_);
    }

    T fetch_and_tick(int num)
    {
        time_ = std::min(time_ + num, duration_);
        return fetch();
    }
};

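// Pimpl for frame_mixer_device. Owns the image and audio mixers together with
// the root and per-layer transform state, and serializes all mixing work onto
// a single dedicated executor thread.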
struct frame_mixer_device::implementation : boost::noncopyable
{
    const core::video_format_desc format_desc_;

    safe_ptr<diagnostics::graph> diag_;
    boost::timer frame_timer_;
    boost::timer tick_timer_;

    audio_mixer audio_mixer_;
    image_mixer image_mixer_;

    std::function<void(const safe_ptr<const core::read_frame>&)> output_;

    typedef std::unordered_map<int, tweened_transform<core::image_transform>> image_transforms;
    typedef std::unordered_map<int, tweened_transform<core::audio_transform>> audio_transforms;

    boost::fusion::map<boost::fusion::pair<core::image_transform, image_transforms>,
                       boost::fusion::pair<core::audio_transform, audio_transforms>> transforms_;

    boost::fusion::map<boost::fusion::pair<core::image_transform, tweened_transform<core::image_transform>>,
                       boost::fusion::pair<core::audio_transform, tweened_transform<core::audio_transform>>> root_transforms_;

    executor executor_;
public:
    implementation(const core::video_format_desc& format_desc, const std::function<void(const safe_ptr<const core::read_frame>&)>& output)
        : format_desc_(format_desc)
        , diag_(diagnostics::create_graph(narrow(print())))
        , image_mixer_(format_desc)
        , output_(output)
        , executor_(L"frame_mixer_device")
    {
        diag_->add_guide("frame-time", 0.5f);
        diag_->set_color("frame-time", diagnostics::color(1.0f, 0.0f, 0.0f));
        diag_->set_color("tick-time", diagnostics::color(0.1f, 0.7f, 0.8f));
        diag_->set_color("input-buffer", diagnostics::color(1.0f, 1.0f, 0.0f));
        executor_.set_capacity(1);
        executor_.begin_invoke([]
        {
            // THREAD_PRIORITY_ABOVE_NORMAL is the thread-level constant expected by
            // SetThreadPriority; ABOVE_NORMAL_PRIORITY_CLASS is a process priority class.
            SetThreadPriority(GetCurrentThread(), THREAD_PRIORITY_ABOVE_NORMAL);
        });

        CASPAR_LOG(info) << print() << L" Successfully initialized.";
    }

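    // Composites the given layer frames into a single output image.
    // In interlaced modes the transforms are ticked twice per frame, producing two
    // field versions of each layer; the fields are interlaced only when their
    // transforms actually differ, otherwise the layer is rendered once.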
    boost::unique_future<safe_ptr<const host_buffer>> mix_image(std::map<int, safe_ptr<core::basic_frame>> frames)
    {
        auto& root_image_transform = boost::fusion::at_key<core::image_transform>(root_transforms_);
        auto& image_transforms = boost::fusion::at_key<core::image_transform>(transforms_);

        BOOST_FOREACH(auto& frame, frames)
        {
            image_mixer_.begin_layer();

            if(format_desc_.mode != core::video_mode::progressive)
            {
                auto frame1 = make_safe<core::basic_frame>(frame.second);
                auto frame2 = make_safe<core::basic_frame>(frame.second);

                frame1->get_image_transform() = root_image_transform.fetch_and_tick(1)*image_transforms[frame.first].fetch_and_tick(1);
                frame2->get_image_transform() = root_image_transform.fetch_and_tick(1)*image_transforms[frame.first].fetch_and_tick(1);

                if(frame1->get_image_transform() != frame2->get_image_transform())
                    core::basic_frame::interlace(frame1, frame2, format_desc_.mode)->accept(image_mixer_);
                else
                    frame2->accept(image_mixer_);
            }
            else
            {
                auto frame1 = make_safe<core::basic_frame>(frame.second);
                frame1->get_image_transform() = root_image_transform.fetch_and_tick(1)*image_transforms[frame.first].fetch_and_tick(1);
                frame1->accept(image_mixer_);
            }

            image_mixer_.end_layer();
        }

        return image_mixer_.render();
    }

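    // Mixes the audio of the given layer frames into a single sample buffer.
    // Audio transforms are ticked once per progressive frame and twice per
    // interlaced frame so that they stay in step with the image transforms.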
    std::vector<short> mix_audio(const std::map<int, safe_ptr<core::basic_frame>>& frames)
    {
        auto& root_audio_transform = boost::fusion::at_key<core::audio_transform>(root_transforms_);
        auto& audio_transforms = boost::fusion::at_key<core::audio_transform>(transforms_);

        audio_mixer_.begin_pass();
        BOOST_FOREACH(auto& frame, frames)
        {
            int num = format_desc_.mode == core::video_mode::progressive ? 1 : 2;

            auto frame1 = make_safe<core::basic_frame>(frame.second);
            frame1->get_audio_transform() = root_audio_transform.fetch_and_tick(num)*audio_transforms[frame.first].fetch_and_tick(num);
            frame1->accept(audio_mixer_);
        }
        return audio_mixer_.end_pass();
    }

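    // Runs one mixing pass on the executor: renders image and audio for the given
    // layer frames, hands the finished read_frame to the output callback and
    // reports frame/tick timings and input-buffer fill to the diagnostics graph.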
    void send(const std::map<int, safe_ptr<core::basic_frame>>& frames)
    {
        executor_.invoke([=]
        {
            diag_->set_value("input-buffer", static_cast<float>(executor_.size())/static_cast<float>(executor_.capacity()));
            frame_timer_.restart();

            auto image = mix_image(frames);
            auto audio = mix_audio(frames);

            diag_->update_value("frame-time", static_cast<float>(frame_timer_.elapsed()*format_desc_.fps*0.5));

            output_(make_safe<const read_frame>(std::move(image), std::move(audio)));

            diag_->update_value("tick-time", static_cast<float>(tick_timer_.elapsed()*format_desc_.fps*0.5));
            tick_timer_.restart();
        });
        diag_->set_value("input-buffer", static_cast<float>(executor_.size())/static_cast<float>(executor_.capacity()));
    }

    safe_ptr<core::write_frame> create_frame(void* tag, const core::pixel_format_desc& desc)
    {
        return image_mixer_.create_frame(tag, desc);
    }

    template<typename T>
    void set_transform(const T& transform, int mix_duration, const std::wstring& tween)
    {
        executor_.invoke([&]
        {
            auto& root = boost::fusion::at_key<T>(root_transforms_);

            auto src = root.fetch();
            auto dst = transform;
            root = tweened_transform<T>(src, dst, mix_duration, tween);
        });
    }

    template<typename T>
    void set_transform(int index, const T& transform, int mix_duration, const std::wstring& tween)
    {
        executor_.invoke([&]
        {
            auto& transforms = boost::fusion::at_key<T>(transforms_);

            auto src = transforms[index].fetch();
            auto dst = transform;
            transforms[index] = tweened_transform<T>(src, dst, mix_duration, tween);
        });
    }

    template<typename T>
    void apply_transform(const std::function<T(const T&)>& transform, int mix_duration, const std::wstring& tween)
    {
        return executor_.invoke([&]
        {
            auto& root = boost::fusion::at_key<T>(root_transforms_);

            auto src = root.fetch();
            auto dst = transform(src);
            root = tweened_transform<T>(src, dst, mix_duration, tween);
        });
    }

    template<typename T>
    void apply_transform(int index, const std::function<T(T)>& transform, int mix_duration, const std::wstring& tween)
    {
        executor_.invoke([&]
        {
            auto& transforms = boost::fusion::at_key<T>(transforms_);

            auto src = transforms[index].fetch();
            auto dst = transform(src);
            transforms[index] = tweened_transform<T>(src, dst, mix_duration, tween);
        });
    }

    template<typename T>
    void reset_transform(int mix_duration, const std::wstring& tween)
    {
        executor_.invoke([&]
        {
            auto& transforms = boost::fusion::at_key<T>(transforms_);

            BOOST_FOREACH(auto& t, transforms)
                t.second = tweened_transform<T>(t.second.fetch(), T(), mix_duration, tween);
            set_transform(T(), mix_duration, tween);
        });
    }

    template<typename T>
    void reset_transform(int index, int mix_duration, const std::wstring& tween)
    {
        executor_.invoke([&]
        {
            // Reset only the transform of the specified layer.
            set_transform(index, T(), mix_duration, tween);
        });
    }

    std::wstring print() const
    {
        return L"frame_mixer_device";
    }
};

frame_mixer_device::frame_mixer_device(const core::video_format_desc& format_desc, const std::function<void(const safe_ptr<const core::read_frame>&)>& output) : impl_(new implementation(format_desc, output)){}
frame_mixer_device::frame_mixer_device(frame_mixer_device&& other) : impl_(std::move(other.impl_)){}
void frame_mixer_device::send(const std::map<int, safe_ptr<core::basic_frame>>& frames){impl_->send(frames);}
const core::video_format_desc& frame_mixer_device::get_video_format_desc() const { return impl_->format_desc_; }
safe_ptr<core::write_frame> frame_mixer_device::create_frame(void* tag, const core::pixel_format_desc& desc){ return impl_->create_frame(tag, desc); }
safe_ptr<core::write_frame> frame_mixer_device::create_frame(void* tag, size_t width, size_t height, core::pixel_format::type pix_fmt)
{
    // Create a single-plane frame (4 bytes per pixel, e.g. bgra) with the given dimensions.
    core::pixel_format_desc desc;
    desc.pix_fmt = pix_fmt;
    desc.planes.push_back(core::pixel_format_desc::plane(width, height, 4));
    return create_frame(tag, desc);
}

safe_ptr<core::write_frame> frame_mixer_device::create_frame(void* tag, core::pixel_format::type pix_fmt)
{
    // Create a single-plane frame (4 bytes per pixel, e.g. bgra) with the output resolution.
    core::pixel_format_desc desc;
    desc.pix_fmt = pix_fmt;
    desc.planes.push_back(core::pixel_format_desc::plane(get_video_format_desc().width, get_video_format_desc().height, 4));
    return create_frame(tag, desc);
}
void frame_mixer_device::set_image_transform(const core::image_transform& transform, int mix_duration, const std::wstring& tween){impl_->set_transform<core::image_transform>(transform, mix_duration, tween);}
void frame_mixer_device::set_image_transform(int index, const core::image_transform& transform, int mix_duration, const std::wstring& tween){impl_->set_transform<core::image_transform>(index, transform, mix_duration, tween);}
void frame_mixer_device::set_audio_transform(const core::audio_transform& transform, int mix_duration, const std::wstring& tween){impl_->set_transform<core::audio_transform>(transform, mix_duration, tween);}
void frame_mixer_device::set_audio_transform(int index, const core::audio_transform& transform, int mix_duration, const std::wstring& tween){impl_->set_transform<core::audio_transform>(index, transform, mix_duration, tween);}
void frame_mixer_device::apply_image_transform(const std::function<core::image_transform(core::image_transform)>& transform, int mix_duration, const std::wstring& tween){impl_->apply_transform<core::image_transform>(transform, mix_duration, tween);}
void frame_mixer_device::apply_image_transform(int index, const std::function<core::image_transform(core::image_transform)>& transform, int mix_duration, const std::wstring& tween){impl_->apply_transform<core::image_transform>(index, transform, mix_duration, tween);}
void frame_mixer_device::apply_audio_transform(const std::function<core::audio_transform(core::audio_transform)>& transform, int mix_duration, const std::wstring& tween){impl_->apply_transform<core::audio_transform>(transform, mix_duration, tween);}
void frame_mixer_device::apply_audio_transform(int index, const std::function<core::audio_transform(core::audio_transform)>& transform, int mix_duration, const std::wstring& tween){impl_->apply_transform<core::audio_transform>(index, transform, mix_duration, tween);}
void frame_mixer_device::reset_image_transform(int mix_duration, const std::wstring& tween){impl_->reset_transform<core::image_transform>(mix_duration, tween);}
void frame_mixer_device::reset_image_transform(int index, int mix_duration, const std::wstring& tween){impl_->reset_transform<core::image_transform>(index, mix_duration, tween);}
void frame_mixer_device::reset_audio_transform(int mix_duration, const std::wstring& tween){impl_->reset_transform<core::audio_transform>(mix_duration, tween);}
void frame_mixer_device::reset_audio_transform(int index, int mix_duration, const std::wstring& tween){impl_->reset_transform<core::audio_transform>(index, mix_duration, tween);}

}}