core/mixer/audio/audio_mixer.cpp
/*
* copyright (c) 2010 Sveriges Television AB <info@casparcg.com>
*
*  This file is part of CasparCG.
*
*    CasparCG is free software: you can redistribute it and/or modify
*    it under the terms of the GNU General Public License as published by
*    the Free Software Foundation, either version 3 of the License, or
*    (at your option) any later version.
*
*    CasparCG is distributed in the hope that it will be useful,
*    but WITHOUT ANY WARRANTY; without even the implied warranty of
*    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
*    GNU General Public License for more details.
*
*    You should have received a copy of the GNU General Public License
*    along with CasparCG.  If not, see <http://www.gnu.org/licenses/>.
*
*/
#include "../../stdafx.h"

#include "audio_mixer.h"

#include <core/mixer/write_frame.h>
#include <core/producer/frame/audio_transform.h>

#include <tbb/parallel_for.h>
#include <tbb/blocked_range.h>

#include <deque>
#include <map>
#include <stack>
#include <vector>

namespace caspar { namespace core {
        
struct audio_mixer::implementation
{
        std::deque<std::vector<short>> audio_data_;          // Mixed audio buffers; provides one pass of delay (see begin_pass).
        std::stack<core::audio_transform> transform_stack_;  // Combined transforms for the frame currently being traversed.

        std::map<int, core::audio_transform> prev_audio_transforms_; // Transforms seen during the previous pass, keyed by stream tag.
        std::map<int, core::audio_transform> next_audio_transforms_; // Transforms seen during the current pass.

public:
        implementation()
        {
                transform_stack_.push(core::audio_transform());

                // One frame of audio delay: the buffer filled during a pass is returned by the following begin_pass.
                audio_data_.push_back(std::vector<short>());
        }

        void begin(const core::basic_frame& frame)
        {
                // Push the frame's transform combined with the transforms already on the stack.
                transform_stack_.push(transform_stack_.top()*frame.get_audio_transform());
        }

        void visit(const core::write_frame& frame)
        {
                if(!transform_stack_.top().get_has_audio())
                        return;

                auto& audio_data = frame.audio_data();
                auto tag = frame.tag(); // Get the identifier for the audio-stream.

                // Assumes every stream delivers the same number of samples per frame.
                if(audio_data_.back().empty())
                        audio_data_.back().resize(audio_data.size(), 0);
                
                auto next = transform_stack_.top();
                auto prev = next;

                // Ramp from the transform used in the previous pass, if any, so gain changes are smoothed over one frame.
                auto it = prev_audio_transforms_.find(tag);
                if(it != prev_audio_transforms_.end())
                        prev = it->second;
                                
                next_audio_transforms_[tag] = next; // Store all active tags, inactive tags will be removed in end_pass.
                
                // Skip streams that are silent at both ends of the ramp.
                if(next.get_gain() < 0.001 && prev.get_gain() < 0.001)
                        return;
                
                static const int BASE = 1<<15; // Fixed-point scale used for gain values.

                auto next_gain = static_cast<int>(next.get_gain()*BASE);
                auto prev_gain = static_cast<int>(prev.get_gain()*BASE);
                
                int n_samples = static_cast<int>(audio_data_.back().size());

                tbb::parallel_for
                (
                        tbb::blocked_range<size_t>(0, audio_data.size()),
                        [&](const tbb::blocked_range<size_t>& r)
                        {
                                for(size_t n = r.begin(); n < r.end(); ++n)
                                {
                                        // Linear interpolation from prev_gain to next_gain across the buffer.
                                        int sample_gain = (prev_gain - (prev_gain * n)/n_samples) + (next_gain * n)/n_samples;
                                        
                                        int sample = (static_cast<int>(audio_data[n])*sample_gain)/BASE;
                                        
                                        // Accumulate into the mix buffer; the 16-bit truncation wraps rather than clamps on overflow.
                                        audio_data_.back()[n] = static_cast<short>((static_cast<int>(audio_data_.back()[n]) + sample) & 0xFFFF);
                                }
                        }
                );
        }
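
        // Worked example of the ramp above (illustrative, not part of the original
        // file): for a fade-in where the previous gain is 0.0 and the next gain is
        // 1.0, prev_gain = 0 and next_gain = 32768 (= BASE). At n = n_samples/2,
        // sample_gain is about 16384, so each source sample is scaled by roughly 0.5
        // before being accumulated; near n = n_samples-1 the scale approaches 1.0.
        // The gain therefore changes linearly over exactly one frame of audio.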

        void begin(const core::audio_transform& transform)
        {
                transform_stack_.push(transform_stack_.top()*transform);
        }
                
        void end()
        {
                transform_stack_.pop();
        }

        std::vector<short> begin_pass()
        {
                // Return the audio mixed during the previous pass and start a new buffer,
                // giving the mixer one frame of delay (see the constructor).
                auto result = std::move(audio_data_.front());
                audio_data_.pop_front();
                
                audio_data_.push_back(std::vector<short>());

                return result;
        }

        void end_pass()
        {
                // Keep only the transforms for streams that were visited during this pass.
                prev_audio_transforms_ = std::move(next_audio_transforms_);
        }
};

audio_mixer::audio_mixer() : impl_(new implementation()){}
void audio_mixer::begin(const core::basic_frame& frame){impl_->begin(frame);}
void audio_mixer::visit(core::write_frame& frame){impl_->visit(frame);}
void audio_mixer::end(){impl_->end();}
std::vector<short> audio_mixer::begin_pass(){return impl_->begin_pass();}
void audio_mixer::end_pass(){impl_->end_pass();}

}}
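
// Illustrative usage sketch (not part of the original file): the call order a
// single mix pass is expected to follow. The helper name and the flat frame
// collection are hypothetical; in practice the begin/visit/end calls are made
// while traversing the frame tree for the pass.
//
//      std::vector<short> mix_pass_sketch(caspar::core::audio_mixer& mixer,
//                                         std::vector<caspar::core::write_frame*>& frames)
//      {
//              auto result = mixer.begin_pass();        // audio mixed during the previous pass
//              for(size_t n = 0; n < frames.size(); ++n)
//              {
//                      mixer.begin(*frames[n]);         // push the frame's audio_transform
//                      mixer.visit(*frames[n]);         // accumulate samples with the gain ramp
//                      mixer.end();                     // pop the transform
//              }
//              mixer.end_pass();                        // swap prev/next transform maps
//              return result;
//      }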