// core/mixer/audio/audio_mixer.cpp
/*
* copyright (c) 2010 Sveriges Television AB <info@casparcg.com>
*
*  This file is part of CasparCG.
*
*    CasparCG is free software: you can redistribute it and/or modify
*    it under the terms of the GNU General Public License as published by
*    the Free Software Foundation, either version 3 of the License, or
*    (at your option) any later version.
*
*    CasparCG is distributed in the hope that it will be useful,
*    but WITHOUT ANY WARRANTY; without even the implied warranty of
*    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
*    GNU General Public License for more details.
*
*    You should have received a copy of the GNU General Public License
*    along with CasparCG.  If not, see <http://www.gnu.org/licenses/>.
*
*/
#include "../../stdafx.h"

#include "audio_mixer.h"

#include "../gpu/gpu_write_frame.h"

#include <core/producer/frame/audio_transform.h>

// Standard library and TBB headers used below (likely also pulled in via stdafx.h).
#include <deque>
#include <map>
#include <stack>
#include <vector>
#include <tbb/parallel_for.h>
#include <tbb/blocked_range.h>
namespace caspar { namespace mixer {

struct audio_mixer::implementation
{
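        // audio_data_: FIFO of mixed 16-bit sample buffers; pre-filled with two empty
        // buffers in the constructor so that output is delayed by two frames.
        // transform_stack_: audio transforms composed while the frame tree is traversed.
        // prev_/next_audio_transforms_: per-stream (keyed by frame tag) transforms from
        // the previous and current pass, used to ramp gain smoothly between frames.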
        std::deque<std::vector<short>> audio_data_;
        std::stack<core::audio_transform> transform_stack_;

        std::map<int, core::audio_transform> prev_audio_transforms_;
        std::map<int, core::audio_transform> next_audio_transforms_;

public:
        implementation()
        {
                transform_stack_.push(core::audio_transform());

                // Pre-fill with two empty buffers: audio output is delayed by two frames
                // to match the mixer pipeline depth (host to device transfers).
                audio_data_.push_back(std::vector<short>());
                audio_data_.push_back(std::vector<short>());
        }

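        // Visitor entry point: called when descending into a nested frame. The frame's
        // audio transform is composed onto the current top of the stack so that child
        // frames inherit their parents' gain and has_audio settings.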
        void begin(const core::basic_frame& frame)
        {
                transform_stack_.push(transform_stack_.top()*frame.get_audio_transform());
        }

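        // Mixes the frame's audio into the back buffer of the delay queue. The gain is
        // ramped linearly across the buffer from the transform this stream had in the
        // previous pass to the current one. Streams whose gain is effectively zero in
        // both passes are skipped.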
        void visit(const core::write_frame& frame)
        {
                if(!transform_stack_.top().get_has_audio())
                        return;

                auto& audio_data = frame.audio_data();
                auto tag = frame.tag(); // Get the identifier for the audio stream.

                if(audio_data_.back().empty())
                        audio_data_.back().resize(audio_data.size(), 0);

                auto next = transform_stack_.top();
                auto prev = next;

                auto it = prev_audio_transforms_.find(tag);
                if(it != prev_audio_transforms_.end())
                        prev = it->second;

                next_audio_transforms_[tag] = next; // Store all active tags, inactive tags will be removed in end_pass.

                auto next_gain = next.get_gain();
                auto prev_gain = prev.get_gain();

                // Nothing audible to mix in for this stream.
                if(next_gain < 0.001 && prev_gain < 0.001)
                        return;

                tbb::parallel_for
                (
                        tbb::blocked_range<size_t>(0, audio_data.size()),
                        [&](const tbb::blocked_range<size_t>& r)
                        {
                                for(size_t n = r.begin(); n < r.end(); ++n)
                                {
                                        // Interpolate the gain linearly across the buffer to avoid
                                        // clicks when the gain changes between frames.
                                        double alpha = static_cast<double>(n)/static_cast<double>(audio_data_.back().size());
                                        double sample_gain = prev_gain * (1.0 - alpha) + next_gain * alpha;

                                        // Apply the gain in 16-bit fixed point and accumulate into the
                                        // back buffer; the & 0xFFFF wraps the sum to 16 bits rather than clamping.
                                        int sample = static_cast<int>(audio_data[n]);
                                        sample = (static_cast<int>(sample_gain*static_cast<double>(1<<15))*sample)>>15;
                                        audio_data_.back()[n] = static_cast<short>((static_cast<int>(audio_data_.back()[n]) + sample) & 0xFFFF);
                                }
                        }
                );
        }

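        // Same as begin(const core::basic_frame&), but composes an explicit transform.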
        void begin(const core::audio_transform& transform)
        {
                transform_stack_.push(transform_stack_.top()*transform);
        }

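        // Pops the transform pushed by the matching begin().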
        void end()
        {
                transform_stack_.pop();
        }

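        // Returns the oldest buffer in the delay queue (mixed two passes ago) and queues
        // a fresh, empty buffer that visit() will mix the current pass into.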
        std::vector<short> begin_pass()
        {
                auto result = std::move(audio_data_.front());
                audio_data_.pop_front();

                audio_data_.push_back(std::vector<short>());

                return result;
        }

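        // The transforms seen this pass become the "previous" transforms for the next
        // pass; tags that were not visited drop out, so their ramp state is discarded.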
        void end_pass()
        {
                prev_audio_transforms_ = std::move(next_audio_transforms_);
        }
};

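// Thin forwarders from the public audio_mixer interface to the pimpl implementation.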
audio_mixer::audio_mixer() : impl_(new implementation()){}
void audio_mixer::begin(const core::basic_frame& frame){impl_->begin(frame);}
void audio_mixer::visit(core::write_frame& frame){impl_->visit(frame);}
void audio_mixer::end(){impl_->end();}
std::vector<short> audio_mixer::begin_pass(){return impl_->begin_pass();}
void audio_mixer::end_pass(){impl_->end_pass();}

}}
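
// A minimal usage sketch, assuming the audio_mixer is driven as a frame visitor
// (the begin/visit/end signatures suggest this) and that basic_frame exposes an
// accept() call for such a visitor; mix_audio_pass and root_frame are illustrative
// names, not part of this file.
//
//   std::vector<short> mix_audio_pass(caspar::mixer::audio_mixer& mixer,
//                                     const caspar::core::basic_frame& root_frame)
//   {
//       auto mixed = mixer.begin_pass();  // buffer that was mixed two passes ago
//       root_frame.accept(mixer);         // traversal invokes begin()/visit()/end()
//       mixer.end_pass();                 // this pass's transforms become "previous"
//       return mixed;
//   }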