X-Git-Url: https://git.sesse.net/?a=blobdiff_plain;f=core%2Fmixer%2Faudio%2Faudio_mixer.cpp;h=50ce413c4f707393c78215358209cdba00482129;hb=4339e2b3466b78ed27cbe88592eb2e247c5c13a4;hp=687812e219fa489acedec765626c04e950e49fb6;hpb=dfbde5d6d0fcff021c60c0240d94fece52481cf5;p=casparcg

diff --git a/core/mixer/audio/audio_mixer.cpp b/core/mixer/audio/audio_mixer.cpp
index 687812e21..50ce413c4 100644
--- a/core/mixer/audio/audio_mixer.cpp
+++ b/core/mixer/audio/audio_mixer.cpp
@@ -22,82 +22,65 @@
 #include "audio_mixer.h"
 
 #include
-#include
+#include
+
+#include
+
+#include
+
+#include
+#include
 
 namespace caspar { namespace core {
+
+struct audio_item
+{
+    const void*     tag;
+    frame_transform transform;
+    audio_buffer    audio_data;
+};
 
 struct audio_mixer::implementation
 {
-    std::deque<std::vector<short>>               audio_data_;
-    std::stack<core::audio_transform>            transform_stack_;
-
-    std::map<const void*, core::audio_transform> prev_audio_transforms_;
-    std::map<const void*, core::audio_transform> next_audio_transforms_;
+    std::stack<core::frame_transform>            transform_stack_;
+    std::map<const void*, core::frame_transform> prev_frame_transforms_;
+    const core::video_format_desc                format_desc_;
+    std::vector<audio_item>                      items;
 
 public:
-    implementation()
+    implementation(const core::video_format_desc& format_desc)
+        : format_desc_(format_desc)
     {
-        transform_stack_.push(core::audio_transform());
-
-        // frame delay
-        audio_data_.push_back(std::vector<short>());
+        transform_stack_.push(core::frame_transform());
     }
 
-    void begin(const core::basic_frame& frame)
+    void begin(core::basic_frame& frame)
     {
-        transform_stack_.push(transform_stack_.top()*frame.get_audio_transform());
+        transform_stack_.push(transform_stack_.top()*frame.get_frame_transform());
     }
 
-    void visit(const core::write_frame& frame)
+    void visit(core::write_frame& frame)
     {
-        if(!transform_stack_.top().get_has_audio())
+        // We only care about the last field.
+        if(format_desc_.field_mode == field_mode::upper && transform_stack_.top().field_mode == field_mode::upper)
             return;
 
-        auto& audio_data = frame.audio_data();
-        auto tag = frame.tag(); // Get the identifier for the audio-stream.
-
-        if(audio_data_.back().empty())
-            audio_data_.back().resize(audio_data.size(), 0);
-
-        auto next = transform_stack_.top();
-        auto prev = next;
+        if(format_desc_.field_mode == field_mode::lower && transform_stack_.top().field_mode == field_mode::lower)
+            return;
 
-        auto it = prev_audio_transforms_.find(tag);
-        if(it != prev_audio_transforms_.end())
-            prev = it->second;
-
-        next_audio_transforms_[tag] = next; // Store all active tags, inactive tags will be removed in end_pass.
-
-
-        if(next.get_gain() < 0.001 && prev.get_gain() < 0.001)
+        // Skip empty audio.
+        if(transform_stack_.top().volume < 0.002 || frame.audio_data().empty())
             return;
-
-        static const int BASE = 1<<15;
-        auto next_gain = static_cast<int>(next.get_gain()*BASE);
-        auto prev_gain = static_cast<int>(prev.get_gain()*BASE);
-
-        int n_samples = audio_data_.back().size();
-
-        tbb::parallel_for
-        (
-            tbb::blocked_range<size_t>(0, audio_data.size()),
-            [&](const tbb::blocked_range<size_t>& r)
-            {
-                for(size_t n = r.begin(); n < r.end(); ++n)
-                {
-                    int sample_gain = (prev_gain - (prev_gain * n)/n_samples) + (next_gain * n)/n_samples;
-
-                    int sample = (static_cast<int>(audio_data[n])*sample_gain)/BASE;
-
-                    audio_data_.back()[n] = static_cast<short>((static_cast<int>(audio_data_.back()[n]) + sample) & 0xFFFF);
-                }
-            }
-        );
-    }
+        audio_item item;
+        item.tag        = frame.tag();
+        item.transform  = transform_stack_.top();
+        item.audio_data = std::move(frame.audio_data());
+
+        items.push_back(item);
+    }
 
-    void begin(const core::audio_transform& transform)
+    void begin(const core::frame_transform& transform)
     {
         transform_stack_.push(transform_stack_.top()*transform);
     }
@@ -106,27 +89,103 @@ public:
     {
         transform_stack_.pop();
     }
+
+    audio_buffer mix()
+    {
+        auto intermediate = std::vector<float, tbb::cache_aligned_allocator<float>>(format_desc_.audio_samples_per_frame+128, 0.0f);
 
-    void begin_pass()
-    {
-        audio_data_.push_back(std::vector<short>());
-    }
+        std::map<const void*, core::frame_transform> next_frame_transforms;
 
-    std::vector<short> end_pass()
-    {
-        prev_audio_transforms_ = std::move(next_audio_transforms_);
+        BOOST_FOREACH(auto& item, items)
+        {
+            const auto next = item.transform;
+            auto prev = next;
+
+            const auto it = prev_frame_transforms_.find(item.tag);
+            if(it != prev_frame_transforms_.end())
+                prev = it->second;
+
+            next_frame_transforms[item.tag] = next; // Store all active tags, inactive tags will be removed at the end.
+
+            if(next.volume < 0.001 && prev.volume < 0.001)
+                continue;
+
+            if(static_cast<size_t>(item.audio_data.size()) != format_desc_.audio_samples_per_frame)
+                continue;
+
+            CASPAR_ASSERT(format_desc_.audio_channels == 2);
+            CASPAR_ASSERT(format_desc_.audio_samples_per_frame % 4 == 0);
+
+            const float prev_volume = static_cast<float>(prev.volume);
+            const float next_volume = static_cast<float>(next.volume);
+            const float delta = 1.0f/static_cast<float>(format_desc_.audio_samples_per_frame/2);
+
+            tbb::parallel_for
+            (
+                tbb::blocked_range<size_t>(0, format_desc_.audio_samples_per_frame/4),
+                [&](const tbb::blocked_range<size_t>& r)
+                {
+                    for(size_t n = r.begin(); n < r.end(); ++n)
+                    {
+                        const float alpha0  = (n*2) * delta;
+                        const float volume0 = prev_volume * (1.0f - alpha0) + next_volume * alpha0;
+                        const float volume1 = prev_volume * (1.0f - alpha0 + delta) + next_volume * (alpha0 + delta);
+
+                        auto sample_epi32  = _mm_load_si128(reinterpret_cast<__m128i*>(&item.audio_data[n*4]));
+                        auto res_sample_ps = _mm_load_ps(&intermediate[n*4]);
 
-        auto result = std::move(audio_data_.front());
-        audio_data_.pop_front();
-        return result;
+                        auto sample_ps = _mm_cvtepi32_ps(sample_epi32);
+                        sample_ps      = _mm_mul_ps(sample_ps, _mm_setr_ps(volume1, volume1, volume0, volume0));
+                        res_sample_ps  = _mm_add_ps(sample_ps, res_sample_ps);
+
+                        _mm_store_ps(&intermediate[n*4], res_sample_ps);
+                    }
+                }
+            );
+        }
+
+        auto result = audio_buffer(format_desc_.audio_samples_per_frame+128, 0);
+
+        auto intermediate_128 = reinterpret_cast<__m128i*>(intermediate.data());
+        auto result_128       = reinterpret_cast<__m128i*>(result.data());
+        for(size_t n = 0; n < format_desc_.audio_samples_per_frame/32; ++n)
+        {
+            auto xmm0 = _mm_load_ps(reinterpret_cast<float*>(intermediate_128++));
+            auto xmm1 = _mm_load_ps(reinterpret_cast<float*>(intermediate_128++));
+            auto xmm2 = _mm_load_ps(reinterpret_cast<float*>(intermediate_128++));
+            auto xmm3 = _mm_load_ps(reinterpret_cast<float*>(intermediate_128++));
+            auto xmm4 = _mm_load_ps(reinterpret_cast<float*>(intermediate_128++));
+            auto xmm5 = _mm_load_ps(reinterpret_cast<float*>(intermediate_128++));
+            auto xmm6 = _mm_load_ps(reinterpret_cast<float*>(intermediate_128++));
+            auto xmm7 = _mm_load_ps(reinterpret_cast<float*>(intermediate_128++));
+
+            _mm_stream_si128(result_128++, _mm_cvtps_epi32(xmm0));
+            _mm_stream_si128(result_128++, _mm_cvtps_epi32(xmm1));
+            _mm_stream_si128(result_128++, _mm_cvtps_epi32(xmm2));
+            _mm_stream_si128(result_128++, _mm_cvtps_epi32(xmm3));
+            _mm_stream_si128(result_128++, _mm_cvtps_epi32(xmm4));
+            _mm_stream_si128(result_128++, _mm_cvtps_epi32(xmm5));
+            _mm_stream_si128(result_128++, _mm_cvtps_epi32(xmm6));
+            _mm_stream_si128(result_128++, _mm_cvtps_epi32(xmm7));
+        }
+
+        items.clear();
+        prev_frame_transforms_ = std::move(next_frame_transforms);
+
+        result.resize(format_desc_.audio_samples_per_frame);
+
+        return std::move(result);
     }
 };
 
-audio_mixer::audio_mixer() : impl_(new implementation()){}
-void audio_mixer::begin(const core::basic_frame& frame){impl_->begin(frame);}
+audio_mixer::audio_mixer(const core::video_format_desc& format_desc) : impl_(new implementation(format_desc)){}
+void audio_mixer::begin(core::basic_frame& frame){impl_->begin(frame);}
 void audio_mixer::visit(core::write_frame& frame){impl_->visit(frame);}
 void audio_mixer::end(){impl_->end();}
-void audio_mixer::begin_pass(){ impl_->begin_pass();}
-std::vector<short> audio_mixer::end_pass(){return impl_->end_pass();}
+audio_buffer audio_mixer::mix(){return impl_->mix();}
+audio_mixer& audio_mixer::operator=(audio_mixer&& other)
+{
+    impl_ = std::move(other.impl_);
+    return *this;
+}
 }}
\ No newline at end of file
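
Note on the new mix() path: each queued audio_item is accumulated into a float intermediate buffer while its gain ramps linearly from the previous frame's volume to the current one (volume0/volume1, one step per stereo pair), and the accumulated floats are then converted back to 32-bit samples with SSE, presumably why both buffers carry 128 samples of headroom before the final resize to audio_samples_per_frame. The scalar sketch below illustrates only the ramp-and-accumulate step, without intrinsics; the free function mix_item_into, its parameter names, and the plain std::vector types are illustrative assumptions and are not part of the patch.

#include <cstdint>
#include <cstddef>
#include <vector>

// Scalar equivalent of the per-item accumulation in mix(): ramp the volume
// linearly across the frame and add the scaled samples into the running sum.
// Assumes interleaved stereo, 32-bit integer samples, and that 'intermediate'
// has at least the same length as 'samples' (hypothetical standalone form).
void mix_item_into(const std::vector<int32_t>& samples,      // one item's audio
                   std::vector<float>&         intermediate, // running sum across items
                   float                       prev_volume,
                   float                       next_volume)
{
    const std::size_t pairs = samples.size() / 2;            // stereo sample pairs
    const float       delta = 1.0f / static_cast<float>(pairs);

    for (std::size_t p = 0; p < pairs; ++p)
    {
        // One ramp step per stereo pair; the SSE version computes two such
        // pair volumes (volume0 and volume1) per 4-sample iteration.
        const float alpha  = static_cast<float>(p) * delta;
        const float volume = prev_volume * (1.0f - alpha) + next_volume * alpha;

        intermediate[p * 2 + 0] += static_cast<float>(samples[p * 2 + 0]) * volume;
        intermediate[p * 2 + 1] += static_cast<float>(samples[p * 2 + 1]) * volume;
    }
}

After every item has been accumulated this way, the float sums are rounded back to integers, which the patch does 32 samples at a time with _mm_cvtps_epi32 and streaming stores.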