\r
#include <tbb/parallel_for.h>\r
\r
+#include <safeint.h>\r
+\r
#include <stack>\r
#include <deque>\r
\r
{\r
const void* tag;\r
frame_transform transform;\r
- std::vector<int16_t> audio_data;\r
+ std::vector<int32_t> audio_data;\r
};\r
\r
struct audio_mixer::implementation\r
audio_item item;\r
item.tag = frame.tag();\r
item.transform = transform_stack_.top();\r
- item.audio_data = std::vector<int16_t>(frame.audio_data().begin(), frame.audio_data().end());\r
+ item.audio_data = std::vector<int32_t>(frame.audio_data().begin(), frame.audio_data().end());\r
\r
items.push_back(item); \r
}\r
transform_stack_.pop();\r
}\r
\r
- std::vector<int16_t> mix()\r
+ std::vector<int32_t> mix()\r
{\r
- auto result = std::vector<int16_t>(format_desc_.audio_samples_per_frame);\r
+ auto result = std::vector<int32_t>(format_desc_.audio_samples_per_frame);\r
\r
std::map<const void*, core::frame_transform> next_frame_transforms;\r
\r
if(next.volume < 0.001 && prev.volume < 0.001)\r
continue;\r
\r
-	static const int BASE = 1<<15;\r
+	static const int64_t BASE = 1LL<<31;\r
\r
- const auto next_volume = static_cast<int>(next.volume*BASE);\r
- const auto prev_volume = static_cast<int>(prev.volume*BASE);\r
+ const auto next_volume = static_cast<int64_t>(next.volume*BASE);\r
+ const auto prev_volume = static_cast<int64_t>(prev.volume*BASE);\r
\r
const int n_samples = result.size();\r
\r
{\r
for(size_t n = r.begin(); n < r.end(); ++n)\r
{\r
- const int sample_volume = (prev_volume - (prev_volume * n)/n_samples) + (next_volume * n)/n_samples;\r
- const int sample = (static_cast<int>(item.audio_data[n])*sample_volume)/BASE;\r
- result[n] = static_cast<int16_t>((static_cast<int>(result[n]) + sample) & 0xFFFF);\r
+ const auto sample_volume = (prev_volume - (prev_volume * n)/n_samples) + (next_volume * n)/n_samples;\r
+ const auto sample = static_cast<int32_t>((static_cast<int64_t>(item.audio_data[n])*sample_volume)/BASE);\r
+ result[n] = result[n] + sample;\r
}\r
}\r
);\r
void audio_mixer::begin(core::basic_frame& frame){impl_->begin(frame);}\r
void audio_mixer::visit(core::write_frame& frame){impl_->visit(frame);}\r
void audio_mixer::end(){impl_->end();}\r
-std::vector<int16_t> audio_mixer::mix(){return impl_->mix();}\r
+std::vector<int32_t> audio_mixer::mix(){return impl_->mix();}\r
audio_mixer& audio_mixer::operator=(audio_mixer&& other)\r
{\r
impl_ = std::move(other.impl_);\r
virtual void visit(core::write_frame& frame);\r
virtual void end();\r
\r
- std::vector<int16_t> mix();\r
+ std::vector<int32_t> mix();\r
\r
audio_mixer& operator=(audio_mixer&& other);\r
private:\r
std::unordered_map<int, tweened_transform<core::frame_transform>> transforms_; \r
std::unordered_map<int, blend_mode::type> blend_modes_;\r
\r
- std::queue<std::pair<boost::unique_future<safe_ptr<host_buffer>>, std::vector<int16_t>>> buffer_;\r
+ std::queue<std::pair<boost::unique_future<safe_ptr<host_buffer>>, std::vector<int32_t>>> buffer_;\r
\r
const size_t buffer_size_;\r
\r
size_t size_;\r
safe_ptr<host_buffer> image_data_;\r
tbb::mutex mutex_;\r
- std::vector<int16_t> audio_data_;\r
+ std::vector<int32_t> audio_data_;\r
\r
public:\r
- implementation(ogl_device& ogl, size_t size, safe_ptr<host_buffer>&& image_data, std::vector<int16_t>&& audio_data) \r
+ implementation(ogl_device& ogl, size_t size, safe_ptr<host_buffer>&& image_data, std::vector<int32_t>&& audio_data) \r
: ogl_(ogl)\r
, size_(size)\r
, image_data_(std::move(image_data))\r
auto ptr = static_cast<const uint8_t*>(image_data_->data());\r
return boost::iterator_range<const uint8_t*>(ptr, ptr + image_data_->size());\r
}\r
- const boost::iterator_range<const int16_t*> audio_data()\r
+ const boost::iterator_range<const int32_t*> audio_data()\r
{\r
- return boost::iterator_range<const int16_t*>(audio_data_.data(), audio_data_.data() + audio_data_.size());\r
+ return boost::iterator_range<const int32_t*>(audio_data_.data(), audio_data_.data() + audio_data_.size());\r
}\r
};\r
\r
-read_frame::read_frame(ogl_device& ogl, size_t size, safe_ptr<host_buffer>&& image_data, std::vector<int16_t>&& audio_data) \r
+read_frame::read_frame(ogl_device& ogl, size_t size, safe_ptr<host_buffer>&& image_data, std::vector<int32_t>&& audio_data) \r
: impl_(new implementation(ogl, size, std::move(image_data), std::move(audio_data))){}\r
read_frame::read_frame(){}\r
const boost::iterator_range<const uint8_t*> read_frame::image_data()\r
return impl_ ? impl_->image_data() : boost::iterator_range<const uint8_t*>();\r
}\r
\r
-const boost::iterator_range<const int16_t*> read_frame::audio_data()\r
+const boost::iterator_range<const int32_t*> read_frame::audio_data()\r
{\r
- return impl_ ? impl_->audio_data() : boost::iterator_range<const int16_t*>();\r
+ return impl_ ? impl_->audio_data() : boost::iterator_range<const int32_t*>();\r
}\r
\r
size_t read_frame::image_size() const{return impl_ ? impl_->size_ : 0;}\r
{\r
public:\r
read_frame();\r
- read_frame(ogl_device& ogl, size_t size, safe_ptr<host_buffer>&& image_data, std::vector<int16_t>&& audio_data);\r
+ read_frame(ogl_device& ogl, size_t size, safe_ptr<host_buffer>&& image_data, std::vector<int32_t>&& audio_data);\r
\r
virtual const boost::iterator_range<const uint8_t*> image_data();\r
- virtual const boost::iterator_range<const int16_t*> audio_data();\r
+ virtual const boost::iterator_range<const int32_t*> audio_data();\r
\r
virtual size_t image_size() const;\r
\r
ogl_device* ogl_;\r
std::vector<std::shared_ptr<host_buffer>> buffers_;\r
std::vector<safe_ptr<device_buffer>> textures_;\r
- std::vector<int16_t> audio_data_;\r
+ std::vector<int32_t> audio_data_;\r
const core::pixel_format_desc desc_;\r
const void* tag_;\r
core::field_mode::type mode_;\r
void write_frame::swap(write_frame& other){impl_.swap(other.impl_);}\r
\r
boost::iterator_range<uint8_t*> write_frame::image_data(size_t index){return impl_->image_data(index);}\r
-std::vector<int16_t>& write_frame::audio_data() { return impl_->audio_data_; }\r
+std::vector<int32_t>& write_frame::audio_data() { return impl_->audio_data_; }\r
const boost::iterator_range<const uint8_t*> write_frame::image_data(size_t index) const\r
{\r
return boost::iterator_range<const uint8_t*>(impl_->image_data(index).begin(), impl_->image_data(index).end());\r
}\r
-const boost::iterator_range<const int16_t*> write_frame::audio_data() const\r
+const boost::iterator_range<const int32_t*> write_frame::audio_data() const\r
{\r
- return boost::iterator_range<const int16_t*>(impl_->audio_data_.data(), impl_->audio_data_.data() + impl_->audio_data_.size());\r
+ return boost::iterator_range<const int32_t*>(impl_->audio_data_.data(), impl_->audio_data_.data() + impl_->audio_data_.size());\r
}\r
const void* write_frame::tag() const {return impl_->tag_;}\r
const core::pixel_format_desc& write_frame::get_pixel_format_desc() const{return impl_->desc_;}\r
boost::iterator_range<uint8_t*> image_data(size_t plane_index = 0); \r
const boost::iterator_range<const uint8_t*> image_data(size_t plane_index = 0) const;\r
\r
- std::vector<int16_t>& audio_data();\r
- const boost::iterator_range<const int16_t*> audio_data() const;\r
+ std::vector<int32_t>& audio_data();\r
+ const boost::iterator_range<const int32_t*> audio_data() const;\r
\r
void commit(uint32_t plane_index);\r
void commit();\r
(m == field_mode::progressive ? 1 : 2),\\r
((w)*(h)*4),\\r
(name),\\r
- (2),\\r
(48000),\\r
(2),\\r
(static_cast<size_t>(48000.0*2.0/((double)scale/(double)duration)+0.99))\\r
size_t size; // output frame size in bytes \r
std::wstring name; // name of output format\r
\r
- size_t audio_bytes_per_sample;\r
size_t audio_sample_rate;\r
size_t audio_channels;\r
size_t audio_samples_per_frame;\r
\r
void schedule_next_video(const safe_ptr<core::read_frame>& frame)\r
{\r
- static std::vector<int16_t> silence(MAX_HANC_BUFFER_SIZE, 0);\r
+ static std::vector<int32_t> silence(MAX_HANC_BUFFER_SIZE, 0);\r
\r
executor_.begin_invoke([=]\r
{\r
\r
if(embedded_audio_)\r
{ \r
- auto frame_audio_data = frame->audio_data().empty() ? silence.data() : const_cast<int16_t*>(frame->audio_data().begin());\r
+ auto frame_audio_data = frame->audio_data().empty() ? silence.data() : const_cast<int32_t*>(frame->audio_data().begin());\r
+ \r
+ std::vector<int16_t> frame_audio_data16(audio_samples);\r
+ for(size_t n = 0; n < frame_audio_data16.size(); ++n) \r
+ frame_audio_data16[n] = (frame_audio_data[n] >> 16) & 0xffff; \r
\r
- encode_hanc(reinterpret_cast<BLUE_UINT32*>(reserved_frames_.front()->hanc_data()), frame_audio_data, audio_samples, audio_nchannels);\r
+ encode_hanc(reinterpret_cast<BLUE_UINT32*>(reserved_frames_.front()->hanc_data()), frame_audio_data16.data(), audio_samples, audio_nchannels);\r
\r
blue_->system_buffer_write_async(const_cast<uint8_t*>(reserved_frames_.front()->image_data()), \r
reserved_frames_.front()->image_size(), \r
\r
size_t preroll_count_;\r
\r
- boost::circular_buffer<std::vector<int16_t>> audio_container_;\r
+ boost::circular_buffer<std::vector<int32_t>> audio_container_;\r
\r
tbb::concurrent_bounded_queue<std::shared_ptr<core::read_frame>> video_frame_buffer_;\r
tbb::concurrent_bounded_queue<std::shared_ptr<core::read_frame>> audio_frame_buffer_;\r
\r
void enable_audio()\r
{\r
- if(FAILED(output_->EnableAudioOutput(bmdAudioSampleRate48kHz, bmdAudioSampleType16bitInteger, 2, bmdAudioOutputStreamTimestamped)))\r
+ if(FAILED(output_->EnableAudioOutput(bmdAudioSampleRate48kHz, bmdAudioSampleType32bitInteger, 2, bmdAudioOutputStreamTimestamped)))\r
BOOST_THROW_EXCEPTION(caspar_exception() << msg_info(narrow(print()) + " Could not enable audio output."));\r
\r
if(FAILED(output_->SetAudioCallback(this)))\r
{\r
const int sample_frame_count = frame->audio_data().size()/format_desc_.audio_channels;\r
\r
- audio_container_.push_back(std::vector<int16_t>(frame->audio_data().begin(), frame->audio_data().end()));\r
+ audio_container_.push_back(std::vector<int32_t>(frame->audio_data().begin(), frame->audio_data().end()));\r
\r
if(FAILED(output_->ScheduleAudioSamples(audio_container_.back().data(), sample_frame_count, (audio_scheduled_++) * sample_frame_count, format_desc_.audio_sample_rate, nullptr)))\r
CASPAR_LOG(error) << print() << L" Failed to schedule audio.";\r
{\r
auto sample_frame_count = audio->GetSampleFrameCount();\r
auto audio_data = reinterpret_cast<short*>(bytes);\r
-	muxer_.push(std::make_shared<std::vector<int16_t>>(audio_data, audio_data + sample_frame_count*2));\r
+	{\r
+		// NOTE(review): the capture path still delivers 16-bit samples (reinterpret_cast<short*> above);\r
+		// scale them into the 32-bit range used by the mixer, otherwise consumers that shift >>16\r
+		// will render them as near-silence. Confirm against EnableAudioInput configuration.\r
+		auto samples = std::make_shared<std::vector<int32_t>>(sample_frame_count*2);\r
+		for(size_t n = 0; n < samples->size(); ++n)\r
+			(*samples)[n] = static_cast<int32_t>(audio_data[n]) << 16;\r
+		muxer_.push(samples);\r
+	}\r
}\r
else\r
- muxer_.push(std::make_shared<std::vector<int16_t>>(frame_factory_->get_video_format_desc().audio_samples_per_frame, 0));\r
+ muxer_.push(std::make_shared<std::vector<int32_t>>(frame_factory_->get_video_format_desc().audio_samples_per_frame, 0));\r
\r
muxer_.commit();\r
\r
<PrecompiledHeaderFile Condition="'$(Configuration)|$(Platform)'=='Develop|Win32'">../../StdAfx.h</PrecompiledHeaderFile>\r
<PrecompiledHeaderFile Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">../../StdAfx.h</PrecompiledHeaderFile>\r
</ClCompile>\r
+ <ClCompile Include="producer\audio\audio_resampler.cpp">\r
+ <PrecompiledHeaderFile Condition="'$(Configuration)|$(Platform)'=='Profile|Win32'">../../StdAfx.h</PrecompiledHeaderFile>\r
+ <PrecompiledHeaderFile Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">../../StdAfx.h</PrecompiledHeaderFile>\r
+ <PrecompiledHeaderFile Condition="'$(Configuration)|$(Platform)'=='Develop|Win32'">../../StdAfx.h</PrecompiledHeaderFile>\r
+ <PrecompiledHeaderFile Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">../../StdAfx.h</PrecompiledHeaderFile>\r
+ </ClCompile>\r
<ClCompile Include="producer\ffmpeg_producer.cpp">\r
<PrecompiledHeaderFile Condition="'$(Configuration)|$(Platform)'=='Profile|Win32'">../StdAfx.h</PrecompiledHeaderFile>\r
<PrecompiledHeaderFile Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">../StdAfx.h</PrecompiledHeaderFile>\r
<ClInclude Include="ffmpeg.h" />\r
<ClInclude Include="ffmpeg_error.h" />\r
<ClInclude Include="producer\audio\audio_decoder.h" />\r
+ <ClInclude Include="producer\audio\audio_resampler.h" />\r
<ClInclude Include="producer\ffmpeg_producer.h" />\r
<ClInclude Include="producer\filter\filter.h" />\r
<ClInclude Include="producer\filter\parallel_yadif.h" />\r
<ClCompile Include="producer\format\flv.cpp">\r
<Filter>source\producer\format</Filter>\r
</ClCompile>\r
+ <ClCompile Include="producer\audio\audio_resampler.cpp">\r
+ <Filter>source\producer\audio</Filter>\r
+ </ClCompile>\r
</ItemGroup>\r
<ItemGroup>\r
<ClInclude Include="producer\ffmpeg_producer.h">\r
<ClInclude Include="producer\format\flv.h">\r
<Filter>source\producer\format</Filter>\r
</ClInclude>\r
+ <ClInclude Include="producer\audio\audio_resampler.h">\r
+ <Filter>source\producer\audio</Filter>\r
+ </ClInclude>\r
</ItemGroup>\r
</Project>
\ No newline at end of file
\r
#include "audio_decoder.h"\r
\r
+#include "audio_resampler.h"\r
+\r
#include "../../ffmpeg_error.h"\r
\r
#include <core/video_format.h>\r
std::shared_ptr<AVCodecContext> codec_context_; \r
const core::video_format_desc format_desc_;\r
int index_;\r
- std::shared_ptr<ReSampleContext> resampler_;\r
+ std::unique_ptr<audio_resampler> resampler_;\r
\r
std::vector<int8_t, tbb::cache_aligned_allocator<int8_t>> buffer1_;\r
- std::vector<int8_t, tbb::cache_aligned_allocator<int8_t>> buffer2_;\r
\r
std::queue<std::shared_ptr<AVPacket>> packets_;\r
\r
\r
buffer1_.resize(AVCODEC_MAX_AUDIO_FRAME_SIZE*2);\r
\r
- if(codec_context_->sample_rate != static_cast<int>(format_desc_.audio_sample_rate) || \r
- codec_context_->channels != static_cast<int>(format_desc_.audio_channels) ||\r
- codec_context_->sample_fmt != AV_SAMPLE_FMT_S16)\r
- { \r
- auto resampler = av_audio_resample_init(format_desc_.audio_channels, codec_context_->channels,\r
- format_desc_.audio_sample_rate, codec_context_->sample_rate,\r
- AV_SAMPLE_FMT_S16, codec_context_->sample_fmt,\r
- 16, 10, 0, 0.8);\r
-\r
- buffer2_.resize(AVCODEC_MAX_AUDIO_FRAME_SIZE*2);\r
-\r
- CASPAR_LOG(warning) << L"Invalid audio format. Resampling." <<\r
- L" sample_rate:" << static_cast<int>(codec_context_->sample_rate) <<\r
- L" audio_channels:" << static_cast<int>(codec_context_->channels) <<\r
- L" sample_fmt:" << static_cast<int>(codec_context_->sample_fmt);\r
-\r
- if(resampler)\r
- resampler_.reset(resampler, audio_resample_close);\r
- else\r
- codec_context_ = nullptr;\r
- } \r
+ resampler_.reset(new audio_resampler(format_desc_.audio_channels, codec_context_->channels,\r
+ format_desc_.audio_sample_rate, codec_context_->sample_rate,\r
+ AV_SAMPLE_FMT_S32, codec_context_->sample_fmt)); \r
}\r
catch(...)\r
{\r
packets_.push(packet);\r
} \r
\r
- std::vector<std::shared_ptr<std::vector<int16_t>>> poll()\r
+ std::vector<std::shared_ptr<std::vector<int32_t>>> poll()\r
{\r
- std::vector<std::shared_ptr<std::vector<int16_t>>> result;\r
+ std::vector<std::shared_ptr<std::vector<int32_t>>> result;\r
\r
if(packets_.empty())\r
return result;\r
return result;\r
}\r
\r
- std::vector<std::shared_ptr<std::vector<int16_t>>> empty_poll()\r
+ std::vector<std::shared_ptr<std::vector<int32_t>>> empty_poll()\r
{\r
auto packet = packets_.front();\r
packets_.pop();\r
if(!packet) \r
return boost::assign::list_of(nullptr);\r
\r
- return boost::assign::list_of(std::make_shared<std::vector<int16_t>>(format_desc_.audio_samples_per_frame, 0)); \r
+ return boost::assign::list_of(std::make_shared<std::vector<int32_t>>(format_desc_.audio_samples_per_frame, 0)); \r
}\r
\r
- std::shared_ptr<std::vector<int16_t>> decode(AVPacket& pkt)\r
+ std::shared_ptr<std::vector<int32_t>> decode(AVPacket& pkt)\r
{ \r
+ buffer1_.resize(AVCODEC_MAX_AUDIO_FRAME_SIZE*2);\r
int written_bytes = buffer1_.size() - FF_INPUT_BUFFER_PADDING_SIZE;\r
-\r
+ \r
int ret = THROW_ON_ERROR2(avcodec_decode_audio3(codec_context_.get(), reinterpret_cast<int16_t*>(buffer1_.data()), &written_bytes, &pkt), "[audio_decoder]");\r
\r
// There might be several frames in one packet.\r
pkt.size -= ret;\r
pkt.data += ret;\r
\r
- if(resampler_)\r
- {\r
- auto ret = audio_resample(resampler_.get(),\r
- reinterpret_cast<short*>(buffer2_.data()), \r
- reinterpret_cast<short*>(buffer1_.data()), \r
- written_bytes / (av_get_bytes_per_sample(codec_context_->sample_fmt) * codec_context_->channels)); \r
- written_bytes = ret * av_get_bytes_per_sample(AV_SAMPLE_FMT_S16) * format_desc_.audio_channels;\r
- std::swap(buffer1_, buffer2_);\r
- }\r
+ buffer1_.resize(written_bytes);\r
\r
- const auto n_samples = written_bytes / av_get_bytes_per_sample(AV_SAMPLE_FMT_S16);\r
- const auto samples = reinterpret_cast<int16_t*>(buffer1_.data());\r
+ buffer1_ = resampler_->resample(std::move(buffer1_));\r
+ \r
+ const auto n_samples = buffer1_.size() / av_get_bytes_per_sample(AV_SAMPLE_FMT_S32);\r
+ const auto samples = reinterpret_cast<int32_t*>(buffer1_.data());\r
\r
- return std::make_shared<std::vector<int16_t>>(samples, samples + n_samples);\r
+ return std::make_shared<std::vector<int32_t>>(samples, samples + n_samples);\r
}\r
\r
bool ready() const\r
audio_decoder::audio_decoder(const safe_ptr<AVFormatContext>& context, const core::video_format_desc& format_desc) : impl_(new implementation(context, format_desc)){}\r
void audio_decoder::push(const std::shared_ptr<AVPacket>& packet){impl_->push(packet);}\r
bool audio_decoder::ready() const{return impl_->ready();}\r
-std::vector<std::shared_ptr<std::vector<int16_t>>> audio_decoder::poll(){return impl_->poll();}\r
+std::vector<std::shared_ptr<std::vector<int32_t>>> audio_decoder::poll(){return impl_->poll();}\r
int64_t audio_decoder::nb_frames() const{return impl_->nb_frames_;}\r
}
\ No newline at end of file
\r
void push(const std::shared_ptr<AVPacket>& packet);\r
bool ready() const;\r
- std::vector<std::shared_ptr<std::vector<int16_t>>> poll();\r
+ std::vector<std::shared_ptr<std::vector<int32_t>>> poll();\r
\r
int64_t nb_frames() const;\r
\r
--- /dev/null
+#include "../../StdAfx.h"\r
+\r
+#include "audio_resampler.h"\r
+\r
+#include <common/exception/exceptions.h>\r
+\r
+#if defined(_MSC_VER)\r
+#pragma warning (push)\r
+#pragma warning (disable : 4244)\r
+#endif\r
+extern "C" \r
+{\r
+	#include <libavcodec/avcodec.h>\r
+}\r
+#if defined(_MSC_VER)\r
+#pragma warning (pop)\r
+#endif\r
+\r
+\r
+namespace caspar {\r
+\r
+// Converts interleaved audio between channel counts, sample rates and sample\r
+// formats using libavcodec's ReSampleContext. When the input and output\r
+// parameters already match, no resampler is created and resample() is a\r
+// pass-through.\r
+struct audio_resampler::implementation\r
+{	\r
+	std::shared_ptr<ReSampleContext> resampler_;	// null => formats match, pass-through\r
+	\r
+	// Scratch buffer for audio_resample() output; swapped with the caller's buffer.\r
+	std::vector<int8_t, tbb::cache_aligned_allocator<int8_t>> buffer2_;\r
+\r
+	const size_t			output_channels_;\r
+	const AVSampleFormat	output_sample_format_;\r
+\r
+	const size_t			input_channels_;\r
+	const AVSampleFormat	input_sample_format_;\r
+\r
+	// Throws caspar_exception if libavcodec cannot provide a resampler for the\r
+	// requested conversion.\r
+	implementation(size_t output_channels, size_t input_channels, size_t output_sample_rate, size_t input_sample_rate, AVSampleFormat output_sample_format, AVSampleFormat input_sample_format)\r
+		: output_channels_(output_channels)\r
+		, output_sample_format_(output_sample_format)\r
+		, input_channels_(input_channels)\r
+		, input_sample_format_(input_sample_format)\r
+	{\r
+		if(input_channels		!= output_channels		|| \r
+		   input_sample_rate	!= output_sample_rate	||\r
+		   input_sample_format	!= output_sample_format)\r
+		{ \r
+			auto resampler = av_audio_resample_init(output_channels,	input_channels,\r
+													output_sample_rate, input_sample_rate,\r
+													output_sample_format, input_sample_format,\r
+													16, 10, 0, 0.8);\r
+\r
+			CASPAR_LOG(warning) << L"Resampling." <<\r
+								   L" sample_rate:" << input_sample_rate <<\r
+								   L" audio_channels:" << input_channels <<\r
+								   L" sample_fmt:" << input_sample_format;\r
+\r
+			if(resampler)\r
+				resampler_.reset(resampler, audio_resample_close);\r
+			else\r
+				BOOST_THROW_EXCEPTION(caspar_exception());\r
+		}	\r
+	}\r
+\r
+	// Consumes the input buffer and returns the converted audio. Returns the\r
+	// input unchanged when no conversion is needed.\r
+	std::vector<int8_t, tbb::cache_aligned_allocator<int8_t>> resample(std::vector<int8_t, tbb::cache_aligned_allocator<int8_t>>&& data)\r
+	{\r
+		if(resampler_)\r
+		{\r
+			buffer2_.resize(AVCODEC_MAX_AUDIO_FRAME_SIZE*2);\r
+			auto ret = audio_resample(resampler_.get(),\r
+									  reinterpret_cast<short*>(buffer2_.data()), \r
+									  reinterpret_cast<short*>(data.data()), \r
+									  data.size() / (av_get_bytes_per_sample(input_sample_format_) * input_channels_)); \r
+			buffer2_.resize(ret * av_get_bytes_per_sample(output_sample_format_) * output_channels_);\r
+			std::swap(data, buffer2_);\r
+		}\r
+\r
+		return std::move(data);\r
+	}\r
+};\r
+\r
+\r
+audio_resampler::audio_resampler(size_t output_channels, size_t input_channels, size_t output_sample_rate, size_t input_sample_rate, AVSampleFormat output_sample_format, AVSampleFormat input_sample_format)\r
+	: impl_(new implementation(output_channels, input_channels, output_sample_rate, input_sample_rate, output_sample_format, input_sample_format)){}\r
+std::vector<int8_t, tbb::cache_aligned_allocator<int8_t>> audio_resampler::resample(std::vector<int8_t, tbb::cache_aligned_allocator<int8_t>>&& data){return impl_->resample(std::move(data));}\r
+\r
+}
\ No newline at end of file
--- /dev/null
+#pragma once\r
+\r
+#include <memory>\r
+#include <vector>\r
+\r
+#include <tbb/cache_aligned_allocator.h>\r
+\r
+#include <libavutil/samplefmt.h>\r
+\r
+namespace caspar {\r
+\r
+// Wraps libavcodec audio resampling: converts interleaved audio between the\r
+// given channel counts, sample rates and sample formats. Acts as a\r
+// pass-through when the input and output formats already match.\r
+class audio_resampler\r
+{\r
+public:\r
+	audio_resampler(size_t output_channels,		size_t input_channels, \r
+					size_t output_sample_rate,	size_t input_sample_rate, \r
+					AVSampleFormat output_sample_format, AVSampleFormat input_sample_format);\r
+	\r
+	// Consumes the input buffer and returns the converted audio.\r
+	std::vector<int8_t, tbb::cache_aligned_allocator<int8_t>> resample(std::vector<int8_t, tbb::cache_aligned_allocator<int8_t>>&& data);\r
+private:\r
+	struct implementation;\r
+	std::shared_ptr<implementation> impl_;\r
+};\r
+\r
+}
\ No newline at end of file
struct frame_muxer::implementation : boost::noncopyable\r
{ \r
std::deque<std::queue<safe_ptr<write_frame>>> video_streams_;\r
- std::deque<std::vector<int16_t>> audio_streams_;\r
+ std::deque<std::vector<int32_t>> audio_streams_;\r
std::deque<safe_ptr<basic_frame>> frame_buffer_;\r
display_mode::type display_mode_;\r
const double in_fps_;\r
BOOST_THROW_EXCEPTION(invalid_operation() << source_info("frame_muxer") << msg_info("video-stream overflow. This can be caused by incorrect frame-rate. Check clip meta-data."));\r
}\r
\r
- void push(const std::shared_ptr<std::vector<int16_t>>& audio_samples)\r
+ void push(const std::shared_ptr<std::vector<int32_t>>& audio_samples)\r
{\r
if(!audio_samples) \r
{\r
CASPAR_LOG(debug) << L"audio-chunk-count: " << audio_sample_count_/format_desc_.audio_samples_per_frame;\r
- audio_streams_.push_back(std::vector<int16_t>());\r
+ audio_streams_.push_back(std::vector<int32_t>());\r
audio_sample_count_ = 0;\r
return;\r
}\r
return frame;\r
}\r
\r
- std::vector<int16_t> pop_audio()\r
+ std::vector<int32_t> pop_audio()\r
{\r
CASPAR_VERIFY(audio_streams_.front().size() >= format_desc_.audio_samples_per_frame);\r
\r
auto begin = audio_streams_.front().begin();\r
auto end = begin + format_desc_.audio_samples_per_frame;\r
\r
- auto samples = std::vector<int16_t>(begin, end);\r
+ auto samples = std::vector<int32_t>(begin, end);\r
audio_streams_.front().erase(begin, end);\r
\r
return samples;\r
frame_muxer::frame_muxer(double in_fps, const safe_ptr<core::frame_factory>& frame_factory)\r
: impl_(new implementation(in_fps, frame_factory)){}\r
void frame_muxer::push(const std::shared_ptr<AVFrame>& video_frame, int hints){impl_->push(video_frame, hints);}\r
-void frame_muxer::push(const std::shared_ptr<std::vector<int16_t>>& audio_samples){return impl_->push(audio_samples);}\r
+void frame_muxer::push(const std::shared_ptr<std::vector<int32_t>>& audio_samples){return impl_->push(audio_samples);}\r
void frame_muxer::commit(){impl_->commit();}\r
safe_ptr<basic_frame> frame_muxer::pop(){return impl_->pop();}\r
size_t frame_muxer::size() const {return impl_->size();}\r
frame_muxer(double in_fps, const safe_ptr<core::frame_factory>& frame_factory);\r
\r
void push(const std::shared_ptr<AVFrame>& video_frame, int hints = 0);\r
- void push(const std::shared_ptr<std::vector<int16_t>>& audio_samples);\r
+ void push(const std::shared_ptr<std::vector<int32_t>>& audio_samples);\r
\r
void commit();\r
\r
safe_ptr<diagnostics::graph> graph_;\r
boost::timer perf_timer_;\r
\r
- tbb::concurrent_bounded_queue<std::vector<short>> input_;\r
+ tbb::concurrent_bounded_queue<std::shared_ptr<std::vector<short>>> input_;\r
boost::circular_buffer<std::vector<short>> container_;\r
tbb::atomic<bool> is_running_;\r
\r
~oal_consumer()\r
{\r
is_running_ = false;\r
- input_.try_push(std::vector<short>());\r
- input_.try_push(std::vector<short>());\r
+ input_.try_push(std::make_shared<std::vector<short>>());\r
+ input_.try_push(std::make_shared<std::vector<short>>());\r
Stop();\r
CASPAR_LOG(info) << print() << L" Shutting down."; \r
}\r
{ \r
if(preroll_count_ < input_.capacity())\r
{\r
- while(input_.try_push(std::vector<int16_t>(format_desc_.audio_samples_per_frame, 0)))\r
+ while(input_.try_push(std::make_shared<std::vector<int16_t>>(format_desc_.audio_samples_per_frame, 0)))\r
++preroll_count_;\r
Play(); \r
}\r
\r
- input_.push(std::vector<int16_t>(frame->audio_data().begin(), frame->audio_data().end())); \r
+ std::vector<int16_t> audio16(frame->audio_data().size());\r
+ for(size_t n = 0; n < audio16.size(); ++n) \r
+ audio16[n] = (frame->audio_data()[n] >> 16) & 0xffff; \r
+\r
+ input_.push(std::make_shared<std::vector<int16_t>>(std::move(audio16)));\r
\r
return true;\r
}\r
\r
virtual bool OnGetData(sf::SoundStream::Chunk& data)\r
{ \r
- std::vector<short> audio_data; \r
+ std::shared_ptr<std::vector<short>> audio_data; \r
input_.pop(audio_data);\r
\r
- container_.push_back(std::move(audio_data));\r
+ container_.push_back(std::move(*audio_data));\r
data.Samples = container_.back().data();\r
data.NbSamples = container_.back().size(); \r
\r