* Reimplemented multichannel audio support from 2.0.7 but using ffmpeg's pan filter.
- Fixed bug in caspar::array where it assumed std::uint8_t instead of T.
- Started using caspar::array for audio as well, to allow for AVFrame to be the storage to avoid unnecessary copying when pan filtering audio.
* Made win32_exception a caspar_exception to enable full stack trace information when an access violation or similar occurs.
* FFMpeg Consumer now sends more data via OSC to enable clients to indicate recording progress.
set(OPENAL_INCLUDE_PATH "${DEPENDENCIES_FOLDER}/openal/include")
set(BLUEFISH_INCLUDE_PATH "${DEPENDENCIES_FOLDER}/bluefish/include")
set(CEF_INCLUDE_PATH "${DEPENDENCIES_FOLDER}/cef/include")
+set(GTEST_INCLUDE_PATH "${DEPENDENCIES_FOLDER}/gtest/include")
if (MSVC)
set(PLATFORM_FOLDER_NAME "win32")
link_directories("${DEPENDENCIES_FOLDER}/bluefish/lib")
link_directories("${DEPENDENCIES_FOLDER}/zlib/lib")
link_directories("${DEPENDENCIES_FOLDER}/cef/lib/${PLATFORM_FOLDER_NAME}")
+link_directories("${DEPENDENCIES_FOLDER}/gtest/lib/${PLATFORM_FOLDER_NAME}")
set_property(GLOBAL PROPERTY USE_FOLDERS ON)
add_subdirectory(protocol)
add_subdirectory(shell)
+add_subdirectory(unit-test)
return renderer_(std::move(items_), format_desc);
}
- core::mutable_frame create_frame(const void* tag, const core::pixel_format_desc& desc)
+ core::mutable_frame create_frame(const void* tag, const core::pixel_format_desc& desc, const core::audio_channel_layout& channel_layout)
{
std::vector<array<std::uint8_t>> buffers;
for (auto& plane : desc.planes)
auto buf = spl::make_shared<buffer>(plane.size);
buffers.push_back(array<std::uint8_t>(buf->data(), plane.size, true, buf));
}
- return core::mutable_frame(std::move(buffers), core::audio_buffer(), tag, desc);
+ return core::mutable_frame(std::move(buffers), core::mutable_audio_buffer(), tag, desc, channel_layout);
}
};
void image_mixer::visit(const core::const_frame& frame){impl_->visit(frame);}
void image_mixer::pop(){impl_->pop();}
std::future<array<const std::uint8_t>> image_mixer::operator()(const core::video_format_desc& format_desc, bool /* straighten_alpha */){return impl_->render(format_desc);}
-core::mutable_frame image_mixer::create_frame(const void* tag, const core::pixel_format_desc& desc) {return impl_->create_frame(tag, desc);}
+core::mutable_frame image_mixer::create_frame(const void* tag, const core::pixel_format_desc& desc, const core::audio_channel_layout& channel_layout) {return impl_->create_frame(tag, desc, channel_layout);}
}}}
std::future<array<const std::uint8_t>> operator()(const core::video_format_desc& format_desc, bool straighten_alpha) override;
- core::mutable_frame create_frame(const void* tag, const core::pixel_format_desc& desc) override;
+ core::mutable_frame create_frame(const void* tag, const core::pixel_format_desc& desc, const core::audio_channel_layout& channel_layout) override;
// Properties
return renderer_(std::move(layers_), format_desc, straighten_alpha);
}
- core::mutable_frame create_frame(const void* tag, const core::pixel_format_desc& desc) override
+ core::mutable_frame create_frame(const void* tag, const core::pixel_format_desc& desc, const core::audio_channel_layout& channel_layout) override
{
std::vector<array<std::uint8_t>> buffers;
for (auto& plane : desc.planes)
buffers.push_back(ogl_->create_array(plane.size));
- return core::mutable_frame(std::move(buffers), core::audio_buffer(), tag, desc);
+ return core::mutable_frame(std::move(buffers), core::mutable_audio_buffer(), tag, desc, channel_layout);
}
};
void image_mixer::visit(const core::const_frame& frame){impl_->visit(frame);}
void image_mixer::pop(){impl_->pop();}
std::future<array<const std::uint8_t>> image_mixer::operator()(const core::video_format_desc& format_desc, bool straighten_alpha){return impl_->render(format_desc, straighten_alpha);}
-core::mutable_frame image_mixer::create_frame(const void* tag, const core::pixel_format_desc& desc) {return impl_->create_frame(tag, desc);}
+core::mutable_frame image_mixer::create_frame(const void* tag, const core::pixel_format_desc& desc, const core::audio_channel_layout& channel_layout) {return impl_->create_frame(tag, desc, channel_layout);}
}}}
// Methods
std::future<array<const std::uint8_t>> operator()(const core::video_format_desc& format_desc, bool straighten_alpha) override;
- core::mutable_frame create_frame(const void* tag, const core::pixel_format_desc& desc) override;
+ core::mutable_frame create_frame(const void* tag, const core::pixel_format_desc& desc, const core::audio_channel_layout& channel_layout) override;
// core::image_mixer
template<typename T>
class array final
{
- array(const array<std::uint8_t>&);
- array& operator=(const array<std::uint8_t>&);
+ array(const array<T>&);
+ array& operator=(const array<T>&);
template<typename> friend class array;
public:
+ // Boost Range support
+
+ typedef T* iterator;
+ typedef const T* const_iterator;
+
// Static Members
// Constructors
template<typename T2>
- explicit array(std::uint8_t* ptr, std::size_t size, bool cacheable, T2&& storage)
+ explicit array(T* ptr, std::size_t size, bool cacheable, T2&& storage)
: ptr_(ptr)
, size_(size)
, cacheable_(cacheable)
T* begin() const {return ptr_;}
T* data() const {return ptr_;}
- T* end() const {return reinterpret_cast<T*>(reinterpret_cast<char*>(ptr_) + size_);}
+ T* end() const {return ptr_ + size_;}
std::size_t size() const {return size_;}
bool empty() const {return size() == 0;}
bool cacheable() const {return cacheable_;}
return boost::any_cast<T2>(storage_.get());
}
private:
- T* ptr_;
- std::size_t size_;
- bool cacheable_;
+ T* ptr_;
+ std::size_t size_;
+ bool cacheable_;
std::unique_ptr<boost::any> storage_;
};
{
public:
+ // Boost Range support
+
+ typedef const T* iterator;
+ typedef const T* const_iterator;
+
// Static Members
// Constructors
array() = default; // Needed by std::future
template<typename T2>
- explicit array(const std::uint8_t* ptr, std::size_t size, bool cacheable, T2&& storage)
+ explicit array(const T* ptr, std::size_t size, bool cacheable, T2&& storage)
: ptr_(ptr)
, size_(size)
, cacheable_(cacheable)
{
}
- explicit array(const std::uint8_t* ptr, std::size_t size, bool cacheable)
+ explicit array(const T* ptr, std::size_t size, bool cacheable)
: ptr_(ptr)
, size_(size)
, cacheable_(cacheable)
const T* begin() const {return ptr_;}
const T* data() const {return ptr_;}
- const T* end() const {return reinterpret_cast<const T*>(reinterpret_cast<const char*>(ptr_) + size_);}
+ const T* end() const {return ptr_ + size_;}
std::size_t size() const {return size_;}
bool empty() const {return size() == 0;}
bool cacheable() const {return cacheable_;}
#pragma once
#include "log.h"
+#include "except.h"
#ifdef _MSC_VER
#define _CASPAR_DBG_BREAK _CrtDbgBreak()
_CASPAR_DBG_BREAK;\
}}while(0);
+#define CASPAR_ENSURE(expr) do{if(!(expr)){ CASPAR_THROW_EXCEPTION(programming_error() << msg_info(std::string("Assertion Failed: ") + CASPAR_VERIFY_EXPR_STR(expr))); \
+ }}while(0);
+
#ifdef _DEBUG
#define CASPAR_ASSERT(expr) CASPAR_VERIFY(expr)
#else
struct caspar_exception : virtual boost::exception, virtual std::exception
{
caspar_exception(){}
- explicit caspar_exception(const char* msg) : msg_(msg) {}
const char* what() const throw() override
{
- return msg_;
+ return boost::diagnostic_information_what(*this);
}
-private:
- const char* msg_ = "";
};
struct io_error : virtual caspar_exception {};
diagnostics/osd_graph.cpp
diagnostics/subject_diagnostics.cpp
+ frame/audio_channel_layout.cpp
frame/draw_frame.cpp
frame/frame.cpp
frame/frame_transform.cpp
producer/color/color_producer.cpp
- producer/draw/freehand_producer.cpp
-
producer/media_info/in_memory_media_info_repository.cpp
producer/scene/const_producer.cpp
diagnostics/osd_graph.h
diagnostics/subject_diagnostics.h
+ frame/audio_channel_layout.h
frame/draw_frame.h
frame/frame.h
frame/frame_factory.h
producer/color/color_producer.h
- producer/draw/freehand_producer.h
-
producer/media_info/in_memory_media_info_repository.h
producer/media_info/media_info.h
producer/media_info/media_info_repository.h
source_group(sources\\help help/*)
source_group(sources\\interaction interaction/*)
source_group(sources\\mixer mixer/*)
-source_group(sources\\producer\\draw producer/draw/*)
source_group(sources\\producer\\media_info producer/media_info/*)
source_group(sources\\producer\\scene producer/scene/*)
source_group(sources\\producer\\text\\utils producer/text/utils/*)
#include <core/video_format.h>
#include <core/frame/frame.h>
+#include <core/frame/audio_channel_layout.h>
#include <boost/thread.hpp>
}).detach();
}
- std::future<bool> send(const_frame frame) override {return consumer_->send(std::move(frame));}
- virtual void initialize(const video_format_desc& format_desc, int channel_index) override {return consumer_->initialize(format_desc, channel_index);}
- std::wstring print() const override {return consumer_->print();}
- std::wstring name() const override {return consumer_->name();}
- boost::property_tree::wptree info() const override {return consumer_->info();}
- bool has_synchronization_clock() const override {return consumer_->has_synchronization_clock();}
- int buffer_depth() const override {return consumer_->buffer_depth();}
- int index() const override {return consumer_->index();}
- int64_t presentation_frame_age_millis() const override {return consumer_->presentation_frame_age_millis();}
- monitor::subject& monitor_output() override {return consumer_->monitor_output();}
+ std::future<bool> send(const_frame frame) override {return consumer_->send(std::move(frame));}
+ void initialize(const video_format_desc& format_desc, const audio_channel_layout& channel_layout, int channel_index) override {return consumer_->initialize(format_desc, channel_layout, channel_index);}
+ std::wstring print() const override {return consumer_->print();}
+ std::wstring name() const override {return consumer_->name();}
+ boost::property_tree::wptree info() const override {return consumer_->info();}
+ bool has_synchronization_clock() const override {return consumer_->has_synchronization_clock();}
+ int buffer_depth() const override {return consumer_->buffer_depth();}
+ int index() const override {return consumer_->index();}
+ int64_t presentation_frame_age_millis() const override {return consumer_->presentation_frame_age_millis();}
+ monitor::subject& monitor_output() override {return consumer_->monitor_output();}
};
class print_consumer_proxy : public frame_consumer
CASPAR_LOG(info) << str << L" Uninitialized.";
}
- std::future<bool> send(const_frame frame) override {return consumer_->send(std::move(frame));}
- virtual void initialize(const video_format_desc& format_desc, int channel_index) override {return consumer_->initialize(format_desc, channel_index);}
- std::wstring print() const override {return consumer_->print();}
- std::wstring name() const override {return consumer_->name();}
- boost::property_tree::wptree info() const override {return consumer_->info();}
- bool has_synchronization_clock() const override {return consumer_->has_synchronization_clock();}
- int buffer_depth() const override {return consumer_->buffer_depth();}
- int index() const override {return consumer_->index();}
- int64_t presentation_frame_age_millis() const override {return consumer_->presentation_frame_age_millis();}
- monitor::subject& monitor_output() override {return consumer_->monitor_output();}
+ std::future<bool> send(const_frame frame) override {return consumer_->send(std::move(frame));}
+ void initialize(const video_format_desc& format_desc, const audio_channel_layout& channel_layout, int channel_index) override { return consumer_->initialize(format_desc, channel_layout, channel_index); }
+ std::wstring print() const override {return consumer_->print();}
+ std::wstring name() const override {return consumer_->name();}
+ boost::property_tree::wptree info() const override {return consumer_->info();}
+ bool has_synchronization_clock() const override {return consumer_->has_synchronization_clock();}
+ int buffer_depth() const override {return consumer_->buffer_depth();}
+ int index() const override {return consumer_->index();}
+ int64_t presentation_frame_age_millis() const override {return consumer_->presentation_frame_age_millis();}
+ monitor::subject& monitor_output() override {return consumer_->monitor_output();}
};
class recover_consumer_proxy : public frame_consumer
std::shared_ptr<frame_consumer> consumer_;
int channel_index_ = -1;
video_format_desc format_desc_;
+ audio_channel_layout channel_layout_ = audio_channel_layout::invalid();
public:
recover_consumer_proxy(spl::shared_ptr<frame_consumer>&& consumer)
: consumer_(std::move(consumer))
{
}
- virtual std::future<bool> send(const_frame frame)
+ std::future<bool> send(const_frame frame) override
{
try
{
CASPAR_LOG_CURRENT_EXCEPTION();
try
{
- consumer_->initialize(format_desc_, channel_index_);
+ consumer_->initialize(format_desc_, channel_layout_, channel_index_);
return consumer_->send(frame);
}
catch(...)
}
}
- virtual void initialize(const video_format_desc& format_desc, int channel_index)
+ void initialize(const video_format_desc& format_desc, const audio_channel_layout& channel_layout, int channel_index) override
{
format_desc_ = format_desc;
+ channel_layout_ = channel_layout;
channel_index_ = channel_index;
- return consumer_->initialize(format_desc, channel_index);
+ return consumer_->initialize(format_desc, channel_layout, channel_index);
}
std::wstring print() const override {return consumer_->print();}
spl::shared_ptr<frame_consumer> consumer_;
std::vector<int> audio_cadence_;
video_format_desc format_desc_;
+ audio_channel_layout channel_layout_ = audio_channel_layout::invalid();
boost::circular_buffer<std::size_t> sync_buffer_;
public:
cadence_guard(const spl::shared_ptr<frame_consumer>& consumer)
{
}
- void initialize(const video_format_desc& format_desc, int channel_index) override
+ void initialize(const video_format_desc& format_desc, const audio_channel_layout& channel_layout, int channel_index) override
{
audio_cadence_ = format_desc.audio_cadence;
sync_buffer_ = boost::circular_buffer<std::size_t>(format_desc.audio_cadence.size());
format_desc_ = format_desc;
- consumer_->initialize(format_desc, channel_index);
+ channel_layout_ = channel_layout;
+ consumer_->initialize(format_desc, channel_layout, channel_index);
}
std::future<bool> send(const_frame frame) override
std::future<bool> result = make_ready_future(true);
- if(boost::range::equal(sync_buffer_, audio_cadence_) && audio_cadence_.front() * format_desc_.audio_channels == static_cast<int>(frame.audio_data().size()))
+ if(boost::range::equal(sync_buffer_, audio_cadence_) && audio_cadence_.front() * channel_layout_.num_channels == static_cast<int>(frame.audio_data().size()))
{
// Audio sent so far is in sync, now we can send the next chunk.
result = consumer_->send(frame);
else
CASPAR_LOG(trace) << print() << L" Syncing audio.";
- sync_buffer_.push_back(static_cast<int>(frame.audio_data().size() / format_desc_.audio_channels));
+ sync_buffer_.push_back(static_cast<int>(frame.audio_data().size() / channel_layout_.num_channels));
return std::move(result);
}
{
public:
std::future<bool> send(const_frame) override { return make_ready_future(false); }
- void initialize(const video_format_desc&, int) override{}
+ void initialize(const video_format_desc&, const audio_channel_layout&, int) override{}
std::wstring print() const override {return L"empty";}
std::wstring name() const override {return L"empty";}
bool has_synchronization_clock() const override {return false;}
int buffer_depth() const override {return 0;};
int index() const override {return -1;}
int64_t presentation_frame_age_millis() const override {return -1;}
- monitor::subject& monitor_output() override {static monitor::subject monitor_subject(""); return monitor_subject;}
+ monitor::subject& monitor_output() override {static monitor::subject monitor_subject(""); return monitor_subject;}
boost::property_tree::wptree info() const override
{
boost::property_tree::wptree info;
// Methods
virtual std::future<bool> send(const_frame frame) = 0;
- virtual void initialize(const video_format_desc& format_desc, int channel_index) = 0;
+ virtual void initialize(const video_format_desc& format_desc, const audio_channel_layout& channel_layout, int channel_index) = 0;
// monitor::observable
#include "../video_format.h"
#include "../frame/frame.h"
+#include "../frame/audio_channel_layout.h"
#include <common/assert.h>
#include <common/future.h>
spl::shared_ptr<monitor::subject> monitor_subject_ = spl::make_shared<monitor::subject>("/output");
const int channel_index_;
video_format_desc format_desc_;
- std::map<int, port> ports_;
+ audio_channel_layout channel_layout_;
+ std::map<int, port> ports_;
prec_timer sync_timer_;
boost::circular_buffer<const_frame> frames_;
std::map<int, int64_t> send_to_consumers_delays_;
executor executor_ { L"output " + boost::lexical_cast<std::wstring>(channel_index_) };
public:
- impl(spl::shared_ptr<diagnostics::graph> graph, const video_format_desc& format_desc, int channel_index)
+ impl(spl::shared_ptr<diagnostics::graph> graph, const video_format_desc& format_desc, const audio_channel_layout& channel_layout, int channel_index)
: graph_(std::move(graph))
, channel_index_(channel_index)
, format_desc_(format_desc)
+ , channel_layout_(channel_layout)
{
graph_->set_color("consume-time", diagnostics::color(1.0f, 0.4f, 0.0f, 0.8f));
}
{
remove(index);
- consumer->initialize(format_desc_, channel_index_);
+ consumer->initialize(format_desc_, channel_layout_, channel_index_);
executor_.begin_invoke([this, index, consumer]
{
remove(consumer->index());
}
- void set_video_format_desc(const core::video_format_desc& format_desc)
+ void change_channel_format(const core::video_format_desc& format_desc, const core::audio_channel_layout& channel_layout)
{
executor_.invoke([&]
{
- if(format_desc_ == format_desc)
+ if(format_desc_ == format_desc && channel_layout_ == channel_layout)
return;
auto it = ports_.begin();
{
try
{
- it->second.video_format_desc(format_desc);
+ it->second.change_channel_format(format_desc, channel_layout);
++it;
}
catch(...)
}
format_desc_ = format_desc;
+ channel_layout_ = channel_layout;
frames_.clear();
});
}
.any();
}
- void operator()(const_frame input_frame, const core::video_format_desc& format_desc)
+ void operator()(const_frame input_frame, const core::video_format_desc& format_desc, const core::audio_channel_layout& channel_layout)
{
caspar::timer frame_timer;
- set_video_format_desc(format_desc);
+ change_channel_format(format_desc, channel_layout);
executor_.invoke([=]
{
}
};
-output::output(spl::shared_ptr<diagnostics::graph> graph, const video_format_desc& format_desc, int channel_index) : impl_(new impl(std::move(graph), format_desc, channel_index)){}
+output::output(spl::shared_ptr<diagnostics::graph> graph, const video_format_desc& format_desc, const core::audio_channel_layout& channel_layout, int channel_index) : impl_(new impl(std::move(graph), format_desc, channel_layout, channel_index)){}
void output::add(int index, const spl::shared_ptr<frame_consumer>& consumer){impl_->add(index, consumer);}
void output::add(const spl::shared_ptr<frame_consumer>& consumer){impl_->add(consumer);}
void output::remove(int index){impl_->remove(index);}
void output::remove(const spl::shared_ptr<frame_consumer>& consumer){impl_->remove(consumer);}
std::future<boost::property_tree::wptree> output::info() const{return impl_->info();}
std::future<boost::property_tree::wptree> output::delay_info() const{ return impl_->delay_info(); }
-void output::operator()(const_frame frame, const video_format_desc& format_desc){ (*impl_)(std::move(frame), format_desc); }
+void output::operator()(const_frame frame, const video_format_desc& format_desc, const core::audio_channel_layout& channel_layout){ (*impl_)(std::move(frame), format_desc, channel_layout); }
monitor::subject& output::monitor_output() {return *impl_->monitor_subject_;}
}}
// Constructors
- explicit output(spl::shared_ptr<diagnostics::graph> graph, const video_format_desc& format_desc, int channel_index);
+ explicit output(spl::shared_ptr<caspar::diagnostics::graph> graph, const video_format_desc& format_desc, const core::audio_channel_layout& channel_layout, int channel_index);
// Methods
- void operator()(const_frame frame, const video_format_desc& format_desc);
+ void operator()(const_frame frame, const video_format_desc& format_desc, const core::audio_channel_layout& channel_layout);
void add(const spl::shared_ptr<frame_consumer>& consumer);
void add(int index, const spl::shared_ptr<frame_consumer>& consumer);
struct port::impl
{
int index_;
- spl::shared_ptr<monitor::subject> monitor_subject_ = spl::make_shared<monitor::subject>("/port" + boost::lexical_cast<std::string>(index_));
+ spl::shared_ptr<monitor::subject> monitor_subject_ = spl::make_shared<monitor::subject>("/port/" + boost::lexical_cast<std::string>(index_));
std::shared_ptr<frame_consumer> consumer_;
int channel_index_;
public:
consumer_->monitor_output().attach_parent(monitor_subject_);
}
- void video_format_desc(const core::video_format_desc& format_desc)
+ void change_channel_format(const core::video_format_desc& format_desc, const audio_channel_layout& channel_layout)
{
- consumer_->initialize(format_desc, channel_index_);
+ consumer_->initialize(format_desc, channel_layout, channel_index_);
}
std::future<bool> send(const_frame frame)
port& port::operator=(port&& other){impl_ = std::move(other.impl_); return *this;}
std::future<bool> port::send(const_frame frame){return impl_->send(std::move(frame));}
monitor::subject& port::monitor_output() {return *impl_->monitor_subject_;}
-void port::video_format_desc(const core::video_format_desc& format_desc){impl_->video_format_desc(format_desc);}
+void port::change_channel_format(const core::video_format_desc& format_desc, const audio_channel_layout& channel_layout){impl_->change_channel_format(format_desc, channel_layout);}
int port::buffer_depth() const{return impl_->buffer_depth();}
std::wstring port::print() const{ return impl_->print();}
bool port::has_synchronization_clock() const{return impl_->has_synchronization_clock();}
// Properties
- void video_format_desc(const video_format_desc& format_desc);
+ void change_channel_format(const video_format_desc& format_desc, const audio_channel_layout& channel_layout);
std::wstring print() const;
int buffer_depth() const;
bool has_synchronization_clock() const;
--- /dev/null
+/*
+* Copyright (c) 2011 Sveriges Television AB <info@casparcg.com>
+*
+* This file is part of CasparCG (www.casparcg.com).
+*
+* CasparCG is free software: you can redistribute it and/or modify
+* it under the terms of the GNU General Public License as published by
+* the Free Software Foundation, either version 3 of the License, or
+* (at your option) any later version.
+*
+* CasparCG is distributed in the hope that it will be useful,
+* but WITHOUT ANY WARRANTY; without even the implied warranty of
+* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+* GNU General Public License for more details.
+*
+* You should have received a copy of the GNU General Public License
+* along with CasparCG. If not, see <http://www.gnu.org/licenses/>.
+*
+* Author: Helge Norberg, helge.norberg@svt.se
+*/
+
+#include "../StdAfx.h"
+
+#include "audio_channel_layout.h"
+
+#include <boost/algorithm/string/split.hpp>
+#include <boost/algorithm/string/join.hpp>
+#include <boost/range/algorithm/equal.hpp>
+#include <boost/lexical_cast.hpp>
+#include <boost/thread/mutex.hpp>
+#include <boost/property_tree/ptree.hpp>
+
+#include <map>
+
+namespace caspar { namespace core {
+
+// Constructs the special "invalid" layout: zero channels, empty type and
+// channel order. Only reachable via audio_channel_layout::invalid(), since
+// this constructor is private in the header.
+audio_channel_layout::audio_channel_layout()
+	: num_channels(0)
+{
+}
+
+// Constructs a layout with |num_channels| channels of the given |type_|
+// (upper-cased before storage) and an optional space separated list of
+// channel names in |channel_order_|.
+// Throws invalid_argument if num_channels < 1, if channel_order_ contains
+// characters reserved for the mix-config syntax, or if more channel names
+// than num_channels are given.
+audio_channel_layout::audio_channel_layout(int num_channels, std::wstring type_, const std::wstring& channel_order_)
+	: num_channels(num_channels)
+	, type(std::move(type_))
+{
+	if (num_channels < 1)
+		CASPAR_THROW_EXCEPTION(invalid_argument() << msg_info(L"num_channels cannot be less than 1"));
+
+	// Reject characters reserved by the mix-config expression syntax.
+	if (boost::contains(channel_order_, L"=") ||
+		boost::contains(channel_order_, L"<") ||
+		boost::contains(channel_order_, L"+") ||
+		boost::contains(channel_order_, L"*") ||
+		boost::contains(channel_order_, L"|"))
+	{
+		CASPAR_THROW_EXCEPTION(invalid_argument() << msg_info(
+				channel_order_ + L" contains illegal characters =<+*| reserved for mix config syntax"));
+	}
+
+	boost::to_upper(type);
+	boost::split(channel_order, channel_order_, boost::is_any_of(L" "), boost::algorithm::token_compress_on);
+
+	// Splitting an empty string yields one empty token; normalize to an empty order.
+	if (channel_order.size() == 1 && channel_order.front().empty())
+		channel_order.clear();
+
+	// static_cast avoids a signed/unsigned comparison warning (num_channels >= 1 here).
+	if (static_cast<int>(channel_order.size()) > num_channels)
+		CASPAR_THROW_EXCEPTION(invalid_argument() << msg_info(
+			channel_order_ + L" contains more than " + boost::lexical_cast<std::wstring>(num_channels) + L" channels"));
+}
+
+// Returns every channel index whose name equals |channel_name| (a name may
+// occur more than once in channel_order). Returns an empty vector when the
+// name is not present.
+std::vector<int> audio_channel_layout::indexes_of(const std::wstring& channel_name) const
+{
+	std::vector<int> result;
+
+	// static_cast avoids a signed/unsigned comparison warning; indexes are
+	// returned as int to match the rest of the API.
+	for (int i = 0; i < static_cast<int>(channel_order.size()); ++i)
+		if (channel_name == channel_order.at(i))
+			result.push_back(i);
+
+	return result;
+}
+
+// Human readable description for diagnostics/logging.
+std::wstring audio_channel_layout::print() const
+{
+	return L"[audio_channel_layout] num_channels=" + boost::lexical_cast<std::wstring>(num_channels)
+			+ L" type=" + type
+			+ L" channel_order=" + boost::join(channel_order, L" ");
+}
+
+// Shared sentinel layout (0 channels) used where no real layout has been
+// assigned yet; see e.g. the consumer proxies that default-initialize with it.
+const audio_channel_layout& audio_channel_layout::invalid()
+{
+	static const audio_channel_layout instance;
+
+	return instance;
+}
+
+// Two layouts are equal when channel count, type and channel order all match.
+bool operator==(const audio_channel_layout& lhs, const audio_channel_layout& rhs)
+{
+	if (lhs.num_channels != rhs.num_channels)
+		return false;
+
+	if (lhs.type != rhs.type)
+		return false;
+
+	return lhs.channel_order == rhs.channel_order; // std::vector element-wise comparison.
+}
+
+bool operator!=(const audio_channel_layout& lhs, const audio_channel_layout& rhs)
+{
+	return !(lhs == rhs);
+}
+
+// Internal state: a mutex-guarded map from upper-cased layout name to layout.
+struct audio_channel_layout_repository::impl
+{
+	mutable boost::mutex mutex_;
+	std::map<std::wstring, audio_channel_layout> layouts_;
+};
+
+audio_channel_layout_repository::audio_channel_layout_repository()
+	: impl_(new impl)
+{
+}
+
+// Registers |layout| under |name| (case-insensitive; name is upper-cased
+// before storage).
+// NOTE(review): std::map::insert keeps the first entry, so re-registering an
+// existing name is silently ignored — confirm duplicates are meant to be
+// ignored rather than overwritten.
+void audio_channel_layout_repository::register_layout(std::wstring name, audio_channel_layout layout)
+{
+	auto& self = *impl_;
+	boost::lock_guard<boost::mutex> lock(self.mutex_);
+
+	boost::to_upper(name);
+	self.layouts_.insert(std::make_pair(std::move(name), std::move(layout)));
+}
+
+// Bulk-registers layouts from a property tree whose children are
+// <channel-layout name="..." type="..." num-channels="..." [channel-order="..."]/>
+// elements. Missing name/type/num-channels attributes make ptree::get throw;
+// channel-order is optional and defaults to the empty string (no named channels).
+void audio_channel_layout_repository::register_all_layouts(const boost::property_tree::wptree& layouts)
+{
+	auto& self = *impl_;
+	boost::lock_guard<boost::mutex> lock(self.mutex_);
+
+	for (auto& layout : layouts)
+	{
+		// Only <channel-layout> elements are accepted at this level.
+		CASPAR_VERIFY(layout.first == L"channel-layout");
+
+		auto name = layout.second.get<std::wstring>(L"<xmlattr>.name");
+		auto type = layout.second.get<std::wstring>(L"<xmlattr>.type");
+		auto num_channels = layout.second.get<int>(L"<xmlattr>.num-channels");
+		auto channel_order = layout.second.get<std::wstring>(L"<xmlattr>.channel-order", L"");
+
+		boost::to_upper(name);
+		// Like register_layout(): an already registered name is kept as-is.
+		self.layouts_.insert(std::make_pair(
+			std::move(name),
+			audio_channel_layout(num_channels, std::move(type), channel_order)));
+	}
+}
+
+// Case-insensitive lookup; returns boost::none when no layout has been
+// registered under |name|.
+boost::optional<audio_channel_layout> audio_channel_layout_repository::get_layout(const std::wstring& name) const
+{
+	auto& self = *impl_;
+	boost::lock_guard<boost::mutex> lock(self.mutex_);
+
+	auto match = self.layouts_.find(boost::to_upper_copy(name));
+
+	return match == self.layouts_.end()
+			? boost::optional<audio_channel_layout>()
+			: boost::optional<audio_channel_layout>(match->second);
+}
+
+// Process-wide default repository. Function-local static gives thread-safe
+// one-time initialization; all callers share the same instance.
+spl::shared_ptr<audio_channel_layout_repository> audio_channel_layout_repository::get_default()
+{
+	static spl::shared_ptr<audio_channel_layout_repository> instance;
+
+	return instance;
+}
+
+// Internal state: a mutex-guarded two-level map
+// from-type -> (to-type -> mix config string), all keys upper-cased.
+struct audio_mix_config_repository::impl
+{
+	mutable boost::mutex mutex_;
+	std::map<std::wstring, std::map<std::wstring, std::wstring>> from_to_configs_;
+};
+
+audio_mix_config_repository::audio_mix_config_repository()
+	: impl_(new impl)
+{
+}
+
+// Registers the same |mix_config| for every (from_type, to_type) pair.
+// Keys are upper-cased; an existing entry is overwritten.
+void audio_mix_config_repository::register_config(
+		const std::wstring& from_type,
+		const std::vector<std::wstring>& to_types,
+		const std::wstring& mix_config)
+{
+	auto& self = *impl_;
+	boost::lock_guard<boost::mutex> lock(self.mutex_);
+
+	auto from_upper = boost::to_upper_copy(from_type);
+
+	for (auto& to_type : to_types)
+		self.from_to_configs_[from_upper][boost::to_upper_copy(to_type)] = mix_config;
+}
+
+// Bulk-registers mix configurations from a property tree whose children are
+// <mix-config from-type="..." to-types="A,B,..." mix="..."/> elements.
+// Missing attributes make ptree::get throw.
+void audio_mix_config_repository::register_all_configs(const boost::property_tree::wptree& configs)
+{
+	auto& self = *impl_;
+	boost::lock_guard<boost::mutex> lock(self.mutex_);
+
+	for (auto& config : configs)
+	{
+		// Only <mix-config> elements are accepted at this level.
+		CASPAR_VERIFY(config.first == L"mix-config");
+
+		auto from_type = config.second.get<std::wstring>(L"<xmlattr>.from-type");
+		auto to_types_str = config.second.get<std::wstring>(L"<xmlattr>.to-types");
+		auto mix_config = config.second.get<std::wstring>(L"<xmlattr>.mix");
+
+		boost::to_upper(from_type);
+		std::vector<std::wstring> to_types;
+		// token_compress_off keeps empty tokens; they are trimmed and skipped below.
+		boost::split(to_types, to_types_str, boost::is_any_of(L","), boost::algorithm::token_compress_off);
+
+		for (auto& to_type : to_types)
+		{
+			boost::trim(to_type);
+
+			if (to_type.empty())
+				continue;
+
+			boost::to_upper(to_type);
+			self.from_to_configs_[from_type][to_type] = mix_config;
+		}
+	}
+}
+
+// Case-insensitive lookup of the mix config for (from_type, to_type);
+// returns boost::none when no config is registered for the pair.
+boost::optional<std::wstring> audio_mix_config_repository::get_config(
+		const std::wstring& from_type,
+		const std::wstring& to_type) const
+{
+	auto& self = *impl_;
+	boost::lock_guard<boost::mutex> lock(self.mutex_);
+
+	auto outer = self.from_to_configs_.find(boost::to_upper_copy(from_type));
+
+	if (outer == self.from_to_configs_.end())
+		return boost::none;
+
+	auto inner = outer->second.find(boost::to_upper_copy(to_type));
+
+	return inner == outer->second.end()
+			? boost::optional<std::wstring>()
+			: boost::optional<std::wstring>(inner->second);
+}
+
+// Process-wide default mix-config repository shared by all modules.
+spl::shared_ptr<audio_mix_config_repository> audio_mix_config_repository::get_default()
+{
+	static spl::shared_ptr<audio_mix_config_repository> default_instance;
+
+	return default_instance;
+}
+
+}}
--- /dev/null
+/*
+* Copyright (c) 2011 Sveriges Television AB <info@casparcg.com>
+*
+* This file is part of CasparCG (www.casparcg.com).
+*
+* CasparCG is free software: you can redistribute it and/or modify
+* it under the terms of the GNU General Public License as published by
+* the Free Software Foundation, either version 3 of the License, or
+* (at your option) any later version.
+*
+* CasparCG is distributed in the hope that it will be useful,
+* but WITHOUT ANY WARRANTY; without even the implied warranty of
+* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+* GNU General Public License for more details.
+*
+* You should have received a copy of the GNU General Public License
+* along with CasparCG. If not, see <http://www.gnu.org/licenses/>.
+*
+* Author: Helge Norberg, helge.norberg@svt.se
+*/
+
+#pragma once
+
+#include <common/memory.h>
+
+#include <core/mixer/audio/audio_mixer.h>
+
+#include <boost/noncopyable.hpp>
+#include <boost/optional.hpp>
+#include <boost/property_tree/ptree_fwd.hpp>
+
+#include <string>
+
+namespace caspar { namespace core {
+
+// Describes the audio channel configuration of a producer/consumer:
+// channel count, an upper-cased layout type name and optionally a name per
+// channel (space separated in configuration).
+struct audio_channel_layout final
+{
+	int num_channels;
+	std::wstring type;								// Upper-cased layout type name.
+	std::vector<std::wstring> channel_order;		// Channel names by index; may be empty or shorter than num_channels.
+
+	// Throws invalid_argument if num_channels < 1, if channel_order contains
+	// characters reserved for the mix-config syntax (=<+*|) or if it names
+	// more channels than num_channels.
+	audio_channel_layout(int num_channels, std::wstring type, const std::wstring& channel_order);
+	// All indexes whose channel name equals channel_name (a name may repeat).
+	std::vector<int> indexes_of(const std::wstring& channel_name) const;
+	// Human readable description for diagnostics/logging.
+	std::wstring print() const;
+	// Shared sentinel layout (0 channels) used before a real layout is known.
+	static const audio_channel_layout& invalid();
+private:
+	audio_channel_layout();							// Constructs the invalid layout.
+};
+
+bool operator==(const audio_channel_layout& lhs, const audio_channel_layout& rhs);
+bool operator!=(const audio_channel_layout& lhs, const audio_channel_layout& rhs);
+
+// Thread-safe, case-insensitive registry of named channel layouts.
+class audio_channel_layout_repository : boost::noncopyable
+{
+public:
+	audio_channel_layout_repository();
+	// Registers layout under name (upper-cased before storage).
+	void register_layout(std::wstring name, audio_channel_layout layout);
+	// Registers every <channel-layout/> child of layouts.
+	void register_all_layouts(const boost::property_tree::wptree& layouts);
+	// Returns boost::none when no layout is registered under name.
+	boost::optional<audio_channel_layout> get_layout(const std::wstring& name) const;
+	// Process-wide default instance.
+	static spl::shared_ptr<audio_channel_layout_repository> get_default();
+private:
+	struct impl;
+	spl::shared_ptr<impl> impl_;
+};
+
+// Thread-safe registry of mix configurations describing how to mix between
+// two layout types, keyed case-insensitively by (from-type, to-type).
+class audio_mix_config_repository : boost::noncopyable
+{
+public:
+	audio_mix_config_repository();
+	// Registers the same mix_config for every entry in to_types.
+	void register_config(
+		const std::wstring& from_type,
+		const std::vector<std::wstring>& to_types,
+		const std::wstring& mix_config);
+	// Registers every <mix-config/> child of configs.
+	void register_all_configs(const boost::property_tree::wptree& configs);
+	// Returns boost::none when no config is registered for the type pair.
+	boost::optional<std::wstring> get_config(const std::wstring& from_type, const std::wstring& to_type) const;
+	// Process-wide default instance.
+	static spl::shared_ptr<audio_mix_config_repository> get_default();
+private:
+	struct impl;
+	spl::shared_ptr<impl> impl_;
+};
+
+// Remaps/mixes audio samples from one channel layout to another.
+// Implementation in ffmpeg module.
+class audio_channel_remapper : boost::noncopyable
+{
+public:
+	// mix_repo supplies the mix expression for (input type -> output type);
+	// defaults to the process-wide repository.
+	audio_channel_remapper(
+		audio_channel_layout input_layout,
+		audio_channel_layout output_layout,
+		spl::shared_ptr<audio_mix_config_repository> mix_repo = audio_mix_config_repository::get_default());
+
+	/**
+	 * Perform downmix/upmix/rearranging of audio data if needed.
+	 *
+	 * @param input The input audio buffer.
+	 *
+	 * @return input if the input layout is the same as the output layout.
+	 *         otherwise the mixed buffer (valid until the next call).
+	 */
+	audio_buffer mix_and_rearrange(audio_buffer input);
+private:
+	struct impl;
+	spl::shared_ptr<impl> impl_;
+};
+
+}}
#include <core/frame/frame_visitor.h>
#include <core/frame/pixel_format.h>
#include <core/frame/geometry.h>
+#include <core/frame/audio_channel_layout.h>
#include <cstdint>
#include <vector>
struct mutable_frame::impl : boost::noncopyable
{
std::vector<array<std::uint8_t>> buffers_;
- core::audio_buffer audio_data_;
+ core::mutable_audio_buffer audio_data_;
const core::pixel_format_desc desc_;
+ const core::audio_channel_layout channel_layout_;
const void* tag_;
core::frame_geometry geometry_ = frame_geometry::get_default();
caspar::timer since_created_timer_;
- impl(std::vector<array<std::uint8_t>> buffers, audio_buffer audio_buffer, const void* tag, const core::pixel_format_desc& desc)
+ impl(
+ std::vector<array<std::uint8_t>> buffers,
+ mutable_audio_buffer audio_data,
+ const void* tag,
+ const core::pixel_format_desc& desc,
+ const core::audio_channel_layout& channel_layout)
: buffers_(std::move(buffers))
- , audio_data_(std::move(audio_buffer))
+ , audio_data_(std::move(audio_data))
, desc_(desc)
+ , channel_layout_(channel_layout)
, tag_(tag)
{
for (auto& buffer : buffers_)
}
};
-mutable_frame::mutable_frame(std::vector<array<std::uint8_t>> image_buffers, audio_buffer audio_buffer, const void* tag, const core::pixel_format_desc& desc)
- : impl_(new impl(std::move(image_buffers), std::move(audio_buffer), tag, desc)){}
+mutable_frame::mutable_frame(
+ std::vector<array<std::uint8_t>> image_buffers,
+ mutable_audio_buffer audio_data,
+ const void* tag,
+ const core::pixel_format_desc& desc,
+ const core::audio_channel_layout& channel_layout)
+ : impl_(new impl(std::move(image_buffers), std::move(audio_data), tag, desc, channel_layout)){}
mutable_frame::~mutable_frame(){}
mutable_frame::mutable_frame(mutable_frame&& other) : impl_(std::move(other.impl_)){}
mutable_frame& mutable_frame::operator=(mutable_frame&& other)
}
void mutable_frame::swap(mutable_frame& other){impl_.swap(other.impl_);}
const core::pixel_format_desc& mutable_frame::pixel_format_desc() const{return impl_->desc_;}
+const core::audio_channel_layout& mutable_frame::audio_channel_layout() const { return impl_->channel_layout_; }
const array<std::uint8_t>& mutable_frame::image_data(std::size_t index) const{return impl_->buffers_.at(index);}
-const core::audio_buffer& mutable_frame::audio_data() const{return impl_->audio_data_;}
+const core::mutable_audio_buffer& mutable_frame::audio_data() const{return impl_->audio_data_;}
array<std::uint8_t>& mutable_frame::image_data(std::size_t index){return impl_->buffers_.at(index);}
-core::audio_buffer& mutable_frame::audio_data(){return impl_->audio_data_;}
+core::mutable_audio_buffer& mutable_frame::audio_data(){return impl_->audio_data_;}
std::size_t mutable_frame::width() const{return impl_->desc_.planes.at(0).width;}
std::size_t mutable_frame::height() const{return impl_->desc_.planes.at(0).height;}
const void* mutable_frame::stream_tag()const{return impl_->tag_;}
struct const_frame::impl : boost::noncopyable
{
mutable std::vector<std::shared_future<array<const std::uint8_t>>> future_buffers_;
- core::audio_buffer audio_data_;
+ mutable core::audio_buffer audio_data_;
const core::pixel_format_desc desc_;
+ const core::audio_channel_layout channel_layout_;
const void* tag_;
core::frame_geometry geometry_;
caspar::timer since_created_timer_;
mutable tbb::atomic<int64_t> recorded_age_;
impl(const void* tag)
- : desc_(core::pixel_format::invalid)
+ : audio_data_(0, 0, true, 0)
+ , desc_(core::pixel_format::invalid)
+ , channel_layout_(audio_channel_layout::invalid())
, tag_(tag)
, geometry_(frame_geometry::get_default())
, should_record_age_(true)
recorded_age_ = 0;
}
- impl(std::shared_future<array<const std::uint8_t>> image, audio_buffer audio_buffer, const void* tag, const core::pixel_format_desc& desc)
- : audio_data_(std::move(audio_buffer))
+ impl(
+ std::shared_future<array<const std::uint8_t>> image,
+ audio_buffer audio_data,
+ const void* tag,
+ const core::pixel_format_desc& desc,
+ const core::audio_channel_layout& channel_layout)
+ : audio_data_(std::move(audio_data))
, desc_(desc)
+ , channel_layout_(channel_layout)
, tag_(tag)
, geometry_(frame_geometry::get_default())
, should_record_age_(false)
}
impl(mutable_frame&& other)
- : audio_data_(other.audio_data())
+ : audio_data_(0, 0, true, 0) // Complex init done in body instead.
, desc_(other.pixel_format_desc())
+ , channel_layout_(other.audio_channel_layout())
, tag_(other.stream_tag())
, geometry_(other.geometry())
, since_created_timer_(other.since_created())
, should_record_age_(true)
{
+ spl::shared_ptr<mutable_audio_buffer> shared_audio_data(new mutable_audio_buffer(std::move(other.audio_data())));
+ // pointer returned by vector::data() should be the same after move, but just to be safe.
+ audio_data_ = audio_buffer(shared_audio_data->data(), shared_audio_data->size(), true, std::move(shared_audio_data));
+
for (std::size_t n = 0; n < desc_.planes.size(); ++n)
{
future_buffers_.push_back(make_ready_future<array<const std::uint8_t>>(std::move(other.image_data(n))).share());
};
const_frame::const_frame(const void* tag) : impl_(new impl(tag)){}
-const_frame::const_frame(std::shared_future<array<const std::uint8_t>> image, audio_buffer audio_buffer, const void* tag, const core::pixel_format_desc& desc)
- : impl_(new impl(std::move(image), std::move(audio_buffer), tag, desc)){}
+const_frame::const_frame(
+ std::shared_future<array<const std::uint8_t>> image,
+ audio_buffer audio_data,
+ const void* tag,
+ const core::pixel_format_desc& desc,
+ const core::audio_channel_layout& channel_layout)
+ : impl_(new impl(std::move(image), std::move(audio_data), tag, desc, channel_layout)){}
const_frame::const_frame(mutable_frame&& other) : impl_(new impl(std::move(other))){}
const_frame::~const_frame(){}
const_frame::const_frame(const_frame&& other) : impl_(std::move(other.impl_)){}
bool const_frame::operator<(const const_frame& other){return impl_ < other.impl_;}
bool const_frame::operator>(const const_frame& other){return impl_ > other.impl_;}
const core::pixel_format_desc& const_frame::pixel_format_desc()const{return impl_->desc_;}
+const core::audio_channel_layout& const_frame::audio_channel_layout()const { return impl_->channel_layout_; }
array<const std::uint8_t> const_frame::image_data(int index)const{return impl_->image_data(index);}
const core::audio_buffer& const_frame::audio_data()const{return impl_->audio_data_;}
std::size_t const_frame::width()const{return impl_->width();}
namespace caspar { namespace core {
-typedef cache_aligned_vector<int32_t> audio_buffer;
+typedef caspar::array<const int32_t> audio_buffer;
+typedef cache_aligned_vector<int32_t> mutable_audio_buffer;
class frame_geometry;
class mutable_frame final
// Constructors
explicit mutable_frame(std::vector<array<std::uint8_t>> image_buffers,
- audio_buffer audio_buffer,
+ mutable_audio_buffer audio_data,
const void* tag,
- const pixel_format_desc& desc);
+ const pixel_format_desc& desc,
+ const audio_channel_layout& channel_layout);
~mutable_frame();
// Methods
// Properties
const core::pixel_format_desc& pixel_format_desc() const;
+ const core::audio_channel_layout& audio_channel_layout() const;
const array<std::uint8_t>& image_data(std::size_t index = 0) const;
- const core::audio_buffer& audio_data() const;
+ const core::mutable_audio_buffer& audio_data() const;
array<std::uint8_t>& image_data(std::size_t index = 0);
- core::audio_buffer& audio_data();
+ core::mutable_audio_buffer& audio_data();
std::size_t width() const;
std::size_t height() const;
explicit const_frame(const void* tag = nullptr);
explicit const_frame(std::shared_future<array<const std::uint8_t>> image,
- audio_buffer audio_buffer,
+ audio_buffer audio_data,
const void* tag,
- const pixel_format_desc& desc);
+ const pixel_format_desc& desc,
+ const audio_channel_layout& channel_layout);
const_frame(mutable_frame&& other);
~const_frame();
// Properties
const core::pixel_format_desc& pixel_format_desc() const;
+ const core::audio_channel_layout& audio_channel_layout() const;
array<const std::uint8_t> image_data(int index = 0) const;
const core::audio_buffer& audio_data() const;
// Methods
- virtual mutable_frame create_frame(const void* video_stream_tag, const pixel_format_desc& desc) = 0;
+ virtual mutable_frame create_frame(
+ const void* video_stream_tag,
+ const pixel_format_desc& desc,
+ const core::audio_channel_layout& channel_layout) = 0;
// Properties
};
FORWARD2(caspar, core, class help_repository);
FORWARD2(caspar, core, struct module_dependencies);
FORWARD2(caspar, core, class frame_producer_registry);
+FORWARD2(caspar, core, struct audio_channel_layout);
+FORWARD2(caspar, core, class audio_channel_layout_repository);
+FORWARD2(caspar, core, class audio_mix_config_repository);
#include <core/frame/frame.h>
#include <core/frame/frame_transform.h>
+#include <core/frame/audio_channel_layout.h>
#include <core/monitor/monitor.h>
#include <common/diagnostics/graph.h>
struct audio_item
{
- const void* tag = nullptr;
- audio_transform transform;
- audio_buffer audio_data;
+ const void* tag = nullptr;
+ audio_transform transform;
+ audio_buffer audio_data;
+ audio_channel_layout channel_layout = audio_channel_layout::invalid();
audio_item()
{
: tag(std::move(other.tag))
, transform(std::move(other.transform))
, audio_data(std::move(other.audio_data))
+ , channel_layout(std::move(other.channel_layout))
{
}
};
struct audio_stream
{
- audio_transform prev_transform;
- audio_buffer_ps audio_data;
- bool is_still = false;
+ audio_transform prev_transform;
+ audio_buffer_ps audio_data;
+ std::unique_ptr<audio_channel_remapper> channel_remapper;
+ bool remapping_failed = false;
+ bool is_still = false;
};
struct audio_mixer::impl : boost::noncopyable
std::vector<audio_item> items_;
std::vector<int> audio_cadence_;
video_format_desc format_desc_;
+ audio_channel_layout channel_layout_ = audio_channel_layout::invalid();
float master_volume_ = 1.0f;
float previous_master_volume_ = master_volume_;
spl::shared_ptr<diagnostics::graph> graph_;
return;
audio_item item;
- item.tag = frame.stream_tag();
- item.transform = transform_stack_.top();
- item.audio_data = frame.audio_data();
+ item.tag = frame.stream_tag();
+ item.transform = transform_stack_.top();
+ item.audio_data = frame.audio_data();
+ item.channel_layout = frame.audio_channel_layout();
if(item.transform.is_still)
item.transform.volume = 0.0;
return master_volume_;
}
- audio_buffer mix(const video_format_desc& format_desc)
+ audio_buffer mix(const video_format_desc& format_desc, const audio_channel_layout& channel_layout)
{
- if(format_desc_ != format_desc)
+ if(format_desc_ != format_desc || channel_layout_ != channel_layout)
{
audio_streams_.clear();
audio_cadence_ = format_desc.audio_cadence;
format_desc_ = format_desc;
+ channel_layout_ = channel_layout;
}
std::map<const void*, audio_stream> next_audio_streams;
for (auto& item : items_)
{
audio_buffer_ps next_audio;
+ std::unique_ptr<audio_channel_remapper> channel_remapper;
+ bool remapping_failed = false;
auto next_transform = item.transform;
auto prev_transform = next_transform;
used_tags.push_back(tag);
const auto it = audio_streams_.find(tag);
- if(it != audio_streams_.end())
- {
- prev_transform = it->second.prev_transform;
- next_audio = std::move(it->second.audio_data);
+ if (it != audio_streams_.end())
+ {
+ prev_transform = it->second.prev_transform;
+ next_audio = std::move(it->second.audio_data);
+ channel_remapper = std::move(it->second.channel_remapper);
+ remapping_failed = it->second.remapping_failed;
}
+ if (remapping_failed)
+ {
+ CASPAR_LOG(trace) << "[audio_mixer] audio channel remapping already failed for stream.";
+ next_audio_streams[tag].remapping_failed = true;
+ continue;
+ }
+
// Skip it if there is no existing audio stream and item has no audio-data.
if(it == audio_streams_.end() && item.audio_data.empty())
continue;
-
+
+ if (item.channel_layout == audio_channel_layout::invalid())
+ {
+ CASPAR_LOG(debug) << "[audio_mixer] invalid audio channel layout for item";
+ continue;
+ }
+
+ if (!channel_remapper)
+ {
+ try
+ {
+ channel_remapper.reset(new audio_channel_remapper(item.channel_layout, channel_layout_));
+ }
+ catch (...)
+ {
+ CASPAR_LOG_CURRENT_EXCEPTION();
+ CASPAR_LOG(error) << "[audio_mixer] audio channel remapping failed for stream.";
+ next_audio_streams[tag].remapping_failed = true;
+ continue;
+ }
+ }
+
+ item.audio_data = channel_remapper->mix_and_rearrange(item.audio_data);
+
const float prev_volume = static_cast<float>(prev_transform.volume) * previous_master_volume_;
const float next_volume = static_cast<float>(next_transform.volume) * master_volume_;
// TODO: Move volume mixing into code below, in order to support audio sample counts not corresponding to frame audio samples.
- auto alpha = (next_volume-prev_volume)/static_cast<float>(item.audio_data.size()/format_desc.audio_channels);
+ auto alpha = (next_volume-prev_volume)/static_cast<float>(item.audio_data.size()/channel_layout_.num_channels);
for(size_t n = 0; n < item.audio_data.size(); ++n)
{
- auto sample_multiplier = (prev_volume + (n/format_desc_.audio_channels) * alpha);
- next_audio.push_back(item.audio_data[n] * sample_multiplier);
+ auto sample_multiplier = (prev_volume + (n / channel_layout_.num_channels) * alpha);
+ next_audio.push_back(item.audio_data.data()[n] * sample_multiplier);
}
- next_audio_streams[tag].prev_transform = std::move(next_transform); // Store all active tags, inactive tags will be removed at the end.
- next_audio_streams[tag].audio_data = std::move(next_audio);
- next_audio_streams[tag].is_still = item.transform.is_still;
- }
+ next_audio_streams[tag].prev_transform = std::move(next_transform); // Store all active tags, inactive tags will be removed at the end.
+ next_audio_streams[tag].audio_data = std::move(next_audio);
+ next_audio_streams[tag].channel_remapper = std::move(channel_remapper);
+ next_audio_streams[tag].remapping_failed = remapping_failed;
+ next_audio_streams[tag].is_still = item.transform.is_still;
+ }
previous_master_volume_ = master_volume_;
items_.clear();
auto nb_invalid_streams = cpplinq::from(audio_streams_)
.select(values())
- .where([&](const audio_stream& x) { return x.audio_data.size() < audio_size(audio_cadence_.front()); })
+ .where([&](const audio_stream& x)
+ {
+ return !x.remapping_failed && x.audio_data.size() < audio_size(audio_cadence_.front());
+ })
.count();
if(nb_invalid_streams > 0)
boost::range::rotate(audio_cadence_, std::begin(audio_cadence_)+1);
- audio_buffer result;
+ auto result_owner = spl::make_shared<mutable_audio_buffer>();
+ auto& result = *result_owner;
result.reserve(result_ps.size());
boost::range::transform(result_ps, std::back_inserter(result), [](float sample){return static_cast<int32_t>(sample);});
- const int num_channels = format_desc_.audio_channels;
+ const int num_channels = channel_layout_.num_channels;
monitor_subject_ << monitor::message("/nb_channels") % num_channels;
auto max = std::vector<int32_t>(num_channels, std::numeric_limits<int32_t>::min());
graph_->set_value("volume", static_cast<double>(*boost::max_element(max)) / std::numeric_limits<int32_t>::max());
- return result;
+ return caspar::array<int32_t>(result.data(), result.size(), true, std::move(result_owner));
}
size_t audio_size(size_t num_samples) const
{
- return num_samples * format_desc_.audio_channels;
+ return num_samples * channel_layout_.num_channels;
}
};
void audio_mixer::pop(){impl_->pop();}
void audio_mixer::set_master_volume(float volume) { impl_->set_master_volume(volume); }
float audio_mixer::get_master_volume() { return impl_->get_master_volume(); }
-audio_buffer audio_mixer::operator()(const video_format_desc& format_desc){ return impl_->mix(format_desc); }
+audio_buffer audio_mixer::operator()(const video_format_desc& format_desc, const audio_channel_layout& channel_layout){ return impl_->mix(format_desc, channel_layout); }
monitor::subject& audio_mixer::monitor_output(){ return impl_->monitor_subject_; }
}}
#include <common/forward.h>
#include <common/memory.h>
#include <common/cache_aligned_vector.h>
+#include <common/array.h>
#include <core/frame/frame_visitor.h>
#include <core/monitor/monitor.h>
namespace caspar { namespace core {
-typedef cache_aligned_vector<int32_t> audio_buffer;
+typedef caspar::array<const int32_t> audio_buffer;
class audio_mixer final : public frame_visitor
{
// Constructors
- audio_mixer(spl::shared_ptr<diagnostics::graph> graph);
+ audio_mixer(spl::shared_ptr<::caspar::diagnostics::graph> graph);
// Methods
- audio_buffer operator()(const struct video_format_desc& format_desc);
+ audio_buffer operator()(const struct video_format_desc& format_desc, const struct audio_channel_layout& channel_layout);
void set_master_volume(float volume);
float get_master_volume();
monitor::subject& monitor_output();
virtual std::future<array<const std::uint8_t>> operator()(const struct video_format_desc& format_desc, bool straighten_alpha) = 0;
- virtual class mutable_frame create_frame(const void* tag, const struct pixel_format_desc& desc) = 0;
+ virtual class mutable_frame create_frame(const void* tag, const struct pixel_format_desc& desc, const core::audio_channel_layout& channel_layout) = 0;
// Properties
};
#include <core/frame/frame_factory.h>
#include <core/frame/frame_transform.h>
#include <core/frame/pixel_format.h>
+#include <core/frame/audio_channel_layout.h>
#include <core/video_format.h>
#include <boost/property_tree/ptree.hpp>
audio_mixer_.monitor_output().attach_parent(monitor_subject_);
}
- const_frame operator()(std::map<int, draw_frame> frames, const video_format_desc& format_desc)
+ const_frame operator()(std::map<int, draw_frame> frames, const video_format_desc& format_desc, const core::audio_channel_layout& channel_layout)
{
caspar::timer frame_timer;
}
auto image = (*image_mixer_)(format_desc, straighten_alpha_);
- auto audio = audio_mixer_(format_desc);
+ auto audio = audio_mixer_(format_desc, channel_layout);
auto desc = core::pixel_format_desc(core::pixel_format::bgra);
desc.planes.push_back(core::pixel_format_desc::plane(format_desc.width, format_desc.height, 4));
- return const_frame(std::move(image), std::move(audio), this, desc);
+ return const_frame(std::move(image), std::move(audio), this, desc, channel_layout);
}
catch(...)
{
bool mixer::get_straight_alpha_output() { return impl_->get_straight_alpha_output(); }
std::future<boost::property_tree::wptree> mixer::info() const{return impl_->info();}
std::future<boost::property_tree::wptree> mixer::delay_info() const{ return impl_->delay_info(); }
-const_frame mixer::operator()(std::map<int, draw_frame> frames, const video_format_desc& format_desc){ return (*impl_)(std::move(frames), format_desc); }
-mutable_frame mixer::create_frame(const void* tag, const core::pixel_format_desc& desc) {return impl_->image_mixer_->create_frame(tag, desc);}
+const_frame mixer::operator()(std::map<int, draw_frame> frames, const video_format_desc& format_desc, const core::audio_channel_layout& channel_layout){ return (*impl_)(std::move(frames), format_desc, channel_layout); }
+mutable_frame mixer::create_frame(const void* tag, const core::pixel_format_desc& desc, const core::audio_channel_layout& channel_layout) {return impl_->image_mixer_->create_frame(tag, desc, channel_layout);}
monitor::subject& mixer::monitor_output() { return *impl_->monitor_subject_; }
}}
// Constructors
- explicit mixer(int channel_index, spl::shared_ptr<diagnostics::graph> graph, spl::shared_ptr<image_mixer> image_mixer);
+ explicit mixer(int channel_index, spl::shared_ptr<caspar::diagnostics::graph> graph, spl::shared_ptr<image_mixer> image_mixer);
// Methods
- const_frame operator()(std::map<int, draw_frame> frames, const video_format_desc& format_desc);
+ const_frame operator()(std::map<int, draw_frame> frames, const video_format_desc& format_desc, const core::audio_channel_layout& channel_layout);
void set_master_volume(float volume);
float get_master_volume();
void set_straight_alpha_output(bool value);
bool get_straight_alpha_output();
- mutable_frame create_frame(const void* tag, const pixel_format_desc& desc);
+ mutable_frame create_frame(const void* tag, const pixel_format_desc& desc, const core::audio_channel_layout& channel_layout);
// Properties
#include <core/frame/draw_frame.h>
#include <core/frame/frame_factory.h>
#include <core/frame/pixel_format.h>
+#include <core/frame/audio_channel_layout.h>
#include <core/monitor/monitor.h>
#include <common/except.h>
{
core::pixel_format_desc desc(pixel_format::bgra);
desc.planes.push_back(core::pixel_format_desc::plane(1, 1, 4));
- auto frame = frame_factory->create_frame(tag, desc);
+ auto frame = frame_factory->create_frame(tag, desc, core::audio_channel_layout::invalid());
*reinterpret_cast<uint32_t*>(frame.image_data(0).begin()) = value;
+++ /dev/null
-/*
-* Copyright (c) 2011 Sveriges Television AB <info@casparcg.com>
-*
-* This file is part of CasparCG (www.casparcg.com).
-*
-* CasparCG is free software: you can redistribute it and/or modify
-* it under the terms of the GNU General Public License as published by
-* the Free Software Foundation, either version 3 of the License, or
-* (at your option) any later version.
-*
-* CasparCG is distributed in the hope that it will be useful,
-* but WITHOUT ANY WARRANTY; without even the implied warranty of
-* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-* GNU General Public License for more details.
-*
-* You should have received a copy of the GNU General Public License
-* along with CasparCG. If not, see <http://www.gnu.org/licenses/>.
-*
-* Author: Helge Norberg, helge.norberg@svt.se
-*/
-
-#include "../../StdAfx.h"
-
-#include "freehand_producer.h"
-
-#include <core/producer/frame_producer.h>
-#include <core/frame/frame.h>
-#include <core/frame/draw_frame.h>
-#include <core/frame/frame_factory.h>
-#include <core/frame/pixel_format.h>
-#include <core/monitor/monitor.h>
-
-namespace caspar { namespace core {
-
-cache_aligned_vector<uint8_t> empty_drawing(int width, int height)
-{
- cache_aligned_vector<uint8_t> result;
-
- result.resize(width * height * 4);
- std::fill(result.begin(), result.end(), 0);
-
- return std::move(result);
-}
-
-class freehand_producer : public frame_producer_base
-{
- monitor::subject monitor_subject_;
- cache_aligned_vector<uint8_t> drawing_;
- spl::shared_ptr<core::frame_factory> frame_factory_;
- constraints constraints_;
- draw_frame frame_;
- bool button_pressed_;
- bool modified_;
-public:
- explicit freehand_producer(
- const spl::shared_ptr<core::frame_factory>& frame_factory,
- int width,
- int height)
- : drawing_(std::move(empty_drawing(width, height)))
- , frame_factory_(frame_factory)
- , constraints_(width, height)
- , frame_(create_frame())
- , button_pressed_(false)
- , modified_(false)
- {
- CASPAR_LOG(info) << print() << L" Initialized";
- }
-
- draw_frame create_frame()
- {
- pixel_format_desc desc(pixel_format::bgra);
- desc.planes.push_back(pixel_format_desc::plane(
- static_cast<int>(constraints_.width.get()),
- static_cast<int>(constraints_.height.get()),
- 4));
- auto frame = frame_factory_->create_frame(this, desc);
-
- std::memcpy(frame.image_data().begin(), drawing_.data(), drawing_.size());
-
- return draw_frame(std::move(frame));
- }
-
- // frame_producer
-
- void on_interaction(const interaction_event::ptr& event) override
- {
- if (is<mouse_move_event>(event) && button_pressed_)
- {
- auto mouse_move = as<mouse_move_event>(event);
-
- int x = static_cast<int>(mouse_move->x * constraints_.width.get());
- int y = static_cast<int>(mouse_move->y * constraints_.height.get());
-
- if (x >= constraints_.width.get()
- || y >= constraints_.height.get()
- || x < 0
- || y < 0)
- return;
-
- uint8_t* b = drawing_.data() + (y * static_cast<int>(constraints_.width.get()) + x) * 4;
- uint8_t* g = b + 1;
- uint8_t* r = b + 2;
- uint8_t* a = b + 3;
-
- *b = 255;
- *g = 255;
- *r = 255;
- *a = 255;
-
- modified_ = true;
- }
- else if (is<mouse_button_event>(event))
- {
- auto button_event = as<mouse_button_event>(event);
-
- if (button_event->button == 0)
- button_pressed_ = button_event->pressed;
- else if (button_event->button == 1 && button_event->pressed)
- {
- std::memset(drawing_.data(), 0, drawing_.size());
- modified_ = true;
- }
- }
- }
-
- bool collides(double x, double y) const override
- {
- return true;
- }
-
- draw_frame receive_impl() override
- {
- if (modified_)
- {
- frame_ = create_frame();
- modified_ = false;
- }
-
- return frame_;
- }
-
- constraints& pixel_constraints() override
- {
- return constraints_;
- }
-
- std::wstring print() const override
- {
- return L"freehand[]";
- }
-
- std::wstring name() const override
- {
- return L"freehand";
- }
-
- boost::property_tree::wptree info() const override
- {
- boost::property_tree::wptree info;
- info.add(L"type", L"freehand");
- return info;
- }
-
- monitor::subject& monitor_output()
- {
- return monitor_subject_;
- }
-};
-
-spl::shared_ptr<frame_producer> create_freehand_producer(const spl::shared_ptr<frame_factory>& frame_factory, const std::vector<std::wstring>& params)
-{
- if(params.size() < 3 || !boost::iequals(params.at(0), L"[FREEHAND]"))
- return core::frame_producer::empty();
-
- int width = boost::lexical_cast<int>(params.at(1));
- int height = boost::lexical_cast<int>(params.at(2));
-
- return spl::make_shared<freehand_producer>(frame_factory, width, height);
-}
-
-}}
+++ /dev/null
-/*
-* Copyright (c) 2011 Sveriges Television AB <info@casparcg.com>
-*
-* This file is part of CasparCG (www.casparcg.com).
-*
-* CasparCG is free software: you can redistribute it and/or modify
-* it under the terms of the GNU General Public License as published by
-* the Free Software Foundation, either version 3 of the License, or
-* (at your option) any later version.
-*
-* CasparCG is distributed in the hope that it will be useful,
-* but WITHOUT ANY WARRANTY; without even the implied warranty of
-* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-* GNU General Public License for more details.
-*
-* You should have received a copy of the GNU General Public License
-* along with CasparCG. If not, see <http://www.gnu.org/licenses/>.
-*
-* Author: Helge Norberg, helge.norberg@svt.se
-*/
-
-#pragma once
-
-#include <common/memory.h>
-
-#include <core/fwd.h>
-
-#include <string>
-#include <vector>
-
-namespace caspar { namespace core {
-
-spl::shared_ptr<frame_producer> create_freehand_producer(const spl::shared_ptr<frame_factory>& frame_factory, const std::vector<std::wstring>& params);
-
-}}
#include "../frame/frame_transform.h"
#include "color/color_producer.h"
-#include "draw/freehand_producer.h"
#include "separated/separated_producer.h"
#include "variable.h"
if(producer == frame_producer::empty())
producer = create_color_producer(dependencies.frame_factory, params);
- if (producer == frame_producer::empty())
- producer = create_freehand_producer(dependencies.frame_factory, params);
-
if(producer == frame_producer::empty())
return producer;
// Constructors
- explicit stage(int channel_index, spl::shared_ptr<diagnostics::graph> graph);
+ explicit stage(int channel_index, spl::shared_ptr<caspar::diagnostics::graph> graph);
// Methods
#include <core/frame/draw_frame.h>
#include <core/frame/frame_factory.h>
#include <core/frame/pixel_format.h>
+#include <core/frame/audio_channel_layout.h>
#include <core/monitor/monitor.h>
#include <core/consumer/frame_consumer.h>
#include <core/module_dependencies.h>
text::string_metrics metrics;
font_.set_tracking(static_cast<int>(tracking_.value().get()));
auto vertex_stream = font_.create_vertex_stream(text_.value().get(), x_, y_, parent_width_, parent_height_, &metrics);
- auto frame = frame_factory_->create_frame(vertex_stream.data(), pfd);
+ auto frame = frame_factory_->create_frame(vertex_stream.data(), pfd, core::audio_channel_layout::invalid());
memcpy(frame.image_data().data(), atlas_.data(), frame.image_data().size());
frame.set_geometry(frame_geometry(frame_geometry::geometry_type::quad_list, std::move(vertex_stream)));
#include "frame/frame.h"
#include "frame/draw_frame.h"
#include "frame/frame_transform.h"
+#include "frame/audio_channel_layout.h"
#include "producer/media_info/media_info.h"
#include "producer/media_info/media_info_repository.h"
std::shared_ptr<void> ticket(nullptr, [&thumbnail_ready](void*) { thumbnail_ready.set_value(); });
- auto mixed_frame = mixer_(std::move(frames), format_desc_);
+ auto mixed_frame = mixer_(std::move(frames), format_desc_, audio_channel_layout(2, L"stereo", L""));
output_->send(std::move(mixed_frame), ticket);
ticket.reset();
#include "frame/frame.h"
#include "frame/draw_frame.h"
#include "frame/frame_factory.h"
+#include "frame/audio_channel_layout.h"
#include <common/diagnostics/graph.h>
#include <common/env.h>
mutable tbb::spin_mutex format_desc_mutex_;
core::video_format_desc format_desc_;
-
+ mutable tbb::spin_mutex channel_layout_mutex_;
+ core::audio_channel_layout channel_layout_;
+
const spl::shared_ptr<caspar::diagnostics::graph> graph_ = [](int index)
{
core::diagnostics::scoped_call_context save;
executor executor_ { L"video_channel " + boost::lexical_cast<std::wstring>(index_) };
public:
- impl(int index, const core::video_format_desc& format_desc, std::unique_ptr<image_mixer> image_mixer)
+ impl(
+ int index,
+ const core::video_format_desc& format_desc,
+ const core::audio_channel_layout& channel_layout,
+ std::unique_ptr<image_mixer> image_mixer)
: monitor_subject_(spl::make_shared<monitor::subject>(
"/channel/" + boost::lexical_cast<std::string>(index)))
, index_(index)
, format_desc_(format_desc)
- , output_(graph_, format_desc, index)
+ , channel_layout_(channel_layout)
+ , output_(graph_, format_desc, channel_layout, index)
, image_mixer_(std::move(image_mixer))
, mixer_(index, graph_, image_mixer_)
, stage_(index, graph_)
{
CASPAR_LOG(info) << print() << " Uninitializing.";
}
-
+
core::video_format_desc video_format_desc() const
{
return lock(format_desc_mutex_, [&]
return format_desc_;
});
}
-
+
void video_format_desc(const core::video_format_desc& format_desc)
{
lock(format_desc_mutex_, [&]
});
}
+ core::audio_channel_layout audio_channel_layout() const
+ {
+ return lock(channel_layout_mutex_, [&]
+ {
+ return channel_layout_;
+ });
+ }
+
+ void audio_channel_layout(const core::audio_channel_layout& channel_layout)
+ {
+ lock(channel_layout_mutex_, [&]
+ {
+ channel_layout_ = channel_layout;
+ stage_.clear();
+ });
+ }
+
void tick()
{
try
{
- auto format_desc = video_format_desc();
+ auto format_desc = video_format_desc();
+ auto channel_layout = audio_channel_layout();
caspar::timer frame_timer;
// Mix
- auto mixed_frame = mixer_(std::move(stage_frames), format_desc);
+ auto mixed_frame = mixer_(std::move(stage_frames), format_desc, channel_layout);
// Consume
- output_(std::move(mixed_frame), format_desc);
+ output_(std::move(mixed_frame), format_desc, channel_layout);
auto frame_time = frame_timer.elapsed()*format_desc.fps*0.5;
graph_->set_value("tick-time", frame_time);
auto mixer_info = mixer_.info();
auto output_info = output_.info();
- info.add(L"video-mode", format_desc_.name);
+ info.add(L"video-mode", video_format_desc().name);
+ info.add(L"audio-channel-layout", audio_channel_layout().print());
info.add_child(L"stage", stage_info.get());
info.add_child(L"mixer", mixer_info.get());
info.add_child(L"output", output_info.get());
}
};
-video_channel::video_channel(int index, const core::video_format_desc& format_desc, std::unique_ptr<image_mixer> image_mixer) : impl_(new impl(index, format_desc, std::move(image_mixer))){}
+video_channel::video_channel(
+ int index,
+ const core::video_format_desc& format_desc,
+ const core::audio_channel_layout& channel_layout,
+ std::unique_ptr<image_mixer> image_mixer) : impl_(new impl(index, format_desc, channel_layout, std::move(image_mixer))){}
video_channel::~video_channel(){}
-const stage& video_channel::stage() const { return impl_->stage_;}
-stage& video_channel::stage() { return impl_->stage_;}
-const mixer& video_channel::mixer() const{ return impl_->mixer_;}
-mixer& video_channel::mixer() { return impl_->mixer_;}
-const output& video_channel::output() const { return impl_->output_;}
-output& video_channel::output() { return impl_->output_;}
-spl::shared_ptr<frame_factory> video_channel::frame_factory() { return impl_->image_mixer_;}
+const stage& video_channel::stage() const { return impl_->stage_;}
+stage& video_channel::stage() { return impl_->stage_;}
+const mixer& video_channel::mixer() const{ return impl_->mixer_;}
+mixer& video_channel::mixer() { return impl_->mixer_;}
+const output& video_channel::output() const { return impl_->output_;}
+output& video_channel::output() { return impl_->output_;}
+spl::shared_ptr<frame_factory> video_channel::frame_factory() { return impl_->image_mixer_;}
core::video_format_desc video_channel::video_format_desc() const{return impl_->video_format_desc();}
void core::video_channel::video_format_desc(const core::video_format_desc& format_desc){impl_->video_format_desc(format_desc);}
+core::audio_channel_layout video_channel::audio_channel_layout() const { return impl_->audio_channel_layout(); }
+void core::video_channel::audio_channel_layout(const core::audio_channel_layout& channel_layout) { impl_->audio_channel_layout(channel_layout); }
boost::property_tree::wptree video_channel::info() const{return impl_->info();}
boost::property_tree::wptree video_channel::delay_info() const { return impl_->delay_info(); }
int video_channel::index() const { return impl_->index(); }
// Constructors
- explicit video_channel(int index, const video_format_desc& format_desc, std::unique_ptr<image_mixer> image_mixer);
+ explicit video_channel(
+ int index,
+ const video_format_desc& format_desc,
+ const audio_channel_layout& channel_layout,
+ std::unique_ptr<image_mixer> image_mixer);
~video_channel();
// Methods
core::video_format_desc video_format_desc() const;
void video_format_desc(const core::video_format_desc& format_desc);
+ core::audio_channel_layout audio_channel_layout() const;
+ void audio_channel_layout(const core::audio_channel_layout& channel_layout);
spl::shared_ptr<core::frame_factory> frame_factory();
, size(width*height*4)
, name(name)
, audio_sample_rate(48000)
- , audio_channels(2)
, audio_cadence(audio_cadence)
{
}
std::wstring name; // name of output format
int audio_sample_rate;
- int audio_channels;
std::vector<int> audio_cadence; // rotating optimal number of samples per frame
video_format_desc(video_format format,
#include <core/video_format.h>
#include <core/frame/frame.h>
+#include <core/frame/audio_channel_layout.h>
#include <core/help/help_repository.h>
#include <core/help/help_sink.h>
struct bluefish_consumer : boost::noncopyable
{
- spl::shared_ptr<CBlueVelvet4> blue_;
- const unsigned int device_index_;
- const core::video_format_desc format_desc_;
- const int channel_index_;
-
- const std::wstring model_name_;
-
- spl::shared_ptr<diagnostics::graph> graph_;
- boost::timer frame_timer_;
- boost::timer tick_timer_;
- boost::timer sync_timer_;
+ spl::shared_ptr<CBlueVelvet4> blue_;
+ const unsigned int device_index_;
+ const core::video_format_desc format_desc_;
+ const core::audio_channel_layout channel_layout_;
+ core::audio_channel_remapper channel_remapper_;
+ const int channel_index_;
+
+ const std::wstring model_name_;
+
+ spl::shared_ptr<diagnostics::graph> graph_;
+ boost::timer frame_timer_;
+ boost::timer tick_timer_;
+ boost::timer sync_timer_;
- unsigned int vid_fmt_;
+ unsigned int vid_fmt_;
- std::array<blue_dma_buffer_ptr, 4> reserved_frames_;
- tbb::concurrent_bounded_queue<core::const_frame> frame_buffer_;
- tbb::atomic<int64_t> presentation_delay_millis_;
- core::const_frame previous_frame_ = core::const_frame::empty();
+ std::array<blue_dma_buffer_ptr, 4> reserved_frames_;
+ tbb::concurrent_bounded_queue<core::const_frame> frame_buffer_;
+ tbb::atomic<int64_t> presentation_delay_millis_;
+ core::const_frame previous_frame_ = core::const_frame::empty();
- const bool embedded_audio_;
- const bool key_only_;
+ const bool embedded_audio_;
+ const bool key_only_;
- executor executor_;
+ executor executor_;
public:
- bluefish_consumer(const core::video_format_desc& format_desc, int device_index, bool embedded_audio, bool key_only, int channel_index)
+ bluefish_consumer(
+ const core::video_format_desc& format_desc,
+ const core::audio_channel_layout& in_channel_layout,
+ const core::audio_channel_layout& out_channel_layout,
+ int device_index,
+ bool embedded_audio,
+ bool key_only,
+ int channel_index)
: blue_(create_blue(device_index))
, device_index_(device_index)
- , format_desc_(format_desc)
+ , format_desc_(format_desc)
+ , channel_layout_(out_channel_layout)
+ , channel_remapper_(in_channel_layout, out_channel_layout)
, channel_index_(channel_index)
, model_name_(get_card_desc(*blue_))
, vid_fmt_(get_video_mode(*blue_, format_desc))
}
else
{
- if(BLUE_FAIL(set_card_property(blue_, EMBEDEDDED_AUDIO_OUTPUT, blue_emb_audio_enable | blue_emb_audio_group1_enable)))
+ ULONG audio_value =
+ EMBEDDED_AUDIO_OUTPUT | blue_emb_audio_group1_enable;
+
+ if (channel_layout_.num_channels > 4)
+ audio_value |= blue_emb_audio_group2_enable;
+
+ if (channel_layout_.num_channels > 8)
+ audio_value |= blue_emb_audio_group3_enable;
+
+ if (channel_layout_.num_channels > 12)
+ audio_value |= blue_emb_audio_group4_enable;
+
+ if(BLUE_FAIL(set_card_property(blue_, EMBEDEDDED_AUDIO_OUTPUT, audio_value)))
CASPAR_LOG(warning) << print() << TEXT(" Failed to enable embedded audio.");
CASPAR_LOG(info) << print() << TEXT(" Enabled embedded-audio.");
}
// Send and display
if(embedded_audio_)
- {
- auto frame_audio = core::audio_32_to_24(frame.audio_data());
+ {
+ auto remapped_audio = channel_remapper_.mix_and_rearrange(frame.audio_data());
+ auto frame_audio = core::audio_32_to_24(remapped_audio);
encode_hanc(reinterpret_cast<BLUE_UINT32*>(reserved_frames_.front()->hanc_data()),
frame_audio.data(),
- static_cast<int>(frame.audio_data().size()/format_desc_.audio_channels),
- static_cast<int>(format_desc_.audio_channels));
+ static_cast<int>(frame.audio_data().size()/channel_layout_.num_channels),
+ static_cast<int>(channel_layout_.num_channels));
blue_->system_buffer_write_async(const_cast<uint8_t*>(reserved_frames_.front()->image_data()),
static_cast<unsigned long>(reserved_frames_.front()->image_size()),
void encode_hanc(BLUE_UINT32* hanc_data, void* audio_data, int audio_samples, int audio_nchannels)
{
const auto sample_type = AUDIO_CHANNEL_24BIT | AUDIO_CHANNEL_LITTLEENDIAN;
- const auto emb_audio_flag = blue_emb_audio_enable | blue_emb_audio_group1_enable;
+ auto emb_audio_flag = blue_emb_audio_enable | blue_emb_audio_group1_enable;
+
+ if (audio_nchannels > 4)
+ emb_audio_flag |= blue_emb_audio_group2_enable;
+
+ if (audio_nchannels > 8)
+ emb_audio_flag |= blue_emb_audio_group3_enable;
+
+ if (audio_nchannels > 12)
+ emb_audio_flag |= blue_emb_audio_group4_enable;
hanc_stream_info_struct hanc_stream_info;
memset(&hanc_stream_info, 0, sizeof(hanc_stream_info));
std::vector<int> audio_cadence_;
core::video_format_desc format_desc_;
+ core::audio_channel_layout in_channel_layout_ = core::audio_channel_layout::invalid();
+ core::audio_channel_layout out_channel_layout_;
public:
- bluefish_consumer_proxy(int device_index, bool embedded_audio, bool key_only)
+ bluefish_consumer_proxy(int device_index, bool embedded_audio, bool key_only, const core::audio_channel_layout& out_channel_layout)
: device_index_(device_index)
, embedded_audio_(embedded_audio)
, key_only_(key_only)
+ , out_channel_layout_(out_channel_layout)
{
}
// frame_consumer
- void initialize(const core::video_format_desc& format_desc, int channel_index) override
+ void initialize(const core::video_format_desc& format_desc, const core::audio_channel_layout& channel_layout, int channel_index) override
{
- format_desc_ = format_desc;
- audio_cadence_ = format_desc.audio_cadence;
+ format_desc_ = format_desc;
+ in_channel_layout_ = channel_layout;
+ audio_cadence_ = format_desc.audio_cadence;
+
+ if (out_channel_layout_ == core::audio_channel_layout::invalid())
+ out_channel_layout_ = in_channel_layout_;
consumer_.reset();
- consumer_.reset(new bluefish_consumer(format_desc, device_index_, embedded_audio_, key_only_, channel_index));
+ consumer_.reset(new bluefish_consumer(format_desc, in_channel_layout_, out_channel_layout_, device_index_, embedded_audio_, key_only_, channel_index));
}
std::future<bool> send(core::const_frame frame) override
{
- CASPAR_VERIFY(audio_cadence_.front() * format_desc_.audio_channels == static_cast<size_t>(frame.audio_data().size()));
+ CASPAR_VERIFY(audio_cadence_.front() * in_channel_layout_.num_channels == static_cast<size_t>(frame.audio_data().size()));
boost::range::rotate(audio_cadence_, std::begin(audio_cadence_)+1);
return consumer_->send(frame);
}
void describe_consumer(core::help_sink& sink, const core::help_repository& repo)
{
sink.short_description(L"Sends video on an SDI output using Bluefish video cards.");
- sink.syntax(L"BLUEFISH {[device_index:int]|1} {[embedded_audio:EMBEDDED_AUDIO]} {[key_only:KEY_ONLY]}");
+ sink.syntax(L"BLUEFISH {[device_index:int]|1} {[embedded_audio:EMBEDDED_AUDIO]} {[key_only:KEY_ONLY]} {CHANNEL_LAYOUT [channel_layout:string]}");
sink.para()
->text(L"Sends video on an SDI output using Bluefish video cards. Multiple video cards can be ")
->text(L"installed in the same machine and used at the same time, they will be addressed via ")
->text(L"Specifying ")->code(L"key_only")->text(L" will extract only the alpha channel from the ")
->text(L"channel. This is useful when you have two SDI video cards, and neither has native support ")
->text(L"for separate fill/key output");
+ sink.para()->text(L"Specify ")->code(L"channel_layout")->text(L" to output a different audio channel layout than the channel uses.");
sink.para()->text(L"Examples:");
sink.example(L">> ADD 1 BLUEFISH", L"uses the default device_index of 1.");
sink.example(L">> ADD 1 BLUEFISH 2", L"for device_index 2.");
const auto device_index = params.size() > 1 ? boost::lexical_cast<int>(params.at(1)) : 1;
- const auto embedded_audio = contains_param(L"EMBEDDED_AUDIO", params);
- const auto key_only = contains_param(L"KEY_ONLY", params);
+ const auto embedded_audio = contains_param( L"EMBEDDED_AUDIO", params);
+ const auto key_only = contains_param( L"KEY_ONLY", params);
+ const auto channel_layout = get_param( L"CHANNEL_LAYOUT", params);
- return spl::make_shared<bluefish_consumer_proxy>(device_index, embedded_audio, key_only);
+ auto layout = core::audio_channel_layout::invalid();
+
+ if (!channel_layout.empty())
+ {
+ auto found_layout = core::audio_channel_layout_repository::get_default()->get_layout(channel_layout);
+
+ if (!found_layout)
+ CASPAR_THROW_EXCEPTION(file_not_found() << msg_info(L"Channel layout " + channel_layout + L" not found"));
+
+ layout = *found_layout;
+ }
+
+ return spl::make_shared<bluefish_consumer_proxy>(device_index, embedded_audio, key_only, layout);
}
spl::shared_ptr<core::frame_consumer> create_preconfigured_consumer(
const boost::property_tree::wptree& ptree, core::interaction_sink*)
{
- const auto device_index = ptree.get(L"device", 1);
- const auto embedded_audio = ptree.get(L"embedded-audio", false);
- const auto key_only = ptree.get(L"key-only", false);
+ const auto device_index = ptree.get( L"device", 1);
+ const auto embedded_audio = ptree.get( L"embedded-audio", false);
+ const auto key_only = ptree.get( L"key-only", false);
+ const auto channel_layout = ptree.get_optional<std::wstring>( L"channel-layout");
+
+ auto layout = core::audio_channel_layout::invalid();
+
+ if (channel_layout)
+ {
+ auto found_layout = core::audio_channel_layout_repository::get_default()->get_layout(*channel_layout);
+
+ if (!found_layout)
+ CASPAR_THROW_EXCEPTION(file_not_found() << msg_info(L"Channel layout " + *channel_layout + L" not found"));
+
+ layout = *found_layout;
+ }
- return spl::make_shared<bluefish_consumer_proxy>(device_index, embedded_audio, key_only);
+ return spl::make_shared<bluefish_consumer_proxy>(device_index, embedded_audio, key_only, layout);
}
}}
\ No newline at end of file
#include "../decklink_api.h"
#include <core/frame/frame.h>
+#include <core/frame/audio_channel_layout.h>
#include <core/mixer/audio/audio_mixer.h>
#include <core/consumer/frame_consumer.h>
#include <core/diagnostics/call_context.h>
default_latency
};
- int device_index = 1;
- int key_device_idx = 0;
- bool embedded_audio = true;
- keyer_t keyer = keyer_t::default_keyer;
- latency_t latency = latency_t::default_latency;
- bool key_only = false;
- int base_buffer_depth = 3;
+ int device_index = 1;
+ int key_device_idx = 0;
+ bool embedded_audio = true;
+ keyer_t keyer = keyer_t::default_keyer;
+ latency_t latency = latency_t::default_latency;
+ bool key_only = false;
+ int base_buffer_depth = 3;
+ core::audio_channel_layout out_channel_layout = core::audio_channel_layout::invalid();
int buffer_depth() const
{
{
return key_device_idx == 0 ? device_index + 1 : key_device_idx;
}
+
+ core::audio_channel_layout get_adjusted_layout(const core::audio_channel_layout& in_layout) const
+ {
+ auto adjusted = out_channel_layout == core::audio_channel_layout::invalid() ? in_layout : out_channel_layout;
+
+ if (adjusted.num_channels == 1) // Duplicate mono-signal into both left and right.
+ {
+ adjusted.num_channels = 2;
+ adjusted.channel_order.push_back(adjusted.channel_order.at(0)); // Usually FC -> FC FC
+ }
+ else if (adjusted.num_channels == 2)
+ {
+ adjusted.num_channels = 2;
+ }
+ else if (adjusted.num_channels <= 8)
+ {
+ adjusted.num_channels = 8;
+ }
+ else // Over 8 always pad to 16 or drop >16
+ {
+ adjusted.num_channels = 16;
+ }
+
+ return adjusted;
+ }
};
void set_latency(
const std::wstring model_name_ = get_model_name(decklink_);
const core::video_format_desc format_desc_;
+ const core::audio_channel_layout in_channel_layout_;
+ const core::audio_channel_layout out_channel_layout_ = config_.get_adjusted_layout(in_channel_layout_);
+ core::audio_channel_remapper channel_remapper_ { in_channel_layout_, out_channel_layout_ };
const int buffer_size_ = config_.buffer_depth(); // Minimum buffer-size 3.
long long video_scheduled_ = 0;
std::unique_ptr<key_video_context> key_context_;
public:
- decklink_consumer(const configuration& config, const core::video_format_desc& format_desc, int channel_index)
+ decklink_consumer(
+ const configuration& config,
+ const core::video_format_desc& format_desc,
+ const core::audio_channel_layout& in_channel_layout,
+ int channel_index)
: channel_index_(channel_index)
, config_(config)
, format_desc_(format_desc)
+ , in_channel_layout_(in_channel_layout)
{
is_running_ = true;
current_presentation_delay_ = 0;
void enable_audio()
{
- if(FAILED(output_->EnableAudioOutput(bmdAudioSampleRate48kHz, bmdAudioSampleType32bitInteger, 2, bmdAudioOutputStreamTimestamped)))
+ if(FAILED(output_->EnableAudioOutput(bmdAudioSampleRate48kHz, bmdAudioSampleType32bitInteger, out_channel_layout_.num_channels, bmdAudioOutputStreamTimestamped)))
CASPAR_THROW_EXCEPTION(caspar_exception() << msg_info(u8(print()) + " Could not enable audio output."));
if(FAILED(output_->SetAudioCallback(this)))
{
graph_->set_tag("late-frame");
video_scheduled_ += format_desc_.duration;
- audio_scheduled_ += dframe->audio_data().size() / format_desc_.audio_channels;
+ audio_scheduled_ += dframe->audio_data().size() / out_channel_layout_.num_channels;
//++video_scheduled_;
//audio_scheduled_ += format_desc_.audio_cadence[0];
//++audio_scheduled_;
}
else
{
- schedule_next_audio(core::audio_buffer(format_desc_.audio_cadence[preroll % format_desc_.audio_cadence.size()] * format_desc_.audio_channels, 0));
+ schedule_next_audio(core::mutable_audio_buffer(format_desc_.audio_cadence[preroll % format_desc_.audio_cadence.size()] * out_channel_layout_.num_channels, 0));
}
}
else
while(audio_frame_buffer_.try_pop(frame))
{
send_completion_.try_completion();
- schedule_next_audio(frame.audio_data());
+ schedule_next_audio(channel_remapper_.mix_and_rearrange(frame.audio_data()));
}
}
UINT32 buffered;
output_->GetBufferedAudioSampleFrameCount(&buffered);
- graph_->set_value("buffered-audio", static_cast<double>(buffered) / (format_desc_.audio_cadence[0] * format_desc_.audio_channels * 2));
+ graph_->set_value("buffered-audio", static_cast<double>(buffered) / (format_desc_.audio_cadence[0] * config_.buffer_depth()));
}
catch(...)
{
template<typename T>
void schedule_next_audio(const T& audio_data)
{
- auto sample_frame_count = static_cast<int>(audio_data.size()/format_desc_.audio_channels);
+ auto sample_frame_count = static_cast<int>(audio_data.size()/out_channel_layout_.num_channels);
audio_container_.push_back(std::vector<int32_t>(audio_data.begin(), audio_data.end()));
// frame_consumer
- void initialize(const core::video_format_desc& format_desc, int channel_index) override
+ void initialize(const core::video_format_desc& format_desc, const core::audio_channel_layout& channel_layout, int channel_index) override
{
format_desc_ = format_desc;
executor_.invoke([=]
{
consumer_.reset();
- consumer_.reset(new decklink_consumer(config_, format_desc, channel_index));
+ consumer_.reset(new decklink_consumer(config_, format_desc, channel_layout, channel_index));
});
}
L"{[keyer:INTERNAL_KEY,EXTERNAL_KEY,EXTERNAL_SEPARATE_DEVICE_KEY]} "
L"{[low_latency:LOW_LATENCY]} "
L"{[embedded_audio:EMBEDDED_AUDIO]} "
- L"{[key_only:KEY_ONLY]}");
+ L"{[key_only:KEY_ONLY]} "
+ L"{CHANNEL_LAYOUT [channel_layout:string]}");
sink.para()->text(L"Sends video on an SDI output using Blackmagic Decklink video cards.");
sink.definitions()
->item(L"device_index", L"The Blackmagic video card to use (See Blackmagic control panel for card order). Default is 1.")
->item(L"key_only",
L" will extract only the alpha channel from the "
L"channel. This is useful when you have two SDI video cards, and neither has native support "
- L"for separate fill/key output");
+ L"for separate fill/key output")
+ ->item(L"channel_layout", L"If specified, overrides the audio channel layout used by the channel.");
sink.para()->text(L"Examples:");
sink.example(L">> ADD 1 DECKLINK", L"for using the default device_index of 1.");
sink.example(L">> ADD 1 DECKLINK 2", L"uses device_index 2.");
config.embedded_audio = contains_param(L"EMBEDDED_AUDIO", params);
config.key_only = contains_param(L"KEY_ONLY", params);
+ auto channel_layout = get_param(L"CHANNEL_LAYOUT", params);
+
+ if (!channel_layout.empty())
+ {
+ auto found_layout = core::audio_channel_layout_repository::get_default()->get_layout(channel_layout);
+
+ if (!found_layout)
+ CASPAR_THROW_EXCEPTION(file_not_found() << msg_info(L"Channel layout " + channel_layout + L" not found."));
+
+ config.out_channel_layout = *found_layout;
+ }
+
return spl::make_shared<decklink_consumer_proxy>(config);
}
else if(latency == L"normal")
config.latency = configuration::latency_t::normal_latency;
+ auto channel_layout = ptree.get_optional<std::wstring>(L"channel-layout");
+
+ if (channel_layout)
+ {
+ auto found_layout = core::audio_channel_layout_repository::get_default()->get_layout(*channel_layout);
+
+ if (!found_layout)
+ CASPAR_THROW_EXCEPTION(file_not_found() << msg_info(L"Channel layout " + *channel_layout + L" not found."));
+
+ config.out_channel_layout = *found_layout;
+ }
+
config.key_only = ptree.get(L"key-only", config.key_only);
config.device_index = ptree.get(L"device", config.device_index);
config.key_device_idx = ptree.get(L"key-device", config.key_device_idx);
#include <common/param.h>
#include <common/timer.h>
+#include <core/frame/audio_channel_layout.h>
#include <core/frame/frame.h>
#include <core/frame/draw_frame.h>
#include <core/frame/frame_transform.h>
#include <functional>
namespace caspar { namespace decklink {
+
+core::audio_channel_layout get_adjusted_channel_layout(core::audio_channel_layout layout)
+{
+ if (layout.num_channels <= 2)
+ layout.num_channels = 2;
+ else if (layout.num_channels <= 8)
+ layout.num_channels = 8;
+ else
+ layout.num_channels = 16;
+
+ return layout;
+}
class decklink_producer : boost::noncopyable, public IDeckLinkInputCallback
{
std::vector<int> audio_cadence_ = out_format_desc_.audio_cadence;
boost::circular_buffer<size_t> sync_buffer_ { audio_cadence_.size() };
spl::shared_ptr<core::frame_factory> frame_factory_;
- ffmpeg::frame_muxer muxer_ { in_format_desc_.fps, frame_factory_, out_format_desc_, filter_ };
+ core::audio_channel_layout channel_layout_;
+ ffmpeg::frame_muxer muxer_ { in_format_desc_.fps, frame_factory_, out_format_desc_, channel_layout_, filter_ };
core::constraints constraints_ { in_format_desc_.width, in_format_desc_.height };
tbb::concurrent_bounded_queue<core::draw_frame> frame_buffer_;
- std::exception_ptr exception_;
+ std::exception_ptr exception_;
public:
decklink_producer(
const core::video_format_desc& in_format_desc,
int device_index,
const spl::shared_ptr<core::frame_factory>& frame_factory,
- const core::video_format_desc& out_format_desc,
+ const core::video_format_desc& out_format_desc,
+ const core::audio_channel_layout& channel_layout,
const std::wstring& filter)
: device_index_(device_index)
, filter_(filter)
, in_format_desc_(in_format_desc)
, out_format_desc_(out_format_desc)
, frame_factory_(frame_factory)
- {
+ , channel_layout_(get_adjusted_channel_layout(channel_layout))
+ {
frame_buffer_.set_capacity(2);
- graph_->set_color("tick-time", diagnostics::color(0.0f, 0.6f, 0.9f));
+ graph_->set_color("tick-time", diagnostics::color(0.0f, 0.6f, 0.9f));
graph_->set_color("late-frame", diagnostics::color(0.6f, 0.3f, 0.3f));
graph_->set_color("frame-time", diagnostics::color(1.0f, 0.0f, 0.0f));
graph_->set_color("dropped-frame", diagnostics::color(0.3f, 0.6f, 0.3f));
<< msg_info(print() + L" Could not enable video input.")
<< boost::errinfo_api_function("EnableVideoInput"));
- if(FAILED(input_->EnableAudioInput(bmdAudioSampleRate48kHz, bmdAudioSampleType32bitInteger, static_cast<int>(in_format_desc.audio_channels))))
- CASPAR_THROW_EXCEPTION(caspar_exception()
+ if(FAILED(input_->EnableAudioInput(bmdAudioSampleRate48kHz, bmdAudioSampleType32bitInteger, static_cast<int>(channel_layout_.num_channels))))
+ CASPAR_THROW_EXCEPTION(caspar_exception()
<< msg_info(print() + L" Could not enable audio input.")
<< boost::errinfo_api_function("EnableAudioInput"));
auto audio_frame = ffmpeg::create_frame();
audio_frame->data[0] = reinterpret_cast<uint8_t*>(audio_bytes);
- audio_frame->linesize[0] = audio->GetSampleFrameCount()*out_format_desc_.audio_channels*sizeof(int32_t);
+ audio_frame->linesize[0] = audio->GetSampleFrameCount() * channel_layout_.num_channels * sizeof(int32_t);
audio_frame->nb_samples = audio->GetSampleFrameCount();
audio_frame->format = AV_SAMPLE_FMT_S32;
const uint32_t length_;
executor executor_;
public:
- explicit decklink_producer_proxy(const core::video_format_desc& in_format_desc,
- const spl::shared_ptr<core::frame_factory>& frame_factory,
- const core::video_format_desc& out_format_desc,
- int device_index,
- const std::wstring& filter_str, uint32_t length)
+ explicit decklink_producer_proxy(
+ const core::video_format_desc& in_format_desc,
+ const spl::shared_ptr<core::frame_factory>& frame_factory,
+ const core::video_format_desc& out_format_desc,
+ const core::audio_channel_layout& channel_layout,
+ int device_index,
+ const std::wstring& filter_str,
+ uint32_t length)
: executor_(L"decklink_producer[" + boost::lexical_cast<std::wstring>(device_index) + L"]")
, length_(length)
{
executor_.invoke([=]
{
com_initialize();
- producer_.reset(new decklink_producer(in_format_desc, device_index, frame_factory, out_format_desc, filter_str));
+ producer_.reset(new decklink_producer(in_format_desc, device_index, frame_factory, out_format_desc, channel_layout, filter_str));
});
}
void describe_producer(core::help_sink& sink, const core::help_repository& repo)
{
sink.short_description(L"Allows video sources to be input from BlackMagic Design cards.");
- sink.syntax(L"DECKLINK [device:int],DEVICE [device:int] {FILTER [filter:string]} {LENGTH [length:int]} {FORMAT [format:string]}");
+ sink.syntax(L"DECKLINK [device:int],DEVICE [device:int] {FILTER [filter:string]} {LENGTH [length:int]} {FORMAT [format:string]} {CHANNEL_LAYOUT [channel_layout:string]}");
sink.para()->text(L"Allows video sources to be input from BlackMagic Design cards. Parameters:");
sink.definitions()
->item(L"device", L"The decklink device to stream the input from. See the Blackmagic control panel for the order of devices in your system.")
->item(L"filter", L"If specified, sets an FFmpeg video filter to use.")
->item(L"length", L"Optionally specify a limit on how many frames to produce.")
- ->item(L"format", L"Specifies what video format to expect on the incoming SDI/HDMI signal. If not specified the video format of the channel is assumed.");
+ ->item(L"format", L"Specifies what video format to expect on the incoming SDI/HDMI signal. If not specified the video format of the channel is assumed.")
+ ->item(L"channel_layout", L"Specifies what audio channel layout to expect on the incoming SDI/HDMI signal. If not specified, stereo is assumed.");
sink.para()->text(L"Examples:");
sink.example(L">> PLAY 1-10 DECKLINK DEVICE 2", L"Play using decklink device 2 expecting the video signal to have the same video format as the channel.");
sink.example(L">> PLAY 1-10 DECKLINK DEVICE 2 FORMAT PAL FILTER yadif=1:-1", L"Play using decklink device 2 expecting the video signal to be in PAL and deinterlace it.");
sink.example(L">> PLAY 1-10 DECKLINK DEVICE 2 LENGTH 1000", L"Play using decklink device 2 but only produce 1000 frames.");
+ sink.example(L">> PLAY 1-10 DECKLINK DEVICE 2 CHANNEL_LAYOUT smpte", L"Play using decklink device 2 and expect smpte surround sound.");
}
spl::shared_ptr<core::frame_producer> create_producer(const core::frame_producer_dependencies& dependencies, const std::vector<std::wstring>& params)
if(in_format_desc.format == core::video_format::invalid)
in_format_desc = dependencies.format_desc;
+
+ auto channel_layout_spec = get_param(L"CHANNEL_LAYOUT", params);
+ auto channel_layout = *core::audio_channel_layout_repository::get_default()->get_layout(L"stereo");
+
+ if (!channel_layout_spec.empty())
+ {
+ auto found_layout = core::audio_channel_layout_repository::get_default()->get_layout(channel_layout_spec);
+
+ if (!found_layout)
+ CASPAR_THROW_EXCEPTION(file_not_found() << msg_info(L"Channel layout not found."));
+
+ channel_layout = *found_layout;
+ }
- return create_destroy_proxy(spl::make_shared<decklink_producer_proxy>(in_format_desc, dependencies.frame_factory, dependencies.format_desc, device_index, filter_str, length));
+ return create_destroy_proxy(spl::make_shared<decklink_producer_proxy>(
+ in_format_desc,
+ dependencies.frame_factory,
+ dependencies.format_desc,
+ channel_layout,
+ device_index,
+ filter_str,
+ length));
}
}}
producer/audio/audio_decoder.cpp
+ producer/filter/audio_filter.cpp
producer/filter/filter.cpp
producer/input/input.cpp
producer/ffmpeg_producer.cpp
producer/tbb_avcodec.cpp
+ audio_channel_remapper.cpp
ffmpeg.cpp
ffmpeg_error.cpp
StdAfx.cpp
producer/audio/audio_decoder.h
+ producer/filter/audio_filter.h
producer/filter/filter.h
producer/input/input.h
--- /dev/null
+/*
+* Copyright (c) 2011 Sveriges Television AB <info@casparcg.com>
+*
+* This file is part of CasparCG (www.casparcg.com).
+*
+* CasparCG is free software: you can redistribute it and/or modify
+* it under the terms of the GNU General Public License as published by
+* the Free Software Foundation, either version 3 of the License, or
+* (at your option) any later version.
+*
+* CasparCG is distributed in the hope that it will be useful,
+* but WITHOUT ANY WARRANTY; without even the implied warranty of
+* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+* GNU General Public License for more details.
+*
+* You should have received a copy of the GNU General Public License
+* along with CasparCG. If not, see <http://www.gnu.org/licenses/>.
+*
+* Author: Helge Norberg, helge.norberg@svt.se
+*/
+
+#include "StdAfx.h"
+
+#include <core/frame/audio_channel_layout.h>
+
+#include <common/except.h>
+#include <common/assert.h>
+
+#include "producer/filter/audio_filter.h"
+#include "producer/util/util.h"
+
+#include <asmlib.h>
+
+#include <boost/algorithm/string/split.hpp>
+#include <boost/format.hpp>
+
+#include <cstdint>
+#include <sstream>
+
+#if defined(_MSC_VER)
+#pragma warning (push)
+#pragma warning (disable : 4244)
+#endif
+extern "C"
+{
+#include <libavfilter/avfilter.h>
+#include <libavutil/channel_layout.h>
+}
+#if defined(_MSC_VER)
+#pragma warning (pop)
+#endif
+
+namespace caspar { namespace core {
+
+std::wstring generate_pan_filter_str(
+ const audio_channel_layout& input,
+ const audio_channel_layout& output,
+ boost::optional<std::wstring> mix_config)
+{
+ std::wstringstream result;
+
+ result << L"pan=" << (boost::wformat(L"0x%|1$x|") % ffmpeg::create_channel_layout_bitmask(output.num_channels)).str();
+
+ if (!mix_config)
+ {
+ if (input.type == output.type && !input.channel_order.empty() && !input.channel_order.empty())
+ { // No config needed because the layouts are of the same type. Generate mix config string.
+ std::vector<std::wstring> mappings;
+
+ for (auto& input_name : input.channel_order)
+ mappings.push_back(input_name + L"=" + input_name);
+
+ mix_config = boost::join(mappings, L"|");
+ }
+ else
+ { // Fallback to passthru c0=c0| c1=c1 | ...
+ for (int i = 0; i < output.num_channels; ++i)
+ result << L"|c" << i << L"=c" << i;
+
+ CASPAR_LOG(debug) << "[audio_channel_remapper] Passthru " << input.num_channels << " channels into " << output.num_channels;
+
+ return result.str();
+ }
+ }
+
+ CASPAR_LOG(debug) << L"[audio_channel_remapper] Using mix config: " << *mix_config;
+
+ // Split on | to find the output sections
+ std::vector<std::wstring> output_sections;
+ boost::split(output_sections, *mix_config, boost::is_any_of(L"|"), boost::algorithm::token_compress_off);
+
+ for (auto& output_section : output_sections)
+ {
+ bool normalize_ratios = boost::contains(output_section, L"<");
+ std::wstring mix_char = normalize_ratios ? L"<" : L"=";
+
+ // Split on either = or < to get the output name and mix spec
+ std::vector<std::wstring> output_and_spec;
+ boost::split(output_and_spec, output_section, boost::is_any_of(mix_char), boost::algorithm::token_compress_off);
+ auto& mix_spec = output_and_spec.at(1);
+
+ // Replace each occurance of each channel name with c<index>
+ for (int i = 0; i < input.channel_order.size(); ++i)
+ boost::replace_all(mix_spec, input.channel_order.at(i), L"c" + boost::lexical_cast<std::wstring>(i));
+
+ auto output_name = boost::trim_copy(output_and_spec.at(0));
+ auto actual_output_indexes = output.indexes_of(output_name);
+
+ for (auto actual_output_index : actual_output_indexes)
+ {
+ result << L"|c" << actual_output_index << L" " << mix_char;
+ result << mix_spec;
+ }
+ }
+
+ return result.str();
+}
+
+struct audio_channel_remapper::impl
+{
+ const audio_channel_layout input_layout_;
+ const audio_channel_layout output_layout_;
+ const bool the_same_layouts_ = input_layout_ == output_layout_;
+ std::unique_ptr<ffmpeg::audio_filter> filter_;
+
+ impl(
+ audio_channel_layout input_layout,
+ audio_channel_layout output_layout,
+ spl::shared_ptr<audio_mix_config_repository> mix_repo)
+ : input_layout_(std::move(input_layout))
+ , output_layout_(std::move(output_layout))
+ {
+ if (input_layout_ == audio_channel_layout::invalid())
+ CASPAR_THROW_EXCEPTION(invalid_argument() << msg_info(L"Input audio channel layout is invalid"));
+
+ if (output_layout_ == audio_channel_layout::invalid())
+ CASPAR_THROW_EXCEPTION(invalid_argument() << msg_info(L"Output audio channel layout is invalid"));
+
+ CASPAR_LOG(debug) << L"[audio_channel_remapper] Input: " << input_layout_.print();
+ CASPAR_LOG(debug) << L"[audio_channel_remapper] Output: " << output_layout_.print();
+
+ if (!the_same_layouts_)
+ {
+ auto mix_config = mix_repo->get_config(input_layout_.type, output_layout_.type);
+ auto pan_filter = u8(generate_pan_filter_str(input_layout_, output_layout_, mix_config));
+
+ CASPAR_LOG(debug) << "[audio_channel_remapper] Using audio filter: " << pan_filter;
+ filter_.reset(new ffmpeg::audio_filter(
+ boost::rational<int>(1, 1),
+ 48000,
+ AV_SAMPLE_FMT_S32,
+ ffmpeg::create_channel_layout_bitmask(input_layout_.num_channels),
+ { 48000 },
+ { AV_SAMPLE_FMT_S32 },
+ { ffmpeg::create_channel_layout_bitmask(output_layout_.num_channels) },
+ pan_filter));
+ }
+ else
+ CASPAR_LOG(debug) << "[audio_channel_remapper] No remapping/mixing needed because the input and output layout is equal.";
+ }
+
+ audio_buffer mix_and_rearrange(audio_buffer input)
+ {
+ CASPAR_ENSURE(input.size() % input_layout_.num_channels == 0);
+
+ if (the_same_layouts_)
+ return std::move(input);
+
+ auto num_samples = input.size() / input_layout_.num_channels;
+ auto expected_output_size = num_samples * output_layout_.num_channels;
+ auto input_frame = std::shared_ptr<AVFrame>(av_frame_alloc(), [](AVFrame* p)
+ {
+ if (p)
+ av_frame_free(&p);
+ });
+
+ input_frame->channels = input_layout_.num_channels;
+ input_frame->channel_layout = ffmpeg::create_channel_layout_bitmask(input_layout_.num_channels);
+ input_frame->sample_rate = 48000;
+ input_frame->nb_samples = static_cast<int>(num_samples);
+ input_frame->format = AV_SAMPLE_FMT_S32;
+ input_frame->pts = 0;
+
+ av_samples_fill_arrays(
+ input_frame->extended_data,
+ input_frame->linesize,
+ reinterpret_cast<const std::uint8_t*>(input.data()),
+ input_frame->channels,
+ input_frame->nb_samples,
+ static_cast<AVSampleFormat>(input_frame->format),
+ 16);
+
+ filter_->push(input_frame);
+
+ auto frames = filter_->poll_all();
+
+ CASPAR_ENSURE(frames.size() == 1); // Expect 1:1 from pan filter
+
+ auto& frame = frames.front();
+ auto output_size = frame->channels * frame->nb_samples;
+
+ CASPAR_ENSURE(output_size == expected_output_size);
+
+ return audio_buffer(
+ reinterpret_cast<std::int32_t*>(frame->extended_data[0]),
+ output_size,
+ true,
+ std::move(frame));
+ }
+};
+
+audio_channel_remapper::audio_channel_remapper(
+ audio_channel_layout input_layout,
+ audio_channel_layout output_layout,
+ spl::shared_ptr<audio_mix_config_repository> mix_repo)
+ : impl_(new impl(std::move(input_layout), std::move(output_layout), std::move(mix_repo)))
+{
+}
+
+audio_buffer audio_channel_remapper::mix_and_rearrange(audio_buffer input)
+{
+ return impl_->mix_and_rearrange(std::move(input));
+}
+
+}}
#include "../producer/tbb_avcodec.h"
#include <core/frame/frame.h>
+#include <core/frame/audio_channel_layout.h>
#include <core/mixer/audio/audio_util.h>
#include <core/consumer/frame_consumer.h>
#include <core/video_format.h>
struct ffmpeg_consumer : boost::noncopyable
{
const spl::shared_ptr<diagnostics::graph> graph_;
- const std::string filename_;
+ const std::string filename_;
+ const std::string full_filename_ = u8(env::media_folder()) + filename_;
const std::shared_ptr<AVFormatContext> oc_ { avformat_alloc_context(), avformat_free_context };
- const core::video_format_desc format_desc_;
+ const core::video_format_desc format_desc_;
+ const core::audio_channel_layout channel_layout_;
core::monitor::subject monitor_subject_;
executor executor_;
public:
- ffmpeg_consumer(const std::string& filename, const core::video_format_desc& format_desc, std::vector<option> options, bool key_only)
+ ffmpeg_consumer(
+ const std::string& filename,
+ const core::video_format_desc& format_desc,
+ const core::audio_channel_layout& channel_layout,
+ std::vector<option> options,
+ bool key_only)
: filename_(filename)
, format_desc_(format_desc)
- , output_format_(format_desc, filename, options)
+ , channel_layout_(channel_layout)
+ , output_format_(format_desc, full_filename_, options)
, key_only_(key_only)
, executor_(print())
{
check_space();
// TODO: Ask stakeholders about case where file already exists.
- boost::filesystem::remove(boost::filesystem::path(env::media_folder() + u16(filename))); // Delete the file if it exists
+ boost::filesystem::remove(boost::filesystem::path(full_filename_)); // Delete the file if it exists
graph_->set_color("frame-time", diagnostics::color(0.1f, 1.0f, 0.1f));
graph_->set_color("dropped-frame", diagnostics::color(0.3f, 0.6f, 0.3f));
oc_->oformat = output_format_.format;
- std::strcpy(oc_->filename, filename_.c_str());
+ std::strcpy(oc_->filename, full_filename_.c_str());
// Add the audio and video streams using the default format codecs and initialize the codecs.
video_st_ = add_video_stream(options);
if (!key_only)
audio_st_ = add_audio_stream(options);
- av_dump_format(oc_.get(), 0, filename_.c_str(), 1);
+ av_dump_format(oc_.get(), 0, full_filename_.c_str(), 1);
// Open the output ffmpeg, if needed.
if (!(oc_->oformat->flags & AVFMT_NOFILE))
- THROW_ON_ERROR2(avio_open(&oc_->pb, filename.c_str(), AVIO_FLAG_WRITE), "[ffmpeg_consumer]");
+ THROW_ON_ERROR2(avio_open(&oc_->pb, full_filename_.c_str(), AVIO_FLAG_WRITE), "[ffmpeg_consumer]");
THROW_ON_ERROR2(avformat_write_header(oc_.get(), nullptr), "[ffmpeg_consumer]");
av_frame->top_field_first = format_desc_.field_mode == core::field_mode::upper;
av_frame->pts = frame_number_++;
- monitor_subject_ << core::monitor::message("/frame")
- % static_cast<int64_t>(frame_number_)
- % static_cast<int64_t>(std::numeric_limits<int64_t>::max());
+ monitor_subject_
+ << core::monitor::message("/frame") % static_cast<int64_t>(frame_number_)
+ << core::monitor::message("/path") % filename_
+ << core::monitor::message("/fps") % format_desc_.fps;
AVPacket pkt;
av_init_packet(&pkt);
{
swr_ = std::shared_ptr<SwrContext>(swr_alloc_set_opts(nullptr,
get_channel_layout(c), c->sample_fmt, c->sample_rate,
- av_get_default_channel_layout(format_desc_.audio_channels), AV_SAMPLE_FMT_S32, format_desc_.audio_sample_rate,
+ av_get_default_channel_layout(channel_layout_.num_channels), AV_SAMPLE_FMT_S32, format_desc_.audio_sample_rate,
0, nullptr), [](SwrContext* p){swr_free(&p);});
if(!swr_)
auto channel_samples = swr_convert(swr_.get(),
out, static_cast<int>(buffer.size()) / c->channels / av_get_bytes_per_sample(c->sample_fmt),
- in, static_cast<int>(frame.audio_data().size()/format_desc_.audio_channels));
+ in, static_cast<int>(frame.audio_data().size()/channel_layout_.num_channels));
buffer.resize(channel_samples * c->channels * av_get_bytes_per_sample(c->sample_fmt));
void check_space()
{
- auto space = boost::filesystem::space(boost::filesystem::path(filename_).parent_path());
+ auto space = boost::filesystem::space(boost::filesystem::path(full_filename_).parent_path());
if(space.available < 512*1000000)
CASPAR_THROW_EXCEPTION(file_write_error() << msg_info("out of space"));
}
{
}
- void initialize(const core::video_format_desc& format_desc, int) override
+ void initialize(const core::video_format_desc& format_desc, const core::audio_channel_layout& channel_layout, int) override
{
if(consumer_)
CASPAR_THROW_EXCEPTION(invalid_operation() << msg_info("Cannot reinitialize ffmpeg-consumer."));
- consumer_.reset(new ffmpeg_consumer(u8(filename_), format_desc, options_, false));
+ consumer_.reset(new ffmpeg_consumer(u8(filename_), format_desc, channel_layout, options_, false));
if (separate_key_)
{
boost::filesystem::path fill_file(filename_);
auto without_extension = u16(fill_file.stem().string());
- auto key_file = env::media_folder() + without_extension + L"_A" + u16(fill_file.extension().string());
+ auto key_file = without_extension + L"_A" + u16(fill_file.extension().string());
- key_only_consumer_.reset(new ffmpeg_consumer(u8(key_file), format_desc, options_, true));
+ key_only_consumer_.reset(new ffmpeg_consumer(u8(key_file), format_desc, channel_layout, options_, true));
}
}
sink.example(L">> ADD 1 FILE output.mov -vcodec dnxhd");
sink.example(L">> ADD 1 FILE output.mov -vcodec prores");
sink.example(L">> ADD 1 FILE output.mov -vcodec dvvideo");
- sink.example(L">> ADD 1 FILE output.mov - vcodec libx264 -preset ultrafast -tune fastdecode -crf 25");
+ sink.example(L">> ADD 1 FILE output.mov -vcodec libx264 -preset ultrafast -tune fastdecode -crf 25");
sink.example(L">> ADD 1 FILE output.mov -vcodec dnxhd SEPARATE_KEY", L"for creating output.mov with fill and output_A.mov with key/alpha");
}
options.push_back(option(name, value));
}
- return spl::make_shared<ffmpeg_consumer_proxy>(env::media_folder() + path["PATH"].str(), options, separate_key);
+ return spl::make_shared<ffmpeg_consumer_proxy>(path["PATH"].str(), options, separate_key);
}
spl::shared_ptr<core::frame_consumer> create_preconfigured_consumer(
std::vector<option> options;
options.push_back(option("vcodec", u8(codec)));
- return spl::make_shared<ffmpeg_consumer_proxy>(env::media_folder() + filename, options, separate_key);
+ return spl::make_shared<ffmpeg_consumer_proxy>(filename, options, separate_key);
}
}}
#include <core/consumer/frame_consumer.h>
#include <core/frame/frame.h>
+#include <core/frame/audio_channel_layout.h>
#include <core/video_format.h>
#include <core/monitor/monitor.h>
#include <core/help/help_repository.h>
bool compatibility_mode_;
core::video_format_desc in_video_format_;
+ core::audio_channel_layout in_channel_layout_ = core::audio_channel_layout::invalid();
std::shared_ptr<AVFormatContext> oc_;
tbb::atomic<bool> abort_request_;
void initialize(
const core::video_format_desc& format_desc,
+ const core::audio_channel_layout& channel_layout,
int channel_index) override
{
try
CASPAR_VERIFY(format_desc.format != core::video_format::invalid);
in_video_format_ = format_desc;
+ in_channel_layout_ = channel_layout;
CASPAR_VERIFY(oc_->oformat);
const auto asrc_options = (boost::format("sample_rate=%1%:sample_fmt=%2%:channels=%3%:time_base=%4%/%5%:channel_layout=%6%")
% in_video_format_.audio_sample_rate
% av_get_sample_fmt_name(AV_SAMPLE_FMT_S32)
- % in_video_format_.audio_channels
+ % in_channel_layout_.num_channels
% 1 % in_video_format_.audio_sample_rate
% boost::io::group(
std::hex,
std::showbase,
- av_get_default_channel_layout(in_video_format_.audio_channels))).str();
+ av_get_default_channel_layout(in_channel_layout_.num_channels))).str();
AVFilterContext* filt_asrc = nullptr;
FF(avfilter_graph_create_filter(
av_frame_free(&p);
});
- src_av_frame->channels = in_video_format_.audio_channels;
- src_av_frame->channel_layout = av_get_default_channel_layout(in_video_format_.audio_channels);
+ src_av_frame->channels = in_channel_layout_.num_channels;
+ src_av_frame->channel_layout = av_get_default_channel_layout(in_channel_layout_.num_channels);
src_av_frame->sample_rate = in_video_format_.audio_sample_rate;
src_av_frame->nb_samples = static_cast<int>(frame_ptr.audio_data().size()) / src_av_frame->channels;
src_av_frame->format = AV_SAMPLE_FMT_S32;
#include "../../ffmpeg_error.h"
#include <core/video_format.h>
+#include <core/frame/audio_channel_layout.h>
#include <common/log.h>
#include <common/cache_aligned_vector.h>
namespace caspar { namespace ffmpeg {
-uint64_t get_channel_layout(AVCodecContext* dec)
+uint64_t get_ffmpeg_channel_layout(AVCodecContext* dec)
{
auto layout = (dec->channel_layout && dec->channels == av_get_channel_layout_nb_channels(dec->channel_layout)) ? dec->channel_layout : av_get_default_channel_layout(dec->channels);
return layout;
struct audio_decoder::impl : boost::noncopyable
{
core::monitor::subject monitor_subject_;
- input* input_;
+ input& input_;
int index_;
const core::video_format_desc format_desc_;
- const spl::shared_ptr<AVCodecContext> codec_context_ = open_codec(input_->context(), AVMEDIA_TYPE_AUDIO, index_);
+ const spl::shared_ptr<AVCodecContext> codec_context_ = open_codec(input_.context(), AVMEDIA_TYPE_AUDIO, index_);
std::shared_ptr<SwrContext> swr_ {
swr_alloc_set_opts(
nullptr,
- av_get_default_channel_layout(format_desc_.audio_channels),
+ create_channel_layout_bitmask(codec_context_->channels),//get_ffmpeg_channel_layout(codec_context_.get()),
AV_SAMPLE_FMT_S32,
format_desc_.audio_sample_rate,
- get_channel_layout(codec_context_.get()),
+ create_channel_layout_bitmask(codec_context_->channels),//get_ffmpeg_channel_layout(codec_context_.get()),
codec_context_->sample_fmt,
codec_context_->sample_rate,
0,
};
cache_aligned_vector<uint8_t> buffer_;
+ core::audio_channel_layout channel_layout_;
std::shared_ptr<AVPacket> current_packet_;
public:
- explicit impl(input& in, const core::video_format_desc& format_desc)
- : input_(&in)
- , format_desc_(format_desc)
- , codec_context_(open_codec(input_->context(), AVMEDIA_TYPE_AUDIO, index_))
+ explicit impl(input& in, const core::video_format_desc& format_desc, const std::wstring& channel_layout_spec)
+ : input_(in)
+ , format_desc_(format_desc)
, buffer_(480000 * 4)
- {
+ , channel_layout_(get_audio_channel_layout(*codec_context_, channel_layout_spec))
+ {
if(!swr_)
CASPAR_THROW_EXCEPTION(bad_alloc());
std::shared_ptr<AVFrame> poll()
{
- if(!current_packet_ && !input_->try_pop_audio(current_packet_))
+ if(!current_packet_ && !input_.try_pop_audio(current_packet_))
return nullptr;
std::shared_ptr<AVFrame> audio;
current_packet_.reset();
}
- return audio ? audio : poll();
+ return audio;
}
std::shared_ptr<AVFrame> decode(AVPacket& pkt)
uint8_t* out[] = {buffer_.data()};
auto channel_samples = swr_convert(swr_.get(),
- out, static_cast<int>(buffer_.size()) / format_desc_.audio_channels / av_get_bytes_per_sample(AV_SAMPLE_FMT_S32),
- in, frame->nb_samples);
+ out, static_cast<int>(buffer_.size()) / codec_context_->channels / av_get_bytes_per_sample(AV_SAMPLE_FMT_S32),
+ in, frame->nb_samples);
frame->data[0] = buffer_.data();
- frame->linesize[0] = channel_samples * format_desc_.audio_channels * av_get_bytes_per_sample(AV_SAMPLE_FMT_S32);
+ frame->linesize[0] = channel_samples * codec_context_->channels * av_get_bytes_per_sample(AV_SAMPLE_FMT_S32);
frame->nb_samples = channel_samples;
frame->format = AV_SAMPLE_FMT_S32;
-
+
monitor_subject_ << core::monitor::message("/file/audio/sample-rate") % codec_context_->sample_rate
<< core::monitor::message("/file/audio/channels") % codec_context_->channels
<< core::monitor::message("/file/audio/format") % u8(av_get_sample_fmt_name(codec_context_->sample_fmt))
}
};
-audio_decoder::audio_decoder(input& input, const core::video_format_desc& format_desc) : impl_(new impl(input, format_desc)){}
+audio_decoder::audio_decoder(input& input, const core::video_format_desc& format_desc, const std::wstring& channel_layout_spec) : impl_(new impl(input, format_desc, channel_layout_spec)){}
audio_decoder::audio_decoder(audio_decoder&& other) : impl_(std::move(other.impl_)){}
audio_decoder& audio_decoder::operator=(audio_decoder&& other){impl_ = std::move(other.impl_); return *this;}
std::shared_ptr<AVFrame> audio_decoder::operator()(){return impl_->poll();}
uint32_t audio_decoder::nb_frames() const{return impl_->nb_frames();}
+const core::audio_channel_layout& audio_decoder::channel_layout() const { return impl_->channel_layout_; }
std::wstring audio_decoder::print() const{return impl_->print();}
core::monitor::subject& audio_decoder::monitor_output() { return impl_->monitor_subject_;}
+
}}
#include <common/memory.h>
+#include <core/fwd.h>
+
#include <boost/noncopyable.hpp>
struct AVPacket;
class audio_decoder : public boost::noncopyable
{
public:
- explicit audio_decoder(class input& input, const core::video_format_desc& format_desc);
+ explicit audio_decoder(class input& input, const core::video_format_desc& format_desc, const std::wstring& channel_layout_spec);
audio_decoder(audio_decoder&& other);
audio_decoder& operator=(audio_decoder&& other);
std::shared_ptr<AVFrame> operator()();
uint32_t nb_frames() const;
+ const core::audio_channel_layout& channel_layout() const;
std::wstring print() const;
#include <core/video_format.h>
#include <core/producer/frame_producer.h>
+#include <core/frame/audio_channel_layout.h>
#include <core/frame/frame_factory.h>
#include <core/frame/draw_frame.h>
#include <core/frame/frame_transform.h>
std::unique_ptr<video_decoder> video_decoder_;
std::unique_ptr<audio_decoder> audio_decoder_;
- frame_muxer muxer_;
+ std::unique_ptr<frame_muxer> muxer_;
core::constraints constraints_;
core::draw_frame last_frame_ = core::draw_frame::empty();
boost::optional<uint32_t> seek_target_;
public:
- explicit ffmpeg_producer(const spl::shared_ptr<core::frame_factory>& frame_factory,
- const core::video_format_desc& format_desc,
- const std::wstring& filename,
- const std::wstring& filter,
- bool loop,
- uint32_t start,
- uint32_t length)
+ explicit ffmpeg_producer(
+ const spl::shared_ptr<core::frame_factory>& frame_factory,
+ const core::video_format_desc& format_desc,
+ const std::wstring& channel_layout_spec,
+ const std::wstring& filename,
+ const std::wstring& filter,
+ bool loop,
+ uint32_t start,
+ uint32_t length)
: filename_(filename)
, frame_factory_(frame_factory)
, format_desc_(format_desc)
, input_(graph_, filename_, loop, start, length)
, fps_(read_fps(input_.context(), format_desc_.fps))
- , muxer_(fps_, frame_factory, format_desc_, filter)
, start_(start)
{
graph_->set_color("frame-time", diagnostics::color(0.1f, 1.0f, 0.1f));
CASPAR_LOG(warning) << print() << "Failed to open video-stream. Running without video.";
}
+ auto channel_layout = core::audio_channel_layout::invalid();
+
try
{
- audio_decoder_ .reset(new audio_decoder(input_, format_desc_));
+ audio_decoder_ .reset(new audio_decoder(input_, format_desc_, channel_layout_spec));
audio_decoder_->monitor_output().attach_parent(monitor_subject_);
+
+ channel_layout = audio_decoder_->channel_layout();
CASPAR_LOG(info) << print() << L" " << audio_decoder_->print();
}
{
CASPAR_LOG_CURRENT_EXCEPTION();
CASPAR_LOG(warning) << print() << " Failed to open audio-stream. Running without audio.";
- }
+ }
+
+ muxer_.reset(new frame_muxer(fps_, frame_factory, format_desc_, channel_layout, filter));
decode_next_frame();
decode_next_frame();
- if(!muxer_.empty())
+ if(!muxer_->empty())
{
- last_frame_ = frame = std::move(muxer_.front());
- muxer_.pop();
+ last_frame_ = frame = std::move(muxer_->front());
+ muxer_->pop();
}
- else
+ else
graph_->set_tag("underflow");
graph_->set_value("frame-time", frame_timer.elapsed()*format_desc_.fps*0.5);
*monitor_subject_
<< core::monitor::message("/profiler/time") % frame_timer.elapsed() % (1.0/format_desc_.fps);
*monitor_subject_
- << core::monitor::message("/file/time") % (file_frame_number()/fps_)
- % (file_nb_frames()/fps_)
<< core::monitor::message("/file/frame") % static_cast<int32_t>(file_frame_number())
% static_cast<int32_t>(file_nb_frames())
<< core::monitor::message("/file/fps") % fps_
uint32_t nb_frames = file_nb_frames();
nb_frames = std::min(input_.length(), nb_frames);
- nb_frames = muxer_.calc_nb_frames(nb_frames);
+ nb_frames = muxer_->calc_nb_frames(nb_frames);
return nb_frames > start_ ? nb_frames - start_ : 0;
}
for(int n = 0; n < 8 && (last_frame_ == core::draw_frame::empty() || (seek_target_ && file_frame_number() != *seek_target_+2)); ++n)
{
decode_next_frame();
- if(!muxer_.empty())
+ if(!muxer_->empty())
{
- last_frame_ = muxer_.front();
+ last_frame_ = muxer_->front();
seek_target_.reset();
}
}
seek_target_ = std::min(target, file_nb_frames());
input_.seek(*seek_target_);
- muxer_.clear();
+ muxer_->clear();
}
std::wstring print_mode() const
void decode_next_frame()
{
- for(int n = 0; n < 8 && muxer_.empty(); ++n)
+ for(int n = 0; n < 32 && muxer_->empty(); ++n)
{
- if(!muxer_.video_ready())
- muxer_.push_video(video_decoder_ ? (*video_decoder_)() : create_frame());
- if(!muxer_.audio_ready())
- muxer_.push_audio(audio_decoder_ ? (*audio_decoder_)() : create_frame());
+ if(!muxer_->video_ready())
+ muxer_->push_video(video_decoder_ ? (*video_decoder_)() : create_frame());
+ if(!muxer_->audio_ready())
+ muxer_->push_audio(audio_decoder_ ? (*audio_decoder_)() : create_frame());
}
+
graph_->set_text(print());
}
};
void describe_producer(core::help_sink& sink, const core::help_repository& repo)
{
sink.short_description(L"A producer for playing media files supported by FFmpeg.");
- sink.syntax(L"[clip:string] {[loop:LOOP]} {START,SEEK [start:int]} {LENGTH [start:int]} {FILTER [filter:string]}");
+ sink.syntax(L"[clip:string] {[loop:LOOP]} {START,SEEK [start:int]} {LENGTH [start:int]} {FILTER [filter:string]} {CHANNEL_LAYOUT [channel_layout:string]}");
sink.para()
->text(L"The FFmpeg Producer can play all media that FFmpeg can play, which includes many ")
->text(L"QuickTime video codec such as Animation, PNG, PhotoJPEG, MotionJPEG, as well as ")
->item(L"loop", L"Will cause the media file to loop between start and start + length")
->item(L"start", L"Optionally sets the start frame. 0 by default. If loop is specified this will be the frame where it starts over again.")
->item(L"length", L"Optionally sets the length of the clip. If not specified the clip will be played to the end. If loop is specified the file will jump to start position once this number of frames has been played.")
- ->item(L"filter", L"If specified, will be used as an FFmpeg video filter.");
+ ->item(L"filter", L"If specified, will be used as an FFmpeg video filter.")
+ ->item(L"channel_layout",
+ L"Optionally override the automatically deduced audio channel layout. "
+ L"Either a named layout as specified in casparcg.config or in the format [type:string]:[channel_order:string] for a custom layout.");
sink.para()->text(L"Examples:");
sink.example(L">> PLAY 1-10 folder/clip", L"to play all frames in a clip and stop at the last frame.");
sink.example(L">> PLAY 1-10 folder/clip LOOP", L"to loop a clip between the first frame and the last frame.");
sink.example(L">> PLAY 1-10 folder/clip LOOP START 10 LENGTH 50", L"to loop a clip between frame 10 and frame 60.");
sink.example(L">> PLAY 1-10 folder/clip START 10 LENGTH 50", L"to play frames 10-60 in a clip and stop.");
sink.example(L">> PLAY 1-10 folder/clip FILTER yadif=1,-1", L"to deinterlace the video.");
+ sink.example(L">> PLAY 1-10 folder/clip CHANNEL_LAYOUT dolbydigital", L"given the defaults in casparcg.config this will specifies that the clip has 6 audio channels of the type 5.1 and that they are in the order FL FC FR BL BR LFE regardless of what ffmpeg says.");
+ sink.example(L">> PLAY 1-10 folder/clip CHANNEL_LAYOUT \"5.1:LFE FL FC FR BL BR\"", L"specifies that the clip has 6 audio channels of the type 5.1 and that they are in the specified order regardless of what ffmpeg says.");
sink.para()->text(L"The FFmpeg producer also supports changing some of the settings via CALL:");
sink.example(L">> CALL 1-10 LOOP 1");
sink.example(L">> CALL 1-10 START 10");
if(filename.empty())
return core::frame_producer::empty();
- bool loop = contains_param(L"LOOP", params);
- auto start = get_param(L"START", params, get_param(L"SEEK", params, static_cast<uint32_t>(0)));
- auto length = get_param(L"LENGTH", params, std::numeric_limits<uint32_t>::max());
- auto filter_str = get_param(L"FILTER", params, L"");
-
- return create_destroy_proxy(spl::make_shared_ptr(std::make_shared<ffmpeg_producer>(dependencies.frame_factory, dependencies.format_desc, filename, filter_str, loop, start, length)));
+ bool loop = contains_param(L"LOOP", params);
+ auto start = get_param(L"START", params, get_param(L"SEEK", params, static_cast<uint32_t>(0)));
+ auto length = get_param(L"LENGTH", params, std::numeric_limits<uint32_t>::max());
+ auto filter_str = get_param(L"FILTER", params, L"");
+ auto channel_layout = get_param(L"CHANNEL_LAYOUT", params, L"");
+
+ return create_destroy_proxy(spl::make_shared_ptr(std::make_shared<ffmpeg_producer>(
+ dependencies.frame_factory,
+ dependencies.format_desc,
+ channel_layout,
+ filename,
+ filter_str,
+ loop,
+ start,
+ length)));
}
}}
--- /dev/null
+/*
+* Copyright 2013 Sveriges Television AB http://casparcg.com/
+*
+* This file is part of CasparCG (www.casparcg.com).
+*
+* CasparCG is free software: you can redistribute it and/or modify
+* it under the terms of the GNU General Public License as published by
+* the Free Software Foundation, either version 3 of the License, or
+* (at your option) any later version.
+*
+* CasparCG is distributed in the hope that it will be useful,
+* but WITHOUT ANY WARRANTY; without even the implied warranty of
+* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+* GNU General Public License for more details.
+*
+* You should have received a copy of the GNU General Public License
+* along with CasparCG. If not, see <http://www.gnu.org/licenses/>.
+*
+* Author: Robert Nagy, ronag89@gmail.com
+*/
+
+#include "../../StdAfx.h"
+
+#include "audio_filter.h"
+
+#include "../../ffmpeg_error.h"
+
+#include <common/assert.h>
+#include <common/except.h>
+
+#include <boost/algorithm/string.hpp>
+#include <boost/thread.hpp>
+#include <boost/format.hpp>
+#include <boost/rational.hpp>
+
+#include <cstdio>
+#include <sstream>
+#include <string>
+
+#if defined(_MSC_VER)
+#pragma warning (push)
+#pragma warning (disable : 4244)
+#endif
+extern "C"
+{
+ #include <libavutil/avutil.h>
+ #include <libavutil/imgutils.h>
+ #include <libavutil/opt.h>
+ #include <libavfilter/avfilter.h>
+ #include <libavfilter/avcodec.h>
+ #include <libavfilter/buffersink.h>
+ #include <libavfilter/buffersrc.h>
+}
+#if defined(_MSC_VER)
+#pragma warning (pop)
+#endif
+
+namespace caspar { namespace ffmpeg {
+
+// Pimpl for audio_filter: owns an avfilter graph of the shape
+// abuffer (source) -> [user filtergraph, or a direct link] -> abuffersink.
+struct audio_filter::implementation
+{
+	std::string filtergraph_;	// lower-cased filter description; empty means pass-through link
+
+	std::shared_ptr<AVFilterGraph> audio_graph_;
+	AVFilterContext* audio_graph_in_;	// non-owning; owned by audio_graph_
+	AVFilterContext* audio_graph_out_;	// non-owning; owned by audio_graph_
+
+	// Builds and configures the graph. The out_* vectors constrain what the
+	// sink may output; empty vectors default to 48 kHz / S32 / native layout.
+	implementation(
+			boost::rational<int> in_time_base,
+			int in_sample_rate,
+			AVSampleFormat in_sample_fmt,
+			std::int64_t in_audio_channel_layout,
+			std::vector<int> out_sample_rates,
+			std::vector<AVSampleFormat> out_sample_fmts,
+			std::vector<std::int64_t> out_audio_channel_layouts,
+			const std::string& filtergraph)
+		: filtergraph_(boost::to_lower_copy(filtergraph))
+	{
+		// Apply defaults, then terminate each list with the sentinel value
+		// av_opt_set_int_list expects (-1, or AV_SAMPLE_FMT_NONE for formats).
+		if (out_sample_rates.empty())
+			out_sample_rates.push_back(48000);
+
+		out_sample_rates.push_back(-1);
+
+		if (out_sample_fmts.empty())
+			out_sample_fmts.push_back(AV_SAMPLE_FMT_S32);
+
+		out_sample_fmts.push_back(AV_SAMPLE_FMT_NONE);
+
+		if (out_audio_channel_layouts.empty())
+			out_audio_channel_layouts.push_back(AV_CH_LAYOUT_NATIVE);
+
+		out_audio_channel_layouts.push_back(-1);
+
+		audio_graph_.reset(
+				avfilter_graph_alloc(),
+				[](AVFilterGraph* p)
+				{
+					avfilter_graph_free(&p);
+				});
+
+		// Source options describe the incoming audio (layout passed as hex bitmask).
+		const auto asrc_options = (boost::format("time_base=%1%/%2%:sample_rate=%3%:sample_fmt=%4%:channel_layout=0x%|5$x|")
+			% in_time_base.numerator() % in_time_base.denominator()
+			% in_sample_rate
+			% av_get_sample_fmt_name(in_sample_fmt)
+			% in_audio_channel_layout).str();
+
+		AVFilterContext* filt_asrc = nullptr;
+		FF(avfilter_graph_create_filter(
+				&filt_asrc,
+				avfilter_get_by_name("abuffer"),
+				"filter_buffer",
+				asrc_options.c_str(),
+				nullptr,
+				audio_graph_.get()));
+
+		AVFilterContext* filt_asink = nullptr;
+		FF(avfilter_graph_create_filter(
+				&filt_asink,
+				avfilter_get_by_name("abuffersink"),
+				"filter_buffersink",
+				nullptr,
+				nullptr,
+				audio_graph_.get()));
+
+// NOTE(review): unlike the extern "C" includes above, this push/pop is not
+// guarded by _MSC_VER — non-MSVC compilers may warn about the unknown pragma.
+#pragma warning (push)
+#pragma warning (disable : 4245)
+
+		FF(av_opt_set_int_list(
+				filt_asink,
+				"sample_fmts",
+				out_sample_fmts.data(),
+				-1,
+				AV_OPT_SEARCH_CHILDREN));
+		FF(av_opt_set_int_list(
+				filt_asink,
+				"channel_layouts",
+				out_audio_channel_layouts.data(),
+				-1,
+				AV_OPT_SEARCH_CHILDREN));
+		FF(av_opt_set_int_list(
+				filt_asink,
+				"sample_rates",
+				out_sample_rates.data(),
+				-1,
+				AV_OPT_SEARCH_CHILDREN));
+
+#pragma warning (pop)
+
+		configure_filtergraph(
+				*audio_graph_,
+				filtergraph_,
+				*filt_asrc,
+				*filt_asink);
+
+		audio_graph_in_ = filt_asrc;
+		audio_graph_out_ = filt_asink;
+
+		// Log the resulting graph topology for diagnostics.
+		CASPAR_LOG(info)
+			<< u16(std::string("\n")
+				+ avfilter_graph_dump(
+						audio_graph_.get(),
+						nullptr));
+	}
+
+	// Parses and wires the user filtergraph between source and sink; with an
+	// empty description the source is linked directly to the sink.
+	void configure_filtergraph(
+			AVFilterGraph& graph,
+			const std::string& filtergraph,
+			AVFilterContext& source_ctx,
+			AVFilterContext& sink_ctx)
+	{
+		AVFilterInOut* outputs = nullptr;
+		AVFilterInOut* inputs = nullptr;
+
+		try
+		{
+			if(!filtergraph.empty())
+			{
+				outputs = avfilter_inout_alloc();
+				inputs = avfilter_inout_alloc();
+
+				CASPAR_VERIFY(outputs && inputs);
+
+				// "in"/"out" are the conventional pad labels a filtergraph
+				// string connects to.
+				outputs->name = av_strdup("in");
+				outputs->filter_ctx = &source_ctx;
+				outputs->pad_idx = 0;
+				outputs->next = nullptr;
+
+				inputs->name = av_strdup("out");
+				inputs->filter_ctx = &sink_ctx;
+				inputs->pad_idx = 0;
+				inputs->next = nullptr;
+
+				FF(avfilter_graph_parse(
+						&graph,
+						filtergraph.c_str(),
+						inputs,
+						outputs,
+						nullptr));
+			}
+			else
+			{
+				FF(avfilter_link(
+						&source_ctx,
+						0,
+						&sink_ctx,
+						0));
+			}
+
+			FF(avfilter_graph_config(
+					&graph,
+					nullptr));
+		}
+		catch(...)
+		{
+			// NOTE(review): the frees below are commented out — presumably
+			// because avfilter_graph_parse takes ownership of inputs/outputs.
+			// Verify against the FFmpeg version in use; if parse fails before
+			// consuming them (or VERIFY throws) this leaks both structs.
+			//avfilter_inout_free(&outputs);
+			//avfilter_inout_free(&inputs);
+			throw;
+		}
+	}
+
+	// Feeds one frame into the graph's source buffer.
+	void push(const std::shared_ptr<AVFrame>& src_av_frame)
+	{
+		FF(av_buffersrc_add_frame(
+				audio_graph_in_,
+				src_av_frame.get()));
+	}
+
+	// Pulls one filtered frame from the sink. Returns nullptr when no frame is
+	// currently available (EAGAIN) or the stream has ended (EOF); any other
+	// error throws via FF_RET.
+	std::shared_ptr<AVFrame> poll()
+	{
+		std::shared_ptr<AVFrame> filt_frame(
+				av_frame_alloc(),
+				[](AVFrame* p)
+				{
+					av_frame_free(&p);
+				});
+
+		const auto ret = av_buffersink_get_frame(
+				audio_graph_out_,
+				filt_frame.get());
+
+		if(ret == AVERROR_EOF || ret == AVERROR(EAGAIN))
+			return nullptr;
+
+		FF_RET(ret, "poll");
+
+		return filt_frame;
+	}
+};
+
+// Constructs the filter; the output-constraint vectors are moved into the
+// implementation, which appends the required sentinel terminators itself.
+audio_filter::audio_filter(
+		boost::rational<int> in_time_base,
+		int in_sample_rate,
+		AVSampleFormat in_sample_fmt,
+		std::int64_t in_audio_channel_layout,
+		std::vector<int> out_sample_rates,
+		std::vector<AVSampleFormat> out_sample_fmts,
+		std::vector<std::int64_t> out_audio_channel_layouts,
+		const std::string& filtergraph)
+	: impl_(new implementation(
+		in_time_base,
+		in_sample_rate,
+		in_sample_fmt,
+		in_audio_channel_layout,
+		std::move(out_sample_rates),
+		std::move(out_sample_fmts),
+		std::move(out_audio_channel_layouts),
+		filtergraph)){}
+// Move operations transfer the pimpl; the remaining members forward to it.
+audio_filter::audio_filter(audio_filter&& other) : impl_(std::move(other.impl_)){}
+audio_filter& audio_filter::operator=(audio_filter&& other){impl_ = std::move(other.impl_); return *this;}
+void audio_filter::push(const std::shared_ptr<AVFrame>& frame){impl_->push(frame);}
+std::shared_ptr<AVFrame> audio_filter::poll(){return impl_->poll();}
+std::wstring audio_filter::filter_str() const{return u16(impl_->filtergraph_);}
+// Drains the sink: repeatedly polls until no more frames are immediately
+// available, returning everything produced so far in order.
+std::vector<spl::shared_ptr<AVFrame>> audio_filter::poll_all()
+{
+	std::vector<spl::shared_ptr<AVFrame>> frames;
+	for(auto frame = poll(); frame; frame = poll())
+		frames.push_back(spl::make_shared_ptr(frame));
+	return frames;
+}
+
+}}
--- /dev/null
+/*
+* Copyright 2013 Sveriges Television AB http://casparcg.com/
+*
+* This file is part of CasparCG (www.casparcg.com).
+*
+* CasparCG is free software: you can redistribute it and/or modify
+* it under the terms of the GNU General Public License as published by
+* the Free Software Foundation, either version 3 of the License, or
+* (at your option) any later version.
+*
+* CasparCG is distributed in the hope that it will be useful,
+* but WITHOUT ANY WARRANTY; without even the implied warranty of
+* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+* GNU General Public License for more details.
+*
+* You should have received a copy of the GNU General Public License
+* along with CasparCG. If not, see <http://www.gnu.org/licenses/>.
+*
+* Author: Robert Nagy, ronag89@gmail.com
+*/
+
+#pragma once
+
+#include <common/memory.h>
+
+#include <boost/rational.hpp>
+#include <boost/noncopyable.hpp>
+
+#include <string>
+#include <vector>
+
+#if defined(_MSC_VER)
+#pragma warning (push)
+#pragma warning (disable : 4244)
+#endif
+extern "C"
+{
+#include <libavutil/samplefmt.h>
+}
+#if defined(_MSC_VER)
+#pragma warning (pop)
+#endif
+
+struct AVFrame;
+
+namespace caspar { namespace ffmpeg {
+
+// Wraps an FFmpeg audio filtergraph (abuffer -> filtergraph -> abuffersink).
+// Movable but non-copyable; all work is delegated to a hidden implementation.
+class audio_filter : boost::noncopyable
+{
+public:
+	// in_*  describe the incoming audio stream.
+	// out_* constrain what the sink may produce; empty vectors fall back to
+	// the implementation's defaults. filtergraph is an FFmpeg filter
+	// description; empty means pass-through.
+	audio_filter(
+			boost::rational<int> in_time_base,
+			int in_sample_rate,
+			AVSampleFormat in_sample_fmt,
+			std::int64_t in_audio_channel_layout,
+			std::vector<int> out_sample_rates,
+			std::vector<AVSampleFormat> out_sample_fmts,
+			std::vector<std::int64_t> out_audio_channel_layouts,
+			const std::string& filtergraph);
+	audio_filter(audio_filter&& other);
+	audio_filter& operator=(audio_filter&& other);
+
+	// Feed one frame into the graph.
+	void push(const std::shared_ptr<AVFrame>& frame);
+	// One filtered frame, or nullptr when none is currently available.
+	std::shared_ptr<AVFrame> poll();
+	// All frames currently available from the sink.
+	std::vector<spl::shared_ptr<AVFrame>> poll_all();
+
+	// The (lower-cased) filtergraph description this filter was built with.
+	std::wstring filter_str() const;
+private:
+	struct implementation;
+	spl::shared_ptr<implementation> impl_;
+};
+
+}}
#include <libavutil/opt.h>
#include <libavfilter/avfilter.h>
#include <libavfilter/avcodec.h>
- #include <libavfilter/avfilter.h>
#include <libavfilter/buffersink.h>
#include <libavfilter/buffersrc.h>
}
: index_(index)
{
}
+
+	// True when this stream exists in the file; a negative index_ marks a
+	// missing stream (see size(), which then reports "always full").
+	bool is_available() const
+	{
+		return index_ >= 0;
+	}
void push(const std::shared_ptr<AVPacket>& packet)
{
size_type size() const
{
- return index_ > -1 ? packets_.size() : std::numeric_limits<size_type>::max();
+ return is_available() ? packets_.size() : std::numeric_limits<size_type>::max();
}
};
if(start_ != 0)
seek_target_ = start_;
- graph_->set_color("seek", diagnostics::color(1.0f, 0.5f, 0.0f));
- graph_->set_color("audio-buffer", diagnostics::color(0.7f, 0.4f, 0.4f));
- graph_->set_color("video-buffer", diagnostics::color(1.0f, 1.0f, 0.0f));
+ graph_->set_color("seek", diagnostics::color(1.0f, 0.5f, 0.0f));
+
+ if (audio_stream_.is_available())
+ graph_->set_color("audio-buffer", diagnostics::color(0.7f, 0.4f, 0.4f));
+
+ if (video_stream_.is_available())
+ graph_->set_color("video-buffer", diagnostics::color(1.0f, 1.0f, 0.0f));
for(int n = 0; n < 8; ++n)
tick();
}
bool try_pop_video(std::shared_ptr<AVPacket>& packet)
- {
+ {
+ if (!video_stream_.is_available())
+ return false;
+
bool result = video_stream_.try_pop(packet);
if(result)
cond_.notify_one();
}
bool try_pop_audio(std::shared_ptr<AVPacket>& packet)
- {
+ {
+ if (!audio_stream_.is_available())
+ return false;
+
bool result = audio_stream_.try_pop(packet);
if(result)
cond_.notify_one();
if(is_eof(ret))
{
- video_stream_.push(packet);
- audio_stream_.push(packet);
-
- if(loop_)
- internal_seek(start_);
+ if (loop_)
+ internal_seek(start_);
+ else
+ {
+ audio_stream_.push(packet);
+ video_stream_.push(packet);
+ }
}
else
{
audio_stream_.push(packet);
}
}
-
- graph_->set_value("video-buffer", std::min(1.0, static_cast<double>(video_stream_.size()/MIN_FRAMES)));
- graph_->set_value("audio-buffer", std::min(1.0, static_cast<double>(audio_stream_.size()/MIN_FRAMES)));
+
+ if (video_stream_.is_available())
+ graph_->set_value("video-buffer", std::min(1.0, static_cast<double>(video_stream_.size()/MIN_FRAMES)));
+
+ if (audio_stream_.is_available())
+ graph_->set_value("audio-buffer", std::min(1.0, static_cast<double>(audio_stream_.size()/MIN_FRAMES)));
}
bool full() const
{
- return video_stream_.size() > MIN_FRAMES && audio_stream_.size() > MIN_FRAMES;
+ // size() returns max() for an unavailable stream, so a file without
+ // audio (or without video) never blocks the input from becoming full.
+ return video_stream_.size() >= MIN_FRAMES && audio_stream_.size() >= MIN_FRAMES;
}
void run()
{
try
{
- boost::this_thread::sleep_for(boost::chrono::milliseconds(1));
{
boost::unique_lock<boost::mutex> lock(mutex_);
#include <core/frame/pixel_format.h>
#include <core/frame/frame_factory.h>
#include <core/frame/frame.h>
+#include <core/frame/audio_channel_layout.h>
#include <common/env.h>
#include <common/except.h>
struct frame_muxer::impl : boost::noncopyable
{
std::queue<core::mutable_frame> video_stream_;
- core::audio_buffer audio_stream_;
+ core::mutable_audio_buffer audio_stream_;
std::queue<draw_frame> frame_buffer_;
display_mode display_mode_ = display_mode::invalid;
const double in_fps_;
const video_format_desc format_desc_;
+ audio_channel_layout channel_layout_;
std::vector<int> audio_cadence_ = format_desc_.audio_cadence;
const std::wstring filter_str_;
bool force_deinterlacing_ = env::properties().get(L"configuration.force-deinterlace", true);
- impl(double in_fps, const spl::shared_ptr<core::frame_factory>& frame_factory, const core::video_format_desc& format_desc, const std::wstring& filter_str)
+ impl(
+ double in_fps,
+ const spl::shared_ptr<core::frame_factory>& frame_factory,
+ const core::video_format_desc& format_desc,
+ const core::audio_channel_layout& channel_layout,
+ const std::wstring& filter_str)
: in_fps_(in_fps)
, format_desc_(format_desc)
+ , channel_layout_(channel_layout)
, frame_factory_(frame_factory)
, filter_str_(filter_str)
{
if(!video->data[0])
{
- auto empty_frame = frame_factory_->create_frame(this, core::pixel_format_desc(core::pixel_format::invalid));
+ auto empty_frame = frame_factory_->create_frame(this, core::pixel_format_desc(core::pixel_format::invalid), channel_layout_);
video_stream_.push(std::move(empty_frame));
display_mode_ = display_mode::simple;
}
filter_->push(video);
previous_frame_ = video;
- for (auto& av_frame : filter_->poll_all())
- video_stream_.push(make_frame(this, av_frame, format_desc_.fps, *frame_factory_));
+ for (auto& av_frame : filter_->poll_all())
+ video_stream_.push(make_frame(this, av_frame, format_desc_.fps, *frame_factory_, channel_layout_));
}
merge();
}
-
+
void push_audio(const std::shared_ptr<AVFrame>& audio)
{
if(!audio)
if(!audio->data[0])
{
- boost::range::push_back(audio_stream_, core::audio_buffer(audio_cadence_.front() * format_desc_.audio_channels, 0));
+ if (channel_layout_ == core::audio_channel_layout::invalid())
+ channel_layout_ = *core::audio_channel_layout_repository::get_default()->get_layout(L"stereo");
+
+ boost::range::push_back(audio_stream_, core::mutable_audio_buffer(audio_cadence_.front() * channel_layout_.num_channels, 0));
}
else
{
}
bool video_ready() const
- {
+ {
switch(display_mode_)
{
case display_mode::deinterlace_bob_reinterlace:
switch(display_mode_)
{
case display_mode::duplicate:
- return audio_stream_.size() >= static_cast<size_t>(audio_cadence_[0] + audio_cadence_[1 % audio_cadence_.size()]) * format_desc_.audio_channels;
+ return audio_stream_.size() >= static_cast<size_t>(audio_cadence_[0] + audio_cadence_[1 % audio_cadence_.size()]) * channel_layout_.num_channels;
default:
- return audio_stream_.size() >= static_cast<size_t>(audio_cadence_.front()) * format_desc_.audio_channels;
+ return audio_stream_.size() >= static_cast<size_t>(audio_cadence_.front()) * channel_layout_.num_channels;
}
}
std::vector<array<std::uint8_t>>(),
pop_audio(),
frame1.data_tag(),
- core::pixel_format_desc());
+ core::pixel_format_desc(),
+ channel_layout_);
auto first_frame = core::draw_frame(std::move(frame1));
auto muted_first_frame = core::draw_frame(first_frame);
muted_first_frame.transform().audio_transform.volume = 0;
return std::move(frame);
}
- core::audio_buffer pop_audio()
+ core::mutable_audio_buffer pop_audio()
{
- if (audio_stream_.size() < audio_cadence_.front() * format_desc_.audio_channels)
+ if (audio_stream_.size() < audio_cadence_.front() * channel_layout_.num_channels)
CASPAR_THROW_EXCEPTION(out_of_range());
auto begin = audio_stream_.begin();
- auto end = begin + audio_cadence_.front() * format_desc_.audio_channels;
+ auto end = begin + audio_cadence_.front() * channel_layout_.num_channels;
- core::audio_buffer samples(begin, end);
+ core::mutable_audio_buffer samples(begin, end);
audio_stream_.erase(begin, end);
boost::range::rotate(audio_cadence_, std::begin(audio_cadence_)+1);
}
};
-frame_muxer::frame_muxer(double in_fps, const spl::shared_ptr<core::frame_factory>& frame_factory, const core::video_format_desc& format_desc, const std::wstring& filter)
- : impl_(new impl(in_fps, frame_factory, format_desc, filter)){}
+// Forwards construction to the pimpl. channel_layout describes the audio
+// channels of the frames being muxed; it may be invalid(), in which case the
+// impl falls back to a default layout when audio arrives.
+frame_muxer::frame_muxer(
+	double in_fps,
+	const spl::shared_ptr<core::frame_factory>& frame_factory,
+	const core::video_format_desc& format_desc,
+	const core::audio_channel_layout& channel_layout,
+	const std::wstring& filter)
+	: impl_(new impl(in_fps, frame_factory, format_desc, channel_layout, filter)){}
void frame_muxer::push_video(const std::shared_ptr<AVFrame>& frame){impl_->push_video(frame);}
void frame_muxer::push_audio(const std::shared_ptr<AVFrame>& frame){impl_->push_audio(frame);}
bool frame_muxer::empty() const{return impl_->empty();}
class frame_muxer : boost::noncopyable
{
public:
- frame_muxer(double in_fps, const spl::shared_ptr<core::frame_factory>& frame_factory, const core::video_format_desc& format_desc, const std::wstring& filter = L"");
+ frame_muxer(
+ double in_fps,
+ const spl::shared_ptr<core::frame_factory>& frame_factory,
+ const core::video_format_desc& format_desc,
+ const core::audio_channel_layout& channel_layout,
+ const std::wstring& filter);
void push_video(const std::shared_ptr<AVFrame>& frame);
void push_audio(const std::shared_ptr<AVFrame>& frame);
#include <core/frame/frame_transform.h>
#include <core/frame/frame_factory.h>
#include <core/frame/frame.h>
+#include <core/frame/audio_channel_layout.h>
#include <core/producer/frame_producer.h>
#include <common/except.h>
}
}
-core::mutable_frame make_frame(const void* tag, const spl::shared_ptr<AVFrame>& decoded_frame, double fps, core::frame_factory& frame_factory)
+core::mutable_frame make_frame(const void* tag, const spl::shared_ptr<AVFrame>& decoded_frame, double fps, core::frame_factory& frame_factory, const core::audio_channel_layout& channel_layout)
{
static tbb::concurrent_unordered_map<int64_t, tbb::concurrent_queue<std::shared_ptr<SwsContext>>> sws_contvalid_exts_;
if(decoded_frame->width < 1 || decoded_frame->height < 1)
- return frame_factory.create_frame(tag, core::pixel_format_desc(core::pixel_format::invalid));
+ return frame_factory.create_frame(tag, core::pixel_format_desc(core::pixel_format::invalid), core::audio_channel_layout::invalid());
const auto width = decoded_frame->width;
const auto height = decoded_frame->height;
auto target_desc = pixel_format_desc(target_pix_fmt, width, height);
- auto write = frame_factory.create_frame(tag, target_desc);
+ auto write = frame_factory.create_frame(tag, target_desc, channel_layout);
std::shared_ptr<SwsContext> sws_context;
}
else
{
- auto write = frame_factory.create_frame(tag, desc);
+ auto write = frame_factory.create_frame(tag, desc, channel_layout);
for(int n = 0; n < static_cast<int>(desc.planes.size()); ++n)
{
}
return L"";
}
+
+// Derives a CasparCG audio_channel_layout from an FFmpeg decoder context.
+//
+// Resolution order:
+//   1. channel_layout_spec, when non-empty: either "type:order" for a custom
+//      on-the-fly layout, or the name of a layout registered in the default
+//      audio_channel_layout_repository (an unknown name throws).
+//   2. When the codec reports no channel_layout bitmask: mono/stereo is
+//      assumed for 1/2 channels, otherwise an unnamed passthru layout.
+//   3. Otherwise FFmpeg's channel_layout bitmask is translated to the
+//      equivalent layout type and channel order.
+core::audio_channel_layout get_audio_channel_layout(const AVCodecContext& codec_context, const std::wstring& channel_layout_spec)
+{
+	auto num_channels = codec_context.channels;
+
+	if (!channel_layout_spec.empty())
+	{
+		if (boost::contains(channel_layout_spec, L":")) // Custom on the fly layout specified.
+		{
+			std::vector<std::wstring> type_and_channel_order;
+			boost::split(type_and_channel_order, channel_layout_spec, boost::is_any_of(L":"), boost::algorithm::token_compress_off);
+			auto& type		= type_and_channel_order.at(0);
+			auto& order		= type_and_channel_order.at(1);
+
+			return core::audio_channel_layout(num_channels, std::move(type), order);
+		}
+		else // Preconfigured named channel layout selected.
+		{
+			auto layout = core::audio_channel_layout_repository::get_default()->get_layout(channel_layout_spec);
+
+			if (!layout)
+				CASPAR_THROW_EXCEPTION(invalid_argument() << msg_info(L"No channel layout with name " + channel_layout_spec + L" registered"));
+
+			// The named layout keeps its type/order but the actual channel
+			// count comes from the decoded stream.
+			layout->num_channels = num_channels;
+
+			return *layout;
+		}
+	}
+
+	if (!codec_context.channel_layout)
+	{
+		if (num_channels == 1)
+			return core::audio_channel_layout(num_channels, L"mono", L"FC");
+		else if (num_channels == 2)
+			return core::audio_channel_layout(num_channels, L"stereo", L"FL FR");
+		else
+			return core::audio_channel_layout(num_channels, L"", L""); // Passthru without named channels as is.
+	}
+
+	// What FFMpeg calls "channel layout" is only the "layout type" of a channel layout in
+	// CasparCG where the channel layout supports different orders as well.
+	// The user needs to provide additional mix-configs in casparcg.config to support more
+	// than the most common (5.1, mono and stereo) types.
+
+	// Based on information in https://ffmpeg.org/ffmpeg-utils.html#Channel-Layout
+	switch (codec_context.channel_layout)
+	{
+	case AV_CH_LAYOUT_MONO:
+		return core::audio_channel_layout(num_channels, L"mono",			L"FC");
+	case AV_CH_LAYOUT_STEREO:
+		return core::audio_channel_layout(num_channels, L"stereo",			L"FL FR");
+	case AV_CH_LAYOUT_2POINT1:
+		return core::audio_channel_layout(num_channels, L"2.1",				L"FL FR LFE");
+	case AV_CH_LAYOUT_SURROUND:
+		return core::audio_channel_layout(num_channels, L"3.0",				L"FL FR FC");
+	case AV_CH_LAYOUT_2_1:
+		return core::audio_channel_layout(num_channels, L"3.0(back)",		L"FL FR BC");
+	case AV_CH_LAYOUT_4POINT0:
+		return core::audio_channel_layout(num_channels, L"4.0",				L"FL FR FC BC");
+	case AV_CH_LAYOUT_QUAD:
+		return core::audio_channel_layout(num_channels, L"quad",			L"FL FR BL BR");
+	case AV_CH_LAYOUT_2_2:
+		return core::audio_channel_layout(num_channels, L"quad(side)",		L"FL FR SL SR");
+	case AV_CH_LAYOUT_3POINT1:
+		return core::audio_channel_layout(num_channels, L"3.1",				L"FL FR FC LFE");
+	case AV_CH_LAYOUT_5POINT0_BACK:
+		return core::audio_channel_layout(num_channels, L"5.0",				L"FL FR FC BL BR");
+	case AV_CH_LAYOUT_5POINT0:
+		return core::audio_channel_layout(num_channels, L"5.0(side)",		L"FL FR FC SL SR");
+	case AV_CH_LAYOUT_4POINT1:
+		return core::audio_channel_layout(num_channels, L"4.1",				L"FL FR FC LFE BC");
+	case AV_CH_LAYOUT_5POINT1_BACK:
+		return core::audio_channel_layout(num_channels, L"5.1",				L"FL FR FC LFE BL BR");
+	case AV_CH_LAYOUT_5POINT1:
+		return core::audio_channel_layout(num_channels, L"5.1(side)",		L"FL FR FC LFE SL SR");
+	case AV_CH_LAYOUT_6POINT0:
+		return core::audio_channel_layout(num_channels, L"6.0",				L"FL FR FC BC SL SR");
+	case AV_CH_LAYOUT_6POINT0_FRONT:
+		return core::audio_channel_layout(num_channels, L"6.0(front)",		L"FL FR FLC FRC SL SR");
+	case AV_CH_LAYOUT_HEXAGONAL:
+		return core::audio_channel_layout(num_channels, L"hexagonal",		L"FL FR FC BL BR BC");
+	case AV_CH_LAYOUT_6POINT1:
+		return core::audio_channel_layout(num_channels, L"6.1",				L"FL FR FC LFE BC SL SR");
+	case AV_CH_LAYOUT_6POINT1_BACK:
+		return core::audio_channel_layout(num_channels, L"6.1(back)",		L"FL FR FC LFE BL BR BC");
+	case AV_CH_LAYOUT_6POINT1_FRONT:
+		return core::audio_channel_layout(num_channels, L"6.1(front)",		L"FL FR LFE FLC FRC SL SR");
+	case AV_CH_LAYOUT_7POINT0:
+		return core::audio_channel_layout(num_channels, L"7.0",				L"FL FR FC BL BR SL SR");
+	case AV_CH_LAYOUT_7POINT0_FRONT:
+		return core::audio_channel_layout(num_channels, L"7.0(front)",		L"FL FR FC FLC FRC SL SR");
+	case AV_CH_LAYOUT_7POINT1:
+		return core::audio_channel_layout(num_channels, L"7.1",				L"FL FR FC LFE BL BR SL SR");
+	case AV_CH_LAYOUT_7POINT1_WIDE_BACK:
+		return core::audio_channel_layout(num_channels, L"7.1(wide)",		L"FL FR FC LFE BL BR FLC FRC");
+	case AV_CH_LAYOUT_7POINT1_WIDE:
+		return core::audio_channel_layout(num_channels, L"7.1(wide-side)",	L"FL FR FC LFE FLC FRC SL SR");
+	case AV_CH_LAYOUT_STEREO_DOWNMIX:
+		return core::audio_channel_layout(num_channels, L"downmix",			L"DL DR");
+	default:
+		// Passthru
+		return core::audio_channel_layout(num_channels, L"", L"");
+	}
+}
+
+// av_get_default_channel_layout does not work for layouts not predefined in ffmpeg. This is needed to support > 8 channels.
+//
+// Returns a bitmask with the num_channels lowest bits set. FFmpeg stores a
+// channel layout in a signed 64 bit integer, so at most 63 channels can be
+// represented; 0 channels yields an empty (0) bitmask.
+// Throws invalid_argument for a negative count or for more than 63 channels.
+std::int64_t create_channel_layout_bitmask(int num_channels)
+{
+	if (num_channels < 0)
+		CASPAR_THROW_EXCEPTION(invalid_argument() << msg_info(L"Number of audio channels cannot be negative"));
+
+	if (num_channels > 63)
+		CASPAR_THROW_EXCEPTION(invalid_argument() << msg_info(L"FFMpeg cannot handle more than 63 audio channels"));
+
+	const auto ALL_63_CHANNELS = 0x7FFFFFFFFFFFFFFFULL;
+
+	// Shifting right by (63 - num_channels) keeps exactly num_channels low
+	// bits; the guards above keep the shift count within [0, 63], avoiding
+	// the undefined behavior a negative num_channels would otherwise cause
+	// (shift count greater than the operand width).
+	auto to_shift = 63 - num_channels;
+	auto result = ALL_63_CHANNELS >> to_shift;
+
+	return static_cast<std::int64_t>(result);
+}
+
//
//void av_dup_frame(AVFrame* frame)
//{
// Utils
core::field_mode get_mode(const AVFrame& frame);
-core::mutable_frame make_frame(const void* tag, const spl::shared_ptr<AVFrame>& decoded_frame, double fps, core::frame_factory& frame_factory);
+core::mutable_frame make_frame(const void* tag, const spl::shared_ptr<AVFrame>& decoded_frame, double fps, core::frame_factory& frame_factory, const core::audio_channel_layout& channel_layout);
spl::shared_ptr<AVFrame> make_av_frame(core::mutable_frame& frame);
spl::shared_ptr<AVFrame> make_av_frame(core::const_frame& frame);
spl::shared_ptr<AVFrame> make_av_frame(std::array<uint8_t*, 4> data, const core::pixel_format_desc& pix_desc);
bool is_valid_file(const std::wstring& filename);
bool try_get_duration(const std::wstring filename, std::int64_t& duration, boost::rational<std::int64_t>& time_base);
+core::audio_channel_layout get_audio_channel_layout(const AVCodecContext& codec_context, const std::wstring& channel_layout_spec);
+
+// av_get_default_channel_layout does not work for layouts not predefined in ffmpeg. This is needed to support > 8 channels.
+std::int64_t create_channel_layout_bitmask(int num_channels);
+
}}
current_packet_.reset();
}
- return frame ? frame : poll();
+ return frame;
}
std::shared_ptr<AVFrame> decode(AVPacket& pkt)
#include <core/frame/draw_frame.h>
#include <core/frame/frame_factory.h>
#include <core/frame/pixel_format.h>
+#include <core/frame/audio_channel_layout.h>
#include <core/producer/frame_producer.h>
#include <core/monitor/monitor.h>
#include <core/help/help_repository.h>
{
core::pixel_format_desc desc = core::pixel_format::bgra;
desc.planes.push_back(core::pixel_format_desc::plane(width_, height_, 4));
- auto frame = frame_factory_->create_frame(this, desc);
+ auto frame = frame_factory_->create_frame(this, desc, core::audio_channel_layout::invalid());
A_memset(bmp_.data(), 0, width_ * height_ * 4);
ax_->DrawControl(bmp_);
#include <core/interaction/interaction_event.h>
#include <core/frame/frame.h>
#include <core/frame/pixel_format.h>
+#include <core/frame/audio_channel_layout.h>
#include <core/frame/geometry.h>
#include <core/help/help_repository.h>
#include <core/help/help_sink.h>
pixel_desc.format = core::pixel_format::bgra;
pixel_desc.planes.push_back(
core::pixel_format_desc::plane(width, height, 4));
- auto frame = frame_factory_->create_frame(this, pixel_desc);
+ auto frame = frame_factory_->create_frame(this, pixel_desc, core::audio_channel_layout::invalid());
A_memcpy(frame.image_data().begin(), buffer, width * height * 4);
lock(frames_mutex_, [&]
{
}
- void initialize(const core::video_format_desc&, int) override
+ // No-op override: this consumer performs no per-channel setup; the
+ // signature only tracks the new audio_channel_layout parameter added to
+ // the frame_consumer interface.
+ void initialize(const core::video_format_desc&, const core::audio_channel_layout&, int) override
{
}
#include <core/frame/draw_frame.h>
#include <core/frame/frame_factory.h>
#include <core/frame/pixel_format.h>
+#include <core/frame/audio_channel_layout.h>
#include <core/monitor/monitor.h>
#include <core/help/help_sink.h>
#include <core/help/help_repository.h>
auto width = FreeImage_GetWidth(bitmap.get());
auto height = FreeImage_GetHeight(bitmap.get());
desc.planes.push_back(core::pixel_format_desc::plane(width, height, 4));
- auto frame = frame_factory->create_frame(bitmap.get(), desc);
+ auto frame = frame_factory->create_frame(bitmap.get(), desc, core::audio_channel_layout::invalid());
std::copy_n(
FreeImage_GetBits(bitmap.get()),
core::pixel_format_desc desc;
desc.format = core::pixel_format::bgra;
desc.planes.push_back(core::pixel_format_desc::plane(FreeImage_GetWidth(bitmap.get()), FreeImage_GetHeight(bitmap.get()), 4));
- auto frame = frame_factory_->create_frame(this, desc);
+ auto frame = frame_factory_->create_frame(this, desc, core::audio_channel_layout::invalid());
std::copy_n(FreeImage_GetBits(bitmap.get()), frame.image_data().size(), frame.image_data().begin());
frame_ = core::draw_frame(std::move(frame));
#include <core/frame/frame_factory.h>
#include <core/frame/frame_transform.h>
#include <core/frame/pixel_format.h>
+#include <core/frame/audio_channel_layout.h>
#include <core/monitor/monitor.h>
#include <core/help/help_sink.h>
#include <core/help/help_repository.h>
{
core::pixel_format_desc desc = core::pixel_format::bgra;
desc.planes.push_back(core::pixel_format_desc::plane(width_, format_desc_.height, 4));
- auto frame = frame_factory->create_frame(this, desc);
+ auto frame = frame_factory->create_frame(this, desc, core::audio_channel_layout::invalid());
if(count >= frame.image_data(0).size())
{
{
core::pixel_format_desc desc = core::pixel_format::bgra;
desc.planes.push_back(core::pixel_format_desc::plane(format_desc_.width, height_, 4));
- auto frame = frame_factory->create_frame(this, desc);
+ auto frame = frame_factory->create_frame(this, desc, core::audio_channel_layout::invalid());
if(count >= frame.image_data(0).size())
{
for(int y = 0; y < height_; ++y)
#include <core/consumer/frame_consumer.h>
#include <core/video_format.h>
#include <core/frame/frame.h>
+#include <core/frame/audio_channel_layout.h>
#include <core/mixer/audio/audio_util.h>
#include <core/monitor/monitor.h>
#include <core/help/help_sink.h>
core::monitor::subject monitor_subject_;
std::shared_ptr<void> air_send_;
core::video_format_desc format_desc_;
+ core::audio_channel_layout channel_layout_ = core::audio_channel_layout::invalid();
executor executor_;
tbb::atomic<bool> connected_;
spl::shared_ptr<diagnostics::graph> graph_;
virtual void initialize(
const core::video_format_desc& format_desc,
+ const core::audio_channel_layout& channel_layout,
int channel_index) override
{
air_send_.reset(
format_desc.field_mode == core::field_mode::progressive,
static_cast<float>(format_desc.square_width) / static_cast<float>(format_desc.square_height),
true,
- format_desc.audio_channels,
+ channel_layout.num_channels,
format_desc.audio_sample_rate),
airsend::destroy);
CASPAR_VERIFY(air_send_);
- format_desc_ = format_desc;
+ format_desc_ = format_desc;
+ channel_layout_ = channel_layout;
CASPAR_LOG(info) << print() << L" Successfully Initialized.";
}
auto audio_buffer = core::audio_32_to_16(frame.audio_data());
- airsend::add_audio(air_send_.get(), audio_buffer.data(), static_cast<int>(audio_buffer.size()) / format_desc_.audio_channels);
+ airsend::add_audio(air_send_.get(), audio_buffer.data(), static_cast<int>(audio_buffer.size()) / channel_layout_.num_channels);
// VIDEO
#include <common/utf.h>
#include <common/env.h>
#include <common/future.h>
+#include <common/param.h>
#include <core/consumer/frame_consumer.h>
#include <core/frame/frame.h>
+#include <core/frame/audio_channel_layout.h>
#include <core/mixer/audio/audio_util.h>
#include <core/mixer/audio/audio_mixer.h>
#include <core/video_format.h>
struct oal_consumer : public core::frame_consumer
{
- core::monitor::subject monitor_subject_;
+ core::monitor::subject monitor_subject_;
- spl::shared_ptr<diagnostics::graph> graph_;
- boost::timer perf_timer_;
- tbb::atomic<int64_t> presentation_age_;
- int channel_index_ = -1;
+ spl::shared_ptr<diagnostics::graph> graph_;
+ boost::timer perf_timer_;
+ tbb::atomic<int64_t> presentation_age_;
+ int channel_index_ = -1;
- core::video_format_desc format_desc_;
+ core::video_format_desc format_desc_;
+ core::audio_channel_layout out_channel_layout_;
+ std::unique_ptr<core::audio_channel_remapper> channel_remapper_;
- ALuint source_ = 0;
- std::vector<ALuint> buffers_;
+ ALuint source_ = 0;
+ std::vector<ALuint> buffers_;
- executor executor_ { L"oal_consumer" };
+ executor executor_ { L"oal_consumer" };
public:
- oal_consumer()
+ oal_consumer(const core::audio_channel_layout& out_channel_layout)
+ : out_channel_layout_(out_channel_layout)
{
presentation_age_ = 0;
// frame consumer
- void initialize(const core::video_format_desc& format_desc, int channel_index) override
+ void initialize(const core::video_format_desc& format_desc, const core::audio_channel_layout& channel_layout, int channel_index) override
{
format_desc_ = format_desc;
channel_index_ = channel_index;
+ if (out_channel_layout_ == core::audio_channel_layout::invalid())
+ out_channel_layout_ = channel_layout.num_channels == 2 ? channel_layout : *core::audio_channel_layout_repository::get_default()->get_layout(L"stereo");
+
+ out_channel_layout_.num_channels = 2;
+
+ channel_remapper_.reset(new core::audio_channel_remapper(channel_layout, out_channel_layout_));
graph_->set_text(print());
executor_.begin_invoke([=]
for(std::size_t n = 0; n < buffers_.size(); ++n)
{
- audio_buffer_16 audio(format_desc_.audio_cadence[n % format_desc_.audio_cadence.size()]*format_desc_.audio_channels, 0);
+ audio_buffer_16 audio(format_desc_.audio_cadence[n % format_desc_.audio_cadence.size()]*2, 0);
alBufferData(buffers_[n], AL_FORMAT_STEREO16, audio.data(), static_cast<ALsizei>(audio.size()*sizeof(int16_t)), format_desc_.audio_sample_rate);
alSourceQueueBuffers(source_, 1, &buffers_[n]);
}
alSourceUnqueueBuffers(source_, 1, &buffer);
if(buffer)
{
- std::vector<int16_t> audio(format_desc_.audio_cadence[n % format_desc_.audio_cadence.size()] * format_desc_.audio_channels, 0);
+ std::vector<int16_t> audio(format_desc_.audio_cadence[n % format_desc_.audio_cadence.size()] * 2, 0);
alBufferData(buffer, AL_FORMAT_STEREO16, audio.data(), static_cast<ALsizei>(audio.size()*sizeof(int16_t)), format_desc_.audio_sample_rate);
alSourceQueueBuffers(source_, 1, &buffer);
}
graph_->set_tag("late-frame");
}
- auto audio = core::audio_32_to_16(frame.audio_data());
+ auto audio = core::audio_32_to_16(channel_remapper_->mix_and_rearrange(frame.audio_data()));
ALuint buffer = 0;
alSourceUnqueueBuffers(source_, 1, &buffer);
+// Help metadata for the AUDIO (system audio) consumer, documenting the
+// optional CHANNEL_LAYOUT parameter added with multichannel audio support.
void describe_consumer(core::help_sink& sink, const core::help_repository& repo)
{
	sink.short_description(L"A system audio consumer.");
-	sink.syntax(L"AUDIO");
+	sink.syntax(L"AUDIO {CHANNEL_LAYOUT [channel_layout:string]}");
	sink.para()->text(L"Uses the system's default audio playback device.");
	sink.para()->text(L"Examples:");
	sink.example(L">> ADD 1 AUDIO");
+	sink.example(L">> ADD 1 AUDIO CHANNEL_LAYOUT matrix", L"Uses the matrix channel layout");
}
spl::shared_ptr<core::frame_consumer> create_consumer(const std::vector<std::wstring>& params, core::interaction_sink*)
if(params.size() < 1 || !boost::iequals(params.at(0), L"AUDIO"))
return core::frame_consumer::empty();
- return spl::make_shared<oal_consumer>();
+ auto channel_layout = core::audio_channel_layout::invalid();
+ auto channel_layout_spec = get_param(L"CHANNEL_LAYOUT", params);
+
+ if (!channel_layout_spec.empty())
+ {
+ auto found_layout = core::audio_channel_layout_repository::get_default()->get_layout(channel_layout_spec);
+
+ if (!found_layout)
+ CASPAR_THROW_EXCEPTION(file_not_found() << msg_info(L"Channel layout " + channel_layout_spec + L" not found."));
+
+ channel_layout = *found_layout;
+ }
+
+ return spl::make_shared<oal_consumer>(channel_layout);
}
-spl::shared_ptr<core::frame_consumer> create_preconfigured_consumer(const boost::property_tree::wptree&, core::interaction_sink*)
+// Creates an oal_consumer from a preconfigured casparcg.config element.
+// An optional <channel-layout> child selects a registered output layout by
+// name; omitting it leaves the layout invalid() so the consumer decides at
+// initialize() time.
+// NOTE(review): an unknown layout name throws file_not_found even though no
+// file is involved — invalid_argument may fit better; confirm callers.
+spl::shared_ptr<core::frame_consumer> create_preconfigured_consumer(const boost::property_tree::wptree& ptree, core::interaction_sink*)
{
-	return spl::make_shared<oal_consumer>();
+	auto channel_layout			= core::audio_channel_layout::invalid();
+	auto channel_layout_spec	= ptree.get_optional<std::wstring>(L"channel-layout");
+
+	if (channel_layout_spec)
+	{
+		auto found_layout = core::audio_channel_layout_repository::get_default()->get_layout(*channel_layout_spec);
+
+		if (!found_layout)
+			CASPAR_THROW_EXCEPTION(file_not_found() << msg_info(L"Channel layout " + *channel_layout_spec + L" not found."));
+
+		channel_layout = *found_layout;
+	}
+
+	return spl::make_shared<oal_consumer>(channel_layout);
}
}}
#include <core/frame/pixel_format.h>
#include <core/frame/frame_factory.h>
+#include <core/frame/audio_channel_layout.h>
#include <core/producer/frame_producer.h>
#include <core/producer/color/color_producer.h>
#include <core/producer/text/text_producer.h>
core::pixel_format_desc pfd(core::pixel_format::bgra);
pfd.planes.push_back(core::pixel_format_desc::plane((*it)->bitmap()->width(), (*it)->bitmap()->height(), 4));
- auto frame = dependencies.frame_factory->create_frame(it->get(), pfd);
+ auto frame = dependencies.frame_factory->create_frame(it->get(), pfd, core::audio_channel_layout::invalid());
auto destination = frame.image_data().data();
auto source = (*it)->bitmap()->data();
memcpy(destination, source, frame.image_data().size());
#include <core/frame/frame.h>
#include <core/frame/pixel_format.h>
+#include <core/frame/audio_channel_layout.h>
#include <core/frame/draw_frame.h>
#include <core/frame/frame_factory.h>
#include <core/video_format.h>
core::monitor::subject monitor_subject_;
tbb::concurrent_bounded_queue<core::const_frame> frame_buffer_;
core::video_format_desc format_desc_;
+ core::audio_channel_layout channel_layout_ = core::audio_channel_layout::invalid();
int channel_index_;
int consumer_index_;
tbb::atomic<bool> is_running_;
void initialize(
		const core::video_format_desc& format_desc,
+		const core::audio_channel_layout& channel_layout,
		int channel_index) override
{
	format_desc_	= format_desc;
+	// Captured so frames created for this consumer can carry the channel's
+	// audio layout (exposed via get_audio_channel_layout()).
+	channel_layout_	= channel_layout;
	channel_index_	= channel_index;
}
return format_desc_;
}
+	// The audio channel layout received in initialize(); invalid() until then.
+	const core::audio_channel_layout& get_audio_channel_layout()
+	{
+		return channel_layout_;
+	}
+
void block_until_first_frame_available()
{
if (first_frame_available_.wait_for(std::chrono::seconds(2)) == std::future_status::timeout)
core::pixel_format_desc desc;
desc.format = core::pixel_format::bgra;
desc.planes.push_back(core::pixel_format_desc::plane(format_desc.width, format_desc.height, 4));
- auto frame = frame_factory_->create_frame(this, desc);
+ auto frame = frame_factory_->create_frame(this, desc, consumer_->get_audio_channel_layout());
bool copy_audio = !double_speed && !half_speed;
// frame_consumer
- void initialize(const core::video_format_desc& format_desc, int channel_index) override
+ void initialize(const core::video_format_desc& format_desc, const core::audio_channel_layout&, int channel_index) override
{
consumer_.reset();
consumer_.reset(new screen_consumer(config_, format_desc, channel_index, sink_));
#include <core/help/util.h>
#include <core/video_format.h>
#include <core/producer/transition/transition_producer.h>
+#include <core/frame/audio_channel_layout.h>
#include <core/frame/frame_transform.h>
#include <core/producer/stage.h>
#include <core/producer/layer.h>
sink.syntax(L"SET [video_channel:int] [variable:string] [value:string]");
sink.para()->text(L"Changes the value of a channel variable. Available variables to set:");
sink.definitions()
- ->item(L"MODE", L"Changes the video format of the channel.");
+ ->item(L"MODE", L"Changes the video format of the channel.")
+ ->item(L"CHANNEL_LAYOUT", L"Changes the audio channel layout of the video channel channel.");
sink.para()->text(L"Examples:");
- sink.example(L">> SET 1 MODE PAL", L"changes the video mode on channel 1 to PAL");
+ sink.example(L">> SET 1 MODE PAL", L"changes the video mode on channel 1 to PAL.");
+ sink.example(L">> SET 1 CHANNEL_LAYOUT smpte", L"changes the audio channel layout on channel 1 to smpte.");
}
std::wstring set_command(command_context& ctx)
CASPAR_THROW_EXCEPTION(invalid_argument() << msg_info(L"Invalid video mode"));
}
+ else if (name == L"CHANNEL_LAYOUT")
+ {
+ auto channel_layout = core::audio_channel_layout_repository::get_default()->get_layout(value);
+
+ if (channel_layout)
+ {
+ ctx.channel.channel->audio_channel_layout(*channel_layout);
+ return L"202 SET CHANNEL_LAYOUT OK\r\n";
+ }
+
+ CASPAR_THROW_EXCEPTION(invalid_argument() << msg_info(L"Invalid audio channel layout"));
+ }
CASPAR_THROW_EXCEPTION(invalid_argument() << msg_info(L"Invalid channel variable"));
}
stdafx.cpp
)
set(HEADERS
+ default_audio_config.h
included_modules.h
platform_specific.h
server.h
<channels>\r
<channel>\r
<video-mode>PAL</video-mode>\r
+ <channel-layout>stereo</channel-layout>\r
<consumers>\r
<screen>\r
<device>1</device>\r
</predefined-client>\r
</predefined-clients>\r
</osc>\r
+<audio>\r
+ <channel-layouts>\r
+ <channel-layout name="mono" type="mono" num-channels="1" channel-order="FC" />\r
+ <channel-layout name="stereo" type="stereo" num-channels="2" channel-order="FL FR" />\r
+ <channel-layout name="matrix" type="matrix" num-channels="2" channel-order="ML MR" />\r
+ <channel-layout name="film" type="5.1" num-channels="6" channel-order="FL FC FR BL BR LFE" />\r
+ <channel-layout name="smpte" type="5.1" num-channels="6" channel-order="FL FR FC LFE BL BR" />\r
+ <channel-layout name="ebu_r123_8a" type="5.1+downmix" num-channels="8" channel-order="DL DR FL FR FC LFE BL BR" />\r
+ <channel-layout name="ebu_r123_8b" type="5.1+downmix" num-channels="8" channel-order="FL FR FC LFE BL BR DL DR" />\r
+ <channel-layout name="8ch" type="8ch" num-channels="8" />\r
+ <channel-layout name="16ch" type="16ch" num-channels="16" />\r
+ </channel-layouts>\r
+ <mix-configs>\r
+ <mix-config from-type="mono" to-types="stereo, 5.1" mix="FL = FC | FR = FC" />\r
+ <mix-config from-type="mono" to-types="5.1+downmix" mix="FL = FC | FR = FC | DL = FC | DR = FC" />\r
+ <mix-config from-type="mono" to-types="matrix" mix="ML = FC | MR = FC" />\r
+ <mix-config from-type="stereo" to-types="mono" mix="FC < FL + FR" />\r
+ <mix-config from-type="stereo" to-types="matrix" mix="ML = FL | MR = FR" />\r
+ <mix-config from-type="stereo" to-types="5.1" mix="FL = FL | FR = FR" />\r
+ <mix-config from-type="stereo" to-types="5.1+downmix" mix="FL = FL | FR = FR | DL = FL | DR = FR" />\r
+ <mix-config from-type="5.1" to-types="mono" mix="FC < FL + FR + 0.707*FC + 0.707*BL + 0.707*BR" />\r
+ <mix-config from-type="5.1" to-types="stereo" mix="FL < FL + 0.707*FC + 0.707*BL | FR < FR + 0.707*FC + 0.707*BR" />\r
+ <mix-config from-type="5.1" to-types="5.1+downmix" mix="FL = FL | FR = FR | FC = FC | BL = BL | BR = BR | LFE = LFE | DL < FL + 0.707*FC + 0.707*BL | DR < FR + 0.707*FC + 0.707*BR" />\r
+ <mix-config from-type="5.1" to-types="matrix" mix="ML = 0.3204*FL + 0.293*FC + -0.293*BL + -0.293*BR | MR = 0.3204*FR + 0.293*FC + 0.293*BL + 0.293*BR" />\r
+ <mix-config from-type="5.1+stereomix" to-types="mono" mix="FC < DL + DR" />\r
+ <mix-config from-type="5.1+stereomix" to-types="stereo" mix="FL = DL | FR = DR" />\r
+ <mix-config from-type="5.1+stereomix" to-types="5.1" mix="FL = FL | FR = FR | FC = FC | BL = BL | BR = BR | LFE = LFE" />\r
+ <mix-config from-type="5.1+stereomix" to-types="matrix" mix="ML = 0.3204*FL + 0.293*FC + -0.293*BL + -0.293*BR | MR = 0.3204*FR + 0.293*FC + 0.293*BL + 0.293*BR" />\r
+ </mix-configs>\r
+</audio>\r
-->\r
--- /dev/null
+/*
+* Copyright (c) 2011 Sveriges Television AB <info@casparcg.com>
+*
+* This file is part of CasparCG (www.casparcg.com).
+*
+* CasparCG is free software: you can redistribute it and/or modify
+* it under the terms of the GNU General Public License as published by
+* the Free Software Foundation, either version 3 of the License, or
+* (at your option) any later version.
+*
+* CasparCG is distributed in the hope that it will be useful,
+* but WITHOUT ANY WARRANTY; without even the implied warranty of
+* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+* GNU General Public License for more details.
+*
+* You should have received a copy of the GNU General Public License
+* along with CasparCG. If not, see <http://www.gnu.org/licenses/>.
+*
+* Author: Helge Norberg, helge.norberg@svt.se
+*/
+
+#pragma once
+
+#include <string>
+
+#include <boost/property_tree/ptree.hpp>
+#include <boost/property_tree/xml_parser.hpp>
+
+namespace caspar {
+
+// The built-in audio configuration as an XML string: the standard channel
+// layouts (mono, stereo, matrix, film, smpte, EBU R123 8a/8b and plain
+// 8/16 channel) plus default mix-configs for translating between layout
+// types. Registered before the user's casparcg.config, which may extend or
+// override these entries.
+// NOTE(review): this XML is duplicated in the casparcg.config example
+// comment — keep the two in sync.
+std::wstring get_default_audio_config_xml()
+{
+	return LR"(
+		<audio>
+			<channel-layouts>
+				<channel-layout name="mono"			type="mono"			num-channels="1" channel-order="FC" />
+				<channel-layout name="stereo"		type="stereo"		num-channels="2" channel-order="FL FR" />
+				<channel-layout name="matrix"		type="matrix"		num-channels="2" channel-order="ML MR" />
+				<channel-layout name="film"			type="5.1"			num-channels="6" channel-order="FL FC FR BL BR LFE" />
+				<channel-layout name="smpte"		type="5.1"			num-channels="6" channel-order="FL FR FC LFE BL BR" />
+				<channel-layout name="ebu_r123_8a"	type="5.1+downmix"	num-channels="8" channel-order="DL DR FL FR FC LFE BL BR" />
+				<channel-layout name="ebu_r123_8b"	type="5.1+downmix"	num-channels="8" channel-order="FL FR FC LFE BL BR DL DR" />
+				<channel-layout name="8ch"			type="8ch"			num-channels="8" />
+				<channel-layout name="16ch"			type="16ch"			num-channels="16" />
+			</channel-layouts>
+			<mix-configs>
+				<mix-config from-type="mono"			to-types="stereo, 5.1"	mix="FL = FC | FR = FC" />
+				<mix-config from-type="mono"			to-types="5.1+downmix"	mix="FL = FC | FR = FC | DL = FC | DR = FC" />
+				<mix-config from-type="mono"			to-types="matrix"		mix="ML = FC | MR = FC" />
+				<mix-config from-type="stereo"			to-types="mono"			mix="FC < FL + FR" />
+				<mix-config from-type="stereo"			to-types="matrix"		mix="ML = FL | MR = FR" />
+				<mix-config from-type="stereo"			to-types="5.1"			mix="FL = FL | FR = FR" />
+				<mix-config from-type="stereo"			to-types="5.1+downmix"	mix="FL = FL | FR = FR | DL = FL | DR = FR" />
+				<mix-config from-type="5.1"				to-types="mono"			mix="FC < FL + FR + 0.707*FC + 0.707*BL + 0.707*BR" />
+				<mix-config from-type="5.1"				to-types="stereo"		mix="FL < FL + 0.707*FC + 0.707*BL | FR < FR + 0.707*FC + 0.707*BR" />
+				<mix-config from-type="5.1"				to-types="5.1+downmix"	mix="FL = FL | FR = FR | FC = FC | BL = BL | BR = BR | LFE = LFE | DL < FL + 0.707*FC + 0.707*BL | DR < FR + 0.707*FC + 0.707*BR" />
+				<mix-config from-type="5.1"				to-types="matrix"		mix="ML = 0.3204*FL + 0.293*FC + -0.293*BL + -0.293*BR | MR = 0.3204*FR + 0.293*FC + 0.293*BL + 0.293*BR" />
+				<mix-config from-type="5.1+stereomix"	to-types="mono"			mix="FC < DL + DR" />
+				<mix-config from-type="5.1+stereomix"	to-types="stereo"		mix="FL = DL | FR = DR" />
+				<mix-config from-type="5.1+stereomix"	to-types="5.1"			mix="FL = FL | FR = FR | FC = FC | BL = BL | BR = BR | LFE = LFE" />
+				<mix-config from-type="5.1+stereomix"	to-types="matrix"		mix="ML = 0.3204*FL + 0.293*FC + -0.293*BL + -0.293*BR | MR = 0.3204*FR + 0.293*FC + 0.293*BL + 0.293*BR" />
+			</mix-configs>
+		</audio>
+	)";
+}
+
+// Parses the built-in audio configuration XML into a property tree shaped
+// like the user's parsed casparcg.config, so both can be fed through the
+// same repository registration code.
+boost::property_tree::wptree get_default_audio_config()
+{
+	std::wstringstream stream(get_default_audio_config_xml());
+	boost::property_tree::wptree result;
+	boost::property_tree::xml_parser::read_xml(stream, result, boost::property_tree::xml_parser::trim_whitespace | boost::property_tree::xml_parser::no_comments);
+
+	return result;
+}
+
+}
#include "server.h"
#include "included_modules.h"
+#include "default_audio_config.h"
#include <accelerator/accelerator.h>
#include <core/video_channel.h>
#include <core/video_format.h>
+#include <core/frame/audio_channel_layout.h>
#include <core/producer/stage.h>
#include <core/producer/frame_producer.h>
#include <core/producer/scene/scene_producer.h>
{
running_ = true;
+ setup_audio_config(env::properties());
+ CASPAR_LOG(info) << L"Initialized audio config.";
+
setup_channels(env::properties());
CASPAR_LOG(info) << L"Initialized channels.";
uninitialize_modules();
core::diagnostics::osd::shutdown();
}
-
+
+	// Populates the global audio channel-layout and mix-config repositories:
+	// first with the built-in defaults, then with any user-provided entries
+	// from the configuration file, which add to or overwrite the defaults.
+	void setup_audio_config(const boost::property_tree::wptree& pt)
+	{
+		using boost::property_tree::wptree;
+
+		auto default_config = get_default_audio_config();
+
+		// Start with the defaults
+		audio_channel_layout_repository::get_default()->register_all_layouts(default_config.get_child(L"audio.channel-layouts"));
+		audio_mix_config_repository::get_default()->register_all_configs(default_config.get_child(L"audio.mix-configs"));
+
+		// Merge with user configuration (adds to or overwrites the defaults).
+		// Both sections are optional in the user's configuration file.
+		auto custom_channel_layouts = pt.get_child_optional(L"configuration.audio.channel-layouts");
+		auto custom_mix_configs = pt.get_child_optional(L"configuration.audio.mix-configs");
+
+		if (custom_channel_layouts)
+			audio_channel_layout_repository::get_default()->register_all_layouts(*custom_channel_layouts);
+
+		if (custom_mix_configs)
+			audio_mix_config_repository::get_default()->register_all_configs(*custom_mix_configs);
+	}
+
void setup_channels(const boost::property_tree::wptree& pt)
{
using boost::property_tree::wptree;
for (auto& xml_channel : pt.get_child(L"configuration.channels"))
{
- auto format_desc = video_format_desc(xml_channel.second.get(L"video-mode", L"PAL"));
+ auto format_desc = video_format_desc(xml_channel.second.get(L"video-mode", L"PAL"));
if(format_desc.format == video_format::invalid)
CASPAR_THROW_EXCEPTION(caspar_exception() << msg_info("Invalid video-mode."));
-
- auto channel = spl::make_shared<video_channel>(static_cast<int>(channels_.size()+1), format_desc, accelerator_.create_image_mixer());
+
+ auto channel_layout = core::audio_channel_layout_repository::get_default()->get_layout(xml_channel.second.get(L"channel-layout", L"stereo"));
+ if (!channel_layout)
+ CASPAR_THROW_EXCEPTION(caspar_exception() << msg_info("Unknown channel-layout."));
+
+ auto channel = spl::make_shared<video_channel>(static_cast<int>(channels_.size()+1), format_desc, *channel_layout, accelerator_.create_image_mixer());
core::diagnostics::scoped_call_context save;
core::diagnostics::call_context::for_thread().video_channel = channel->index();
// Dummy diagnostics channel
if (env::properties().get(L"configuration.channel-grid", false))
{
- channels_.push_back(spl::make_shared<video_channel>(static_cast<int>(channels_.size() + 1), core::video_format_desc(core::video_format::x576p2500), accelerator_.create_image_mixer()));
+ channels_.push_back(spl::make_shared<video_channel>(
+ static_cast<int>(channels_.size() + 1),
+ core::video_format_desc(core::video_format::x576p2500),
+ *core::audio_channel_layout_repository::get_default()->get_layout(L"stereo"),
+ accelerator_.create_image_mixer()));
channels_.back()->monitor_output().attach_parent(monitor_subject_);
}
}
--- /dev/null
+# Build file for the CasparCG unit-test executable, based on Google Test.
+cmake_minimum_required (VERSION 2.6)
+project (unit-test)
+
+set(SOURCES
+		audio_channel_layout_test.cpp
+		base64_test.cpp
+		image_mixer_test.cpp
+		main.cpp
+		param_test.cpp
+		stdafx.cpp
+		tweener_test.cpp
+)
+set(HEADERS
+		stdafx.h
+)
+
+add_executable(unit-test ${SOURCES} ${HEADERS})
+add_precompiled_header(unit-test stdafx.h FORCEINCLUDE)
+
+# Link the project libraries under test plus gtest; the debug gtest
+# variant (gtestd.lib) is used for debug configurations.
+target_link_libraries(unit-test
+		accelerator
+		common
+		core
+		ffmpeg
+
+		optimized gtest.lib
+		debug gtestd.lib
+)
+
+include_directories(..)
+include_directories(${BOOST_INCLUDE_PATH})
+include_directories(${RXCPP_INCLUDE_PATH})
+include_directories(${TBB_INCLUDE_PATH})
+include_directories(${GTEST_INCLUDE_PATH})
+
+source_group(sources ./*)
+
+set(OUTPUT_FOLDER "${CMAKE_CURRENT_BINARY_DIR}/${CMAKE_CFG_INTDIR}")
+
+# Copy runtime dependencies (DLLs etc.) next to the built test binary so it
+# can be run directly from the build folder. Copied both to the
+# per-configuration output folder and to the binary dir root.
+foreach(FILE_TO_COPY ${CASPARCG_RUNTIME_DEPENDENCIES})
+	if(IS_DIRECTORY ${FILE_TO_COPY})
+		get_filename_component(FOLDER_NAME "${FILE_TO_COPY}" NAME)
+		add_custom_command(TARGET unit-test POST_BUILD COMMAND ${CMAKE_COMMAND} -E copy_directory \"${FILE_TO_COPY}\" \"${OUTPUT_FOLDER}/${FOLDER_NAME}\")
+		add_custom_command(TARGET unit-test POST_BUILD COMMAND ${CMAKE_COMMAND} -E copy_directory \"${FILE_TO_COPY}\" \"${CMAKE_CURRENT_BINARY_DIR}/${FOLDER_NAME}\")
+	else()
+		add_custom_command(TARGET unit-test POST_BUILD COMMAND ${CMAKE_COMMAND} -E copy \"${FILE_TO_COPY}\" \"${OUTPUT_FOLDER}/\")
+		add_custom_command(TARGET unit-test POST_BUILD COMMAND ${CMAKE_COMMAND} -E copy \"${FILE_TO_COPY}\" \"${CMAKE_CURRENT_BINARY_DIR}/\")
+	endif()
+endforeach(FILE_TO_COPY)
--- /dev/null
+/*
+* Copyright (c) 2011 Sveriges Television AB <info@casparcg.com>
+*
+* This file is part of CasparCG (www.casparcg.com).
+*
+* CasparCG is free software: you can redistribute it and/or modify
+* it under the terms of the GNU General Public License as published by
+* the Free Software Foundation, either version 3 of the License, or
+* (at your option) any later version.
+*
+* CasparCG is distributed in the hope that it will be useful,
+* but WITHOUT ANY WARRANTY; without even the implied warranty of
+* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+* GNU General Public License for more details.
+*
+* You should have received a copy of the GNU General Public License
+* along with CasparCG. If not, see <http://www.gnu.org/licenses/>.
+*
+* Author: Helge Norberg, helge.norberg@svt.se
+*/
+
+#include "stdafx.h"
+
+#include <gtest/gtest.h>
+
+#include <common/memory.h>
+
+#include <core/frame/audio_channel_layout.h>
+#include <core/frame/frame.h>
+
+#include <boost/range/algorithm/equal.hpp>
+
+namespace {
+
+// Wraps a mutable_audio_buffer in an immutable core::audio_buffer view.
+// The samples are moved to a heap-allocated buffer whose lifetime is tied
+// to the returned view via the shared_ptr storage argument.
+::caspar::core::audio_buffer get_buffer(::caspar::core::mutable_audio_buffer buffer)
+{
+	::caspar::spl::shared_ptr<::caspar::core::mutable_audio_buffer> buf(new ::caspar::core::mutable_audio_buffer(std::move(buffer)));
+	return ::caspar::core::audio_buffer(buf->data(), buf->size(), true, std::move(buf));
+}
+
+}
+
+namespace caspar {
+
+// Element-wise equality for audio buffers so that EXPECT_EQ can compare an
+// expected sample sequence against the remapper's result.
+bool operator==(const ::caspar::core::audio_buffer& lhs, const ::caspar::core::audio_buffer& rhs)
+{
+	return boost::range::equal(lhs, rhs);
+}
+
+namespace core {
+
+// With identical input and output layouts the remapper must leave the
+// samples untouched.
+TEST(AudioChannelLayoutTest, PassThrough)
+{
+	audio_channel_layout input_layout(2, L"stereo", L"L R");
+	audio_channel_layout output_layout(2, L"stereo", L"L R" );
+	audio_channel_remapper remapper(input_layout, output_layout);
+
+	auto result = remapper.mix_and_rearrange(get_buffer({ 1, 2 }));
+
+	EXPECT_EQ(get_buffer({ 1, 2 }), result);
+}
+
+// Swapping the channel order string (L R -> R L) between layouts of the
+// same type must swap the two channels within every sample.
+TEST(AudioChannelLayoutTest, ReverseLeftAndRight)
+{
+	audio_channel_layout input_layout(2, L"stereo", L"L R");
+	audio_channel_layout output_layout(2, L"stereo", L"R L");
+	audio_channel_remapper remapper(input_layout, output_layout);
+
+	// Two interleaved stereo samples: {L, R, L, R}.
+	auto result = remapper.mix_and_rearrange(get_buffer({ 1, 2, 3, 4 }));
+
+	EXPECT_EQ(get_buffer({ 2, 1, 4, 3 }), result);
+}
+
+// A registered mix config "C < L + R" downmixes stereo to mono. Per the
+// MixMultipliers test below, '<' rescales the multipliers to sum to 1.0,
+// so C becomes the average of L and R.
+TEST(AudioChannelLayoutTest, StereoToMono)
+{
+	spl::shared_ptr<audio_mix_config_repository> mix_repo;
+	mix_repo->register_config(L"stereo", { L"mono" }, L"C < L + R");
+	audio_channel_layout input_layout(2, L"stereo", L"L R");
+	audio_channel_layout output_layout(1, L"mono", L"C");
+	audio_channel_remapper remapper(input_layout, output_layout, mix_repo);
+
+	// Two samples: (10+30)/2 = 20 and (50+30)/2 = 40.
+	auto result = remapper.mix_and_rearrange(get_buffer({ 10, 30, 50, 30 }));
+
+	EXPECT_EQ(get_buffer({ 20, 40 }), result);
+}
+
+// Remapping to a layout with an empty channel-order string: the input
+// channels are copied to the first output channels in order and the
+// remaining output channels are zero-filled.
+TEST(AudioChannelLayoutTest, StereoToPassthru)
+{
+	audio_channel_layout input_layout(2, L"stereo", L"L R");
+	audio_channel_layout output_layout(16, L"16ch", L"");
+	audio_channel_remapper remapper(input_layout, output_layout);
+
+	auto result = remapper.mix_and_rearrange(get_buffer({ 1, 2 }));
+
+	EXPECT_EQ(get_buffer({ 1, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }), result);
+}
+
+// Pass-through remap (empty channel-order strings) from a wider to a
+// narrower layout: the first 8 channels of each sample are kept and the
+// excess channels are dropped.
+TEST(AudioChannelLayoutTest, 16ChTo8ChPassthruDropLast8)
+{
+	audio_channel_layout input_layout(16, L"16ch", L"");
+	audio_channel_layout output_layout(8, L"8ch", L"");
+	audio_channel_remapper remapper(input_layout, output_layout);
+
+	auto result = remapper.mix_and_rearrange(get_buffer(
+			{
+				1,  2,  3,  4,  5,  6,  7,  8,  9, 10, 11, 12, 13, 14, 15, 16, // Sample 1
+				17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, // Sample 2
+			}));
+
+	EXPECT_EQ(get_buffer(
+			{
+				1,  2,  3,  4,  5,  6,  7,  8, // Sample 1
+				17, 18, 19, 20, 21, 22, 23, 24 // Sample 2
+			}), result);
+}
+
+// Verifies that '<' mix expressions rescale their multipliers so that the
+// total weight per output channel sums to 1.0 (here 1 + 0.5 + 0.5 = 2, so
+// the effective weights become 0.5, 0.25 and 0.25).
+TEST(AudioChannelLayoutTest, MixMultipliers)
+{
+	spl::shared_ptr<audio_mix_config_repository> mix_repo;
+	mix_repo->register_config(
+			L"5.1",
+			{ L"stereo" },
+			L"FL < FL + 0.5 * FC + 0.5 * BL | FR < FR + 0.5 * FC + 0.5 * BR"); // = 1.0 / (1.0 + 0.5 + 0.5) = 0.5 scaled => 0.5 + 0.25 + 0.25
+	audio_channel_layout input_layout(6, L"5.1", L"FL FR FC LFE BL BR");
+	audio_channel_layout output_layout(2, L"stereo", L"FL FR");
+	audio_channel_remapper remapper(input_layout, output_layout, mix_repo);
+	//                                                     FL   FR  FC   LFE   BL  BR
+	auto result = remapper.mix_and_rearrange(get_buffer({ 200,  50, 100, 1000, 40, 80 }));
+
+	// FL = 200*0.5 + 100*0.25 + 40*0.25; FR = 50*0.5 + 100*0.25 + 80*0.25.
+	// LFE is not referenced by the mix expression and is discarded.
+	EXPECT_EQ(get_buffer({
+			100 + 25 + 10,
+			25 + 25 + 20
+	}), result);
+}
+
+}}
#include <gtest/gtest.h>
#include <set>
+#include <locale>
#include <boost/assign.hpp>
#include <gtest/gtest.h>
#include <core/mixer/image/image_mixer.h>
+#include <core/frame/audio_channel_layout.h>
#include <core/frame/pixel_format.h>
#include <core/frame/frame_transform.h>
#include <core/frame/draw_frame.h>
{
return spl::make_shared<accelerator::ogl::image_mixer>(
ogl_device(),
- false); // blend modes not wanted
+ false, // blend modes not wanted
+ false); // straight alpha not wanted
}
struct dummy_ogl_with_blend_modes {};
{
return spl::make_shared<accelerator::ogl::image_mixer>(
ogl_device(),
- true); // blend modes wanted
+ true, // blend modes wanted
+ false); // straight alpha not wanted
}
template <>
{
core::pixel_format_desc desc(core::pixel_format::bgra);
desc.planes.push_back(core::pixel_format_desc::plane(width, height, 4));
- return mixer->create_frame(this, desc);
+ return mixer->create_frame(this, desc, core::audio_channel_layout::invalid());
}
core::mutable_frame create_single_color_frame(
void add_layer(core::draw_frame frame, core::blend_mode blend_mode = core::blend_mode::normal)
{
- mixer->begin_layer(core::blend_mode::normal);
-
+ frame.transform().image_transform.layer_depth = 1;
frame.accept(*mixer);
-
- mixer->end_layer();
}
array<const uint8_t> get_result(int width, int height)
desc.height = height;
desc.size = width * height * 4;
- return (*mixer)(desc).get();
+ return (*mixer)(desc, false).get();
}
};
frame.transform().image_transform.fill_scale[1] = 0.5;
add_layer(frame);
auto res = get_result(2, 2);
- std::vector<const uint8_t> result(res.begin(), res.end());
+ std::vector<std::uint8_t> result(res.begin(), res.end());
// bottom right corner
ASSERT_EQ(boost::assign::list_of<uint8_t>
frame.transform().image_transform.fill_translation[0] = 0;
add_layer(frame);
res = get_result(2, 2);
- result = std::vector<const uint8_t>(res.begin(), res.end());
+ result = std::vector<std::uint8_t>(res.begin(), res.end());
// bottom left corner
ASSERT_EQ(boost::assign::list_of<uint8_t>
#include <gtest/gtest.h>
#include <common/log.h>
+#include <common/memory.h>
+
+#include <core/system_info_provider.h>
+#include <core/producer/cg_proxy.h>
+#include <core/producer/media_info/in_memory_media_info_repository.h>
+
+#include <modules/ffmpeg/ffmpeg.h>
int main(int argc, char** argv)
{
+	using namespace caspar;
+
+	// Construct the minimal module-dependency environment required by the
+	// tests: the registries for system info, CG producers, media info,
+	// producers and consumers. Only the ffmpeg module is initialized.
+	spl::shared_ptr<core::system_info_provider_repository> system_info_provider_repo;
+	spl::shared_ptr<core::cg_producer_registry> cg_registry;
+	auto media_info_repo = core::create_in_memory_media_info_repository();
+	spl::shared_ptr<core::help_repository> help_repo;
+	auto producer_registry = spl::make_shared<core::frame_producer_registry>(help_repo);
+	auto consumer_registry = spl::make_shared<core::frame_consumer_registry>(help_repo);
+
+	core::module_dependencies dependencies(system_info_provider_repo, cg_registry, media_info_repo, producer_registry, consumer_registry);
+	caspar::ffmpeg::init(dependencies);
	testing::InitGoogleTest(&argc, argv);
-	caspar::log::set_log_level(L"error");
+	// Log verbosely so that test failures come with full diagnostic context.
+	caspar::log::set_log_level(L"trace");
	return RUN_ALL_TESTS();
}
#include <common/tweener.h>
-namespace caspar { namespace core {
+namespace caspar {
class TweenerTest : public ::testing::TestWithParam<std::wstring>
{
static const double REQUIRED_CLOSENESS = 0.01;
auto name = GetParam();
- caspar::core::tweener t(name);
+ tweener t(name);
EXPECT_NEAR(
start_value,
TweenerTest,
::testing::ValuesIn(tweener::names()));
-}}
+}
+++ /dev/null
-<?xml version="1.0" encoding="utf-8"?>
-<Project DefaultTargets="Build" ToolsVersion="12.0" xmlns="http://schemas.microsoft.com/developer/msbuild/2003">
- <ItemGroup Label="ProjectConfigurations">
- <ProjectConfiguration Include="Debug|x64">
- <Configuration>Debug</Configuration>
- <Platform>x64</Platform>
- </ProjectConfiguration>
- <ProjectConfiguration Include="Release|x64">
- <Configuration>Release</Configuration>
- <Platform>x64</Platform>
- </ProjectConfiguration>
- </ItemGroup>
- <ItemGroup>
- <ClCompile Include="base64_test.cpp" />
- <ClCompile Include="image_mixer_test.cpp" />
- <ClCompile Include="main.cpp" />
- <ClCompile Include="param_test.cpp" />
- <ClCompile Include="stdafx.cpp">
- <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">Create</PrecompiledHeader>
- </ClCompile>
- <ClCompile Include="tweener_test.cpp" />
- </ItemGroup>
- <ItemGroup>
- <ClInclude Include="stdafx.h" />
- </ItemGroup>
- <ItemGroup>
- <ProjectReference Include="..\accelerator\accelerator.vcxproj">
- <Project>{8493d01a-f642-454c-8c44-88b38e2a3ef8}</Project>
- </ProjectReference>
- <ProjectReference Include="..\common\common.vcxproj">
- <Project>{02308602-7fe0-4253-b96e-22134919f56a}</Project>
- </ProjectReference>
- <ProjectReference Include="..\core\core.vcxproj">
- <Project>{79388c20-6499-4bf6-b8b9-d8c33d7d4ddd}</Project>
- </ProjectReference>
- <ProjectReference Include="..\modules\bluefish\bluefish.vcxproj">
- <Project>{69313d25-9f54-4fc9-9872-628a4dd79464}</Project>
- </ProjectReference>
- <ProjectReference Include="..\modules\decklink\decklink.vcxproj">
- <Project>{d3611658-8f54-43cf-b9af-a5cf8c1102ea}</Project>
- </ProjectReference>
- <ProjectReference Include="..\modules\ffmpeg\ffmpeg.vcxproj">
- <Project>{f6223af3-be0b-4b61-8406-98922ce521c2}</Project>
- </ProjectReference>
- <ProjectReference Include="..\modules\flash\flash.vcxproj">
- <Project>{816deaba-3757-4306-afe0-c27cf96c4dea}</Project>
- </ProjectReference>
- <ProjectReference Include="..\modules\image\image.vcxproj">
- <Project>{3e11ff65-a9da-4f80-87f2-a7c6379ed5e2}</Project>
- </ProjectReference>
- <ProjectReference Include="..\modules\oal\oal.vcxproj">
- <Project>{82ed7ed6-8a15-40ec-a8af-f5e712e0da68}</Project>
- </ProjectReference>
- <ProjectReference Include="..\modules\psd\psd.vcxproj">
- <Project>{866a164b-6f7a-450e-8452-c6ae4e176436}</Project>
- </ProjectReference>
- <ProjectReference Include="..\modules\reroute\reroute.vcxproj">
- <Project>{7d58bd57-fdd5-46e6-a23b-ed14b5314a0e}</Project>
- </ProjectReference>
- <ProjectReference Include="..\modules\screen\screen.vcxproj">
- <Project>{88f974f0-d09f-4788-8cf8-f563209e60c1}</Project>
- </ProjectReference>
- <ProjectReference Include="..\protocol\protocol.vcxproj">
- <Project>{2040b361-1fb6-488e-84a5-38a580da90de}</Project>
- </ProjectReference>
- </ItemGroup>
- <PropertyGroup Label="Globals">
- <ProjectGuid>{3D4DA315-EEFD-4AAD-91B8-CBA879D7F440}</ProjectGuid>
- <Keyword>Win32Proj</Keyword>
- <RootNamespace>shell</RootNamespace>
- </PropertyGroup>
- <Import Project="$(VCTargetsPath)\Microsoft.Cpp.Default.props" />
- <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug|x64'" Label="Configuration">
- <ConfigurationType>Application</ConfigurationType>
- <UseDebugLibraries>true</UseDebugLibraries>
- <CharacterSet>Unicode</CharacterSet>
- <PlatformToolset>v120</PlatformToolset>
- </PropertyGroup>
- <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release|x64'" Label="Configuration">
- <ConfigurationType>Application</ConfigurationType>
- <UseDebugLibraries>false</UseDebugLibraries>
- <WholeProgramOptimization>true</WholeProgramOptimization>
- <CharacterSet>Unicode</CharacterSet>
- <PlatformToolset>v120</PlatformToolset>
- </PropertyGroup>
- <Import Project="$(VCTargetsPath)\Microsoft.Cpp.props" />
- <ImportGroup Label="ExtensionSettings">
- </ImportGroup>
- <ImportGroup Condition="'$(Configuration)|$(Platform)'=='Debug|x64'" Label="PropertySheets">
- <Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" />
- </ImportGroup>
- <ImportGroup Condition="'$(Configuration)|$(Platform)'=='Release|x64'" Label="PropertySheets">
- <Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" />
- </ImportGroup>
- <PropertyGroup Label="UserMacros" />
- <PropertyGroup>
- <_ProjectFileVersion>10.0.30319.1</_ProjectFileVersion>
- <IntDir Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">$(SolutionDir)tmp\unit-test\$(Configuration)\</IntDir>
- <IntDir Condition="'$(Configuration)|$(Platform)'=='Release|x64'">$(SolutionDir)tmp\unit-test\$(Configuration)\</IntDir>
- <IncludePath Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">..\dependencies64\RxCpp\include\;..\dependencies64\boost\;..\dependencies64\ffmpeg\include\;..\dependencies64\glew\include;..\dependencies64\sfml\include\;..\dependencies64\tbb\include\;..\;..\dependencies64\gtest\include;$(IncludePath)</IncludePath>
- <IncludePath Condition="'$(Configuration)|$(Platform)'=='Release|x64'">..\dependencies64\RxCpp\include\;..\dependencies64\boost\;..\dependencies64\ffmpeg\include\;..\dependencies64\glew\include;..\dependencies64\sfml\include\;..\dependencies64\tbb\include\;..\;..\dependencies64\gtest\include;$(IncludePath)</IncludePath>
- <LibraryPath Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">..\dependencies64\boost\stage\lib;..\dependencies64\ffmpeg\lib\;..\dependencies64\glew\lib;..\dependencies64\sfml\extlibs\lib;..\dependencies64\sfml\lib\;..\dependencies64\tbb\lib\;..\dependencies64\freeimage\lib\;..\dependencies64\openal\lib\;..\dependencies64\asmlib\;..\dependencies64\gtest\lib;..\dependencies64\freetype\objs\win32\vc2010;..\dependencies64\zlib\lib\;$(LibraryPath)</LibraryPath>
- <LibraryPath Condition="'$(Configuration)|$(Platform)'=='Release|x64'">..\dependencies64\boost\stage\lib;..\dependencies64\ffmpeg\lib\;..\dependencies64\glew\lib;..\dependencies64\sfml\extlibs\lib;..\dependencies64\sfml\lib\;..\dependencies64\tbb\lib\;..\dependencies64\freeimage\lib\;..\dependencies64\openal\lib\;..\dependencies64\asmlib\;..\dependencies64\gtest\lib;..\dependencies64\freetype\objs\win32\vc2010;..\dependencies64\zlib\lib\;$(LibraryPath)</LibraryPath>
- <OutDir Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">$(SolutionDir)bin\unit-test\$(Configuration)\</OutDir>
- <OutDir Condition="'$(Configuration)|$(Platform)'=='Release|x64'">$(SolutionDir)bin\unit-test\$(Configuration)\</OutDir>
- <TargetName Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">$(ProjectName)</TargetName>
- <TargetName Condition="'$(Configuration)|$(Platform)'=='Release|x64'">unit-test</TargetName>
- </PropertyGroup>
- <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">
- <LinkIncremental>true</LinkIncremental>
- </PropertyGroup>
- <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release|x64'">
- <LinkIncremental>false</LinkIncremental>
- </PropertyGroup>
- <ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">
- <PreBuildEvent>
- <Command>
- </Command>
- </PreBuildEvent>
- <ClCompile>
- <Optimization>Disabled</Optimization>
- <AdditionalIncludeDirectories>../</AdditionalIncludeDirectories>
- <MinimalRebuild>false</MinimalRebuild>
- <ExceptionHandling>Async</ExceptionHandling>
- <BasicRuntimeChecks>EnableFastChecks</BasicRuntimeChecks>
- <SmallerTypeCheck>false</SmallerTypeCheck>
- <RuntimeLibrary>MultiThreadedDebugDLL</RuntimeLibrary>
- <EnableEnhancedInstructionSet>NotSet</EnableEnhancedInstructionSet>
- <RuntimeTypeInfo>true</RuntimeTypeInfo>
- <PrecompiledHeader>NotUsing</PrecompiledHeader>
- <BrowseInformation>true</BrowseInformation>
- <WarningLevel>Level4</WarningLevel>
- <DebugInformationFormat>ProgramDatabase</DebugInformationFormat>
- <PreprocessorDefinitions>SFML_STATIC;BOOST_THREAD_VERSION=4;TBB_USE_CAPTURED_EXCEPTION=0;TBB_USE_ASSERT=1;TBB_USE_DEBUG;_DEBUG;_CRT_SECURE_NO_WARNINGS;COMPILE_DEBUG;%(PreprocessorDefinitions)</PreprocessorDefinitions>
- <TreatWarningAsError>true</TreatWarningAsError>
- <MultiProcessorCompilation>true</MultiProcessorCompilation>
- <FloatingPointModel>Fast</FloatingPointModel>
- <ForcedIncludeFiles>common/compiler/vs/disable_silly_warnings.h</ForcedIncludeFiles>
- </ClCompile>
- <Link>
- <AdditionalDependencies>alibcof64.lib;jpeg.lib;sndfile.lib;sfml-system-d-2.lib;sfml-window-d-2.lib;sfml-graphics-d-2.lib;sfml-audio-d-2.lib;Winmm.lib;Ws2_32.lib;user32.lib;gdi32.lib;avformat.lib;avcodec.lib;avutil.lib;avfilter.lib;swscale.lib;swresample.lib;tbb.lib;OpenGL32.lib;FreeImaged.lib;glew32.lib;openal32.lib;gtestd.lib;Advapi32.lib;freetype.lib;zlibstat.lib</AdditionalDependencies>
- <Version>
- </Version>
- <AdditionalLibraryDirectories>%(AdditionalLibraryDirectories)</AdditionalLibraryDirectories>
- <IgnoreSpecificDefaultLibraries>
- </IgnoreSpecificDefaultLibraries>
- <GenerateDebugInformation>true</GenerateDebugInformation>
- <ProgramDatabaseFile>$(TargetDir)$(TargetName).pdb</ProgramDatabaseFile>
- <GenerateMapFile>false</GenerateMapFile>
- <MapFileName>
- </MapFileName>
- <SubSystem>Console</SubSystem>
- <RandomizedBaseAddress>false</RandomizedBaseAddress>
- <DataExecutionPrevention>
- </DataExecutionPrevention>
- <MapExports>false</MapExports>
- <IgnoreAllDefaultLibraries>
- </IgnoreAllDefaultLibraries>
- </Link>
- <PostBuildEvent>
- <Command>copy "$(SolutionDir)dependencies64\ffmpeg\bin\*.dll" "$(OutDir)"
-copy "$(SolutionDir)dependencies64\tbb\bin\*.dll" "$(OutDir)"
-copy "$(SolutionDir)dependencies64\glew\bin\*.dll" "$(OutDir)"
-copy "$(SolutionDir)dependencies64\freeimage\bin\*.dll" "$(OutDir)"
-copy "$(SolutionDir)dependencies64\openal\bin\*.dll" "$(OutDir)"
-</Command>
- </PostBuildEvent>
- </ItemDefinitionGroup>
- <ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Release|x64'">
- <PreBuildEvent>
- <Command>
- </Command>
- </PreBuildEvent>
- <ClCompile>
- <Optimization>MaxSpeed</Optimization>
- <InlineFunctionExpansion>AnySuitable</InlineFunctionExpansion>
- <IntrinsicFunctions>true</IntrinsicFunctions>
- <FavorSizeOrSpeed>Speed</FavorSizeOrSpeed>
- <AdditionalIncludeDirectories>../;%(AdditionalIncludeDirectories)</AdditionalIncludeDirectories>
- <ExceptionHandling>Async</ExceptionHandling>
- <RuntimeLibrary>MultiThreadedDLL</RuntimeLibrary>
- <EnableEnhancedInstructionSet>NotSet</EnableEnhancedInstructionSet>
- <RuntimeTypeInfo>true</RuntimeTypeInfo>
- <PrecompiledHeader>NotUsing</PrecompiledHeader>
- <WarningLevel>Level4</WarningLevel>
- <DebugInformationFormat>ProgramDatabase</DebugInformationFormat>
- <MultiProcessorCompilation>true</MultiProcessorCompilation>
- <PreprocessorDefinitions>SFML_STATIC;BOOST_THREAD_VERSION=4;TBB_USE_CAPTURED_EXCEPTION=0;NDEBUG;COMPILE_RELEASE;%(PreprocessorDefinitions)</PreprocessorDefinitions>
- <WholeProgramOptimization>false</WholeProgramOptimization>
- <TreatWarningAsError>true</TreatWarningAsError>
- <OmitFramePointers>true</OmitFramePointers>
- <FloatingPointModel>Fast</FloatingPointModel>
- <ForcedIncludeFiles>common/compiler/vs/disable_silly_warnings.h</ForcedIncludeFiles>
- </ClCompile>
- <Link>
- <OptimizeReferences>true</OptimizeReferences>
- <AdditionalDependencies>alibcof64.lib;jpeg.lib;sndfile.lib;sfml-system-2.lib;sfml-window-2.lib;sfml-graphics-2.lib;Winmm.lib;Ws2_32.lib;user32.lib;gdi32.lib;avformat.lib;avcodec.lib;avutil.lib;avfilter.lib;swscale.lib;swresample.lib;tbb.lib;OpenGL32.lib;glew32.lib;openal32.lib;freeimage.lib;gtest.lib;Advapi32.lib;freetype.lib;zlibstat.lib</AdditionalDependencies>
- <AdditionalLibraryDirectories>%(AdditionalLibraryDirectories)</AdditionalLibraryDirectories>
- <GenerateDebugInformation>true</GenerateDebugInformation>
- <ProgramDatabaseFile>$(TargetDir)$(TargetName).pdb</ProgramDatabaseFile>
- <GenerateMapFile>false</GenerateMapFile>
- <SubSystem>Console</SubSystem>
- <RandomizedBaseAddress>false</RandomizedBaseAddress>
- <MapExports>false</MapExports>
- <LinkTimeCodeGeneration>Default</LinkTimeCodeGeneration>
- </Link>
- <PostBuildEvent>
- <Command>copy "$(SolutionDir)dependencies64\ffmpeg\bin\*.dll" "$(OutDir)"
-copy "$(SolutionDir)dependencies64\tbb\bin\*.dll" "$(OutDir)"
-copy "$(SolutionDir)dependencies64\glew\bin\*.dll" "$(OutDir)"
-copy "$(SolutionDir)dependencies64\freeimage\bin\*.dll" "$(OutDir)"
-copy "$(SolutionDir)dependencies64\openal\bin\*.dll" "$(OutDir)"</Command>
- </PostBuildEvent>
- </ItemDefinitionGroup>
- <Import Project="$(VCTargetsPath)\Microsoft.Cpp.targets" />
- <ImportGroup Label="ExtensionTargets">
- </ImportGroup>
-</Project>
\ No newline at end of file
+++ /dev/null
-<?xml version="1.0" encoding="utf-8"?>
-<Project ToolsVersion="4.0" xmlns="http://schemas.microsoft.com/developer/msbuild/2003">
- <ItemGroup>
- <Filter Include="source">
- <UniqueIdentifier>{bb2d04f1-2743-4d80-87a4-928970bb233a}</UniqueIdentifier>
- </Filter>
- </ItemGroup>
- <ItemGroup>
- <ClCompile Include="main.cpp">
- <Filter>source</Filter>
- </ClCompile>
- <ClCompile Include="tweener_test.cpp">
- <Filter>source</Filter>
- </ClCompile>
- <ClCompile Include="stdafx.cpp" />
- <ClCompile Include="base64_test.cpp">
- <Filter>source</Filter>
- </ClCompile>
- <ClCompile Include="param_test.cpp">
- <Filter>source</Filter>
- </ClCompile>
- <ClCompile Include="image_mixer_test.cpp">
- <Filter>source</Filter>
- </ClCompile>
- </ItemGroup>
- <ItemGroup>
- <ClInclude Include="stdafx.h" />
- </ItemGroup>
-</Project>
\ No newline at end of file