#include <string>\r
#include <vector>\r
\r
-static const size_t CONSUMER_BUFFER_DEPTH = 7;\r
+static const size_t CONSUMER_BUFFER_DEPTH = 6;\r
\r
namespace caspar { namespace core {\r
\r
{\r
virtual ~frame_consumer() {}\r
\r
- virtual void send(const safe_ptr<const read_frame>& frame) = 0;\r
+ virtual void send(const safe_ptr<read_frame>& frame) = 0;\r
virtual size_t buffer_depth() const {return 1;}\r
virtual bool key_only() const{ return false;}\r
virtual void initialize(const video_format_desc& format_desc) = 0;\r
struct empty_frame_consumer : public frame_consumer\r
{\r
core::video_format_desc format_desc;\r
- virtual void send(const safe_ptr<const read_frame>&){}\r
+ virtual void send(const safe_ptr<read_frame>&){}\r
virtual size_t buffer_depth() const{return 0;}\r
virtual void initialize(const video_format_desc&){}\r
virtual std::wstring print() const {return L"empty";}\r
#include <common/utility/timer.h>\r
#include <common/memory/memshfl.h>\r
\r
+#include <tbb/mutex.h>\r
+\r
namespace caspar { namespace core {\r
+\r
+// Adapts a fill read_frame so that it presents the corresponding key
+// (alpha-matte) image instead: on first image_data() access the fill
+// frame's alpha bytes are replicated across all four channels on the CPU
+// and the result is cached; audio is forwarded from the fill frame
+// untouched. Replaces the eager get_key_frame() copy removed elsewhere in
+// this patch, so the key conversion is only paid when a key_only consumer
+// actually reads the image.
+class key_read_frame_adapter : public core::read_frame
+{
+ ogl_device& ogl_;
+ safe_ptr<read_frame> fill_;
+ std::shared_ptr<host_buffer> key_;
+ tbb::mutex mutex_;
+public:
+ key_read_frame_adapter(ogl_device& ogl, const safe_ptr<read_frame>& fill)
+ : ogl_(ogl)
+ , fill_(fill)
+ {
+ }
+
+ // Lazily builds and caches the key image. The mutex makes the lazy
+ // initialization safe if several consumers read the same frame
+ // concurrently; note it is held across the buffer allocation and the
+ // CPU shuffle, and fill_->image_data() may itself block while the
+ // source buffer is mapped on the OpenGL thread.
+ virtual const boost::iterator_range<const uint8_t*> image_data()
+ {
+ tbb::mutex::scoped_lock lock(mutex_);
+ if(!key_)
+ {
+ // Extract alpha into every channel on the CPU. NOTE(review): the
+ // spelling "fast_memsfhl" matches the removed get_key_frame() code,
+ // but the included header is <common/memory/memshfl.h> — confirm the
+ // declared function name really transposes the letters this way.
+ key_ = ogl_.create_host_buffer(fill_->image_data().size(), host_buffer::write_only); 
+ fast_memsfhl(key_->data(), fill_->image_data().begin(), fill_->image_data().size(), 0x0F0F0F0F, 0x0B0B0B0B, 0x07070707, 0x03030303);
+ }
+
+ auto ptr = static_cast<const uint8_t*>(key_->data());
+ return boost::iterator_range<const uint8_t*>(ptr, ptr + key_->size());
+ }
+
+ // Fill and key share the same audio; delegate to the wrapped frame.
+ virtual const boost::iterator_range<const int16_t*> audio_data()
+ {
+ return fill_->audio_data();
+ } 
+};
\r
struct output::implementation\r
{ \r
- typedef std::pair<safe_ptr<const read_frame>, safe_ptr<const read_frame>> fill_and_key;\r
+ typedef std::pair<safe_ptr<read_frame>, safe_ptr<read_frame>> fill_and_key;\r
\r
video_channel_context& channel_;\r
\r
timer_.tick(1.0/channel_.get_format_desc().fps);\r
\r
auto fill = frame;\r
- auto key = get_key_frame(frame);\r
+ auto key = make_safe<key_read_frame_adapter>(channel_.ogl(), frame);\r
\r
auto it = consumers_.begin();\r
while(it != consumers_.end())\r
return p.second->has_synchronization_clock();\r
});\r
}\r
-\r
- safe_ptr<const read_frame> get_key_frame(const safe_ptr<const read_frame>& frame)\r
- {\r
- const bool has_key_only = std::any_of(consumers_.begin(), consumers_.end(), [](const decltype(*consumers_.begin())& p)\r
- {\r
- return p.second->key_only();\r
- });\r
-\r
- if(has_key_only)\r
- {\r
- // Currently do key_only transform on cpu. Unsure if the extra 400MB/s (1080p50) overhead is worth it to do it on gpu.\r
- auto key_data = channel_.ogl().create_host_buffer(frame->image_data().size(), host_buffer::write_only); \r
- fast_memsfhl(key_data->data(), frame->image_data().begin(), frame->image_data().size(), 0x0F0F0F0F, 0x0B0B0B0B, 0x07070707, 0x03030303);\r
- std::vector<int16_t> audio_data(frame->audio_data().begin(), frame->audio_data().end());\r
- return make_safe<read_frame>(std::move(key_data), std::move(audio_data));\r
- }\r
- \r
- return make_safe<read_frame>();\r
- }\r
\r
std::wstring print() const\r
{\r
\r
image_kernel kernel_;\r
\r
- safe_ptr<host_buffer> read_buffer_;\r
safe_ptr<device_buffer> draw_buffer_;\r
safe_ptr<device_buffer> write_buffer_;\r
\r
public:\r
implementation(video_channel_context& video_channel) \r
: channel_(video_channel)\r
- , read_buffer_(video_channel.ogl().create_host_buffer(video_channel.get_format_desc().size, host_buffer::read_only))\r
, draw_buffer_(video_channel.ogl().create_device_buffer(video_channel.get_format_desc().width, channel_.get_format_desc().height, 4))\r
, write_buffer_ (video_channel.ogl().create_device_buffer(video_channel.get_format_desc().width, channel_.get_format_desc().height, 4))\r
, local_key_buffer_(video_channel.ogl().create_device_buffer(video_channel.get_format_desc().width, channel_.get_format_desc().height, 1))\r
\r
void reinitialize_buffers()\r
{\r
- read_buffer_ = channel_.ogl().create_host_buffer(channel_.get_format_desc().size, host_buffer::read_only);\r
draw_buffer_ = channel_.ogl().create_device_buffer(channel_.get_format_desc().width, channel_.get_format_desc().height, 4);\r
write_buffer_ = channel_.ogl().create_device_buffer(channel_.get_format_desc().width, channel_.get_format_desc().height, 4);\r
local_key_buffer_ = channel_.ogl().create_device_buffer(channel_.get_format_desc().width, channel_.get_format_desc().height, 1);\r
\r
safe_ptr<host_buffer> render()\r
{ \r
- auto read_buffer = read_buffer_;\r
- auto result = channel_.ogl().begin_invoke([=]() -> safe_ptr<host_buffer>\r
- {\r
- read_buffer->map();\r
- return read_buffer;\r
- });\r
+ auto read_buffer = channel_.ogl().create_host_buffer(channel_.get_format_desc().size, host_buffer::read_only); \r
\r
auto render_queue = std::move(render_queue_);\r
\r
- channel_.ogl().begin_invoke([=]() mutable\r
+ channel_.ogl().invoke([=]() mutable\r
{\r
if(draw_buffer_->width() != channel_.get_format_desc().width || draw_buffer_->height() != channel_.get_format_desc().height)\r
reinitialize_buffers();\r
\r
std::swap(draw_buffer_, write_buffer_);\r
\r
- // Start transfer from device to host. \r
- read_buffer_ = channel_.ogl().create_host_buffer(channel_.get_format_desc().size, host_buffer::read_only); \r
- write_buffer_->write(*read_buffer_);\r
+ // Start transfer from device to host. \r
+ write_buffer_->write(*read_buffer);\r
});\r
\r
- return std::move(result.get());\r
+ return read_buffer;\r
}\r
\r
void draw(const std::vector<render_item>& stream)\r
auto image = mix_image(frames);\r
auto audio = mix_audio(frames);\r
\r
- return make_safe<read_frame>(std::move(image), std::move(audio));\r
+ return make_safe<read_frame>(channel_.ogl(), std::move(image), std::move(audio));\r
}\r
catch(...)\r
{\r
\r
struct read_frame::implementation : boost::noncopyable
{
- std::shared_ptr<host_buffer> image_data_;
- std::vector<int16_t> audio_data_;
+ // The host buffer is now mapped lazily (see image_data() below), so the
+ // implementation needs the ogl_device to schedule the map on the OpenGL
+ // thread, and the buffer itself becomes non-null (safe_ptr).
+ ogl_device& ogl_;
+ safe_ptr<host_buffer> image_data_;
+ std::vector<int16_t> audio_data_;

public:
- implementation(safe_ptr<host_buffer>&& image_data, std::vector<int16_t>&& audio_data) 
- : image_data_(std::move(image_data))
+ implementation(ogl_device& ogl, safe_ptr<host_buffer>&& image_data, std::vector<int16_t>&& audio_data) 
+ : ogl_(ogl)
+ , image_data_(std::move(image_data))
 , audio_data_(std::move(audio_data)){} 

 const boost::iterator_range<const uint8_t*> image_data()
 {
- if(!image_data_)
- return boost::iterator_range<const uint8_t*>();
+ // Lazy map: data() is presumably null until the device-to-host
+ // transfer has been mapped into CPU memory. Block on the OpenGL
+ // thread (high_priority) the first time the image is read. Not
+ // internally synchronized — concurrent first reads would race;
+ // TODO confirm callers serialize access per frame.
+ if(!image_data_->data())
+ {
+ ogl_.invoke([=]
+ {
+ image_data_->map();
+ }, high_priority);
+ }

 auto ptr = static_cast<const uint8_t*>(image_data_->data());
 return boost::iterator_range<const uint8_t*>(ptr, ptr + image_data_->size());
 }
- const boost::iterator_range<const int16_t*> audio_data() const
+ // No longer const: kept consistent with image_data(), which mutates
+ // state on first use.
+ const boost::iterator_range<const int16_t*> audio_data()
 {
 return boost::iterator_range<const int16_t*>(audio_data_.data(), audio_data_.data() + audio_data_.size());
 }
};
\r
-read_frame::read_frame(safe_ptr<host_buffer>&& image_data, std::vector<int16_t>&& audio_data) 
- : impl_(new implementation(std::move(image_data), std::move(audio_data))){}
+// Constructor now takes the ogl_device so the pixel buffer can be mapped
+// lazily on the OpenGL thread at first image_data() access.
+read_frame::read_frame(ogl_device& ogl, safe_ptr<host_buffer>&& image_data, std::vector<int16_t>&& audio_data) 
+ : impl_(new implementation(ogl, std::move(image_data), std::move(audio_data))){}
read_frame::read_frame(){}
-const boost::iterator_range<const uint8_t*> read_frame::image_data() const
+// NOTE(review): image_data()/audio_data() drop const because the first
+// image_data() call now mutates internal state (maps the host buffer);
+// all call sites in this patch were updated to pass non-const frames.
+const boost::iterator_range<const uint8_t*> read_frame::image_data()
{
 return impl_ ? impl_->image_data() : boost::iterator_range<const uint8_t*>();
}

-const boost::iterator_range<const int16_t*> read_frame::audio_data() const
+const boost::iterator_range<const int16_t*> read_frame::audio_data()
{
 return impl_ ? impl_->audio_data() : boost::iterator_range<const int16_t*>();
}
{\r
public:\r
read_frame();\r
- read_frame(safe_ptr<host_buffer>&& image_data, std::vector<int16_t>&& audio_data);\r
+ read_frame(ogl_device& ogl, safe_ptr<host_buffer>&& image_data, std::vector<int16_t>&& audio_data);\r
\r
- virtual const boost::iterator_range<const uint8_t*> image_data() const;\r
- virtual const boost::iterator_range<const int16_t*> audio_data() const;\r
+ virtual const boost::iterator_range<const uint8_t*> image_data();\r
+ virtual const boost::iterator_range<const int16_t*> audio_data();\r
\r
private:\r
struct implementation;\r
unsigned int vid_fmt_;\r
\r
std::array<blue_dma_buffer_ptr, 4> reserved_frames_; \r
- tbb::concurrent_bounded_queue<std::shared_ptr<const core::read_frame>> frame_buffer_;\r
+ tbb::concurrent_bounded_queue<std::shared_ptr<core::read_frame>> frame_buffer_;\r
\r
int preroll_count_;\r
\r
CASPAR_LOG(error)<< print() << TEXT(" Failed to disable video output."); \r
}\r
\r
- void send(const safe_ptr<const core::read_frame>& frame)\r
+ void send(const safe_ptr<core::read_frame>& frame)\r
{ \r
if(preroll_count_ < executor_.capacity())\r
{\r
schedule_next_video(frame); \r
}\r
\r
- void schedule_next_video(const safe_ptr<const core::read_frame>& frame)\r
+ void schedule_next_video(const safe_ptr<core::read_frame>& frame)\r
{\r
static std::vector<int16_t> silence(MAX_HANC_BUFFER_SIZE, 0);\r
\r
consumer_.reset(new bluefish_consumer(format_desc, device_index_, embedded_audio_));\r
}\r
\r
- virtual void send(const safe_ptr<const core::read_frame>& frame)\r
+ virtual void send(const safe_ptr<core::read_frame>& frame)\r
{\r
consumer_->send(frame);\r
}\r
\r
class decklink_frame_adapter : public IDeckLinkVideoFrame\r
{\r
- const safe_ptr<const core::read_frame> frame_;\r
+ const safe_ptr<core::read_frame> frame_;\r
const core::video_format_desc format_desc_;\r
public:\r
- decklink_frame_adapter(const safe_ptr<const core::read_frame>& frame, const core::video_format_desc& format_desc)\r
+ decklink_frame_adapter(const safe_ptr<core::read_frame>& frame, const core::video_format_desc& format_desc)\r
: frame_(frame)\r
, format_desc_(format_desc){}\r
\r
std::list<std::shared_ptr<IDeckLinkVideoFrame>> frame_container_; // Must be std::list in order to guarantee that pointers are always valid.\r
boost::circular_buffer<std::vector<short>> audio_container_;\r
\r
- tbb::concurrent_bounded_queue<std::shared_ptr<const core::read_frame>> video_frame_buffer_;\r
- tbb::concurrent_bounded_queue<std::shared_ptr<const core::read_frame>> audio_frame_buffer_;\r
+ tbb::concurrent_bounded_queue<std::shared_ptr<core::read_frame>> video_frame_buffer_;\r
+ tbb::concurrent_bounded_queue<std::shared_ptr<core::read_frame>> audio_frame_buffer_;\r
\r
std::shared_ptr<diagnostics::graph> graph_;\r
boost::timer tick_timer_;\r
return frame.get() == completed_frame;\r
}));\r
\r
- std::shared_ptr<const core::read_frame> frame; \r
+ std::shared_ptr<core::read_frame> frame; \r
video_frame_buffer_.pop(frame); \r
schedule_next_video(make_safe(frame)); \r
}\r
}\r
else\r
{\r
- std::shared_ptr<const core::read_frame> frame;\r
+ std::shared_ptr<core::read_frame> frame;\r
audio_frame_buffer_.pop(frame);\r
schedule_next_audio(make_safe(frame)); \r
}\r
return S_OK;\r
}\r
\r
- void schedule_next_audio(const safe_ptr<const core::read_frame>& frame)\r
+ void schedule_next_audio(const safe_ptr<core::read_frame>& frame)\r
{\r
static std::vector<short> silence(48000, 0);\r
\r
CASPAR_LOG(error) << print() << L" Failed to schedule audio.";\r
}\r
\r
- void schedule_next_video(const safe_ptr<const core::read_frame>& frame)\r
+ void schedule_next_video(const safe_ptr<core::read_frame>& frame)\r
{\r
frame_container_.push_back(std::make_shared<decklink_frame_adapter>(frame, format_desc_));\r
if(FAILED(output_->ScheduleVideoFrame(frame_container_.back().get(), (frames_scheduled_++) * format_desc_.duration, format_desc_.duration, format_desc_.time_scale)))\r
tick_timer_.restart();\r
}\r
\r
- void send(const safe_ptr<const core::read_frame>& frame)\r
+ void send(const safe_ptr<core::read_frame>& frame)\r
{\r
if(exception_ != nullptr)\r
std::rethrow_exception(exception_);\r
context_.reset([&]{return new decklink_consumer(config_, format_desc);});\r
}\r
\r
- virtual void send(const safe_ptr<const core::read_frame>& frame)\r
+ virtual void send(const safe_ptr<core::read_frame>& frame)\r
{\r
context_->send(frame);\r
}\r
}\r
}\r
\r
- void encode_video_frame(const safe_ptr<const core::read_frame>& frame)\r
+ void encode_video_frame(const safe_ptr<core::read_frame>& frame)\r
{ \r
if(!video_st_)\r
return;\r
} \r
}\r
\r
- void encode_audio_frame(const safe_ptr<const core::read_frame>& frame)\r
+ void encode_audio_frame(const safe_ptr<core::read_frame>& frame)\r
{ \r
if(!audio_st_)\r
return;\r
return true;\r
}\r
\r
- void send(const safe_ptr<const core::read_frame>& frame)\r
+ void send(const safe_ptr<core::read_frame>& frame)\r
{\r
executor_.begin_invoke([=]\r
{ \r
consumer_.reset(new ffmpeg_consumer(narrow(filename_), format_desc, bitrate_));\r
}\r
\r
- virtual void send(const safe_ptr<const core::read_frame>& frame)\r
+ virtual void send(const safe_ptr<core::read_frame>& frame)\r
{\r
consumer_->send(frame);\r
}\r
graph_ = diagnostics::create_graph([this]{return print();});\r
graph_->set_color("output-buffer", diagnostics::color(0.0f, 1.0f, 0.0f));\r
\r
- frame_buffer_.set_capacity(2);\r
+ frame_buffer_.set_capacity(1);\r
\r
initialize(); \r
}\r
CASPAR_LOG(info) << print() << L" Shutting down."; \r
}\r
\r
- void send(const safe_ptr<const core::read_frame>& frame)\r
+ void send(const safe_ptr<core::read_frame>& frame)\r
{ \r
if(preroll_count_ < input_.capacity())\r
{\r
\r
oal_consumer::oal_consumer(){}\r
oal_consumer::oal_consumer(oal_consumer&& other) : impl_(std::move(other.impl_)){}\r
-void oal_consumer::send(const safe_ptr<const core::read_frame>& frame){impl_->send(frame);}\r
+void oal_consumer::send(const safe_ptr<core::read_frame>& frame){impl_->send(frame);}\r
size_t oal_consumer::buffer_depth() const{return impl_->buffer_depth();}\r
void oal_consumer::initialize(const core::video_format_desc& format_desc){impl_.reset(new implementation(format_desc));}\r
std::wstring oal_consumer::print() const { return impl_->print(); }\r
\r
// frame_consumer\r
virtual void initialize(const core::video_format_desc& format_desc); \r
- virtual void send(const safe_ptr<const core::read_frame>&);\r
+ virtual void send(const safe_ptr<core::read_frame>&);\r
virtual size_t buffer_depth() const;\r
virtual std::wstring print() const;\r
virtual const core::video_format_desc& get_video_format_desc() const;\r
size_t square_width_;\r
size_t square_height_;\r
\r
- boost::circular_buffer<safe_ptr<const core::read_frame>> frame_buffer_;\r
+ boost::circular_buffer<safe_ptr<core::read_frame>> frame_buffer_;\r
\r
executor executor_;\r
public:\r
return std::make_pair(width, height);\r
}\r
\r
- void render(const safe_ptr<const core::read_frame>& frame)\r
+ void render(const safe_ptr<core::read_frame>& frame)\r
{ \r
glBindTexture(GL_TEXTURE_2D, texture_);\r
\r
std::rotate(pbos_.begin(), pbos_.begin() + 1, pbos_.end());\r
}\r
\r
- void send(const safe_ptr<const core::read_frame>& frame)\r
+ void send(const safe_ptr<core::read_frame>& frame)\r
{\r
frame_buffer_.push_back(frame);\r
\r
do_send(frame_buffer_.front());\r
}\r
\r
- void do_send(const safe_ptr<const core::read_frame>& frame)\r
+ void do_send(const safe_ptr<core::read_frame>& frame)\r
{ \r
executor_.try_begin_invoke([=]\r
{\r
consumer_.reset(new ogl_consumer(screen_index_, stretch_, windowed_, format_desc));\r
}\r
\r
- virtual void send(const safe_ptr<const core::read_frame>& frame)\r
+ virtual void send(const safe_ptr<core::read_frame>& frame)\r
{\r
consumer_->send(frame);\r
}\r