priority_count\r
};\r
\r
+// Adaptor that moves its payload when "copied". Required because
+// tbb::concurrent_bounded_queue and std::function need copyable
+// payloads, while boost::packaged_task is move-only. The copy ctor
+// deliberately moves from a const source through the mutable member,
+// so a copied-from instance must not be used again.
+namespace internal
+{
+	template<typename T>
+	struct move_on_copy
+	{
+		// "Copy": actually transfers ownership; other.value is left moved-from.
+		move_on_copy(const move_on_copy<T>& other) : value(std::move(other.value)){}
+		move_on_copy(T&& value) : value(std::move(value)){}
+		mutable T value;
+	};
+
+	// Deduction helper so callers need not spell out T.
+	template<typename T>
+	move_on_copy<T> make_move_on_copy(T&& value)
+	{
+		return move_on_copy<T>(std::move(value));
+	}
+}
+\r
class executor : boost::noncopyable\r
{\r
const std::string name_;\r
\r
typedef tbb::concurrent_bounded_queue<std::function<void()>> function_queue;\r
function_queue execution_queue_[priority_count];\r
- \r
+ \r
+	// Wraps func in a boost::packaged_task whose wait-callback executes
+	// the task inline when it is waited upon from the executor thread
+	// itself — otherwise a future.get()/wait() issued from inside a
+	// queued task would deadlock waiting on its own queue.
+	template<typename Func>
+	auto create_task(Func&& func) -> boost::packaged_task<decltype(func())> // noexcept
+	{
+		typedef boost::packaged_task<decltype(func())> task_type;
+
+		auto task = task_type(std::forward<Func>(func));
+
+		task.set_wait_callback(std::function<void(task_type&)>([=](task_type& my_task) // The std::function wrapper is required in order to add ::result_type to functor class.
+		{
+			try
+			{
+				if(boost::this_thread::get_id() == thread_.get_id()) // Avoids potential deadlock.
+					my_task();
+			}
+			catch(boost::task_already_started&){}
+		}));
+
+		// packaged_task is move-only; the explicit std::move is needed for
+		// the by-value return on the pre-C++11 compilers this targets.
+		return std::move(task);
+	}
+\r
public:\r
\r
explicit executor(const std::wstring& name) : name_(narrow(name)) // noexcept\r
virtual ~executor() // noexcept\r
{\r
stop();\r
- \r
- std::function<void()> func;\r
- while(execution_queue_[normal_priority].try_pop(func)){} // Wake all waiting push threads.\r
-\r
- if(boost::this_thread::get_id() != thread_.get_id())\r
- thread_.join();\r
+ join();\r
}\r
\r
void set_capacity(size_t capacity) // noexcept\r
{\r
invoke([]{});\r
}\r
+\r
+	// Blocks until the executor thread has exited. The self-join guard
+	// makes it safe to call (as a no-op) from a task that is itself
+	// running on the executor thread, e.g. during destruction.
+	void join()
+	{
+		if(boost::this_thread::get_id() != thread_.get_id())
+			thread_.join();
+	}
\r
template<typename Func>\r
auto begin_invoke(Func&& func, priority priority = normal_priority) -> boost::unique_future<decltype(func())> // noexcept\r
{ \r
- typedef boost::packaged_task<decltype(func())> task_type;\r
- \r
- auto task = task_type(std::forward<Func>(func));\r
- auto future = task.get_future();\r
- \r
- if(!is_running_)\r
- return std::move(future); \r
-\r
- task.set_wait_callback(std::function<void(task_type&)>([=](task_type& my_task) // The std::function wrapper is required in order to add ::result_type to functor class.\r
- {\r
- try\r
- {\r
- if(boost::this_thread::get_id() == thread_.get_id()) // Avoids potential deadlock.\r
- my_task();\r
- }\r
- catch(boost::task_already_started&){}\r
- }));\r
- \r
// Create a move on copy adaptor to avoid copying the functor into the queue, tbb::concurrent_queue does not support move semantics.\r
- struct task_adaptor_t\r
- {\r
- task_adaptor_t(const task_adaptor_t& other) : task(std::move(other.task)){}\r
- task_adaptor_t(task_type&& task) : task(std::move(task)){}\r
- void operator()() const { task(); }\r
- mutable task_type task;\r
- } task_adaptor(std::move(task));\r
+ auto task_adaptor = internal::make_move_on_copy(create_task(func));\r
+\r
+ auto future = task_adaptor.value.get_future();\r
\r
execution_queue_[priority].push([=]\r
{\r
- try{task_adaptor();}\r
+ try{task_adaptor.value();}\r
catch(boost::task_already_started&){}\r
catch(...){CASPAR_LOG_CURRENT_EXCEPTION();}\r
});\r
\r
return std::move(future); \r
}\r
- \r
+\r
+	// Non-blocking variant of begin_invoke: gives up instead of waiting
+	// when the bounded queue is full.
+	template<typename Func>
+	auto try_begin_invoke(Func&& func, priority priority = normal_priority) -> boost::unique_future<decltype(func())> // noexcept
+	{
+		// Create a move on copy adaptor to avoid copying the functor into the queue, tbb::concurrent_queue does not support move semantics.
+		// Forward func to preserve its value category: an lvalue here would
+		// make create_task copy the functor into the packaged_task.
+		auto task_adaptor = internal::make_move_on_copy(create_task(std::forward<Func>(func)));
+
+		auto future = task_adaptor.value.get_future();
+
+		// NOTE(review): try_push(nullptr) enqueues an empty std::function on
+		// the normal-priority queue — confirm the run loop tolerates invoking
+		// it. Also, if the push is rejected the returned future never becomes
+		// ready; waiting on it from another thread blocks forever (see the
+		// same-thread guard in try_invoke).
+		if(priority == normal_priority || execution_queue_[normal_priority].try_push(nullptr))
+		{
+			execution_queue_[priority].try_push([=]
+			{
+				try{task_adaptor.value();}
+				catch(boost::task_already_started&){}
+				catch(...){CASPAR_LOG_CURRENT_EXCEPTION();}
+			});
+		}
+
+		return std::move(future);
+	}
+\r
template<typename Func>\r
auto invoke(Func&& func, priority prioriy = normal_priority) -> decltype(func()) // noexcept\r
{\r
return begin_invoke(std::forward<Func>(func), prioriy).get();\r
}\r
\r
+	// Like invoke(), but uses try_begin_invoke so it does not block on a
+	// saturated queue. Runs func inline when already on the executor thread.
+	template<typename Func>
+	auto try_invoke(Func&& func, priority prioriy = normal_priority) -> decltype(func()) // noexcept
+	{
+		if(boost::this_thread::get_id() == thread_.get_id()) // Avoids potential deadlock.
+			return func();
+
+		// NOTE(review): if try_begin_invoke failed to enqueue the task, this
+		// get() never returns — confirm callers cannot hit that path.
+		return try_begin_invoke(std::forward<Func>(func), prioriy).get();
+	}
+\r
void yield() // noexcept\r
{\r
if(boost::this_thread::get_id() != thread_.get_id()) // Only yield when calling from execution thread.\r
#include <common/concurrency/executor.h>\r
#include <common/diagnostics/graph.h>\r
#include <common/utility/assert.h>\r
+#include <common/utility/timer.h>\r
#include <common/memory/memshfl.h>\r
\r
#include <boost/range/algorithm_ext/erase.hpp>\r
\r
struct frame_consumer_device::implementation\r
{ \r
+ high_prec_timer timer_;\r
+\r
boost::circular_buffer<std::pair<safe_ptr<const read_frame>,safe_ptr<const read_frame>>> buffer_;\r
\r
std::map<int, std::shared_ptr<frame_consumer>> consumers_; // Valid iterators after erase\r
{ \r
executor_.begin_invoke([=]\r
{\r
+ if(!std::any_of(consumers_.begin(), consumers_.end(), [](const decltype(*consumers_.begin())& p){return p.second->has_synchronization_clock();}))\r
+ timer_.tick(1.0/format_desc_.fps);\r
+\r
diag_->set_value("input-buffer", static_cast<float>(executor_.size())/static_cast<float>(executor_.capacity()));\r
frame_timer_.restart();\r
\r
auto key_frame = read_frame::empty();\r
\r
- if(boost::range::find_if(consumers_, [](const decltype(*consumers_.begin())& p){return p.second->key_only();}) != consumers_.end())\r
+ if(std::any_of(consumers_.begin(), consumers_.end(), [](const decltype(*consumers_.begin())& p){return p.second->key_only();}))\r
{\r
// Currently do key_only transform on cpu. Unsure if the extra 400MB/s (1080p50) overhead is worth it to do it on gpu.\r
auto key_data = ogl_device::create_host_buffer(frame->image_data().size(), host_buffer::write_only); \r
\r
if(!buffer_.full())\r
return;\r
-\r
\r
auto it = consumers_.begin();\r
while(it != consumers_.end())\r