struct pixel_buffer_object::implementation : boost::noncopyable\r
{\r
implementation(size_t width, size_t height, GLenum format) \r
- : width_(width), height_(height), pbo_(0), format_(format),\r
+ : width_(width), height_(height), pbo_(0), format_(format), data_(nullptr),\r
texture_(0), writing_(false), reading_(false), mapped_(false)\r
{\r
switch(format)\r
{\r
if(pbo_ != 0)\r
glDeleteBuffers(1, &pbo_);\r
+ if(texture_ != 0)\r
+ glDeleteTextures(1, &texture_);\r
} \r
\r
void bind_pbo(GLenum mode)\r
void* end_write()\r
{\r
if(mapped_)\r
- BOOST_THROW_EXCEPTION(invalid_operation());\r
+ {\r
+ if(!writing_)\r
+ return data_;\r
+ else\r
+ BOOST_THROW_EXCEPTION(invalid_operation() << msg_info("Buffer is already mapped."));\r
+ }\r
\r
bind_pbo(GL_PIXEL_UNPACK_BUFFER);\r
GL(glBufferData(GL_PIXEL_UNPACK_BUFFER, size_, NULL, GL_STREAM_DRAW));\r
- auto data = glMapBuffer(GL_PIXEL_UNPACK_BUFFER, GL_WRITE_ONLY);\r
+ data_= glMapBuffer(GL_PIXEL_UNPACK_BUFFER, GL_WRITE_ONLY);\r
unbind_pbo(GL_PIXEL_UNPACK_BUFFER); \r
- if(!data)\r
+ if(!data_)\r
BOOST_THROW_EXCEPTION(invalid_operation() << msg_info("glMapBuffer failed"));\r
writing_ = false;\r
mapped_ = true;\r
- return data;\r
+ return data_;\r
}\r
\r
void begin_read()\r
void* end_read()\r
{\r
if(mapped_)\r
- BOOST_THROW_EXCEPTION(invalid_operation());\r
+ {\r
+ if(!reading_)\r
+ return data_;\r
+ else\r
+ BOOST_THROW_EXCEPTION(invalid_operation() << msg_info("Buffer is already mapped."));\r
+ }\r
\r
bind_pbo(GL_PIXEL_PACK_BUFFER);\r
- auto data = glMapBuffer(GL_PIXEL_PACK_BUFFER, GL_READ_ONLY); \r
+ data_ = glMapBuffer(GL_PIXEL_PACK_BUFFER, GL_READ_ONLY); \r
unbind_pbo(GL_PIXEL_PACK_BUFFER);\r
- if(!data)\r
+ if(!data_)\r
BOOST_THROW_EXCEPTION(std::bad_alloc());\r
reading_ = false;\r
mapped_ = true;\r
- return data;\r
+ return data_;\r
}\r
\r
void is_smooth(bool smooth)\r
\r
GLint internal_;\r
GLenum format_;\r
+ void* data_;\r
};\r
\r
pixel_buffer_object::pixel_buffer_object(){}\r
\r
enum severity_level\r
{\r
- trace,\r
- debug,\r
- info,\r
- warning,\r
- error,\r
- fatal\r
+ trace,\r
+ debug,\r
+ info,\r
+ warning,\r
+ error,\r
+ fatal\r
};\r
\r
template< typename CharT, typename TraitsT >\r
inline std::basic_ostream< CharT, TraitsT >& operator<< (\r
- std::basic_ostream< CharT, TraitsT >& strm, severity_level lvl)\r
+ std::basic_ostream< CharT, TraitsT >& strm, severity_level lvl)\r
{\r
if(lvl == trace)\r
strm << "trace";\r
\r
BOOST_LOG_DECLARE_GLOBAL_LOGGER_INIT(logger, caspar_logger)\r
{\r
- internal::init();\r
- return caspar_logger(boost::log::keywords::severity = trace);\r
+ internal::init();\r
+ return caspar_logger(boost::log::keywords::severity = trace);\r
}\r
\r
#define CASPAR_LOG(lvl)\\r
- BOOST_LOG_STREAM_WITH_PARAMS(::caspar::log::get_logger(),\\r
- (::boost::log::keywords::severity = ::caspar::log::lvl))\r
+ BOOST_LOG_STREAM_WITH_PARAMS(::caspar::log::get_logger(),\\r
+ (::boost::log::keywords::severity = ::caspar::log::lvl))\r
\r
#define CASPAR_LOG_CURRENT_EXCEPTION() \\r
try\\r
: producer_device_(producer_device), processor_device_(processor_device), consumer_device_(consumer_device)\r
{\r
}\r
+\r
+ ~implementation()\r
+ {\r
+ producer_device_->clear();\r
+ }\r
\r
void load(int render_layer, const frame_producer_ptr& producer, load_option::type option = load_option::none)\r
{\r
\r
struct consumer::implementation : public sf::SoundStream, boost::noncopyable\r
{\r
- implementation() : container_(5)\r
+ implementation() : container_(5), underrun_count_(0)\r
{\r
input_.set_capacity(2);\r
sf::SoundStream::Initialize(2, 48000);\r
static std::vector<short> silence(1920*2, 0);\r
\r
std::shared_ptr<std::vector<short>> audio_data;\r
+ \r
if(!input_.try_pop(audio_data))\r
{\r
- CASPAR_LOG(trace) << "Sound Buffer Underrun";\r
+ if(underrun_count_ == 0)\r
+ CASPAR_LOG(warning) << "### Sound Input underflow has STARTED.";\r
+ ++underrun_count_;\r
input_.pop(audio_data);\r
}\r
-\r
+ else if(underrun_count_ > 0)\r
+ {\r
+ CASPAR_LOG(trace) << "### Sound Input Underrun has ENDED with " << underrun_count_ << " ticks.";\r
+ underrun_count_ = 0;\r
+ }\r
+ \r
if(audio_data->empty())\r
{ \r
data.Samples = silence.data();\r
return true;\r
}\r
\r
+ long underrun_count_;\r
boost::circular_buffer<std::vector<short>> container_;\r
tbb::concurrent_bounded_queue<std::shared_ptr<std::vector<short>>> input_;\r
};\r
<PreprocessorDefinitions>NDEBUG;_VC80_UPGRADE=0x0710;%(PreprocessorDefinitions)</PreprocessorDefinitions>\r
<WholeProgramOptimization>true</WholeProgramOptimization>\r
<TreatWarningAsError>true</TreatWarningAsError>\r
+ <OmitFramePointers>true</OmitFramePointers>\r
</ClCompile>\r
<PreLinkEvent>\r
<Command>\r
}\r
\r
pixel_format::type pix_fmt;\r
-\r
std::array<plane, 4> planes;\r
+\r
+ static size_t hash(const pixel_format_desc& desc)\r
+ {\r
+ size_t hash = 0;\r
+ switch(desc.pix_fmt)\r
+ {\r
+ case pixel_format::ycbcr:\r
+ case pixel_format::ycbcra:\r
+ // 0-10 (11) width\r
+ // 11-21 (11) height\r
+ // 22-24 (3) x-ratio\r
+ // 25-27 (3) y-ratio\r
+ // 28-29 (2) unused\r
+ // 30 (1) alpha\r
+ // 31 (1) yuv = true => 1\r
+ hash |= (desc.planes[0].width & 0x7FF) << 0;\r
+ hash |= (desc.planes[0].height & 0x7FF) << 11;\r
+ hash |= ((desc.planes[0].height/desc.planes[1].height) & 0x7) << 22;\r
+ hash |= ((desc.planes[0].width/desc.planes[1].width) & 0x7) << 25;\r
+ hash |= desc.pix_fmt == pixel_format::ycbcra ? (1 << 30) : 0;\r
+ hash |= 1 << 31;\r
+ return hash;\r
+ case pixel_format::bgra:\r
+ case pixel_format::rgba:\r
+ case pixel_format::argb:\r
+ case pixel_format::abgr:\r
+ \r
+ //0-10 (11) height\r
+ //11-21 (11) width\r
+ //22-29 (8) unused\r
+ //30 (1) alpha\r
+ //31 (1) yuv = false => 0\r
+ hash |= (desc.planes[0].height & 0xFFFF) << 0;\r
+ hash |= (desc.planes[0].width & 0xFFFF) << 15;\r
+ hash |= 1 << 30;\r
+ return hash;\r
+\r
+ default:\r
+ return hash;\r
+ };\r
+ }\r
};\r
\r
}}
\ No newline at end of file
struct frame_processor_device::implementation : boost::noncopyable\r
{ \r
implementation(frame_processor_device* self, const video_format_desc& format_desc) \r
- : fmt_(format_desc), underrun_count_(0)\r
+ : fmt_(format_desc), output_underrun_count_(0), input_underrun_count_(0)\r
{ \r
- boost::promise<frame_ptr> promise;\r
- active_frame_ = promise.get_future();\r
- promise.set_value(nullptr);\r
-\r
input_.set_capacity(2);\r
output_.set_capacity(2);\r
executor_.start();\r
\r
~implementation()\r
{\r
+ stop();\r
+ }\r
+\r
+ void stop()\r
+ {\r
+ output_.clear();\r
+ input_.clear();\r
+ input_.push(nullptr);\r
+ output_.push(nullptr);\r
executor_.stop();\r
}\r
- \r
- frame_ptr create_frame(const pixel_format_desc& desc, void* tag)\r
+\r
+ frame_ptr create_frame(const pixel_format_desc& desc)\r
{\r
- size_t key = reinterpret_cast<size_t>(tag);\r
+ size_t key = pixel_format_desc::hash(desc);\r
auto& pool = writing_pools_[key];\r
\r
frame_ptr my_frame;\r
- if(!pool.try_pop(my_frame))\r
- {\r
- my_frame = executor_.invoke([&]\r
- {\r
- return std::shared_ptr<frame>(new frame(desc));\r
- });\r
- }\r
+ if(!pool.try_pop(my_frame)) \r
+ my_frame = executor_.invoke([&]{return std::shared_ptr<frame>(new frame(desc));}); \r
\r
auto destructor = [=]\r
{\r
writing_pools_[key].push(my_frame);\r
};\r
\r
- return frame_ptr(my_frame.get(), [=](frame*) \r
- {\r
- executor_.begin_invoke(destructor);\r
- });\r
- }\r
-\r
- void release_tag(void* tag)\r
- {\r
- writing_pools_[reinterpret_cast<size_t>(tag)].clear();\r
+ return frame_ptr(my_frame.get(), [=](frame*){executor_.begin_invoke(destructor);});\r
}\r
- \r
+ \r
void send(const frame_ptr& input_frame)\r
{ \r
+ if(input_frame == nullptr)\r
+ return;\r
+\r
input_.push(input_frame); // Block if there are too many frames in pipeline\r
executor_.begin_invoke([=]\r
{\r
try\r
{\r
- frame_ptr output_frame;\r
- input_.pop(output_frame);\r
- if(output_frame != nullptr)\r
- output_.push(renderer_->render(output_frame));\r
+ output_.push(renderer_->render(input_frame));\r
+ frame_ptr dummy;\r
+ input_.try_pop(dummy);\r
}\r
catch(...)\r
{\r
{\r
if(!output_.try_pop(output_frame))\r
{\r
- if(underrun_count_ == 0) \r
- CASPAR_LOG(trace) << "Frame Processor Underrun has STARTED.";\r
+ if(input_.empty())\r
+ {\r
+ if(input_underrun_count_ == 0) \r
+ CASPAR_LOG(trace) << "### Frame Processor Input Underrun has STARTED. ###";\r
+ \r
+ ++input_underrun_count_;\r
+ }\r
+ else \r
+ {\r
+ if(output_underrun_count_ == 0) \r
+ CASPAR_LOG(trace) << "### Frame Processor Output Underrun has STARTED. ###";\r
\r
- ++underrun_count_;\r
+ ++output_underrun_count_;\r
+ }\r
+\r
output_.pop(output_frame);\r
- } \r
- else if(underrun_count_ > 0)\r
+ } \r
+ else if(input_underrun_count_ > 0)\r
+ {\r
+			CASPAR_LOG(trace) << "### Frame Processor Input Underrun has ENDED with " << input_underrun_count_ << " ticks. ###";
+ input_underrun_count_ = 0;\r
+ } \r
+ else if(output_underrun_count_ > 0)\r
{\r
- CASPAR_LOG(trace) << "Frame Processor Underrun has ENDED with " << underrun_count_ << " ticks.";\r
- underrun_count_ = 0;\r
+ CASPAR_LOG(trace) << "### Frame Processor Output Underrun has ENDED with " << output_underrun_count_ << " ticks. ###";\r
+ output_underrun_count_ = 0;\r
}\r
}\r
\r
frame_queue reading_pool_; \r
\r
std::unique_ptr<sf::Context> ogl_context_;\r
-\r
- boost::unique_future<frame_ptr> active_frame_;\r
\r
common::executor executor_;\r
\r
\r
frame_renderer_ptr renderer_;\r
\r
- long underrun_count_;\r
+ long output_underrun_count_;\r
+ long input_underrun_count_;\r
};\r
\r
#if defined(_MSC_VER)\r
\r
frame_processor_device::frame_processor_device(const video_format_desc& format_desc) \r
: impl_(new implementation(this, format_desc)){}\r
-frame_ptr frame_processor_device::create_frame(const pixel_format_desc& desc, void* tag){return impl_->create_frame(desc, tag);}\r
-void frame_processor_device::release_tag(void* tag){impl_->release_tag(tag);}\r
+frame_ptr frame_processor_device::create_frame(const pixel_format_desc& desc){return impl_->create_frame(desc);}\r
void frame_processor_device::send(const frame_ptr& frame){impl_->send(frame);}\r
void frame_processor_device::receive(frame_ptr& frame){impl_->receive(frame);}\r
const video_format_desc frame_processor_device::get_video_format_desc() const { return impl_->fmt_;}\r
-frame_ptr frame_processor_device::create_frame(size_t width, size_t height, void* tag)\r
+frame_ptr frame_processor_device::create_frame(size_t width, size_t height)\r
{\r
+ // Create bgra frame\r
pixel_format_desc desc;\r
desc.pix_fmt = pixel_format::bgra;\r
desc.planes[0] = pixel_format_desc::plane(width, height, 4);\r
- return create_frame(desc, tag);\r
+ return create_frame(desc);\r
}\r
\r
-frame_ptr frame_processor_device::create_frame(void* tag)\r
+frame_ptr frame_processor_device::create_frame()\r
{\r
+ // Create bgra frame with output resolution\r
pixel_format_desc desc;\r
desc.pix_fmt = pixel_format::bgra;\r
desc.planes[0] = pixel_format_desc::plane(get_video_format_desc().width, get_video_format_desc().height, 4);\r
- return create_frame(desc, tag);\r
+ return create_frame(desc);\r
}\r
+void frame_processor_device::stop(){impl_->stop();}\r
}}
\ No newline at end of file
public:\r
frame_processor_device(const video_format_desc& format_desc);\r
\r
+ void stop();\r
void send(const frame_ptr& frame);\r
void receive(frame_ptr& frame);\r
\r
- frame_ptr create_frame(const pixel_format_desc& desc, void* tag); \r
- frame_ptr create_frame(size_t width, size_t height, void* tag); \r
- frame_ptr create_frame(void* tag);\r
-\r
- void release_tag(void* tag);\r
-\r
+ frame_ptr create_frame(const pixel_format_desc& desc); \r
+	frame_ptr create_frame(size_t width, size_t height); 
+ frame_ptr create_frame();\r
+ \r
const video_format_desc get_video_format_desc() const;\r
private:\r
struct implementation;\r
writing_[next_index]->draw(shader_);\r
\r
// Create an output frame\r
- auto temp_frame = frame_processor_.create_frame(this);\r
+ auto temp_frame = frame_processor_.create_frame();\r
\r
// Read from framebuffer into page-locked memory.\r
temp_frame->begin_read();\r
#pragma intrinsic(__movsd, __stosd)\r
\r
namespace caspar { namespace core {\r
-\r
-class color_producer : public frame_producer\r
-{\r
-public:\r
- explicit color_producer(unsigned int color_value) \r
- : color_value_(color_value){}\r
-\r
- ~color_producer()\r
- {\r
- if(frame_processor_)\r
- frame_processor_->release_tag(this);\r
- }\r
-\r
- frame_ptr render_frame()\r
- { \r
- return frame_;\r
- }\r
-\r
- void initialize(const frame_processor_device_ptr& frame_processor)\r
- {\r
- frame_processor_ = frame_processor;\r
- frame_ = frame_processor->create_frame(this);\r
- __stosd(reinterpret_cast<unsigned long*>(frame_->data()), color_value_, frame_->size() / sizeof(unsigned long));\r
- }\r
-\r
- frame_processor_device_ptr frame_processor_;\r
- frame_ptr frame_;\r
- unsigned int color_value_;\r
-};\r
-\r
-union Color \r
+ \r
+unsigned int get_pixel_color_value(const std::wstring& parameter)\r
{\r
- struct Components \r
+ union Color \r
{\r
- unsigned char a;\r
- unsigned char r;\r
- unsigned char g;\r
- unsigned char b;\r
- } comp;\r
+ struct Components \r
+ {\r
+ unsigned char a;\r
+ unsigned char r;\r
+ unsigned char g;\r
+ unsigned char b;\r
+ } comp;\r
\r
- unsigned int value;\r
-};\r
+ unsigned int value;\r
+ };\r
\r
-unsigned int get_pixel_color_value(const std::wstring& parameter)\r
-{\r
std::wstring color_code;\r
if(parameter.length() != 9 || parameter[0] != '#')\r
BOOST_THROW_EXCEPTION(invalid_argument() << arg_name_info("parameter") << arg_value_info(common::narrow(parameter)) << msg_info("Invalid color code"));\r
return color.value;\r
}\r
\r
+class color_producer : public frame_producer\r
+{\r
+public:\r
+ explicit color_producer(const std::wstring& color) : color_str_(color), color_value_(get_pixel_color_value(color)){}\r
+ \r
+ frame_ptr render_frame()\r
+ { \r
+ return frame_;\r
+ }\r
+\r
+ void initialize(const frame_processor_device_ptr& frame_processor)\r
+ {\r
+ frame_processor_ = frame_processor;\r
+ frame_ = frame_processor->create_frame();\r
+ __stosd(reinterpret_cast<unsigned long*>(frame_->data()), color_value_, frame_->size() / sizeof(unsigned long));\r
+ }\r
+ \r
+ std::wstring print()\r
+ {\r
+ std::wstringstream str;\r
+ str << L"color_producer " << color_str_ << L".";\r
+ return str.str();\r
+ }\r
+\r
+ frame_processor_device_ptr frame_processor_;\r
+ frame_ptr frame_;\r
+ unsigned int color_value_;\r
+ std::wstring color_str_;\r
+};\r
+\r
frame_producer_ptr create_color_producer(const std::vector<std::wstring>& params)\r
{\r
if(params.empty() || params[0].at(0) != '#')\r
return nullptr;\r
- return std::make_shared<color_producer>(get_pixel_color_value(params[0]));\r
+ return std::make_shared<color_producer>(params[0]);\r
}\r
\r
}}
\ No newline at end of file
if(!input_->seek(boost::lexical_cast<unsigned long long>(*seek)))\r
CASPAR_LOG(warning) << "Failed to seek file: " << filename_ << "to frame" << *seek;\r
}\r
-\r
- input_->start();\r
}\r
\r
void initialize(const frame_processor_device_ptr& frame_processor)\r
frame_ptr render_frame()\r
{\r
while(ouput_channel_.empty() && !input_->is_eof())\r
- { \r
+ { \r
auto video_packet = input_->get_video_packet(); \r
auto audio_packet = input_->get_audio_packet(); \r
tbb::parallel_invoke(\r
if(video_packet.empty() && audio_packet.empty())\r
{\r
if(underrun_count_ == 0)\r
- CASPAR_LOG(warning) << "File read underflow has STARTED.";\r
+ CASPAR_LOG(warning) << "### File read underflow has STARTED.";\r
++underrun_count_;\r
+ return last_frame_;\r
}\r
else if(underrun_count_ > 0)\r
{\r
- CASPAR_LOG(trace) << "File Read Underrun has ENDED with " << underrun_count_ << " ticks.";\r
+ CASPAR_LOG(trace) << "### File Read Underrun has ENDED with " << underrun_count_ << " ticks.";\r
underrun_count_ = 0;\r
}\r
\r
} \r
}\r
\r
- frame_ptr frame;\r
if(!ouput_channel_.empty())\r
{\r
- frame = ouput_channel_.front();\r
+ last_frame_ = ouput_channel_.front();\r
ouput_channel_.pop();\r
}\r
- return frame;\r
+ return last_frame_;\r
+ }\r
+\r
+ std::wstring print()\r
+ {\r
+ std::wstringstream str;\r
+ str << L"ffmpeg_producer " << filename_ << L".";\r
+ return str.str();\r
}\r
\r
bool has_audio_;\r
\r
std::wstring filename_;\r
\r
- long underrun_count_;\r
+ long underrun_count_;\r
+\r
+ frame_ptr last_frame_;\r
};\r
\r
frame_producer_ptr create_ffmpeg_producer(const std::vector<std::wstring>& params)\r
#include <tbb/queuing_mutex.h>\r
\r
#include <boost/thread.hpp>\r
+#include <boost/exception/error_info.hpp>\r
\r
#include <errno.h>\r
#include <system_error>\r
{\r
implementation() : video_s_index_(-1), audio_s_index_(-1), video_codec_(nullptr), audio_codec_(nullptr)\r
{\r
- loop_ = false;\r
- //file_buffer_size_ = 0; \r
+ loop_ = false; \r
video_packet_buffer_.set_capacity(50);\r
audio_packet_buffer_.set_capacity(50);\r
}\r
\r
~implementation()\r
{ \r
- stop();\r
- }\r
- \r
- void stop()\r
- {\r
is_running_ = false;\r
audio_packet_buffer_.clear();\r
video_packet_buffer_.clear();\r
- //file_buffer_size_ = 0;\r
- //file_buffer_size_cond_.notify_all();\r
io_thread_.join();\r
}\r
-\r
+ \r
void load(const std::string& filename)\r
{ \r
try\r
int errn;\r
AVFormatContext* weak_format_context_;\r
if((errn = -av_open_input_file(&weak_format_context_, filename.c_str(), nullptr, 0, nullptr)) > 0)\r
- BOOST_THROW_EXCEPTION(file_read_error() << msg_info("No video or audio codec found."));\r
+ BOOST_THROW_EXCEPTION(\r
+ file_read_error() << \r
+ msg_info("No format context found.") << \r
+ boost::errinfo_api_function("av_open_input_file") <<\r
+ boost::errinfo_errno(errn) <<\r
+ boost::errinfo_file_name(filename));\r
+\r
format_context_.reset(weak_format_context_, av_close_input_file);\r
\r
if((errn = -av_find_stream_info(format_context_.get())) > 0)\r
- throw std::runtime_error("File read error");\r
+ BOOST_THROW_EXCEPTION(\r
+ file_read_error() << \r
+ boost::errinfo_api_function("av_find_stream_info") <<\r
+ msg_info("No stream found.") << \r
+ boost::errinfo_errno(errn));\r
\r
video_codec_context_ = open_video_stream();\r
if(!video_codec_context_)\r
CASPAR_LOG(warning) << "No audio stream found.";\r
\r
if(!video_codec_context_ && !audio_codex_context_)\r
- BOOST_THROW_EXCEPTION(file_read_error() << msg_info("No video or audio codec found.")); \r
+ BOOST_THROW_EXCEPTION(file_read_error() << msg_info("No video or audio codec context found.")); \r
}\r
catch(...)\r
{\r
throw;\r
}\r
filename_ = filename;\r
- }\r
-\r
- void start()\r
- {\r
io_thread_ = boost::thread([=]{read_file();});\r
}\r
- \r
+ \r
std::shared_ptr<AVCodecContext> open_video_stream()\r
{ \r
AVStream** streams_end = format_context_->streams+format_context_->nb_streams;\r
\r
if (av_read_frame(format_context_.get(), packet.get()) >= 0) // NOTE: Packet is only valid until next call of av_read_frame or av_close_input_file\r
{\r
- if(packet->stream_index == video_s_index_) \r
- {\r
- video_packet_buffer_.push(std::make_shared<aligned_buffer>(packet->data, packet->data + packet->size)); \r
- packet_wait_cond_.notify_all();\r
- //file_buffer_size_ += packet->size;\r
- }\r
+ auto buffer = std::make_shared<aligned_buffer>(packet->data, packet->data + packet->size);\r
+ if(packet->stream_index == video_s_index_) \r
+ video_packet_buffer_.push(buffer); \r
else if(packet->stream_index == audio_s_index_) \r
- {\r
- audio_packet_buffer_.push(std::make_shared<aligned_buffer>(packet->data, packet->data + packet->size)); \r
- packet_wait_cond_.notify_all(); \r
- //file_buffer_size_ += packet->size;\r
- }\r
+ audio_packet_buffer_.push(buffer); \r
}\r
else if(!loop_ || av_seek_frame(format_context_.get(), -1, 0, AVSEEK_FLAG_BACKWARD) < 0) // TODO: av_seek_frame does not work for all formats\r
is_running_ = false;\r
- \r
- //if(is_running_)\r
- //{\r
- // boost::unique_lock<boost::mutex> lock(file_buffer_size_mutex_);\r
- // while(file_buffer_size_ > 32*1000000)\r
- // file_buffer_size_cond_.wait(lock); \r
- //}\r
}\r
\r
is_running_ = false;\r
aligned_buffer get_video_packet()\r
{\r
std::shared_ptr<aligned_buffer> video_packet;\r
- if(video_packet_buffer_.try_pop(video_packet))\r
- {\r
- return std::move(*video_packet);\r
- //file_buffer_size_ -= video_packet->size;\r
- //file_buffer_size_cond_.notify_all();\r
- }\r
+ if(video_packet_buffer_.try_pop(video_packet)) \r
+ return std::move(*video_packet); \r
return aligned_buffer();\r
}\r
\r
{\r
std::shared_ptr<aligned_buffer> audio_packet;\r
if(audio_packet_buffer_.try_pop(audio_packet))\r
- {\r
return std::move(*audio_packet);\r
- //file_buffer_size_ -= audio_packet->size;\r
- //file_buffer_size_cond_.notify_all();\r
- }\r
return aligned_buffer();\r
}\r
\r
{\r
return !is_running_ && video_packet_buffer_.empty() && audio_packet_buffer_.empty();\r
}\r
-\r
- void wait_for_packet()\r
- {\r
- boost::unique_lock<boost::mutex> lock(packet_wait_mutex_);\r
- while(is_running_ && video_packet_buffer_.empty() && audio_packet_buffer_.empty())\r
- packet_wait_cond_.wait(lock); \r
- }\r
- \r
+ \r
bool seek(unsigned long long seek_target)\r
{\r
tbb::queuing_mutex::scoped_lock lock(seek_mutex_);\r
- if(av_seek_frame(format_context_.get(), -1, seek_target*AV_TIME_BASE, 0) >= 0)\r
- {\r
- video_packet_buffer_.clear();\r
- audio_packet_buffer_.clear();\r
- // TODO: Not sure its enough to jsut flush in input class\r
- if(video_codec_context_)\r
- avcodec_flush_buffers(video_codec_context_.get());\r
- if(audio_codex_context_)\r
- avcodec_flush_buffers(audio_codex_context_.get());\r
- return true;\r
- }\r
+ if(av_seek_frame(format_context_.get(), -1, seek_target*AV_TIME_BASE, 0) < 0)\r
+ return false;\r
\r
- return false;\r
+ video_packet_buffer_.clear();\r
+ audio_packet_buffer_.clear();\r
+ // TODO: Not sure its enough to jsut flush in input class\r
+ if(video_codec_context_)\r
+ avcodec_flush_buffers(video_codec_context_.get());\r
+ if(audio_codex_context_)\r
+ avcodec_flush_buffers(audio_codex_context_.get());\r
+ return true;\r
}\r
- \r
- //int file_buffer_max_size_;\r
- //tbb::atomic<int> file_buffer_size_;\r
- //boost::condition_variable file_buffer_size_cond_;\r
- //boost::mutex file_buffer_size_mutex_;\r
- \r
- boost::condition_variable packet_wait_cond_;\r
- boost::mutex packet_wait_mutex_;\r
-\r
+ \r
std::shared_ptr<AVFormatContext> format_context_; // Destroy this last\r
\r
tbb::queuing_mutex seek_mutex_;\r
aligned_buffer input::get_video_packet(){return impl_->get_video_packet();}\r
aligned_buffer input::get_audio_packet(){return impl_->get_audio_packet();}\r
bool input::seek(unsigned long long frame){return impl_->seek(frame);}\r
-void input::start(){impl_->start();}\r
-void input::wait_for_packet(){impl_->wait_for_packet();}\r
}}}
\ No newline at end of file
\r
aligned_buffer get_video_packet();\r
aligned_buffer get_audio_packet();\r
- void wait_for_packet();\r
\r
bool seek(unsigned long long frame);\r
- void start();\r
\r
bool is_eof() const;\r
void set_loop(bool value);\r
std::shared_ptr<AVFrame> decoded_frame(avcodec_alloc_frame(), av_free);\r
\r
int frame_finished = 0;\r
- if((-avcodec_decode_video(codec_context_, decoded_frame.get(), &frame_finished, video_packet.data(), video_packet.size())) > 0) \r
- decoded_frame.reset(); \r
+ const int result = avcodec_decode_video(codec_context_, decoded_frame.get(), &frame_finished, video_packet.data(), video_packet.size());\r
+ if(result < 0) \r
+ return nullptr; \r
\r
return decoded_frame; \r
}\r
struct video_transformer::implementation : boost::noncopyable\r
{\r
implementation(AVCodecContext* codec_context) : codec_context_(codec_context), sw_warning_(false){}\r
-\r
- ~implementation()\r
- {\r
- if(frame_processor_)\r
- frame_processor_->release_tag(this);\r
- }\r
-\r
+ \r
frame_ptr execute(const std::shared_ptr<AVFrame>& decoded_frame)\r
{ \r
if(decoded_frame == nullptr)\r
\r
if(desc.pix_fmt != pixel_format::invalid)\r
{\r
- result_frame = frame_processor_->create_frame(desc, this);\r
+ result_frame = frame_processor_->create_frame(desc);\r
\r
tbb::parallel_for(0, static_cast<int>(desc.planes.size()), 1, [&](int n)\r
{\r
CASPAR_LOG(warning) << "Hardware accelerated color transform not supported.";\r
sw_warning_ = true;\r
}\r
- result_frame = frame_processor_->create_frame(width, height, this);\r
+ result_frame = frame_processor_->create_frame(width, height);\r
\r
AVFrame av_frame; \r
avcodec_get_frame_defaults(&av_frame);\r
{\r
double param;\r
sws_context_.reset(sws_getContext(width, height, pix_fmt, width, height, PIX_FMT_BGRA, SWS_BILINEAR, nullptr, nullptr, ¶m), sws_freeContext);\r
+ if(!sws_context_)\r
+ BOOST_THROW_EXCEPTION(\r
+ operation_failed() <<\r
+ msg_info("Could not create software scaling context.") << \r
+ boost::errinfo_api_function("sws_getContext"));\r
+\r
} \r
\r
sws_scale(sws_context_.get(), decoded_frame->data, decoded_frame->linesize, 0, height, av_frame.data, av_frame.linesize); \r
~implementation() \r
{\r
stop();\r
- if(frame_processor_)\r
- frame_processor_->release_tag(this);\r
}\r
\r
void start(bool force = true)\r
});\r
} \r
\r
- auto frame = frame_processor_->create_frame(format_desc.width, format_desc.height, this);\r
+ auto frame = frame_processor_->create_frame(format_desc.width, format_desc.height);\r
common::aligned_parallel_memcpy(frame->data(), current_frame_->data(), current_frame_->size()); \r
\r
return frame;\r
/// \param frame_processor The frame frame_processor. \r
////////////////////////////////////////////////////////////////////////////////////////////////////\r
virtual void initialize(const frame_processor_device_ptr& frame_processor) = 0;\r
+\r
+ virtual std::wstring print() { return L"Unknown frame_producer.";}\r
};\r
typedef std::shared_ptr<frame_producer> frame_producer_ptr;\r
\r
\r
~implementation()\r
{\r
+ frame_processor_->stop();\r
is_running_ = false;\r
render_thread_.join();\r
}\r
struct image_producer : public frame_producer\r
{\r
image_producer(const std::wstring& filename) : filename_(filename) {}\r
-\r
- ~image_producer()\r
- {\r
- if(frame_processor_)\r
- frame_processor_->release_tag(this);\r
- }\r
-\r
+ \r
frame_ptr render_frame(){return frame_;}\r
\r
void initialize(const frame_processor_device_ptr& frame_processor)\r
frame_processor_ = frame_processor;\r
auto bitmap = load_image(filename_);\r
FreeImage_FlipVertical(bitmap.get());\r
- frame_ = frame_processor->create_frame(FreeImage_GetWidth(bitmap.get()), FreeImage_GetHeight(bitmap.get()), this);\r
+ frame_ = frame_processor->create_frame(FreeImage_GetWidth(bitmap.get()), FreeImage_GetHeight(bitmap.get()));\r
common::aligned_parallel_memcpy(frame_->data(), FreeImage_GetBits(bitmap.get()), frame_->size());\r
}\r
\r
\r
loop_ = std::find(params.begin(), params.end(), L"LOOP") != params.end();\r
}\r
- \r
- ~image_scroll_producer()\r
- {\r
- if(frame_processor_)\r
- frame_processor_->release_tag(this);\r
- }\r
\r
void load_and_pad_image(const std::wstring& filename)\r
{\r
\r
frame_ptr do_render_frame()\r
{\r
- frame_ptr frame = frame_processor_->create_frame(format_desc_.width, format_desc_.height, this);\r
+ frame_ptr frame = frame_processor_->create_frame(format_desc_.width, format_desc_.height);\r
common::clear(frame->data(), format_desc_.size);\r
\r
const int delta_x = direction_ == direction::Left ? speed_ : -speed_;\r
catch(...)\r
{\r
CASPAR_LOG_CURRENT_EXCEPTION();\r
+ CASPAR_LOG(warning) << L"Removed " << (active_ ? active_->print() : L"frame_producer") << L" from layer.";\r
active_ = nullptr;\r
- CASPAR_LOG(warning) << "Removed producer from layer.";\r
+ last_frame_ = nullptr;\r
}\r
\r
return last_frame_;\r
\r
tbb::parallel_invoke\r
(\r
- [&]{dest = render_frame(dest_producer_);},\r
+ [&]{dest = render_frame(dest_producer_);},\r
[&]{source = render_frame(source_producer_);}\r
);\r
\r
{\r
CASPAR_LOG_CURRENT_EXCEPTION();\r
producer = nullptr;\r
- CASPAR_LOG(warning) << "Removed renderer from transition.";\r
+ CASPAR_LOG(warning) << "Removed producer from transition.";\r
}\r
\r
- if(frame == nullptr && producer != nullptr && \r
- producer->get_following_producer() != nullptr)\r
+ if(frame == nullptr)\r
{\r
- auto following = producer->get_following_producer();\r
- following->initialize(frame_processor_);\r
- following->set_leading_producer(producer);\r
- producer = following;\r
+ if(producer == nullptr || producer->get_following_producer() == nullptr)\r
+ return nullptr;\r
+\r
+ try\r
+ {\r
+ auto following = producer->get_following_producer();\r
+ following->initialize(frame_processor_);\r
+ following->set_leading_producer(producer);\r
+ producer = following;\r
+ }\r
+ catch(...)\r
+ {\r
+ CASPAR_LOG(warning) << "Failed to initialize following producer. Removing it.";\r
+ producer = nullptr;\r
+ }\r
+\r
return render_frame(producer);\r
}\r
return frame;\r
</paths>\r
<channels>\r
<channel>\r
- <videomode>PAL</videomode>\r
+ <videomode>720p2500</videomode>\r
<consumers>\r
- <!--ogl>\r
+ <ogl>\r
<device>1</device>\r
<stretch>uniform</stretch>\r
<windowed>true</windowed>\r
</ogl>\r
- <audio/-->\r
- <bluefish>\r
+ <audio/>\r
+ <!--bluefish>\r
<device>1</device> \r
<embedded-audio>true</embedded-audio>\r
- </bluefish>\r
+ </bluefish-->\r
</consumers>\r
</channel>\r
</channels>\r
<None Include="caspar.config">\r
<SubType>Designer</SubType>\r
</None>\r
+ <None Include="My Amplifier Results\r006hs\r006hs.ampl" />\r
+ <None Include="My Amplifier Results\r007cc\r007cc.ampl" />\r
+ <None Include="My Amplifier Results\r008lw\r008lw.ampl" />\r
+ <None Include="My Inspector Results\r000ti3\r000ti3.insp" />\r
</ItemGroup>\r
<PropertyGroup Label="Globals">\r
<ProjectGuid>{8C26C94F-8092-4769-8D84-DEA479721C5B}</ProjectGuid>\r
</ItemGroup>\r
<ItemGroup>\r
<None Include="caspar.config" />\r
+ <None Include="My Amplifier Results\r006hs\r006hs.ampl">\r
+ <Filter>My Amplifier Results</Filter>\r
+ </None>\r
+ <None Include="My Amplifier Results\r007cc\r007cc.ampl">\r
+ <Filter>My Amplifier Results</Filter>\r
+ </None>\r
+ <None Include="My Amplifier Results\r008lw\r008lw.ampl">\r
+ <Filter>My Amplifier Results</Filter>\r
+ </None>\r
+ <None Include="My Inspector Results\r000ti3\r000ti3.insp">\r
+ <Filter>My Inspector Results</Filter>\r
+ </None>\r
</ItemGroup>\r
<ItemGroup>\r
<Filter Include="My Amplifier Results">\r
<UniqueIdentifier>{4df636fc-0183-410c-8d76-2d0e06c29ca3}</UniqueIdentifier>\r
</Filter>\r
+ <Filter Include="My Inspector Results">\r
+ <UniqueIdentifier>{54aa3e52-b5b4-4451-84e3-3b5d5ddcee0e}</UniqueIdentifier>\r
+ </Filter>\r
</ItemGroup>\r
</Project>
\ No newline at end of file