, key_only_(key_only)\r
, executor_(print())\r
{\r
- executor_.set_capacity(core::consumer_buffer_depth());\r
+ executor_.set_capacity(1);\r
\r
graph_->add_guide("tick-time", 0.5);\r
graph_->set_color("tick-time", diagnostics::color(0.0f, 0.6f, 0.9f)); \r
graph_->add_guide("frame-time", 0.5f); \r
graph_->set_color("frame-time", diagnostics::color(1.0f, 0.0f, 0.0f));\r
graph_->set_color("sync-time", diagnostics::color(0.5f, 1.0f, 0.2f));\r
- graph_->set_color("input-buffer", diagnostics::color(1.0f, 1.0f, 0.0f));\r
graph_->set_text(print());\r
diagnostics::register_graph(graph_);\r
\r
{\r
try\r
{\r
- const size_t audio_samples = format_desc_.audio_samples_per_frame;\r
- const size_t audio_nchannels = format_desc_.audio_channels;\r
-\r
frame_timer_.restart();\r
\r
// Copy to local buffers\r
sync_timer_.restart();\r
unsigned long n_field = 0;\r
blue_->wait_output_video_synch(UPD_FMT_FRAME, n_field);\r
- graph_->update_value("sync-time", static_cast<float>(sync_timer_.elapsed()*format_desc_.fps*0.5));\r
+ graph_->update_value("sync-time", sync_timer_.elapsed()*format_desc_.fps*0.5);\r
\r
// Send and display\r
\r
if(embedded_audio_)\r
{ \r
- auto frame_audio = core::audio_32_to_16_sse(frame->audio_data());\r
- auto frame_audio_data = frame_audio.size() != audio_samples ? silence.data() : frame_audio.data(); \r
+ auto frame_audio16 = core::audio_32_to_16_sse(frame->audio_data());\r
\r
- encode_hanc(reinterpret_cast<BLUE_UINT32*>(reserved_frames_.front()->hanc_data()), frame_audio_data, audio_samples, audio_nchannels);\r
+ encode_hanc(reinterpret_cast<BLUE_UINT32*>(reserved_frames_.front()->hanc_data()), frame_audio16.data(), frame->audio_data().size(), format_desc_.audio_channels);\r
\r
blue_->system_buffer_write_async(const_cast<uint8_t*>(reserved_frames_.front()->image_data()), \r
reserved_frames_.front()->image_size(), \r
{\r
CASPAR_LOG_CURRENT_EXCEPTION();\r
}\r
- graph_->set_value("input-buffer", static_cast<double>(executor_.size())/static_cast<double>(executor_.capacity()));\r
});\r
- graph_->set_value("input-buffer", static_cast<double>(executor_.size())/static_cast<double>(executor_.capacity()));\r
}\r
\r
void encode_hanc(BLUE_UINT32* hanc_data, void* audio_data, size_t audio_samples, size_t audio_nchannels)\r
\r
return L"bluefish [" + boost::lexical_cast<std::wstring>(device_index_) + L"]";\r
}\r
+
+	// NOTE(review): depth of 1 matches the executor capacity set above
+	// (set_capacity(1)) — this consumer holds a single frame in flight.
+	// Presumably reported to the core so it can size its pipeline; confirm
+	// against core::frame_consumer's buffer_depth() contract.
+	size_t buffer_depth() const
+	{
+		return 1;
+	}
}; \r
\r
safe_ptr<core::frame_consumer> create_consumer(const std::vector<std::wstring>& params)\r