<ClInclude Include="mixer\read_frame.h" />\r
<ClInclude Include="mixer\write_frame.h" />\r
<ClInclude Include="producer\color\color_producer.h" />\r
- <ClInclude Include="producer\frame\audio_transform.h" />\r
<ClInclude Include="producer\frame\basic_frame.h" />\r
<ClInclude Include="producer\frame\frame_factory.h" />\r
<ClInclude Include="producer\frame\frame_visitor.h" />\r
- <ClInclude Include="producer\frame\image_transform.h" />\r
+ <ClInclude Include="producer\frame\frame_transform.h" />\r
<ClInclude Include="producer\frame\pixel_format.h" />\r
<ClInclude Include="producer\frame_producer.h" />\r
<ClInclude Include="producer\stage.h" />\r
<PrecompiledHeaderFile Condition="'$(Configuration)|$(Platform)'=='Profile|Win32'">../../StdAfx.h</PrecompiledHeaderFile>\r
<PrecompiledHeaderFile Condition="'$(Configuration)|$(Platform)'=='Develop|Win32'">../../StdAfx.h</PrecompiledHeaderFile>\r
</ClCompile>\r
- <ClCompile Include="producer\frame\audio_transform.cpp">\r
- <PrecompiledHeaderFile Condition="'$(Configuration)|$(Platform)'=='Profile|Win32'">../../StdAfx.h</PrecompiledHeaderFile>\r
- <PrecompiledHeaderFile Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">../../StdAfx.h</PrecompiledHeaderFile>\r
- <PrecompiledHeaderFile Condition="'$(Configuration)|$(Platform)'=='Develop|Win32'">../../StdAfx.h</PrecompiledHeaderFile>\r
- <PrecompiledHeaderFile Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">../../StdAfx.h</PrecompiledHeaderFile>\r
- </ClCompile>\r
<ClCompile Include="producer\frame\basic_frame.cpp">\r
<PrecompiledHeaderFile Condition="'$(Configuration)|$(Platform)'=='Profile|Win32'">../../StdAfx.h</PrecompiledHeaderFile>\r
<PrecompiledHeaderFile Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">../../StdAfx.h</PrecompiledHeaderFile>\r
<PrecompiledHeaderFile Condition="'$(Configuration)|$(Platform)'=='Develop|Win32'">../../StdAfx.h</PrecompiledHeaderFile>\r
<PrecompiledHeaderFile Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">../../StdAfx.h</PrecompiledHeaderFile>\r
</ClCompile>\r
- <ClCompile Include="producer\frame\image_transform.cpp">\r
+ <ClCompile Include="producer\frame\frame_transform.cpp">\r
<PrecompiledHeaderFile Condition="'$(Configuration)|$(Platform)'=='Profile|Win32'">../../StdAfx.h</PrecompiledHeaderFile>\r
<PrecompiledHeaderFile Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">../../StdAfx.h</PrecompiledHeaderFile>\r
<PrecompiledHeaderFile Condition="'$(Configuration)|$(Platform)'=='Develop|Win32'">../../StdAfx.h</PrecompiledHeaderFile>\r
<ClInclude Include="producer\layer.h">\r
<Filter>source\producer</Filter>\r
</ClInclude>\r
- <ClInclude Include="producer\frame\audio_transform.h">\r
- <Filter>source\producer\frame</Filter>\r
- </ClInclude>\r
<ClInclude Include="producer\frame\basic_frame.h">\r
<Filter>source\producer\frame</Filter>\r
</ClInclude>\r
- <ClInclude Include="producer\frame\image_transform.h">\r
- <Filter>source\producer\frame</Filter>\r
- </ClInclude>\r
<ClInclude Include="producer\frame\pixel_format.h">\r
<Filter>source\producer\frame</Filter>\r
</ClInclude>\r
<ClInclude Include="mixer\image\blend_modes.h">\r
<Filter>source\mixer\image</Filter>\r
</ClInclude>\r
+ <ClInclude Include="producer\frame\frame_transform.h">\r
+ <Filter>source\producer\frame</Filter>\r
+ </ClInclude>\r
</ItemGroup>\r
<ItemGroup>\r
<ClCompile Include="producer\transition\transition_producer.cpp">\r
<ClCompile Include="producer\frame\basic_frame.cpp">\r
<Filter>source\producer\frame</Filter>\r
</ClCompile>\r
- <ClCompile Include="producer\frame\image_transform.cpp">\r
- <Filter>source\producer\frame</Filter>\r
- </ClCompile>\r
- <ClCompile Include="producer\frame\audio_transform.cpp">\r
- <Filter>source\producer\frame</Filter>\r
- </ClCompile>\r
<ClCompile Include="mixer\image\image_mixer.cpp">\r
<Filter>source\mixer\image</Filter>\r
</ClCompile>\r
<ClCompile Include="mixer\image\blend_modes.cpp">\r
<Filter>source\mixer\image</Filter>\r
</ClCompile>\r
+ <ClCompile Include="producer\frame\frame_transform.cpp">\r
+ <Filter>source\producer\frame</Filter>\r
+ </ClCompile>\r
</ItemGroup>\r
</Project>
\ No newline at end of file
#include "audio_mixer.h"\r
\r
#include <core/mixer/write_frame.h>\r
-#include <core/producer/frame/audio_transform.h>\r
+#include <core/producer/frame/frame_transform.h>\r
\r
#include <tbb/parallel_for.h>\r
\r
#include <deque>\r
\r
namespace caspar { namespace core {\r
+\r
+struct audio_item\r
+{\r
+ const void* tag;\r
+ frame_transform transform;\r
+ std::vector<int16_t> audio_data;\r
+};\r
\r
struct audio_mixer::implementation\r
{\r
- std::deque<std::vector<int16_t>> audio_data_;\r
- std::stack<core::audio_transform> transform_stack_;\r
+ std::stack<core::frame_transform> transform_stack_;\r
\r
- std::map<const void*, core::audio_transform> prev_audio_transforms_;\r
- std::map<const void*, core::audio_transform> next_audio_transforms_;\r
+ std::map<const void*, core::frame_transform> prev_frame_transforms_;\r
+ std::map<const void*, core::frame_transform> next_frame_transforms_;\r
\r
const core::video_format_desc format_desc_;\r
\r
+ std::vector<audio_item> items;\r
+\r
public:\r
implementation(const core::video_format_desc& format_desc)\r
: format_desc_(format_desc)\r
{\r
- transform_stack_.push(core::audio_transform());\r
- audio_data_.push_back(std::vector<int16_t>()); // One frame delay\r
+ transform_stack_.push(core::frame_transform());\r
}\r
\r
void begin(core::basic_frame& frame)\r
{\r
- transform_stack_.push(transform_stack_.top()*frame.get_audio_transform());\r
+ transform_stack_.push(transform_stack_.top()*frame.get_frame_transform());\r
}\r
\r
void visit(const core::write_frame& frame)\r
{\r
- if(!transform_stack_.top().get_has_audio() || frame.audio_data().empty())\r
+ // We only care about the last field.\r
+ if(format_desc_.field_mode == field_mode::upper && transform_stack_.top().field_mode == field_mode::upper)\r
return;\r
\r
- const auto& audio_data = frame.audio_data();\r
- const auto tag = frame.tag(); // Get the identifier for the audio-stream.\r
- \r
- const auto next = transform_stack_.top();\r
- auto prev = next;\r
-\r
- const auto it = prev_audio_transforms_.find(tag);\r
- if(it != prev_audio_transforms_.end())\r
- prev = it->second;\r
- \r
- next_audio_transforms_[tag] = next; // Store all active tags, inactive tags will be removed in end_pass.\r
- \r
- if(next.get_volume() < 0.001 && prev.get_volume() < 0.001)\r
+ if(format_desc_.field_mode == field_mode::lower && transform_stack_.top().field_mode == field_mode::lower)\r
return;\r
- \r
- static const int BASE = 1<<15;\r
-\r
- const auto next_volume = static_cast<int>(next.get_volume()*BASE);\r
- const auto prev_volume = static_cast<int>(prev.get_volume()*BASE);\r
- \r
- const int n_samples = audio_data_.back().size();\r
- \r
- const auto in_size = static_cast<size_t>(audio_data.size());\r
- CASPAR_VERIFY(in_size == 0 || in_size == audio_data_.back().size());\r
\r
- if(in_size > audio_data_.back().size())\r
+ // Skip empty audio.\r
+ if(transform_stack_.top().volume < 0.002 || frame.audio_data().empty())\r
return;\r
\r
- tbb::parallel_for\r
- (\r
- tbb::blocked_range<size_t>(0, audio_data.size()),\r
- [&](const tbb::blocked_range<size_t>& r)\r
- {\r
- for(size_t n = r.begin(); n < r.end(); ++n)\r
- {\r
- const int sample_volume = (prev_volume - (prev_volume * n)/n_samples) + (next_volume * n)/n_samples;\r
- const int sample = (static_cast<int>(audio_data[n])*sample_volume)/BASE;\r
- audio_data_.back()[n] = static_cast<int16_t>((static_cast<int>(audio_data_.back()[n]) + sample) & 0xFFFF);\r
- }\r
- }\r
- );\r
+ audio_item item;\r
+ item.tag = frame.tag();\r
+ item.transform = transform_stack_.top();\r
+ item.audio_data = std::vector<int16_t>(frame.audio_data().begin(), frame.audio_data().end());\r
+\r
+ items.push_back(item); \r
}\r
\r
- void begin(const core::audio_transform& transform)\r
+ void begin(const core::frame_transform& transform)\r
{\r
transform_stack_.push(transform_stack_.top()*transform);\r
}\r
\r
std::vector<int16_t> mix()\r
{\r
- prev_audio_transforms_ = std::move(next_audio_transforms_); \r
- auto result = std::move(audio_data_.front());\r
- audio_data_.pop_front();\r
- audio_data_.push_back(std::vector<int16_t>(format_desc_.audio_samples_per_frame));\r
+ auto result = std::vector<int16_t>(format_desc_.audio_samples_per_frame);\r
+\r
+ BOOST_FOREACH(auto& item, items)\r
+ { \r
+ const auto next = item.transform;\r
+ auto prev = next;\r
+\r
+ const auto it = prev_frame_transforms_.find(item.tag);\r
+ if(it != prev_frame_transforms_.end())\r
+ prev = it->second;\r
+ \r
+ next_frame_transforms_[item.tag] = next; // Store all active tags, inactive tags will be removed at the end.\r
+ \r
+ if(next.volume < 0.001 && prev.volume < 0.001)\r
+ continue;\r
+ \r
+ static const int BASE = 1<<15;\r
+\r
+ const auto next_volume = static_cast<int>(next.volume*BASE);\r
+ const auto prev_volume = static_cast<int>(prev.volume*BASE);\r
+ \r
+ const int n_samples = result.size();\r
+ \r
+ const auto in_size = static_cast<size_t>(item.audio_data.size());\r
+ CASPAR_VERIFY(in_size == 0 || in_size == result.size());\r
+\r
+ if(in_size > result.size())\r
+ continue;\r
+\r
+ tbb::parallel_for\r
+ (\r
+ tbb::blocked_range<size_t>(0, item.audio_data.size()),\r
+ [&](const tbb::blocked_range<size_t>& r)\r
+ {\r
+ for(size_t n = r.begin(); n < r.end(); ++n)\r
+ {\r
+ const int sample_volume = (prev_volume - (prev_volume * n)/n_samples) + (next_volume * n)/n_samples;\r
+ const int sample = (static_cast<int>(item.audio_data[n])*sample_volume)/BASE;\r
+ result[n] = static_cast<int16_t>((static_cast<int>(result[n]) + sample) & 0xFFFF);\r
+ }\r
+ }\r
+ );\r
+ }\r
+\r
+ items.clear();\r
+ prev_frame_transforms_ = std::move(next_frame_transforms_); \r
+\r
return std::move(result);\r
}\r
};\r
\r
#include <core/video_format.h>\r
#include <core/producer/frame/pixel_format.h>\r
-#include <core/producer/frame/image_transform.h>\r
+#include <core/producer/frame/frame_transform.h>\r
\r
#include <GL/glew.h>\r
\r
if(item.textures.empty())\r
return;\r
\r
- if(item.transform.get_opacity() < epsilon)\r
+ if(item.transform.opacity < epsilon)\r
return;\r
\r
if(!std::all_of(item.textures.begin(), item.textures.end(), std::mem_fn(&device_buffer::ready)))\r
shader_->set("has_local_key", local_key);\r
shader_->set("has_layer_key", layer_key);\r
shader_->set("pixel_format", item.pix_desc.pix_fmt); \r
- shader_->set("opacity", item.transform.get_is_key() ? 1.0 : item.transform.get_opacity()); \r
+ shader_->set("opacity", item.transform.is_key ? 1.0 : item.transform.opacity); \r
\r
// Setup blend_func\r
\r
- if(item.transform.get_is_key())\r
+ if(item.transform.is_key)\r
item.blend_mode = blend_mode::normal;\r
\r
if(blend_modes_)\r
}\r
\r
// Setup image-adjustements\r
-\r
- auto levels = item.transform.get_levels();\r
-\r
- if(levels.min_input > epsilon ||\r
- levels.max_input < 1.0-epsilon ||\r
- levels.min_output > epsilon ||\r
- levels.max_output < 1.0-epsilon ||\r
- std::abs(levels.gamma - 1.0) > epsilon)\r
+ \r
+ if(item.transform.levels.min_input > epsilon ||\r
+ item.transform.levels.max_input < 1.0-epsilon ||\r
+ item.transform.levels.min_output > epsilon ||\r
+ item.transform.levels.max_output < 1.0-epsilon ||\r
+ std::abs(item.transform.levels.gamma - 1.0) > epsilon)\r
{\r
shader_->set("levels", true); \r
- shader_->set("min_input", levels.min_input); \r
- shader_->set("max_input", levels.max_input);\r
- shader_->set("min_output", levels.min_output);\r
- shader_->set("max_output", levels.max_output);\r
- shader_->set("gamma", levels.gamma);\r
+ shader_->set("min_input", item.transform.levels.min_input); \r
+ shader_->set("max_input", item.transform.levels.max_input);\r
+ shader_->set("min_output", item.transform.levels.min_output);\r
+ shader_->set("max_output", item.transform.levels.max_output);\r
+ shader_->set("gamma", item.transform.levels.gamma);\r
}\r
else\r
shader_->set("levels", false); \r
\r
- if(std::abs(item.transform.get_brightness() - 1.0) > epsilon ||\r
- std::abs(item.transform.get_saturation() - 1.0) > epsilon ||\r
- std::abs(item.transform.get_contrast() - 1.0) > epsilon)\r
+ if(std::abs(item.transform.brightness - 1.0) > epsilon ||\r
+ std::abs(item.transform.saturation - 1.0) > epsilon ||\r
+ std::abs(item.transform.contrast - 1.0) > epsilon)\r
{\r
shader_->set("csb", true); \r
\r
- shader_->set("brt", item.transform.get_brightness()); \r
- shader_->set("sat", item.transform.get_saturation());\r
- shader_->set("con", item.transform.get_contrast());\r
+ shader_->set("brt", item.transform.brightness); \r
+ shader_->set("sat", item.transform.saturation);\r
+ shader_->set("con", item.transform.contrast);\r
}\r
else\r
shader_->set("csb", false); \r
\r
// Setup interlacing\r
\r
- if(item.transform.get_field_mode() == core::field_mode::progressive) \r
+ if(item.transform.field_mode == core::field_mode::progressive) \r
ogl.disable(GL_POLYGON_STIPPLE); \r
else \r
{\r
ogl.enable(GL_POLYGON_STIPPLE);\r
\r
- if(item.transform.get_field_mode() == core::field_mode::upper)\r
+ if(item.transform.field_mode == core::field_mode::upper)\r
ogl.stipple_pattern(upper_pattern);\r
- else if(item.transform.get_field_mode() == core::field_mode::lower)\r
+ else if(item.transform.field_mode == core::field_mode::lower)\r
ogl.stipple_pattern(lower_pattern);\r
}\r
\r
\r
ogl.viewport(0, 0, background.width(), background.height());\r
\r
- auto m_p = item.transform.get_clip_translation();\r
- auto m_s = item.transform.get_clip_scale();\r
+ auto m_p = item.transform.clip_translation;\r
+ auto m_s = item.transform.clip_scale;\r
\r
bool scissor = m_p[0] > std::numeric_limits<double>::epsilon() || m_p[1] > std::numeric_limits<double>::epsilon() &&\r
m_s[0] < 1.0 - std::numeric_limits<double>::epsilon() || m_s[1] < 1.0 - std::numeric_limits<double>::epsilon();\r
ogl.scissor(static_cast<size_t>(m_p[0]*w), static_cast<size_t>(m_p[1]*h), static_cast<size_t>(m_s[0]*w), static_cast<size_t>(m_s[1]*h));\r
}\r
\r
- auto f_p = item.transform.get_fill_translation();\r
- auto f_s = item.transform.get_fill_scale();\r
+ auto f_p = item.transform.fill_translation;\r
+ auto f_s = item.transform.fill_scale;\r
\r
// Set render target\r
\r
#include <common/memory/safe_ptr.h>\r
\r
#include <core/producer/frame/pixel_format.h>\r
-#include <core/producer/frame/image_transform.h>\r
+#include <core/producer/frame/frame_transform.h>\r
\r
#include <boost/noncopyable.hpp>\r
\r
{\r
pixel_format_desc pix_desc;\r
std::vector<safe_ptr<device_buffer>> textures;\r
- image_transform transform;\r
+ frame_transform transform;\r
blend_mode::type blend_mode;\r
};\r
\r
#include <common/exception/exceptions.h>\r
#include <common/gl/gl_check.h>\r
\r
-#include <core/producer/frame/image_transform.h>\r
+#include <core/producer/frame/frame_transform.h>\r
#include <core/producer/frame/pixel_format.h>\r
#include <core/video_format.h>\r
\r
\r
BOOST_FOREACH(auto& item, layer)\r
{\r
- if(layer_draw_buffer.first & item.transform.get_field_mode())\r
+ if(layer_draw_buffer.first & item.transform.field_mode)\r
item.blend_mode = blend_mode::normal; // Disable blending and just merge, it will be used when merging back into render stack.\r
else\r
{\r
item.blend_mode = blend_mode::replace; // Target field is empty, no blending, just copy\r
- layer_draw_buffer.first |= item.transform.get_field_mode();\r
+ layer_draw_buffer.first |= item.transform.field_mode;\r
}\r
\r
draw_item(std::move(item), *layer_draw_buffer.second, local_key_buffer, layer_key_buffer); \r
item.pix_desc.pix_fmt = pixel_format::bgra;\r
item.pix_desc.planes = list_of(pixel_format_desc::plane(channel_.get_format_desc().width, channel_.get_format_desc().height, 4));\r
item.textures = list_of(layer_draw_buffer.second);\r
- item.transform = image_transform();\r
+ item.transform = frame_transform();\r
item.blend_mode = layer_blend_mode;\r
\r
kernel_.draw(channel_.ogl(), std::move(item), *draw_buffer, nullptr, nullptr);\r
std::pair<int, std::shared_ptr<device_buffer>>& local_key_buffer, \r
std::shared_ptr<device_buffer>& layer_key_buffer)\r
{ \r
- if(item.transform.get_is_key())\r
+ if(item.transform.is_key)\r
{\r
if(!local_key_buffer.second)\r
{\r
local_key_buffer.second = create_device_buffer(1);\r
}\r
\r
- local_key_buffer.first |= item.transform.get_field_mode(); // Add field to flag.\r
+ local_key_buffer.first |= item.transform.field_mode; // Add field to flag.\r
kernel_.draw(channel_.ogl(), std::move(item), *local_key_buffer.second, nullptr, nullptr);\r
}\r
else\r
{\r
kernel_.draw(channel_.ogl(), std::move(item), draw_buffer, local_key_buffer.second, layer_key_buffer);\r
- local_key_buffer.first ^= item.transform.get_field_mode(); // Remove field from flag.\r
+ local_key_buffer.first ^= item.transform.field_mode; // Remove field from flag.\r
\r
if(local_key_buffer.first == 0) // If all fields from key has been used, reset it\r
{\r
{ \r
auto upper_count = boost::range::count_if(layer, [&](const render_item& item)\r
{\r
- return item.transform.get_field_mode() | field_mode::upper;\r
+ return item.transform.field_mode | field_mode::upper;\r
});\r
\r
auto lower_count = boost::range::count_if(layer, [&](const render_item& item)\r
{\r
- return item.transform.get_field_mode() | field_mode::lower;\r
+ return item.transform.field_mode | field_mode::lower;\r
});\r
\r
return upper_count > 1 || lower_count > 1;\r
{ \r
ogl_device& ogl_;\r
image_renderer renderer_;\r
- std::vector<image_transform> transform_stack_;\r
+ std::vector<frame_transform> transform_stack_;\r
blend_mode::type active_blend_mode_;\r
std::deque<std::deque<render_item>> layers_; // layer/stream/items\r
public:\r
\r
void begin(core::basic_frame& frame)\r
{\r
- transform_stack_.push_back(transform_stack_.back()*frame.get_image_transform());\r
+ transform_stack_.push_back(transform_stack_.back()*frame.get_frame_transform());\r
}\r
\r
void visit(core::write_frame& frame)\r
{ \r
- if(transform_stack_.back().get_field_mode() == field_mode::empty)\r
+ if(transform_stack_.back().field_mode == field_mode::empty)\r
return;\r
\r
core::render_item item;\r
#include <core/producer/frame/basic_frame.h>\r
#include <core/producer/frame/frame_factory.h>\r
#include <core/producer/frame/pixel_format.h>\r
-#include <core/producer/frame/audio_transform.h>\r
-#include <core/producer/frame/image_transform.h>\r
+#include <core/producer/frame/frame_transform.h>\r
\r
#include <core/video_format.h>\r
\r
audio_mixer audio_mixer_;\r
image_mixer image_mixer_;\r
\r
- typedef std::unordered_map<int, tweened_transform<core::image_transform>> image_transforms;\r
- typedef std::unordered_map<int, tweened_transform<core::audio_transform>> audio_transforms;\r
-\r
- boost::fusion::map<boost::fusion::pair<core::image_transform, image_transforms>,\r
- boost::fusion::pair<core::audio_transform, audio_transforms>> transforms_;\r
- \r
+ std::unordered_map<int, tweened_transform<core::frame_transform>> transforms_; \r
std::unordered_map<int, blend_mode::type> blend_modes_;\r
\r
std::queue<std::pair<boost::unique_future<safe_ptr<host_buffer>>, std::vector<int16_t>>> buffer_;\r
{ \r
try\r
{\r
- decltype(mix_image(frames)) image;\r
- decltype(mix_audio(frames)) audio;\r
+ BOOST_FOREACH(auto& frame, frames)\r
+ {\r
+ auto blend_it = blend_modes_.find(frame.first);\r
+ image_mixer_.begin_layer(blend_it != blend_modes_.end() ? blend_it->second : blend_mode::normal);\r
+\r
+ if(channel_.get_format_desc().field_mode != core::field_mode::progressive)\r
+ {\r
+ auto frame1 = make_safe<core::basic_frame>(frame.second);\r
+ frame1->get_frame_transform() = transforms_[frame.first].fetch_and_tick(1);\r
+ \r
+ auto frame2 = make_safe<core::basic_frame>(frame.second);\r
+ frame2->get_frame_transform() = transforms_[frame.first].fetch_and_tick(1);\r
+ \r
+ if(frame1->get_frame_transform() != frame2->get_frame_transform())\r
+ frame2 = core::basic_frame::interlace(frame1, frame2, channel_.get_format_desc().field_mode);\r
+\r
+ frame2->accept(audio_mixer_); \r
+ frame2->accept(image_mixer_);\r
+ }\r
+ else\r
+ {\r
+ auto frame2 = make_safe<core::basic_frame>(frame.second);\r
+ frame2->get_frame_transform() = transforms_[frame.first].fetch_and_tick(1);\r
+ \r
+ // Audio\r
+ frame2->accept(audio_mixer_);\r
+\r
+				// Video (layer was already begun above for this frame — a second
+				// begin_layer here would unbalance the matching end_layer below)\r
+				frame2->accept(image_mixer_);\r
+ }\r
+\r
+ image_mixer_.end_layer();\r
+ }\r
\r
- tbb::parallel_invoke\r
- (\r
- [&]{image = mix_image(frames);}, \r
- [&]{audio = mix_audio(frames);}\r
- );\r
+ auto image = image_mixer_.render();\r
+ auto audio = audio_mixer_.mix();\r
\r
buffer_.push(std::make_pair(std::move(image), audio));\r
\r
{ \r
return image_mixer_.create_frame(tag, desc);\r
}\r
-\r
- void reset_transforms()\r
- {\r
- channel_.execution().invoke([&]\r
- {\r
- boost::fusion::at_key<image_transform>(transforms_).clear();\r
- boost::fusion::at_key<audio_transform>(transforms_).clear();\r
- });\r
- }\r
\r
- template<typename T>\r
- void set_transform(int index, const T& transform, unsigned int mix_duration, const std::wstring& tween)\r
+ void set_transform(int index, const frame_transform& transform, unsigned int mix_duration, const std::wstring& tween)\r
{\r
channel_.execution().invoke([&]\r
{\r
- auto& transforms = boost::fusion::at_key<T>(transforms_);\r
-\r
- auto src = transforms[index].fetch();\r
+ auto src = transforms_[index].fetch();\r
auto dst = transform;\r
- transforms[index] = tweened_transform<T>(src, dst, mix_duration, tween);\r
+ transforms_[index] = tweened_transform<frame_transform>(src, dst, mix_duration, tween);\r
});\r
}\r
\r
- template<typename T>\r
- void apply_transform(int index, const std::function<T(T)>& transform, unsigned int mix_duration, const std::wstring& tween)\r
+ void apply_transform(int index, const std::function<frame_transform(frame_transform)>& transform, unsigned int mix_duration, const std::wstring& tween)\r
{\r
channel_.execution().invoke([&]\r
{\r
- auto& transforms = boost::fusion::at_key<T>(transforms_);\r
-\r
- auto src = transforms[index].fetch();\r
+ auto src = transforms_[index].fetch();\r
auto dst = transform(src);\r
- transforms[index] = tweened_transform<T>(src, dst, mix_duration, tween);\r
+ transforms_[index] = tweened_transform<frame_transform>(src, dst, mix_duration, tween);\r
});\r
}\r
+\r
+ void clear_transforms()\r
+ {\r
+ channel_.execution().invoke([&]{transforms_.clear();});\r
+ }\r
\r
void set_blend_mode(int index, blend_mode::type value)\r
{\r
{\r
return L"mixer";\r
}\r
-\r
-private:\r
- \r
- boost::unique_future<safe_ptr<host_buffer>> mix_image(std::map<int, safe_ptr<core::basic_frame>> frames)\r
- { \r
- auto& image_transforms = boost::fusion::at_key<core::image_transform>(transforms_);\r
- \r
- BOOST_FOREACH(auto& frame, frames)\r
- {\r
- auto blend_it = blend_modes_.find(frame.first);\r
- image_mixer_.begin_layer(blend_it != blend_modes_.end() ? blend_it->second : blend_mode::normal);\r
-\r
- auto frame1 = make_safe<core::basic_frame>(frame.second);\r
- frame1->get_image_transform() = image_transforms[frame.first].fetch_and_tick(1);\r
- \r
- if(channel_.get_format_desc().mode != core::field_mode::progressive)\r
- {\r
- auto frame2 = make_safe<core::basic_frame>(frame.second);\r
- frame2->get_image_transform() = image_transforms[frame.first].fetch_and_tick(1);\r
- if(frame1->get_image_transform() != frame2->get_image_transform())\r
- frame1 = core::basic_frame::interlace(frame1, frame2, channel_.get_format_desc().mode);\r
- }\r
-\r
- frame1->accept(image_mixer_);\r
-\r
- image_mixer_.end_layer();\r
- }\r
-\r
- return image_mixer_.render();\r
- }\r
-\r
- std::vector<int16_t> mix_audio(const std::map<int, safe_ptr<core::basic_frame>>& frames)\r
- {\r
- auto& audio_transforms = boost::fusion::at_key<core::audio_transform>(transforms_);\r
-\r
- BOOST_FOREACH(auto& frame, frames)\r
- {\r
- const unsigned int num = channel_.get_format_desc().mode == core::field_mode::progressive ? 1 : 2;\r
-\r
- auto frame1 = make_safe<core::basic_frame>(frame.second);\r
- frame1->get_audio_transform() = audio_transforms[frame.first].fetch_and_tick(num);\r
- frame1->accept(audio_mixer_);\r
- }\r
-\r
- return audio_mixer_.mix();\r
- }\r
};\r
\r
mixer::mixer(video_channel_context& video_channel) : impl_(new implementation(video_channel)){}\r
desc.planes.push_back( core::pixel_format_desc::plane(width, height, 4));\r
return create_frame(tag, desc);\r
}\r
-void mixer::reset_transforms(){impl_->reset_transforms();}\r
-void mixer::set_image_transform(int index, const core::image_transform& transform, unsigned int mix_duration, const std::wstring& tween){impl_->set_transform<core::image_transform>(index, transform, mix_duration, tween);}\r
-void mixer::set_audio_transform(int index, const core::audio_transform& transform, unsigned int mix_duration, const std::wstring& tween){impl_->set_transform<core::audio_transform>(index, transform, mix_duration, tween);}\r
-void mixer::apply_image_transform(int index, const std::function<core::image_transform(core::image_transform)>& transform, unsigned int mix_duration, const std::wstring& tween){impl_->apply_transform<core::image_transform>(index, transform, mix_duration, tween);}\r
-void mixer::apply_audio_transform(int index, const std::function<core::audio_transform(core::audio_transform)>& transform, unsigned int mix_duration, const std::wstring& tween){impl_->apply_transform<core::audio_transform>(index, transform, mix_duration, tween);}\r
+void mixer::set_frame_transform(int index, const core::frame_transform& transform, unsigned int mix_duration, const std::wstring& tween){impl_->set_transform(index, transform, mix_duration, tween);}\r
+void mixer::apply_frame_transform(int index, const std::function<core::frame_transform(core::frame_transform)>& transform, unsigned int mix_duration, const std::wstring& tween){impl_->apply_transform(index, transform, mix_duration, tween);}\r
+void mixer::clear_transforms(){impl_->clear_transforms();}\r
void mixer::set_blend_mode(int index, blend_mode::type value){impl_->set_blend_mode(index, value);}\r
}}
\ No newline at end of file
class read_frame;\r
class write_frame;\r
class basic_frame;\r
-class audio_transform;\r
-class image_transform;\r
+struct frame_transform;\r
class video_channel_context;;\r
struct pixel_format;\r
\r
\r
core::video_format_desc get_video_format_desc() const; // nothrow\r
\r
- void reset_transforms();\r
\r
- void set_image_transform(int index, const core::image_transform& transform, unsigned int mix_duration = 0, const std::wstring& tween = L"linear");\r
- void set_audio_transform(int index, const core::audio_transform& transform, unsigned int mix_duration = 0, const std::wstring& tween = L"linear");\r
- void apply_image_transform(int index, const std::function<core::image_transform(core::image_transform)>& transform, unsigned int mix_duration = 0, const std::wstring& tween = L"linear");\r
- void apply_audio_transform(int index, const std::function<core::audio_transform(core::audio_transform)>& transform, unsigned int mix_duration = 0, const std::wstring& tween = L"linear");\r
+ void set_frame_transform(int index, const core::frame_transform& transform, unsigned int mix_duration = 0, const std::wstring& tween = L"linear");\r
+ void apply_frame_transform(int index, const std::function<core::frame_transform(core::frame_transform)>& transform, unsigned int mix_duration = 0, const std::wstring& tween = L"linear");\r
+ void clear_transforms();\r
\r
void set_blend_mode(int index, blend_mode::type value);\r
\r
+++ /dev/null
-/*\r
-* copyright (c) 2010 Sveriges Television AB <info@casparcg.com>\r
-*\r
-* This file is part of CasparCG.\r
-*\r
-* CasparCG is free software: you can redistribute it and/or modify\r
-* it under the terms of the GNU General Public License as published by\r
-* the Free Software Foundation, either version 3 of the License, or\r
-* (at your option) any later version.\r
-*\r
-* CasparCG is distributed in the hope that it will be useful,\r
-* but WITHOUT ANY WARRANTY; without even the implied warranty of\r
-* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\r
-* GNU General Public License for more details.\r
-\r
-* You should have received a copy of the GNU General Public License\r
-* along with CasparCG. If not, see <http://www.gnu.org/licenses/>.\r
-*\r
-*/\r
-#include "../../stdafx.h"\r
-\r
-#include "audio_transform.h"\r
-\r
-namespace caspar { namespace core {\r
- \r
-audio_transform::audio_transform()\r
- : volume_(1.0)\r
- , has_audio_(true){}\r
-\r
-void audio_transform::set_volume(double value)\r
-{\r
- volume_ = std::max(0.0, value);\r
-}\r
-\r
-double audio_transform::get_volume() const\r
-{\r
- return volume_;\r
-}\r
-\r
-void audio_transform::set_has_audio(bool value)\r
-{\r
- has_audio_ = value;\r
-}\r
-\r
-bool audio_transform::get_has_audio() const\r
-{\r
- return has_audio_;\r
-}\r
-\r
-audio_transform& audio_transform::operator*=(const audio_transform &other) \r
-{\r
- volume_ *= other.volume_;\r
- has_audio_ &= other.has_audio_;\r
- return *this;\r
-}\r
-\r
-const audio_transform audio_transform::operator*(const audio_transform &other) const\r
-{\r
- return audio_transform(*this) *= other;\r
-}\r
-\r
-audio_transform tween(double time, const audio_transform& source, const audio_transform& dest, double duration, const tweener_t& tweener)\r
-{\r
- auto do_tween = [](double time, double source, double dest, double duration, const tweener_t& tweener)\r
- {\r
- return tweener(time, source, dest-source, duration);\r
- };\r
-\r
- audio_transform result;\r
- result.set_volume(do_tween(time, source.get_volume(), dest.get_volume(), duration, tweener));\r
- result.set_has_audio(source.get_has_audio() || dest.get_has_audio());\r
- return result;\r
-}\r
-\r
-}}
\ No newline at end of file
+++ /dev/null
-/*\r
-* copyright (c) 2010 Sveriges Television AB <info@casparcg.com>\r
-*\r
-* This file is part of CasparCG.\r
-*\r
-* CasparCG is free software: you can redistribute it and/or modify\r
-* it under the terms of the GNU General Public License as published by\r
-* the Free Software Foundation, either version 3 of the License, or\r
-* (at your option) any later version.\r
-*\r
-* CasparCG is distributed in the hope that it will be useful,\r
-* but WITHOUT ANY WARRANTY; without even the implied warranty of\r
-* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\r
-* GNU General Public License for more details.\r
-\r
-* You should have received a copy of the GNU General Public License\r
-* along with CasparCG. If not, see <http://www.gnu.org/licenses/>.\r
-*\r
-*/\r
-#pragma once\r
-\r
-#include <common/utility/tweener.h>\r
-\r
-namespace caspar { namespace core {\r
-\r
-class audio_transform\r
-{\r
-public:\r
- audio_transform();\r
-\r
- void set_volume(double value);\r
- double get_volume() const;\r
- \r
- void set_has_audio(bool value);\r
- bool get_has_audio() const;\r
-\r
- audio_transform& operator*=(const audio_transform &other);\r
- const audio_transform operator*(const audio_transform &other) const;\r
-private:\r
- double volume_;\r
- bool has_audio_;\r
-};\r
-\r
-audio_transform tween(double time, const audio_transform& source, const audio_transform& dest, double duration, const tweener_t& tweener);\r
-\r
-inline bool operator==(const audio_transform& lhs, const audio_transform& rhs)\r
-{\r
- return memcmp(&lhs, &rhs, sizeof(audio_transform)) == 0;\r
-}\r
-\r
-inline bool operator!=(const audio_transform& lhs, const audio_transform& rhs)\r
-{\r
- return !(lhs == rhs);\r
-}\r
-\r
-}}
\ No newline at end of file
\r
#include "basic_frame.h"\r
\r
-#include "image_transform.h"\r
-#include "audio_transform.h"\r
+#include "frame_transform.h"\r
#include "../../video_format.h"\r
\r
#include <boost/foreach.hpp>\r
{ \r
std::vector<safe_ptr<basic_frame>> frames_;\r
\r
- image_transform image_transform_; \r
- audio_transform audio_transform_;\r
+ frame_transform frame_transform_; \r
\r
public:\r
implementation(const std::vector<safe_ptr<basic_frame>>& frames) : frames_(frames) \r
}\r
void basic_frame::swap(basic_frame& other){impl_.swap(other.impl_);}\r
\r
-const image_transform& basic_frame::get_image_transform() const { return impl_->image_transform_;}\r
-image_transform& basic_frame::get_image_transform() { return impl_->image_transform_;}\r
-const audio_transform& basic_frame::get_audio_transform() const { return impl_->audio_transform_;}\r
-audio_transform& basic_frame::get_audio_transform() { return impl_->audio_transform_;}\r
+const frame_transform& basic_frame::get_frame_transform() const { return impl_->frame_transform_;}\r
+frame_transform& basic_frame::get_frame_transform() { return impl_->frame_transform_;}\r
\r
std::wstring basic_frame::print() const{return impl_->print();}\r
void basic_frame::accept(frame_visitor& visitor){impl_->accept(*this, visitor);}\r
auto my_frame2 = make_safe<basic_frame>(frame2);\r
if(mode == field_mode::upper)\r
{\r
- my_frame1->get_image_transform().set_field_mode(field_mode::upper); \r
- my_frame2->get_image_transform().set_field_mode(field_mode::lower); \r
- } \r
- else \r
- { \r
- my_frame1->get_image_transform().set_field_mode(field_mode::lower); \r
- my_frame2->get_image_transform().set_field_mode(field_mode::upper); \r
+ my_frame1->get_frame_transform().field_mode = field_mode::upper; \r
+ my_frame2->get_frame_transform().field_mode = field_mode::lower; \r
+ } \r
+ else \r
+ { \r
+ my_frame1->get_frame_transform().field_mode = field_mode::lower; \r
+ my_frame2->get_frame_transform().field_mode = field_mode::upper; \r
}\r
\r
std::vector<safe_ptr<basic_frame>> frames;\r
return basic_frame::empty();\r
\r
std::vector<safe_ptr<basic_frame>> frames;\r
- key->get_image_transform().set_is_key(true);\r
+ key->get_frame_transform().is_key = true;\r
frames.push_back(key);\r
frames.push_back(fill);\r
return basic_frame(std::move(frames));\r
safe_ptr<basic_frame> disable_audio(const safe_ptr<basic_frame>& frame)\r
{\r
basic_frame frame2 = frame;\r
- frame2.get_audio_transform().set_has_audio(false);\r
+ frame2.get_frame_transform().volume = 0.0;\r
return std::move(frame2);\r
}\r
\r
\r
namespace caspar { namespace core {\r
\r
-class image_transform;\r
-class audio_transform;\r
- \r
+struct frame_transform;\r
+\r
class basic_frame\r
{\r
basic_frame(std::vector<safe_ptr<basic_frame>>&& frames);\r
\r
void swap(basic_frame& other);\r
\r
- const image_transform& get_image_transform() const;\r
- image_transform& get_image_transform();\r
-\r
- const audio_transform& get_audio_transform() const;\r
- audio_transform& get_audio_transform();\r
- \r
+ const frame_transform& get_frame_transform() const;\r
+ frame_transform& get_frame_transform();\r
+ \r
static safe_ptr<basic_frame> interlace(const safe_ptr<basic_frame>& frame1, const safe_ptr<basic_frame>& frame2, field_mode::type mode);\r
static safe_ptr<basic_frame> combine(const safe_ptr<basic_frame>& frame1, const safe_ptr<basic_frame>& frame2);\r
static safe_ptr<basic_frame> fill_and_key(const safe_ptr<basic_frame>& fill, const safe_ptr<basic_frame>& key);\r
--- /dev/null
+/*\r
+* copyright (c) 2010 Sveriges Television AB <info@casparcg.com>\r
+*\r
+* This file is part of CasparCG.\r
+*\r
+* CasparCG is free software: you can redistribute it and/or modify\r
+* it under the terms of the GNU General Public License as published by\r
+* the Free Software Foundation, either version 3 of the License, or\r
+* (at your option) any later version.\r
+*\r
+* CasparCG is distributed in the hope that it will be useful,\r
+* but WITHOUT ANY WARRANTY; without even the implied warranty of\r
+* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\r
+* GNU General Public License for more details.\r
+\r
+* You should have received a copy of the GNU General Public License\r
+* along with CasparCG. If not, see <http://www.gnu.org/licenses/>.\r
+*\r
+*/\r
+// TODO: Move layer specific stuff out of frame related classes.\r
+#include "../../stdafx.h"\r
+\r
+#include "frame_transform.h"\r
+\r
+#include <common/utility/assert.h>\r
+\r
+namespace caspar { namespace core {\r
+ \r
+frame_transform::frame_transform() \r
+	: volume(1.0)\r
+	, opacity(1.0)\r
+	, gain(1.0)\r
+	, brightness(1.0)\r
+	, contrast(1.0)\r
+	, saturation(1.0)\r
+	, is_key(false)\r
+	, field_mode(field_mode::progressive)\r
+{\r
+ std::fill(fill_translation.begin(), fill_translation.end(), 0.0);\r
+ std::fill(fill_scale.begin(), fill_scale.end(), 1.0);\r
+ std::fill(clip_translation.begin(), clip_translation.end(), 0.0);\r
+ std::fill(clip_scale.begin(), clip_scale.end(), 1.0);\r
+}\r
+\r
+frame_transform& frame_transform::operator*=(const frame_transform &other)\r
+{\r
+ volume *= other.volume;\r
+ opacity *= other.opacity; \r
+ brightness *= other.brightness;\r
+ contrast *= other.contrast;\r
+ saturation *= other.saturation;\r
+ levels.min_input = std::max(levels.min_input, other.levels.min_input);\r
+ levels.max_input = std::min(levels.max_input, other.levels.max_input); \r
+ levels.min_output = std::max(levels.min_output, other.levels.min_output);\r
+ levels.max_output = std::min(levels.max_output, other.levels.max_output);\r
+ levels.gamma *= other.levels.gamma;\r
+ field_mode = static_cast<field_mode::type>(field_mode & other.field_mode);\r
+ is_key |= other.is_key;\r
+ fill_translation[0] += other.fill_translation[0]*fill_scale[0];\r
+ fill_translation[1] += other.fill_translation[1]*fill_scale[1];\r
+ fill_scale[0] *= other.fill_scale[0];\r
+ fill_scale[1] *= other.fill_scale[1];\r
+ clip_translation[0] += other.clip_translation[0]*clip_scale[0];\r
+ clip_translation[1] += other.clip_translation[1]*clip_scale[1];\r
+ clip_scale[0] *= other.clip_scale[0];\r
+ clip_scale[1] *= other.clip_scale[1];\r
+ return *this;\r
+}\r
+\r
+frame_transform frame_transform::operator*(const frame_transform &other) const\r
+{\r
+ return frame_transform(*this) *= other;\r
+}\r
+\r
+frame_transform tween(double time, const frame_transform& source, const frame_transform& dest, double duration, const tweener_t& tweener)\r
+{ \r
+ auto do_tween = [](double time, double source, double dest, double duration, const tweener_t& tweener)\r
+ {\r
+ return tweener(time, source, dest-source, duration);\r
+ };\r
+ \r
+ frame_transform result; \r
+ result.is_key = source.is_key | dest.is_key;\r
+ result.field_mode = static_cast<field_mode::type>(source.field_mode & dest.field_mode);\r
+ result.volume = do_tween(time, source.volume, dest.volume, duration, tweener);\r
+ result.brightness = do_tween(time, source.brightness, dest.brightness, duration, tweener);\r
+ result.contrast = do_tween(time, source.contrast, dest.contrast, duration, tweener);\r
+ result.saturation = do_tween(time, source.saturation, dest.saturation, duration, tweener);\r
+ result.opacity = do_tween(time, source.opacity, dest.opacity, duration, tweener);\r
+ \r
+ result.fill_translation[0] = do_tween(time, source.fill_translation[0], dest.fill_translation[0], duration, tweener), \r
+ result.fill_translation[1] = do_tween(time, source.fill_translation[1], dest.fill_translation[1], duration, tweener);\r
+ \r
+ result.fill_scale[0] = do_tween(time, source.fill_scale[0], dest.fill_scale[0], duration, tweener), \r
+ result.fill_scale[1] = do_tween(time, source.fill_scale[1], dest.fill_scale[1], duration, tweener);\r
+ \r
+ result.clip_translation[0] = do_tween(time, source.clip_translation[0], dest.clip_translation[0], duration, tweener), \r
+ result.clip_translation[1] = do_tween(time, source.clip_translation[1], dest.clip_translation[1], duration, tweener);\r
+ \r
+ result.clip_scale[0] = do_tween(time, source.clip_scale[0], dest.clip_scale[0], duration, tweener), \r
+ result.clip_scale[1] = do_tween(time, source.clip_scale[1], dest.clip_scale[1], duration, tweener);\r
+\r
+ auto s_levels = source.levels;\r
+ auto d_levels = dest.levels;\r
+\r
+ result.levels.max_input = do_tween(time, s_levels.max_input, d_levels.max_input, duration, tweener);\r
+ result.levels.min_input = do_tween(time, s_levels.min_input, d_levels.min_input, duration, tweener); \r
+ result.levels.max_output = do_tween(time, s_levels.max_output, d_levels.max_output, duration, tweener);\r
+ result.levels.min_output = do_tween(time, s_levels.min_output, d_levels.min_output, duration, tweener);\r
+ result.levels.gamma = do_tween(time, s_levels.gamma, d_levels.gamma, duration, tweener);\r
+ \r
+ return result;\r
+}\r
+\r
+bool operator<(const frame_transform& lhs, const frame_transform& rhs)\r
+{\r
+ return memcmp(&lhs, &rhs, sizeof(frame_transform)) < 0;\r
+}\r
+\r
+bool operator==(const frame_transform& lhs, const frame_transform& rhs)\r
+{\r
+ return memcmp(&lhs, &rhs, sizeof(frame_transform)) == 0;\r
+}\r
+\r
+bool operator!=(const frame_transform& lhs, const frame_transform& rhs)\r
+{\r
+ return !(lhs == rhs);\r
+}\r
+\r
+}}
\ No newline at end of file
--- /dev/null
+/*\r
+* copyright (c) 2010 Sveriges Television AB <info@casparcg.com>\r
+*\r
+* This file is part of CasparCG.\r
+*\r
+* CasparCG is free software: you can redistribute it and/or modify\r
+* it under the terms of the GNU General Public License as published by\r
+* the Free Software Foundation, either version 3 of the License, or\r
+* (at your option) any later version.\r
+*\r
+* CasparCG is distributed in the hope that it will be useful,\r
+* but WITHOUT ANY WARRANTY; without even the implied warranty of\r
+* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\r
+* GNU General Public License for more details.\r
+\r
+* You should have received a copy of the GNU General Public License\r
+* along with CasparCG. If not, see <http://www.gnu.org/licenses/>.\r
+*\r
+*/\r
+#pragma once\r
+\r
+#include <common/utility/tweener.h>\r
+#include <core/video_format.h>\r
+\r
+#include <boost/array.hpp>\r
+#include <type_traits>\r
+\r
+namespace caspar { namespace core {\r
+\r
+struct pixel_format_desc;\r
+ \r
+struct levels\r
+{\r
+ levels() \r
+ : min_input(0.0)\r
+ , max_input(1.0)\r
+ , gamma(1.0)\r
+ , min_output(0.0)\r
+ , max_output(1.0)\r
+ { \r
+ }\r
+ double min_input;\r
+ double max_input;\r
+ double gamma;\r
+ double min_output;\r
+ double max_output;\r
+};\r
+\r
+struct frame_transform \r
+{\r
+public:\r
+\r
+ frame_transform();\r
+\r
+ double volume;\r
+ double opacity;\r
+ double gain;\r
+ double contrast;\r
+ double brightness;\r
+ double saturation;\r
+ levels levels;\r
+ boost::array<double, 2> fill_translation; \r
+ boost::array<double, 2> fill_scale; \r
+ boost::array<double, 2> clip_translation; \r
+ boost::array<double, 2> clip_scale; \r
+\r
+ field_mode::type field_mode;\r
+ bool is_key;\r
+ \r
+	frame_transform& operator*=(const frame_transform &other);\r
+	frame_transform operator*(const frame_transform &other) const;\r
+};\r
+\r
+frame_transform tween(double time, const frame_transform& source, const frame_transform& dest, double duration, const tweener_t& tweener);\r
+\r
+bool operator<(const frame_transform& lhs, const frame_transform& rhs);\r
+bool operator==(const frame_transform& lhs, const frame_transform& rhs);\r
+bool operator!=(const frame_transform& lhs, const frame_transform& rhs);\r
+\r
+}}
\ No newline at end of file
+++ /dev/null
-/*\r
-* copyright (c) 2010 Sveriges Television AB <info@casparcg.com>\r
-*\r
-* This file is part of CasparCG.\r
-*\r
-* CasparCG is free software: you can redistribute it and/or modify\r
-* it under the terms of the GNU General Public License as published by\r
-* the Free Software Foundation, either version 3 of the License, or\r
-* (at your option) any later version.\r
-*\r
-* CasparCG is distributed in the hope that it will be useful,\r
-* but WITHOUT ANY WARRANTY; without even the implied warranty of\r
-* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\r
-* GNU General Public License for more details.\r
-\r
-* You should have received a copy of the GNU General Public License\r
-* along with CasparCG. If not, see <http://www.gnu.org/licenses/>.\r
-*\r
-*/\r
-// TODO: Move layer specific stuff out of frame related classes.\r
-#include "../../stdafx.h"\r
-\r
-#include "image_transform.h"\r
-\r
-#include <common/utility/assert.h>\r
-\r
-namespace caspar { namespace core {\r
- \r
-image_transform::image_transform() \r
- : opacity_(1.0)\r
- , brightness_(1.0)\r
- , contrast_(1.0)\r
- , saturation_(1.0)\r
- , is_key_(false)\r
- , field_mode_(field_mode::progressive)\r
-{\r
- std::fill(fill_translation_.begin(), fill_translation_.end(), 0.0);\r
- std::fill(fill_scale_.begin(), fill_scale_.end(), 1.0);\r
- std::fill(clip_translation_.begin(), clip_translation_.end(), 0.0);\r
- std::fill(clip_scale_.begin(), clip_scale_.end(), 1.0);\r
-}\r
-\r
-void image_transform::set_opacity(double value)\r
-{\r
- opacity_ = std::max(value, 0.0);\r
-}\r
-\r
-double image_transform::get_opacity() const\r
-{\r
- return opacity_;\r
-}\r
-\r
-void image_transform::set_brightness(double value)\r
-{\r
- brightness_ = std::max(0.0, value);\r
-}\r
-\r
-double image_transform::get_brightness() const\r
-{\r
- return brightness_;\r
-}\r
-\r
-void image_transform::set_contrast(double value)\r
-{\r
- contrast_ = std::max(0.0, value);\r
-}\r
-\r
-double image_transform::get_contrast() const\r
-{\r
- return contrast_;\r
-}\r
-\r
-void image_transform::set_saturation(double value)\r
-{\r
- saturation_ = std::max(0.0, value);\r
-}\r
-\r
-double image_transform::get_saturation() const\r
-{\r
- return saturation_;\r
-}\r
-\r
-void image_transform::set_levels(const image_transform::levels& value)\r
-{\r
- levels_ = value;\r
-}\r
-\r
-image_transform::levels image_transform::get_levels() const\r
-{\r
- return levels_;\r
-}\r
-\r
-\r
-void image_transform::set_fill_translation(double x, double y)\r
-{\r
- fill_translation_[0] = x;\r
- fill_translation_[1] = y;\r
-}\r
-\r
-void image_transform::set_fill_scale(double x, double y)\r
-{\r
- fill_scale_[0] = x;\r
- fill_scale_[1] = y; \r
-}\r
-\r
-std::array<double, 2> image_transform::get_fill_translation() const\r
-{\r
- return fill_translation_;\r
-}\r
-\r
-std::array<double, 2> image_transform::get_fill_scale() const\r
-{\r
- return fill_scale_;\r
-}\r
-\r
-void image_transform::set_clip_translation(double x, double y)\r
-{\r
- clip_translation_[0] = x;\r
- clip_translation_[1] = y;\r
-}\r
-\r
-void image_transform::set_clip_scale(double x, double y)\r
-{\r
- clip_scale_[0] = x;\r
- clip_scale_[1] = y; \r
-}\r
-\r
-std::array<double, 2> image_transform::get_clip_translation() const\r
-{\r
- return clip_translation_;\r
-}\r
-\r
-std::array<double, 2> image_transform::get_clip_scale() const\r
-{\r
- return clip_scale_;\r
-}\r
-\r
-void image_transform::set_field_mode(field_mode::type field_mode)\r
-{\r
- field_mode_ = field_mode;\r
-}\r
-\r
-field_mode::type image_transform::get_field_mode() const\r
-{\r
- return field_mode_;\r
-}\r
-\r
-image_transform& image_transform::operator*=(const image_transform &other)\r
-{\r
- opacity_ *= other.opacity_; \r
- brightness_ *= other.brightness_;\r
- contrast_ *= other.contrast_;\r
- saturation_ *= other.saturation_;\r
-\r
- levels_.min_input = std::max(levels_.min_input, other.levels_.min_input);\r
- levels_.max_input = std::min(levels_.max_input, other.levels_.max_input);\r
- \r
- levels_.min_output = std::max(levels_.min_output, other.levels_.min_output);\r
- levels_.max_output = std::min(levels_.max_output, other.levels_.max_output);\r
-\r
- levels_.gamma *= other.levels_.gamma;\r
-\r
- field_mode_ = static_cast<field_mode::type>(field_mode_ & other.field_mode_);\r
- is_key_ |= other.is_key_;\r
- fill_translation_[0] += other.fill_translation_[0]*fill_scale_[0];\r
- fill_translation_[1] += other.fill_translation_[1]*fill_scale_[1];\r
- fill_scale_[0] *= other.fill_scale_[0];\r
- fill_scale_[1] *= other.fill_scale_[1];\r
- clip_translation_[0] += other.clip_translation_[0]*clip_scale_[0];\r
- clip_translation_[1] += other.clip_translation_[1]*clip_scale_[1];\r
- clip_scale_[0] *= other.clip_scale_[0];\r
- clip_scale_[1] *= other.clip_scale_[1];\r
- return *this;\r
-}\r
-\r
-const image_transform image_transform::operator*(const image_transform &other) const\r
-{\r
- return image_transform(*this) *= other;\r
-}\r
-\r
-void image_transform::set_is_key(bool value){is_key_ = value;}\r
-bool image_transform::get_is_key() const{return is_key_;}\r
-\r
-image_transform tween(double time, const image_transform& source, const image_transform& dest, double duration, const tweener_t& tweener)\r
-{ \r
- auto do_tween = [](double time, double source, double dest, double duration, const tweener_t& tweener)\r
- {\r
- return tweener(time, source, dest-source, duration);\r
- };\r
- \r
- image_transform result; \r
- result.set_is_key (source.get_is_key() | dest.get_is_key());\r
- result.set_field_mode (static_cast<field_mode::type>(source.get_field_mode() & dest.get_field_mode()));\r
- result.set_brightness (do_tween(time, source.get_brightness(), dest.get_brightness(), duration, tweener));\r
- result.set_contrast (do_tween(time, source.get_contrast(), dest.get_contrast(), duration, tweener));\r
- result.set_saturation (do_tween(time, source.get_saturation(), dest.get_saturation(), duration, tweener));\r
- result.set_opacity (do_tween(time, source.get_opacity(), dest.get_opacity(), duration, tweener));\r
- result.set_fill_translation (do_tween(time, source.get_fill_translation()[0], dest.get_fill_translation()[0], duration, tweener), do_tween(time, source.get_fill_translation()[1], dest.get_fill_translation()[1], duration, tweener));\r
- result.set_fill_scale (do_tween(time, source.get_fill_scale()[0], dest.get_fill_scale()[0], duration, tweener), do_tween(time, source.get_fill_scale()[1], dest.get_fill_scale()[1], duration, tweener));\r
- result.set_clip_translation (do_tween(time, source.get_clip_translation()[0], dest.get_clip_translation()[0], duration, tweener), do_tween(time, source.get_clip_translation()[1], dest.get_clip_translation()[1], duration, tweener));\r
- result.set_clip_scale (do_tween(time, source.get_clip_scale()[0], dest.get_clip_scale()[0], duration, tweener), do_tween(time, source.get_clip_scale()[1], dest.get_clip_scale()[1], duration, tweener));\r
- \r
- auto s_levels = source.get_levels();\r
- auto d_levels = dest.get_levels();\r
-\r
- d_levels.max_input = do_tween(time, s_levels.max_input, d_levels.max_input, duration, tweener);\r
- d_levels.min_input = do_tween(time, s_levels.min_input, d_levels.min_input, duration, tweener);\r
- \r
- d_levels.max_output = do_tween(time, s_levels.max_output, d_levels.max_output, duration, tweener);\r
- d_levels.min_output = do_tween(time, s_levels.min_output, d_levels.min_output, duration, tweener);\r
-\r
- d_levels.gamma = do_tween(time, s_levels.gamma, d_levels.gamma, duration, tweener);\r
-\r
- result.set_levels(d_levels);\r
-\r
- return result;\r
-}\r
-\r
-bool operator<(const image_transform& lhs, const image_transform& rhs)\r
-{\r
- return memcmp(&lhs, &rhs, sizeof(image_transform)) < 0;\r
-}\r
-\r
-bool operator==(const image_transform& lhs, const image_transform& rhs)\r
-{\r
- return memcmp(&lhs, &rhs, sizeof(image_transform)) == 0;\r
-}\r
-\r
-bool operator!=(const image_transform& lhs, const image_transform& rhs)\r
-{\r
- return !(lhs == rhs);\r
-}\r
-\r
-}}
\ No newline at end of file
+++ /dev/null
-/*\r
-* copyright (c) 2010 Sveriges Television AB <info@casparcg.com>\r
-*\r
-* This file is part of CasparCG.\r
-*\r
-* CasparCG is free software: you can redistribute it and/or modify\r
-* it under the terms of the GNU General Public License as published by\r
-* the Free Software Foundation, either version 3 of the License, or\r
-* (at your option) any later version.\r
-*\r
-* CasparCG is distributed in the hope that it will be useful,\r
-* but WITHOUT ANY WARRANTY; without even the implied warranty of\r
-* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\r
-* GNU General Public License for more details.\r
-\r
-* You should have received a copy of the GNU General Public License\r
-* along with CasparCG. If not, see <http://www.gnu.org/licenses/>.\r
-*\r
-*/\r
-#pragma once\r
-\r
-#include <common/utility/tweener.h>\r
-#include <core/video_format.h>\r
-\r
-#include <array>\r
-#include <type_traits>\r
-\r
-namespace caspar { namespace core {\r
-\r
-struct pixel_format_desc;\r
- \r
-class image_transform \r
-{\r
-public:\r
- \r
- struct levels\r
- {\r
- levels() \r
- : min_input(0.0)\r
- , max_input(1.0)\r
- , gamma(1.0)\r
- , min_output(0.0)\r
- , max_output(1.0)\r
- { \r
- }\r
- double min_input;\r
- double max_input;\r
- double gamma;\r
- double min_output;\r
- double max_output;\r
- };\r
-\r
- image_transform();\r
-\r
- void set_opacity(double value);\r
- double get_opacity() const;\r
- \r
- void set_brightness(double value);\r
- double get_brightness() const;\r
-\r
- void set_contrast(double value);\r
- double get_contrast() const;\r
-\r
- void set_saturation(double value);\r
- double get_saturation() const;\r
- \r
- void set_levels(const levels& value);\r
- levels get_levels() const;\r
- \r
- void set_fill_translation(double x, double y);\r
- std::array<double, 2> get_fill_translation() const;\r
-\r
- void set_fill_scale(double x, double y);\r
- std::array<double, 2> get_fill_scale() const;\r
- \r
- void set_clip_translation(double x, double y);\r
- std::array<double, 2> get_clip_translation() const;\r
-\r
- void set_clip_scale(double x, double y);\r
- std::array<double, 2> get_clip_scale() const;\r
- \r
- image_transform& operator*=(const image_transform &other);\r
- const image_transform operator*(const image_transform &other) const;\r
-\r
- void set_is_key(bool value);\r
- bool get_is_key() const;\r
-\r
- void set_field_mode(field_mode::type field_mode);\r
- field_mode::type get_field_mode() const;\r
- \r
-private:\r
- double opacity_;\r
- double gain_;\r
- double contrast_;\r
- double brightness_;\r
- double saturation_;\r
- double desaturation_;\r
- levels levels_;\r
- std::array<double, 2> fill_translation_; \r
- std::array<double, 2> fill_scale_; \r
- std::array<double, 2> clip_translation_; \r
- std::array<double, 2> clip_scale_; \r
- field_mode::type field_mode_;\r
- bool is_key_;\r
-};\r
-\r
-image_transform tween(double time, const image_transform& source, const image_transform& dest, double duration, const tweener_t& tweener);\r
-\r
-bool operator<(const image_transform& lhs, const image_transform& rhs);\r
-bool operator==(const image_transform& lhs, const image_transform& rhs);\r
-bool operator!=(const image_transform& lhs, const image_transform& rhs);\r
-\r
-}}
\ No newline at end of file
\r
#include "frame_producer.h"\r
#include "frame/basic_frame.h"\r
-#include "frame/audio_transform.h"\r
+#include "frame/frame_transform.h"\r
\r
#include "color/color_producer.h"\r
#include "separated/separated_producer.h"\r
#include <core/video_format.h>\r
\r
#include <core/producer/frame/basic_frame.h>\r
-#include <core/producer/frame/image_transform.h>\r
-#include <core/producer/frame/audio_transform.h>\r
+#include <core/producer/frame/frame_transform.h>\r
\r
#include <tbb/parallel_invoke.h>\r
\r
+#include <boost/assign.hpp>\r
+\r
+using namespace boost::assign;\r
+\r
namespace caspar { namespace core { \r
\r
struct transition_producer : public frame_producer\r
auto s_frame1 = make_safe<basic_frame>(src_frame);\r
auto s_frame2 = make_safe<basic_frame>(src_frame);\r
\r
- s_frame1->get_audio_transform().set_has_audio(false);\r
- s_frame2->get_audio_transform().set_volume(1.0-delta2);\r
+ s_frame1->get_frame_transform().volume = 0.0;\r
+ s_frame2->get_frame_transform().volume = 1.0-delta2;\r
\r
auto d_frame1 = make_safe<basic_frame>(dest_frame);\r
auto d_frame2 = make_safe<basic_frame>(dest_frame);\r
\r
- d_frame1->get_audio_transform().set_has_audio(false);\r
- d_frame2->get_audio_transform().set_volume(delta2);\r
+ d_frame1->get_frame_transform().volume = 0.0;\r
+ d_frame2->get_frame_transform().volume = delta2;\r
\r
//if(info_.type == transition::mix)\r
//{\r
- // d_frame1->get_image_transform().set_opacity(delta1); \r
- // d_frame2->get_image_transform().set_opacity(delta2);\r
+ // d_frame1->get_frame_transform().set_opacity(delta1); \r
+ // d_frame2->get_frame_transform().set_opacity(delta2);\r
\r
- // s_frame1->get_image_transform().set_opacity(1.0-delta1); \r
- // s_frame2->get_image_transform().set_opacity(1.0-delta2); \r
+ // s_frame1->get_frame_transform().set_opacity(1.0-delta1); \r
+ // s_frame2->get_frame_transform().set_opacity(1.0-delta2); \r
//}\r
if(info_.type == transition::slide)\r
{\r
- d_frame1->get_image_transform().set_fill_translation((-1.0+delta1)*dir, 0.0); \r
- d_frame2->get_image_transform().set_fill_translation((-1.0+delta2)*dir, 0.0); \r
+ d_frame1->get_frame_transform().fill_translation[0] = (-1.0+delta1)*dir; \r
+ d_frame2->get_frame_transform().fill_translation[0] = (-1.0+delta2)*dir; \r
}\r
else if(info_.type == transition::push)\r
{\r
- d_frame1->get_image_transform().set_fill_translation((-1.0+delta1)*dir, 0.0);\r
- d_frame2->get_image_transform().set_fill_translation((-1.0+delta2)*dir, 0.0);\r
+ d_frame1->get_frame_transform().fill_translation[0] = (-1.0+delta1)*dir;\r
+ d_frame2->get_frame_transform().fill_translation[0] = (-1.0+delta2)*dir;\r
\r
- s_frame1->get_image_transform().set_fill_translation((0.0+delta1)*dir, 0.0); \r
- s_frame2->get_image_transform().set_fill_translation((0.0+delta2)*dir, 0.0); \r
+ s_frame1->get_frame_transform().fill_translation[0] = (0.0+delta1)*dir; \r
+ s_frame2->get_frame_transform().fill_translation[0] = (0.0+delta2)*dir; \r
}\r
else if(info_.type == transition::wipe) \r
{\r
- d_frame1->get_image_transform().set_clip_scale(delta1, 1.0); \r
- d_frame2->get_image_transform().set_clip_scale(delta2, 1.0); \r
+ d_frame1->get_frame_transform().clip_scale[0] = delta1; \r
+ d_frame2->get_frame_transform().clip_scale[0] = delta2; \r
}\r
\r
- const auto s_frame = s_frame1->get_image_transform() == s_frame2->get_image_transform() ? s_frame2 : basic_frame::interlace(s_frame1, s_frame2, mode_);\r
- const auto d_frame = d_frame1->get_image_transform() == d_frame2->get_image_transform() ? d_frame2 : basic_frame::interlace(d_frame1, d_frame2, mode_);\r
+ const auto s_frame = s_frame1->get_frame_transform() == s_frame2->get_frame_transform() ? s_frame2 : basic_frame::interlace(s_frame1, s_frame2, mode_);\r
+ const auto d_frame = d_frame1->get_frame_transform() == d_frame2->get_frame_transform() ? d_frame2 : basic_frame::interlace(d_frame1, d_frame2, mode_);\r
\r
last_frame_ = basic_frame::combine(s_frame2, d_frame2);\r
\r
\r
size_t width; // output frame width\r
size_t height; // output frame height\r
- field_mode::type mode; // progressive, interlaced upper field first, interlaced lower field first\r
+ field_mode::type field_mode; // progressive, interlaced upper field first, interlaced lower field first\r
double fps; // actual framerate, e.g. i50 = 25 fps, p50 = 50 fps\r
size_t time_scale;\r
size_t duration;\r
\r
\r
/* File created by MIDL compiler version 7.00.0555 */\r
-/* at Fri Aug 19 23:43:33 2011\r
+/* at Sun Aug 21 14:54:35 2011\r
*/\r
/* Compiler settings for interop\DeckLinkAPI.idl:\r
Oicf, W1, Zp8, env=Win32 (32b run), target_arch=X86 7.00.0555 \r
\r
\r
/* File created by MIDL compiler version 7.00.0555 */\r
-/* at Fri Aug 19 23:43:33 2011\r
+/* at Sun Aug 21 14:54:35 2011\r
*/\r
/* Compiler settings for interop\DeckLinkAPI.idl:\r
Oicf, W1, Zp8, env=Win32 (32b run), target_arch=X86 7.00.0555 \r
#include <common/memory/memclr.h>\r
\r
#include <core/mixer/write_frame.h>\r
-#include <core/producer/frame/audio_transform.h>\r
+#include <core/producer/frame/frame_transform.h>\r
#include <core/producer/frame/frame_factory.h>\r
\r
#include <tbb/concurrent_queue.h>\r
av_frame->format = PIX_FMT_UYVY422;\r
av_frame->width = video->GetWidth();\r
av_frame->height = video->GetHeight();\r
- av_frame->interlaced_frame = format_desc_.mode != core::field_mode::progressive;\r
- av_frame->top_field_first = format_desc_.mode == core::field_mode::upper ? 1 : 0;\r
+ av_frame->interlaced_frame = format_desc_.field_mode != core::field_mode::progressive;\r
+ av_frame->top_field_first = format_desc_.field_mode == core::field_mode::upper ? 1 : 0;\r
\r
BOOST_FOREACH(auto& av_frame2, filter_.execute(av_frame))\r
muxer_.push(av_frame2); \r
\r
#include <core/producer/frame_producer.h>\r
#include <core/producer/frame/basic_frame.h>\r
-#include <core/producer/frame/image_transform.h>\r
+#include <core/producer/frame/frame_transform.h>\r
#include <core/producer/frame/pixel_format.h>\r
#include <core/producer/frame/frame_factory.h>\r
#include <core/mixer/write_frame.h>\r
if(auto_transcode_)\r
{\r
auto in_mode = get_mode(*video_frame);\r
- display_mode_ = get_display_mode(in_mode, in_fps_, format_desc_.mode, format_desc_.fps);\r
+ display_mode_ = get_display_mode(in_mode, in_fps_, format_desc_.field_mode, format_desc_.fps);\r
\r
- if(display_mode_ == display_mode::simple && in_mode != core::field_mode::progressive && format_desc_.mode != core::field_mode::progressive && video_frame->height != static_cast<int>(format_desc_.height))\r
+ if(display_mode_ == display_mode::simple && in_mode != core::field_mode::progressive && format_desc_.field_mode != core::field_mode::progressive && video_frame->height != static_cast<int>(format_desc_.height))\r
display_mode_ = display_mode::deinterlace_bob_reinterlace; // The frame will most likely be scaled, we need to deinterlace->reinterlace \r
\r
if(display_mode_ == display_mode::deinterlace)\r
auto frame = make_write_frame(this, av_frame, frame_factory_, hints);\r
\r
// Fix field-order if needed\r
- if(frame->get_type() == core::field_mode::lower && format_desc_.mode == core::field_mode::upper)\r
- frame->get_image_transform().set_fill_translation(0.0f, 0.5/static_cast<double>(frame->get_pixel_format_desc().planes[0].height));\r
- else if(frame->get_type() == core::field_mode::upper && format_desc_.mode == core::field_mode::lower)\r
- frame->get_image_transform().set_fill_translation(0.0f, -0.5/static_cast<double>(frame->get_pixel_format_desc().planes[0].height));\r
+ if(frame->get_type() == core::field_mode::lower && format_desc_.field_mode == core::field_mode::upper)\r
+ frame->get_frame_transform().fill_translation[1] += 0.5/static_cast<double>(frame->get_pixel_format_desc().planes[0].height);\r
+ else if(frame->get_type() == core::field_mode::upper && format_desc_.field_mode == core::field_mode::lower)\r
+ frame->get_frame_transform().fill_translation[1] -= 0.5/static_cast<double>(frame->get_pixel_format_desc().planes[0].height);\r
\r
video_streams_.back().push(frame);\r
++video_frame_count_;\r
\r
auto frame2 = pop_video();\r
\r
- dest.push_back(core::basic_frame::interlace(frame1, frame2, format_desc_.mode)); \r
+ dest.push_back(core::basic_frame::interlace(frame1, frame2, format_desc_.field_mode)); \r
}\r
\r
int64_t calc_nb_frames(int64_t nb_frames) const\r
#include <tbb/concurrent_unordered_map.h>\r
#include <tbb/concurrent_queue.h>\r
\r
-#include <core/producer/frame/image_transform.h>\r
+#include <core/producer/frame/frame_transform.h>\r
#include <core/producer/frame/frame_factory.h>\r
#include <core/producer/frame_producer.h>\r
#include <core/mixer/write_frame.h>\r
#include "../../ffmpeg_error.h"\r
#include "../../tbb_avcodec.h"\r
\r
-#include <core/producer/frame/image_transform.h>\r
+#include <core/producer/frame/frame_transform.h>\r
#include <core/producer/frame/frame_factory.h>\r
\r
#include <boost/range/algorithm_ext/push_back.hpp>\r
{\r
auto frame1 = context_->render_frame(frame_buffer_.size() < frame_buffer_.capacity());\r
auto frame2 = context_->render_frame(frame_buffer_.size() < frame_buffer_.capacity());\r
- frame_buffer_.push(core::basic_frame::interlace(frame1, frame2, format_desc.mode));\r
+ frame_buffer_.push(core::basic_frame::interlace(frame1, frame2, format_desc.field_mode));\r
}\r
else if(abs(context_->fps()- format_desc.fps/2.0) < 2.0) // format == 2 * flash -> duplicate\r
{\r
\r
#include <core/producer/frame/basic_frame.h>\r
#include <core/producer/frame/frame_factory.h>\r
-#include <core/producer/frame/image_transform.h>\r
+#include <core/producer/frame/frame_transform.h>\r
#include <core/mixer/write_frame.h>\r
\r
#include <common/env.h>\r
#include <boost/lexical_cast.hpp>\r
\r
#include <algorithm>\r
+#include <array>\r
\r
using namespace boost::assign;\r
\r
return core::basic_frame::eof();\r
\r
for(size_t n = 0; n < frames_.size(); ++n)\r
- frames_[n]->get_image_transform().set_fill_translation(start_offset_[0], start_offset_[1] -0.5*(n+1) + delta_ * 0.5/static_cast<double>(format_desc_.height));\r
+ {\r
+ frames_[n]->get_frame_transform().fill_translation[0] = start_offset_[0];\r
+ frames_[n]->get_frame_transform().fill_translation[1] = start_offset_[1] -0.5*(n+1) + delta_ * 0.5/static_cast<double>(format_desc_.height);\r
+ }\r
}\r
else\r
{\r
return core::basic_frame::eof();\r
\r
for(size_t n = 0; n < frames_.size(); ++n)\r
- frames_[n]->get_image_transform().set_fill_translation(start_offset_[0] -0.5*(n+1) + delta_ * 0.5/static_cast<double>(format_desc_.height), start_offset_[1]);\r
+ {\r
+ frames_[n]->get_frame_transform().fill_translation[0] = start_offset_[0] -0.5*(n+1) + delta_ * 0.5/static_cast<double>(format_desc_.height); \r
+ frames_[n]->get_frame_transform().fill_translation[1] = start_offset_[1];\r
+ }\r
}\r
\r
return last_frame_ = core::basic_frame(frames_);\r
\r
#include "../StdAfx.h"\r
\r
+#if defined(_MSC_VER)\r
+#pragma warning (push, 1) // TODO: Legacy code, just disable warnings\r
+#endif\r
+\r
#include "AMCPCommandsImpl.h"\r
#include "AMCPProtocolStrategy.h"\r
\r
#include <core/producer/frame_producer.h>\r
#include <core/video_format.h>\r
#include <core/producer/transition/transition_producer.h>\r
-#include <core/producer/frame/image_transform.h>\r
-#include <core/producer/frame/audio_transform.h>\r
+#include <core/producer/frame/frame_transform.h>\r
#include <core/producer/stage.h>\r
#include <core/producer/layer.h>\r
#include <core/mixer/mixer.h>\r
#include <boost/filesystem.hpp>\r
#include <boost/regex.hpp>\r
\r
-#if defined(_MSC_VER)\r
-#pragma warning (push, 1) // TODO: Legacy code, just disable warnings\r
-#endif\r
-\r
/* Return codes\r
\r
100 [action] Information om att något har hänt \r
if(_parameters[0] == L"KEYER")\r
{\r
bool value = lexical_cast_or_default(_parameters.at(1), false);\r
- auto transform = [=](image_transform transform) -> image_transform\r
+ auto transform = [=](frame_transform transform) -> frame_transform\r
{\r
- transform.set_is_key(value);\r
+ transform.is_key = value;\r
return transform; \r
};\r
\r
int layer = GetLayerIndex();\r
- GetChannel()->mixer()->apply_image_transform(GetLayerIndex(), transform);\r
+ GetChannel()->mixer()->apply_frame_transform(GetLayerIndex(), transform);\r
}\r
else if(_parameters[0] == L"OPACITY")\r
{\r
\r
double value = boost::lexical_cast<double>(_parameters.at(1));\r
\r
- auto transform = [=](image_transform transform) -> image_transform\r
+ auto transform = [=](frame_transform transform) -> frame_transform\r
{\r
- transform.set_opacity(value);\r
+ transform.opacity = value;\r
return transform; \r
};\r
\r
int layer = GetLayerIndex();\r
- GetChannel()->mixer()->apply_image_transform(GetLayerIndex(), transform, duration, tween);\r
+ GetChannel()->mixer()->apply_frame_transform(GetLayerIndex(), transform, duration, tween);\r
}\r
else if(_parameters[0] == L"FILL")\r
{\r
double x_s = boost::lexical_cast<double>(_parameters.at(3));\r
double y_s = boost::lexical_cast<double>(_parameters.at(4));\r
\r
- auto transform = [=](image_transform transform) -> image_transform\r
+ auto transform = [=](frame_transform transform) mutable -> frame_transform\r
{\r
- transform.set_fill_translation(x, y);\r
- transform.set_fill_scale(x_s, y_s);\r
- transform.set_clip_translation(x, y);\r
- transform.set_clip_scale(x_s, y_s);\r
+ transform.fill_translation[0] = x;\r
+ transform.fill_translation[1] = y;\r
+ transform.fill_scale[0] = x_s;\r
+ transform.fill_scale[1] = y_s;\r
+ transform.clip_translation[0] = x;\r
+ transform.clip_translation[1] = y;\r
+ transform.clip_scale[0] = x_s;\r
+ transform.clip_scale[1] = y_s;\r
return transform;\r
};\r
\r
int layer = GetLayerIndex();\r
- GetChannel()->mixer()->apply_image_transform(GetLayerIndex(), transform, duration, tween);\r
+ GetChannel()->mixer()->apply_frame_transform(GetLayerIndex(), transform, duration, tween);\r
}\r
- else if(_parameters[0] == L"MASK")\r
+ else if(_parameters[0] == L"CLIP")\r
{\r
int duration = _parameters.size() > 5 ? lexical_cast_or_default(_parameters[5], 0) : 0;\r
std::wstring tween = _parameters.size() > 6 ? _parameters[6] : L"linear";\r
double x_s = boost::lexical_cast<double>(_parameters.at(3));\r
double y_s = boost::lexical_cast<double>(_parameters.at(4));\r
\r
- auto transform = [=](image_transform transform) -> image_transform\r
+ auto transform = [=](frame_transform transform) -> frame_transform\r
{\r
- transform.set_clip_translation(x, y);\r
- transform.set_clip_scale(x_s, y_s);\r
+ transform.clip_translation[0] = x;\r
+ transform.clip_translation[1] = y;\r
+ transform.clip_scale[0] = x_s;\r
+ transform.clip_scale[1] = y_s;\r
return transform;\r
};\r
\r
int layer = GetLayerIndex();\r
- GetChannel()->mixer()->apply_image_transform(GetLayerIndex(), transform, duration, tween);\r
+ GetChannel()->mixer()->apply_frame_transform(GetLayerIndex(), transform, duration, tween);\r
}\r
else if(_parameters[0] == L"GRID")\r
{\r
for(int y = 0; y < n; ++y)\r
{\r
int index = x+y*n+1;\r
- auto transform = [=](image_transform transform) -> image_transform\r
- { \r
- transform.set_fill_translation(x*delta, y*delta);\r
- transform.set_fill_scale(delta, delta); \r
- transform.set_clip_translation(x*delta, y*delta);\r
- transform.set_clip_scale(delta, delta);\r
+ auto transform = [=](frame_transform transform) -> frame_transform\r
+ { \r
+ transform.fill_translation[0] = x*delta;\r
+ transform.fill_translation[1] = y*delta;\r
+ transform.fill_scale[0] = delta;\r
+ transform.fill_scale[1] = delta;\r
+ transform.clip_translation[0] = x*delta;\r
+ transform.clip_translation[1] = y*delta;\r
+ transform.clip_scale[0] = delta;\r
+ transform.clip_scale[1] = delta; \r
return transform;\r
};\r
- GetChannel()->mixer()->apply_image_transform(index, transform, duration, tween);\r
+ GetChannel()->mixer()->apply_frame_transform(index, transform, duration, tween);\r
}\r
}\r
}\r
auto value = boost::lexical_cast<double>(_parameters.at(1));\r
int duration = _parameters.size() > 2 ? lexical_cast_or_default(_parameters[2], 0) : 0;\r
std::wstring tween = _parameters.size() > 3 ? _parameters[3] : L"linear";\r
- auto transform = [=](image_transform transform) -> image_transform\r
+ auto transform = [=](frame_transform transform) -> frame_transform\r
{\r
- transform.set_brightness(value);\r
+ transform.brightness = value;\r
return transform;\r
};\r
\r
int layer = GetLayerIndex();\r
- GetChannel()->mixer()->apply_image_transform(GetLayerIndex(), transform, duration, tween); \r
+ GetChannel()->mixer()->apply_frame_transform(GetLayerIndex(), transform, duration, tween); \r
}\r
else if(_parameters[0] == L"SATURATION")\r
{\r
auto value = boost::lexical_cast<double>(_parameters.at(1));\r
int duration = _parameters.size() > 2 ? lexical_cast_or_default(_parameters[2], 0) : 0;\r
std::wstring tween = _parameters.size() > 3 ? _parameters[3] : L"linear";\r
- auto transform = [=](image_transform transform) -> image_transform\r
+ auto transform = [=](frame_transform transform) -> frame_transform\r
{\r
- transform.set_saturation(value);\r
+ transform.saturation = value;\r
return transform;\r
};\r
\r
int layer = GetLayerIndex();\r
- GetChannel()->mixer()->apply_image_transform(GetLayerIndex(), transform, duration, tween); \r
+ GetChannel()->mixer()->apply_frame_transform(GetLayerIndex(), transform, duration, tween); \r
}\r
else if(_parameters[0] == L"CONTRAST")\r
{\r
auto value = boost::lexical_cast<double>(_parameters.at(1));\r
int duration = _parameters.size() > 2 ? lexical_cast_or_default(_parameters[2], 0) : 0;\r
std::wstring tween = _parameters.size() > 3 ? _parameters[3] : L"linear";\r
- auto transform = [=](image_transform transform) -> image_transform\r
+ auto transform = [=](frame_transform transform) -> frame_transform\r
{\r
- transform.set_contrast(value);\r
+ transform.contrast = value;\r
return transform;\r
};\r
\r
int layer = GetLayerIndex();\r
- GetChannel()->mixer()->apply_image_transform(GetLayerIndex(), transform, duration, tween); \r
+ GetChannel()->mixer()->apply_frame_transform(GetLayerIndex(), transform, duration, tween); \r
}\r
else if(_parameters[0] == L"LEVELS")\r
{\r
- image_transform::levels value;\r
+ levels value;\r
value.min_input = boost::lexical_cast<double>(_parameters.at(1));\r
value.max_input = boost::lexical_cast<double>(_parameters.at(2));\r
value.gamma = boost::lexical_cast<double>(_parameters.at(3));\r
int duration = _parameters.size() > 6 ? lexical_cast_or_default(_parameters[6], 0) : 0;\r
std::wstring tween = _parameters.size() > 7 ? _parameters[7] : L"linear";\r
\r
- auto transform = [=](image_transform transform) -> image_transform\r
+ auto transform = [=](frame_transform transform) -> frame_transform\r
{\r
- transform.set_levels(value);\r
+ transform.levels = value;\r
return transform;\r
};\r
\r
int layer = GetLayerIndex();\r
- GetChannel()->mixer()->apply_image_transform(GetLayerIndex(), transform, duration, tween); \r
+ GetChannel()->mixer()->apply_frame_transform(GetLayerIndex(), transform, duration, tween); \r
}\r
else if(_parameters[0] == L"VOLUME")\r
{\r
std::wstring tween = _parameters.size() > 3 ? _parameters[3] : L"linear";\r
double value = boost::lexical_cast<double>(_parameters[1]);\r
\r
- auto transform = [=](audio_transform transform) -> audio_transform\r
+ auto transform = [=](frame_transform transform) -> frame_transform\r
{\r
- transform.set_volume(value);\r
+ transform.volume = value;\r
return transform;\r
};\r
\r
int layer = GetLayerIndex();\r
- GetChannel()->mixer()->apply_audio_transform(GetLayerIndex(), transform, duration, tween);\r
+ GetChannel()->mixer()->apply_frame_transform(GetLayerIndex(), transform, duration, tween);\r
}\r
- else if(_parameters[0] == L"RESET")\r
+ else if(_parameters[0] == L"CLEAR")\r
{\r
- GetChannel()->mixer()->reset_transforms();\r
+ GetChannel()->mixer()->clear_transforms();\r
}\r
else\r
{\r
\r
bool auto_play = std::find(_parameters.begin(), _parameters.end(), L"AUTO") != _parameters.end();\r
\r
- auto pFP2 = create_transition_producer(GetChannel()->get_video_format_desc().mode, pFP, transitionInfo);\r
+ auto pFP2 = create_transition_producer(GetChannel()->get_video_format_desc().field_mode, pFP, transitionInfo);\r
GetChannel()->stage()->load(GetLayerIndex(), pFP2, false, auto_play ? transitionInfo.duration : -1); // TODO: LOOP\r
\r
CASPAR_LOG(info) << "Loaded " << _parameters[0] << TEXT(" successfully to background");\r
transition.duration = 12;\r
\r
auto pFP = create_producer(GetChannel()->mixer(), boost::assign::list_of(filename));\r
- auto pTransition = create_transition_producer(GetChannel()->get_video_format_desc().mode, pFP, transition);\r
+ auto pTransition = create_transition_producer(GetChannel()->get_video_format_desc().field_mode, pFP, transition);\r
\r
try\r
{\r
<screen>\r
<key-only>false</key-only>\r
</screen>\r
+ <audio></audio>\r
</consumers>\r
</channel>\r
</channels>\r