2.0. Fixed mix transition.
[casparcg] / core / producer / transition / transition_producer.cpp
index f472e16758dae9e70df62b31bb232b86ce2529e9..24cf9287533810a4854657b8f5cab816ac344fb8 100644 (file)
 #include <core/video_format.h>\r
 \r
 #include <core/producer/frame/basic_frame.h>\r
-#include <core/producer/frame/image_transform.h>\r
-#include <core/producer/frame/audio_transform.h>\r
+#include <core/producer/frame/frame_transform.h>\r
+\r
+#include <tbb/parallel_invoke.h>\r
+\r
+#include <boost/assign.hpp>\r
+\r
+using namespace boost::assign;\r
 \r
 namespace caspar { namespace core {    \r
 \r
 struct transition_producer : public frame_producer\r
 {      \r
-       const video_format_desc         format_desc_;\r
-       unsigned short                          current_frame_;\r
+       const field_mode::type          mode_;\r
+       unsigned int                            current_frame_;\r
        \r
        const transition_info           info_;\r
        \r
        safe_ptr<frame_producer>        dest_producer_;\r
        safe_ptr<frame_producer>        source_producer_;\r
-       \r
-       std::vector<safe_ptr<basic_frame>> frame_buffer_;\r
-       \r
-       transition_producer(const video_format_desc& format_desc, const safe_ptr<frame_producer>& dest, const transition_info& info) \r
-               : format_desc_(format_desc)\r
+\r
+       safe_ptr<basic_frame>           last_frame_;\r
+               \r
+       explicit transition_producer(const field_mode::type& mode, const safe_ptr<frame_producer>& dest, const transition_info& info) \r
+               : mode_(mode)\r
                , current_frame_(0)\r
                , info_(info)\r
                , dest_producer_(dest)\r
                , source_producer_(frame_producer::empty())\r
-       {\r
-               frame_buffer_.push_back(basic_frame::empty());\r
-       }\r
-                                       \r
-       safe_ptr<frame_producer> get_following_producer() const\r
+               , last_frame_(basic_frame::empty()){}\r
+       \r
+       // frame_producer\r
+\r
+       virtual safe_ptr<frame_producer> get_following_producer() const\r
        {\r
                return dest_producer_;\r
        }\r
        \r
-       void set_leading_producer(const safe_ptr<frame_producer>& producer)\r
+       virtual void set_leading_producer(const safe_ptr<frame_producer>& producer)\r
        {\r
                source_producer_ = producer;\r
        }\r
 \r
-       safe_ptr<basic_frame> receive()\r
+       virtual safe_ptr<basic_frame> receive(int hints)\r
        {\r
-               if(current_frame_++ >= info_.duration)\r
+               if(++current_frame_ >= info_.duration)\r
                        return basic_frame::eof();\r
-\r
-               auto source = basic_frame::empty();\r
+               \r
                auto dest = basic_frame::empty();\r
+               auto source = basic_frame::empty();\r
 \r
-               tbb::parallel_invoke\r
-               (\r
-                       [&]{dest   = core::receive(dest_producer_);},\r
-                       [&]{source = core::receive(source_producer_);}\r
-               );\r
+               tbb::parallel_invoke(\r
+               [&]\r
+               {\r
+                       dest = receive_and_follow(dest_producer_, hints);\r
+                       if(dest == core::basic_frame::late())\r
+                               dest = dest_producer_->last_frame();\r
+               },\r
+               [&]\r
+               {\r
+                       source = receive_and_follow(source_producer_, hints);\r
+                       if(source == core::basic_frame::late())\r
+                               source = source_producer_->last_frame();\r
+               });\r
 \r
                return compose(dest, source);\r
        }\r
+\r
+       virtual safe_ptr<core::basic_frame> last_frame() const\r
+       {\r
+               return disable_audio(last_frame_);\r
+       }\r
+\r
+       virtual int64_t nb_frames() const \r
+       {\r
+               return get_following_producer()->nb_frames();\r
+       }\r
+\r
+       virtual std::wstring print() const\r
+       {\r
+               return L"transition[" + source_producer_->print() + L"|" + dest_producer_->print() + L"]";\r
+       }\r
+       \r
+       // transition_producer\r
                                                \r
        safe_ptr<basic_frame> compose(const safe_ptr<basic_frame>& dest_frame, const safe_ptr<basic_frame>& src_frame) \r
        {       \r
-               if(dest_frame == basic_frame::eof() && src_frame == basic_frame::eof())\r
-                       return basic_frame::eof();\r
-\r
                if(info_.type == transition::cut)               \r
-                       return src_frame != basic_frame::eof() ? src_frame : basic_frame::empty();\r
+                       return src_frame;\r
                                                                                \r
-               double delta1 = info_.tweener(current_frame_*2-1, 0.0, 1.0, info_.duration*2);\r
-               double delta2 = info_.tweener(current_frame_*2, 0.0, 1.0, info_.duration*2);  \r
+               const double delta1 = info_.tweener(current_frame_*2-1, 0.0, 1.0, info_.duration*2);\r
+               const double delta2 = info_.tweener(current_frame_*2, 0.0, 1.0, info_.duration*2);  \r
 \r
-               double dir = info_.direction == transition_direction::from_left ? 1.0 : -1.0;           \r
+               const double dir = info_.direction == transition_direction::from_left ? 1.0 : -1.0;             \r
                \r
                // For interlaced transitions: separate fields into separate frames, which are transitioned accordingly.\r
                \r
                auto s_frame1 = make_safe<basic_frame>(src_frame);\r
                auto s_frame2 = make_safe<basic_frame>(src_frame);\r
 \r
-               s_frame1->get_audio_transform().set_has_audio(false);\r
-               s_frame2->get_audio_transform().set_gain(1.0-delta2);\r
+               s_frame1->get_frame_transform().volume = 0.0;\r
+               s_frame2->get_frame_transform().volume = 1.0-delta2;\r
 \r
                auto d_frame1 = make_safe<basic_frame>(dest_frame);\r
                auto d_frame2 = make_safe<basic_frame>(dest_frame);\r
                \r
-               d_frame1->get_audio_transform().set_has_audio(false);\r
-               d_frame2->get_audio_transform().set_gain(delta2);\r
+               d_frame1->get_frame_transform().volume = 0.0;\r
+               d_frame2->get_frame_transform().volume = delta2;\r
 \r
                if(info_.type == transition::mix)\r
                {\r
-                       d_frame1->get_image_transform().set_opacity(delta1);    \r
-                       d_frame2->get_image_transform().set_opacity(delta2);    \r
+                       d_frame1->get_frame_transform().opacity = delta1;       \r
+                       d_frame1->get_frame_transform().is_mix = true;\r
+                       d_frame2->get_frame_transform().opacity = delta2;\r
+                       d_frame2->get_frame_transform().is_mix = true;\r
+\r
+                       s_frame1->get_frame_transform().opacity = 1.0-delta1;   \r
+                       s_frame1->get_frame_transform().is_mix = true;\r
+                       s_frame2->get_frame_transform().opacity = 1.0-delta2;   \r
+                       s_frame2->get_frame_transform().is_mix = true;\r
                }\r
-               else if(info_.type == transition::slide)\r
+               if(info_.type == transition::slide)\r
                {\r
-                       d_frame1->get_image_transform().set_fill_translation((-1.0+delta1)*dir, 0.0);   \r
-                       d_frame2->get_image_transform().set_fill_translation((-1.0+delta2)*dir, 0.0);           \r
+                       d_frame1->get_frame_transform().fill_translation[0] = (-1.0+delta1)*dir;        \r
+                       d_frame2->get_frame_transform().fill_translation[0] = (-1.0+delta2)*dir;                \r
                }\r
                else if(info_.type == transition::push)\r
                {\r
-                       d_frame1->get_image_transform().set_fill_translation((-1.0+delta1)*dir, 0.0);\r
-                       d_frame2->get_image_transform().set_fill_translation((-1.0+delta2)*dir, 0.0);\r
+                       d_frame1->get_frame_transform().fill_translation[0] = (-1.0+delta1)*dir;\r
+                       d_frame2->get_frame_transform().fill_translation[0] = (-1.0+delta2)*dir;\r
 \r
-                       s_frame1->get_image_transform().set_fill_translation((0.0+delta1)*dir, 0.0);    \r
-                       s_frame2->get_image_transform().set_fill_translation((0.0+delta2)*dir, 0.0);            \r
+                       s_frame1->get_frame_transform().fill_translation[0] = (0.0+delta1)*dir; \r
+                       s_frame2->get_frame_transform().fill_translation[0] = (0.0+delta2)*dir;         \r
                }\r
                else if(info_.type == transition::wipe)         \r
                {\r
-                       d_frame1->get_image_transform().set_key_scale(delta1, 1.0);     \r
-                       d_frame2->get_image_transform().set_key_scale(delta2, 1.0);                     \r
+                       d_frame1->get_frame_transform().clip_scale[0] = delta1; \r
+                       d_frame2->get_frame_transform().clip_scale[0] = delta2;                 \r
                }\r
+                               \r
+               const auto s_frame = s_frame1->get_frame_transform() == s_frame2->get_frame_transform() ? s_frame2 : basic_frame::interlace(s_frame1, s_frame2, mode_);\r
+               const auto d_frame = d_frame1->get_frame_transform() == d_frame2->get_frame_transform() ? d_frame2 : basic_frame::interlace(d_frame1, d_frame2, mode_);\r
                \r
-               auto s_frame = s_frame1->get_image_transform() == s_frame2->get_image_transform() ? s_frame2 : basic_frame::interlace(s_frame1, s_frame2, format_desc_.mode);\r
-               auto d_frame = basic_frame::interlace(d_frame1, d_frame2, format_desc_.mode);\r
+               last_frame_ = basic_frame::combine(s_frame2, d_frame2);\r
 \r
-               return basic_frame(s_frame, d_frame);\r
+               return basic_frame::combine(s_frame, d_frame);\r
        }\r
-\r
-       std::wstring print() const\r
-       {\r
-               return L"transition[" + transition::print(info_.type) + L":" + boost::lexical_cast<std::wstring>(info_.duration) + L"]";\r
-       }\r
-\r
-       std::wstring source_print() const { return print() + L"/source";}\r
-       std::wstring dest_print() const { return print() + L"/dest";}\r
 };\r
 \r
-safe_ptr<frame_producer> create_transition_producer(const video_format_desc& format_desc, const safe_ptr<frame_producer>& destination, const transition_info& info)\r
+safe_ptr<frame_producer> create_transition_producer(const field_mode::type& mode, const safe_ptr<frame_producer>& destination, const transition_info& info)\r
 {\r
-       return make_safe<transition_producer>(format_desc, destination, info);\r
+       return make_safe<transition_producer>(mode, destination, info);\r
 }\r
 \r
 }}\r
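
Note on the mix fix above: the destination fields now ramp their opacity up with the tweener while the source fields ramp down by the complementary amount, and both carry is_mix so the mixer cross-fades rather than simply compositing the destination over the source. Each full frame is split into two per-field frames (delta1 at current*2-1, delta2 at current*2, over duration*2) and interlaced when the two field transforms differ. The following is a minimal standalone sketch of that cross-fade math only; the frame struct, linear_tween helper, and duration value are hypothetical stand-ins, not CasparCG's API (the real code uses info_.tweener and frame_transform as shown in the diff).

    // Sketch of the complementary opacity/volume ramp used by the mix transition.
    // All names here are illustrative; see transition_producer::compose() above
    // for the actual frame_transform-based implementation.
    #include <algorithm>
    #include <cstdio>

    struct frame { double opacity; double volume; bool is_mix; };

    // Hypothetical stand-in for info_.tweener with a linear curve:
    // maps t in [0, d] to [b, b + c].
    double linear_tween(double t, double b, double c, double d)
    {
        return c * std::min(t / d, 1.0) + b;
    }

    int main()
    {
        const int duration = 25; // transition length in frames (assumed)
        for (int current = 1; current <= duration; ++current)
        {
            const double delta = linear_tween(current, 0.0, 1.0, duration);

            // Destination fades in, source fades out; opacities sum to 1.0,
            // and is_mix tells the (hypothetical) mixer to blend linearly.
            frame dest   = { delta,       delta,       true };
            frame source = { 1.0 - delta, 1.0 - delta, true };

            std::printf("frame %2d: dest %.2f / source %.2f\n",
                        current, dest.opacity, source.opacity);
        }
    }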