2.0. decklink_producer: Added support to deinterlace into 50p.
author ronag <ronag@362d55ac-95cf-4e76-9f9a-cbaa9c17b72d>
Wed, 22 Jun 2011 09:47:08 +0000 (09:47 +0000)
committer ronag <ronag@362d55ac-95cf-4e76-9f9a-cbaa9c17b72d>
Wed, 22 Jun 2011 09:47:08 +0000 (09:47 +0000)
git-svn-id: https://casparcg.svn.sourceforge.net/svnroot/casparcg/server/branches/2.0.0.2@940 362d55ac-95cf-4e76-9f9a-cbaa9c17b72d
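
Background on the change: when the configured output format is progressive, the filter now yields two progressive frames for every interlaced frame captured from the DeckLink card, and the captured frame's audio is divided between them by moving the second half of the first frame's samples to the front of the second (the insert/erase pair in the hunk below). When the output format is still interlaced, the two frames are instead woven back together with core::basic_frame::interlace and only the first one keeps its audio. The standalone sketch below illustrates just the audio split, assuming interleaved 16-bit samples; audio_buffer and split_audio are hypothetical names, not CasparCG API.

    #include <cstdint>
    #include <iostream>
    #include <vector>

    // Hypothetical stand-in for a frame's interleaved 16-bit audio buffer.
    using audio_buffer = std::vector<std::int16_t>;

    // Move the second half of first's samples to the front of second, so each
    // of the two progressive output frames carries half of the captured
    // frame's audio (same insert/erase approach as in the hunk below).
    void split_audio(audio_buffer& first, audio_buffer& second)
    {
        const auto half = first.begin() + first.size() / 2;
        second.insert(second.begin(), half, first.end());
        first.erase(half, first.end());
    }

    int main()
    {
        // 25i at 48 kHz stereo: 48000 / 25 * 2 = 3840 interleaved samples per frame.
        audio_buffer first(3840);
        audio_buffer second;

        split_audio(first, second);

        std::cout << first.size() << " " << second.size() << "\n"; // prints "1920 1920"
    }
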

modules/decklink/producer/decklink_producer.cpp

index f9c8be8df30cce8923cd7b7c17b435c653edf8c8..430fc79426609987cb86e9b5fc214c6ad1f2cfda 100644
@@ -75,9 +75,6 @@ class frame_filter
 {\r
        std::unique_ptr<filter>                                 filter_;\r
        safe_ptr<core::frame_factory>                   frame_factory_;\r
-       std::deque<std::vector<int16_t>>                audio_buffer_;\r
-\r
-       std::vector<safe_ptr<AVFrame>>                  buffer_;\r
 \r
 public:\r
        frame_filter(const std::string& filter_str, const safe_ptr<core::frame_factory>& frame_factory) \r
@@ -86,54 +83,55 @@ public:
        {\r
        }\r
 \r
-       bool execute(const safe_ptr<core::write_frame>& input_frame, safe_ptr<core::basic_frame>& output_frame)\r
+       std::vector<safe_ptr<core::basic_frame>> execute(const safe_ptr<core::write_frame>& input_frame)\r
        {               \r
+               std::vector<safe_ptr<core::basic_frame>> result;\r
+\r
                if(!filter_)\r
                {\r
                        input_frame->commit();\r
-                       output_frame = input_frame;\r
-                       return true;\r
+                       result.push_back(input_frame);\r
                }\r
-               \r
-               auto desc = input_frame->get_pixel_format_desc();\r
-\r
-               auto av_frame = as_av_frame(input_frame);\r
-\r
-               audio_buffer_.push_back(std::move(input_frame->audio_data()));\r
-               \r
-               filter_->push(av_frame);        \r
-               buffer_ = filter_->poll();      \r
-               \r
-               return try_pop(output_frame);\r
-       }\r
-\r
-private:               \r
-\r
-       bool try_pop(safe_ptr<core::basic_frame>& output)\r
-       {\r
-               if(buffer_.empty())\r
-                       return false;\r
+               else\r
+               {\r
+                       auto desc = input_frame->get_pixel_format_desc();\r
 \r
-               auto audio_data = std::move(audio_buffer_.front());\r
-               audio_buffer_.pop_back();\r
+                       auto av_frame = as_av_frame(input_frame);\r
+                                       \r
+                       filter_->push(av_frame);        \r
+                       auto buffer = filter_->poll();  \r
+                                               \r
+                       if(buffer.size() == 2)\r
+                       {\r
+                               auto frame1 = make_write_frame(this, buffer[0], frame_factory_);\r
+                               auto frame2 = make_write_frame(this, buffer[1], frame_factory_);\r
+                               frame1->audio_data() = std::move(input_frame->audio_data());\r
+                               \r
+                               if(frame_factory_->get_video_format_desc().mode == core::video_mode::progressive)\r
+                               {\r
+                                       frame2->audio_data().insert(frame2->audio_data().begin(), frame1->audio_data().begin() + frame1->audio_data().size()/2, frame1->audio_data().end());\r
+                                       frame1->audio_data().erase(frame1->audio_data().begin() + frame1->audio_data().size()/2, frame1->audio_data().end());\r
+                                       CASPAR_LOG(trace) << frame1->audio_data().size();\r
+                                       CASPAR_LOG(trace) << frame2->audio_data().size();\r
+                                       result.push_back(frame1);\r
+                                       result.push_back(frame2);\r
+                               }\r
+                               else\r
+                               {\r
+                                       frame2->get_audio_transform().set_has_audio(false);\r
+                                       result.push_back(core::basic_frame::interlace(frame1, frame2, frame_factory_->get_video_format_desc().mode));\r
+                               }\r
+                       }\r
+                       else if(buffer.size() > 0)\r
+                       {\r
+                               auto frame1 = make_write_frame(this, buffer[0], frame_factory_);\r
+                               frame1->audio_data() = std::move(input_frame->audio_data());\r
+                               result.push_back(frame1);\r
+                       }\r
 \r
-               if(buffer_.size() == 2)\r
-               {\r
-                       auto frame1 = make_write_frame(this, buffer_[0], frame_factory_);\r
-                       auto frame2 = make_write_frame(this, buffer_[1], frame_factory_);\r
-                       frame1->audio_data() = std::move(audio_data);\r
-                       frame2->get_audio_transform().set_has_audio(false);\r
-                       output = core::basic_frame::interlace(frame1, frame2, frame_factory_->get_video_format_desc().mode);\r
                }\r
-               else if(buffer_.size() > 0)\r
-               {\r
-                       auto frame1 = make_write_frame(this, buffer_[0], frame_factory_);\r
-                       frame1->audio_data() = std::move(audio_data);\r
-                       output = frame1;\r
-               }\r
-               buffer_.clear();\r
-\r
-               return true;\r
+               \r
+               return result;\r
        }\r
 };\r
        \r
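
For context, "deinterlace into 50p" means the filter emits one progressive frame per field, doubling 25i to 50p; the frame_filter above feeds AVFrames into filter_, which (an assumption here) wraps an FFmpeg filter graph configured for field-rate output. The toy line-doubling ("bob") deinterlacer below shows the idea on a single-plane 8-bit image; real deinterlacers such as yadif interpolate between fields rather than duplicating lines, and none of these types exist in CasparCG.

    #include <algorithm>
    #include <cstdint>
    #include <utility>
    #include <vector>

    // Minimal single-plane 8-bit image; hypothetical, not a CasparCG type.
    struct image
    {
        int width = 0;
        int height = 0;
        std::vector<std::uint8_t> data; // width * height bytes, one row per line
    };

    // Toy "bob" deinterlace: build one progressive frame per field by copying
    // that field's lines and repeating each of them onto the missing lines.
    // field 0 = even (top) lines, field 1 = odd (bottom) lines.
    image bob_field(const image& interlaced, int field)
    {
        image out;
        out.width  = interlaced.width;
        out.height = interlaced.height;
        out.data.resize(interlaced.data.size());

        for (int y = 0; y < out.height; ++y)
        {
            // Source line of the requested field that covers output line y.
            int src = (y & ~1) + field;
            if (src >= interlaced.height)
                src = interlaced.height - 1;

            const std::uint8_t* src_row = interlaced.data.data() + src * interlaced.width;
            std::copy(src_row, src_row + out.width, out.data.data() + y * out.width);
        }

        return out;
    }

    // One interlaced frame (two fields) becomes two progressive frames: 25i -> 50p.
    std::pair<image, image> deinterlace_to_double_rate(const image& interlaced)
    {
        return { bob_field(interlaced, 0), bob_field(interlaced, 1) };
    }
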
@@ -287,11 +285,14 @@ public:
                                }\r
                        }\r
                \r
-                       filter_.execute(frame, result);         \r
-                       \r
-                       if(!frame_buffer_.try_push(result))\r
-                               graph_->add_tag("dropped-frame");\r
+                       auto frames = filter_.execute(frame);           \r
                        \r
+                       for(size_t n = 0; n < frames.size(); ++n)\r
+                       {\r
+                               if(!frame_buffer_.try_push(frames[n]))\r
+                                       graph_->add_tag("dropped-frame");\r
+                       }\r
+\r
                        graph_->update_value("frame-time", frame_timer_.elapsed()*format_desc_.fps*0.5);\r
 \r
                        graph_->set_value("output-buffer", static_cast<float>(frame_buffer_.size())/static_cast<float>(frame_buffer_.capacity()));      \r