git.sesse.net Git - casparcg/commitdiff
2.0: ffmpeg_producer: Re-added and fixed filter capabilities.
author ronag <ronag@362d55ac-95cf-4e76-9f9a-cbaa9c17b72d>
Sun, 19 Jun 2011 12:39:13 +0000 (12:39 +0000)
committer ronag <ronag@362d55ac-95cf-4e76-9f9a-cbaa9c17b72d>
Sun, 19 Jun 2011 12:39:13 +0000 (12:39 +0000)
git-svn-id: https://casparcg.svn.sourceforge.net/svnroot/casparcg/server/branches/2.0.0.2@914 362d55ac-95cf-4e76-9f9a-cbaa9c17b72d

modules/ffmpeg/producer/ffmpeg_producer.cpp
modules/ffmpeg/producer/video/video_decoder.cpp
modules/ffmpeg/producer/video/video_decoder.h
modules/ffmpeg/util/filter.cpp
modules/ffmpeg/util/filter.h
shell/casparcg.config
shell/main.cpp

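This commit re-adds libavfilter support to the ffmpeg producer: create_ffmpeg_producer() now reads an optional FILTER parameter, video_decoder runs every decoded AVFrame through the filter graph, and frame-doubling output (e.g. yadif=1) is paired back into interlaced frames by the producer. A hypothetical AMCP invocation, modelled on the test shortcut added to main.cpp below (channel and clip name are assumptions):

    PLAY 1-1 MYCLIP FILTER yadif=1:-1 LOOP
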
index a30aed1c3d350f550d7b1a96b041181c14035e8b..2ad423be07c4177c6b5a30475bd837f5aad9d1d1 100644 (file)
@@ -59,10 +59,10 @@ struct ffmpeg_producer : public core::frame_producer
        std::unique_ptr<video_decoder>                  video_decoder_;\r
        std::unique_ptr<audio_decoder>                  audio_decoder_;\r
 \r
-       std::deque<std::pair<int, std::vector<short>>> audio_chunks_;\r
+       std::deque<std::pair<int, std::vector<int16_t>>> audio_chunks_;\r
        std::deque<std::pair<int, safe_ptr<core::write_frame>>> video_frames_;\r
 public:\r
-       explicit ffmpeg_producer(const safe_ptr<core::frame_factory>& frame_factory, const std::wstring& filename, bool loop, int start, int length) \r
+       explicit ffmpeg_producer(const safe_ptr<core::frame_factory>& frame_factory, const std::wstring& filename, const std::wstring& filter_str, bool loop, int start, int length) \r
                : filename_(filename)\r
                , graph_(diagnostics::create_graph(narrow(print())))\r
                , frame_factory_(frame_factory)         \r
@@ -78,7 +78,7 @@ public:
                        CASPAR_LOG(warning) << print() << L" Invalid framerate detected. This may cause distorted audio during playback. frame-time: " << frame_time;\r
                \r
                video_decoder_.reset(input_.get_video_codec_context() ? \r
-                       new video_decoder(input_, frame_factory) : nullptr);\r
+                       new video_decoder(input_, frame_factory, narrow(filter_str)) : nullptr);\r
                        \r
                audio_decoder_.reset(input_.get_audio_codec_context() ? \r
                        new audio_decoder(input_, frame_factory->get_video_format_desc()) : nullptr);           \r
@@ -110,12 +110,12 @@ public:
                (\r
                        [&]\r
                        {\r
-                               if(video_decoder_ && video_frames_.size() < 2)\r
+                               if(video_decoder_ && video_frames_.size() < 3)\r
                                        boost::range::push_back(video_frames_, video_decoder_->receive());              \r
                        }, \r
                        [&]\r
                        {\r
-                               if(audio_decoder_ && audio_chunks_.size() < 2)\r
+                               if(audio_decoder_ && audio_chunks_.size() < 3)\r
                                        boost::range::push_back(audio_chunks_, audio_decoder_->receive());                              \r
                        }\r
                );\r
@@ -132,27 +132,44 @@ public:
                                      video_frames_.front().first == audio_chunks_.front().first);\r
        }\r
 \r
+       safe_ptr<core::basic_frame> get_video_frame(std::vector<int16_t>&& audio_chunk)\r
+       {\r
+               auto frame = std::move(video_frames_.front().second);   \r
+               auto frame_number = video_frames_.front().first;\r
+               video_frames_.pop_front();\r
+                               \r
+               frame->audio_data() = std::move(audio_chunk);\r
+               if(frame->audio_data().empty())\r
+                       frame->get_audio_transform().set_has_audio(false);      \r
+\r
+               if(!video_frames_.empty()) // interlace if we have double frames\r
+               {\r
+                       if(video_frames_.front().first == frame_number)\r
+                       {\r
+                               auto frame2 = std::move(video_frames_.front().second);  \r
+                               video_frames_.pop_front();\r
+\r
+                               return core::basic_frame::interlace(frame, frame2, frame_factory_->get_video_format_desc().mode);\r
+                       }\r
+               }\r
+\r
+               return frame;\r
+       }\r
+\r
        safe_ptr<core::basic_frame> decode_frame()\r
        {\r
                decode_packets();\r
 \r
                if(video_decoder_ && audio_decoder_ && !video_frames_.empty() && !audio_chunks_.empty())\r
                {\r
-                       auto frame = std::move(video_frames_.front().second);                           \r
-                       video_frames_.pop_front();\r
-                               \r
-                       frame->audio_data() = std::move(audio_chunks_.front().second);\r
+                       auto audio_chunk = std::move(audio_chunks_.front().second);\r
                        audio_chunks_.pop_front();\r
-                       \r
-                       return frame;\r
+                                               \r
+                       return get_video_frame(std::move(audio_chunk));\r
                }\r
                else if(video_decoder_ && !audio_decoder_ && !video_frames_.empty())\r
-               {\r
-                       auto frame = std::move(video_frames_.front().second);                           \r
-                       video_frames_.pop_front();\r
-                       frame->get_audio_transform().set_has_audio(false);      \r
-                       \r
-                       return frame;\r
+               {                                               \r
+                       return get_video_frame(std::vector<int16_t>());\r
                }\r
                else if(audio_decoder_ && !video_decoder_ && !audio_chunks_.empty())\r
                {\r
@@ -203,7 +220,16 @@ safe_ptr<core::frame_producer> create_ffmpeg_producer(const safe_ptr<core::frame
                        start = boost::lexical_cast<int>(*seek_it);\r
        }\r
        \r
-       return make_safe<ffmpeg_producer>(frame_factory, path, loop, start, length);\r
+       std::wstring filter_str = L"";\r
+\r
+       auto filter_it = std::find(params.begin(), params.end(), L"FILTER");\r
+       if(filter_it != params.end())\r
+       {\r
+               if(++filter_it != params.end())\r
+                       filter_str = *filter_it;\r
+       }\r
+\r
+       return make_safe<ffmpeg_producer>(frame_factory, path, filter_str, loop, start, length);\r
 }\r
 \r
 }
\ No newline at end of file
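
The prefetch limits above grow from 2 to 3 because a doubling filter can return two frames per input packet. The new get_video_frame() pairs two consecutive frames that carry the same source frame number back into one interlaced frame; a condensed sketch of that pairing, using the names from the diff (not a drop-in replacement):

    // Two filter outputs tagged with the same source frame number are re-interlaced.
    if(!video_frames_.empty() && video_frames_.front().first == frame_number)
    {
        auto frame2 = std::move(video_frames_.front().second);
        video_frames_.pop_front();
        return core::basic_frame::interlace(frame, frame2, frame_factory_->get_video_format_desc().mode);
    }
    return frame;
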
index 967dad5495937b432238ab74a0e84f3ce79d17cf..0879869227f5b213b8d7a136a47d1e6bd431f1ae 100644 (file)
@@ -21,6 +21,8 @@
 \r
 #include "video_decoder.h"\r
 #include "../../ffmpeg_error.h"\r
+#include "../../util/util.h"\r
+#include "../../util/filter.h"\r
 \r
 #include <common/memory/memcpy.h>\r
 \r
@@ -53,105 +55,29 @@ extern "C"
 \r
 namespace caspar {\r
        \r
-core::pixel_format::type get_pixel_format(PixelFormat pix_fmt)\r
-{\r
-       switch(pix_fmt)\r
-       {\r
-       case PIX_FMT_GRAY8:             return core::pixel_format::gray;\r
-       case PIX_FMT_BGRA:              return core::pixel_format::bgra;\r
-       case PIX_FMT_ARGB:              return core::pixel_format::argb;\r
-       case PIX_FMT_RGBA:              return core::pixel_format::rgba;\r
-       case PIX_FMT_ABGR:              return core::pixel_format::abgr;\r
-       case PIX_FMT_YUV444P:   return core::pixel_format::ycbcr;\r
-       case PIX_FMT_YUV422P:   return core::pixel_format::ycbcr;\r
-       case PIX_FMT_YUV420P:   return core::pixel_format::ycbcr;\r
-       case PIX_FMT_YUV411P:   return core::pixel_format::ycbcr;\r
-       case PIX_FMT_YUV410P:   return core::pixel_format::ycbcr;\r
-       case PIX_FMT_YUVA420P:  return core::pixel_format::ycbcra;\r
-       default:                                return core::pixel_format::invalid;\r
-       }\r
-}\r
-\r
-core::pixel_format_desc get_pixel_format_desc(PixelFormat pix_fmt, size_t width, size_t height)\r
-{\r
-       // Get linesizes\r
-       AVPicture dummy_pict;   \r
-       avpicture_fill(&dummy_pict, nullptr, pix_fmt, width, height);\r
-\r
-       core::pixel_format_desc desc;\r
-       desc.pix_fmt = get_pixel_format(pix_fmt);\r
-               \r
-       switch(desc.pix_fmt)\r
-       {\r
-       case core::pixel_format::gray:\r
-               {\r
-                       desc.planes.push_back(core::pixel_format_desc::plane(dummy_pict.linesize[0]/4, height, 1));                                             \r
-                       return desc;\r
-               }\r
-       case core::pixel_format::bgra:\r
-       case core::pixel_format::argb:\r
-       case core::pixel_format::rgba:\r
-       case core::pixel_format::abgr:\r
-               {\r
-                       desc.planes.push_back(core::pixel_format_desc::plane(dummy_pict.linesize[0]/4, height, 4));                                             \r
-                       return desc;\r
-               }\r
-       case core::pixel_format::ycbcr:\r
-       case core::pixel_format::ycbcra:\r
-               {               \r
-                       // Find chroma height\r
-                       size_t size2 = dummy_pict.data[2] - dummy_pict.data[1];\r
-                       size_t h2 = size2/dummy_pict.linesize[1];                       \r
-\r
-                       desc.planes.push_back(core::pixel_format_desc::plane(dummy_pict.linesize[0], height, 1));\r
-                       desc.planes.push_back(core::pixel_format_desc::plane(dummy_pict.linesize[1], h2, 1));\r
-                       desc.planes.push_back(core::pixel_format_desc::plane(dummy_pict.linesize[2], h2, 1));\r
-\r
-                       if(desc.pix_fmt == core::pixel_format::ycbcra)                                          \r
-                               desc.planes.push_back(core::pixel_format_desc::plane(dummy_pict.linesize[3], height, 1));       \r
-                       return desc;\r
-               }               \r
-       default:                \r
-               desc.pix_fmt = core::pixel_format::invalid;\r
-               return desc;\r
-       }\r
-}\r
-\r
 struct video_decoder::implementation : boost::noncopyable\r
 {\r
        input& input_;\r
        std::shared_ptr<SwsContext>                                     sws_context_;\r
        const std::shared_ptr<core::frame_factory>      frame_factory_;\r
        AVCodecContext&                                                         codec_context_;\r
-       const int                                                                       width_;\r
-       const int                                                                       height_;\r
-       const PixelFormat                                                       pix_fmt_;\r
-       core::pixel_format_desc                                         desc_;\r
        size_t                                                                          frame_number_;\r
 \r
+       std::shared_ptr<filter>                                         filter_;\r
+       size_t                                                                          filter_delay_;\r
+\r
+       safe_ptr<AVFrame>                                                       last_frame_;\r
+\r
 public:\r
-       explicit implementation(input& input, const safe_ptr<core::frame_factory>& frame_factory) \r
+       explicit implementation(input& input, const safe_ptr<core::frame_factory>& frame_factory, const std::string& filter_str) \r
                : input_(input)\r
                , frame_factory_(frame_factory)\r
                , codec_context_(*input_.get_video_codec_context())\r
-               , width_(codec_context_.width)\r
-               , height_(codec_context_.height)\r
-               , pix_fmt_(codec_context_.pix_fmt)\r
-               , desc_(get_pixel_format_desc(pix_fmt_, width_, height_))\r
                , frame_number_(0)\r
+               , filter_(filter_str.empty() ? nullptr : new filter(filter_str))\r
+               , filter_delay_(0)\r
+               , last_frame_(avcodec_alloc_frame(), av_free)\r
        {\r
-               if(desc_.pix_fmt == core::pixel_format::invalid)\r
-               {\r
-                       CASPAR_LOG(warning) << "Hardware accelerated color transform not supported.";\r
-\r
-                       desc_ = get_pixel_format_desc(PIX_FMT_BGRA, width_, height_);\r
-                       double param;\r
-                       sws_context_.reset(sws_getContext(width_, height_, pix_fmt_, width_, height_, PIX_FMT_BGRA, SWS_BILINEAR, nullptr, nullptr, &param), sws_freeContext);\r
-                       if(!sws_context_)\r
-                               BOOST_THROW_EXCEPTION(operation_failed() <<\r
-                                                                         msg_info("Could not create software scaling context.") << \r
-                                                                         boost::errinfo_api_function("sws_getContext"));\r
-               }\r
        }\r
 \r
        std::deque<std::pair<int, safe_ptr<core::write_frame>>> receive()\r
@@ -171,15 +97,18 @@ public:
 \r
                if(!video_packet) // eof\r
                {       \r
-                       avcodec_flush_buffers(&codec_context_);\r
+                       for(size_t n = 0; n < filter_delay_; ++n)\r
+                               boost::range::push_back(result, get_frames(last_frame_));\r
+                       \r
                        frame_number_ = 0;\r
+                       filter_delay_ = 0;\r
+                       avcodec_flush_buffers(&codec_context_);\r
+\r
                        return result;\r
                }\r
 \r
-               safe_ptr<AVFrame> decoded_frame(avcodec_alloc_frame(), av_free);\r
-\r
                int frame_finished = 0;\r
-               const int errn = avcodec_decode_video2(&codec_context_, decoded_frame.get(), &frame_finished, video_packet.get());\r
+               const int errn = avcodec_decode_video2(&codec_context_, last_frame_.get(), &frame_finished, video_packet.get());\r
                \r
                if(errn < 0)\r
                {\r
@@ -191,27 +120,71 @@ public:
                }\r
                \r
                if(frame_finished != 0)         \r
-                       result.push_back(std::make_pair(frame_number_++, make_write_frame(decoded_frame)));\r
+                       result = get_frames(last_frame_);\r
+\r
+               return result;\r
+       }\r
+\r
+       std::deque<std::pair<int, safe_ptr<core::write_frame>>> get_frames(const safe_ptr<AVFrame>& frame)\r
+       {\r
+               std::deque<std::pair<int, safe_ptr<core::write_frame>>> result;\r
+                       \r
+               if(filter_)\r
+               {\r
+                       auto frames = filter_->execute(frame);\r
+\r
+                       boost::range::transform(frames, std::back_inserter(result), [this](const safe_ptr<AVFrame>& frame)\r
+                       {\r
+                               return std::make_pair(frame_number_, make_write_frame(frame));\r
+                       });\r
+\r
+                       if(!frames.empty())\r
+                               ++frame_number_;\r
+                       else\r
+                               ++filter_delay_;\r
+               }\r
+               else\r
+                       result.push_back(std::make_pair(frame_number_++, make_write_frame(frame)));\r
 \r
                return result;\r
        }\r
 \r
        safe_ptr<core::write_frame> make_write_frame(safe_ptr<AVFrame> decoded_frame)\r
-       {               \r
-               auto write = frame_factory_->create_frame(this, desc_);\r
+       {                       \r
+               // We don't know what the filter output might give until we received the first frame. Initialize everything on first frame.\r
+               auto width   = decoded_frame->width;\r
+               auto height  = decoded_frame->height;\r
+               auto pix_fmt = static_cast<PixelFormat>(decoded_frame->format);\r
+               auto desc        = get_pixel_format_desc(pix_fmt, width, height);\r
+                       \r
+               if(desc.pix_fmt == core::pixel_format::invalid)\r
+               {\r
+                       CASPAR_VERIFY(!sws_context_); // Initialize only once. Nothing should change while running;\r
+                       CASPAR_LOG(warning) << "Hardware accelerated color transform not supported.";\r
+\r
+                       desc = get_pixel_format_desc(PIX_FMT_BGRA, width, height);\r
+                       double param;\r
+                       sws_context_.reset(sws_getContext(width, height, pix_fmt, width, height, PIX_FMT_BGRA, SWS_BILINEAR, nullptr, nullptr, &param), sws_freeContext);\r
+                       if(!sws_context_)\r
+                               BOOST_THROW_EXCEPTION(operation_failed() <<\r
+                                                                         msg_info("Could not create software scaling context.") << \r
+                                                                         boost::errinfo_api_function("sws_getContext"));\r
+               }\r
+\r
+               auto write = frame_factory_->create_frame(this, desc);\r
                write->set_is_interlaced(decoded_frame->interlaced_frame != 0);\r
 \r
                if(sws_context_ == nullptr)\r
                {\r
-                       tbb::parallel_for(0, static_cast<int>(desc_.planes.size()), 1, [&](int n)\r
+                       tbb::parallel_for(0, static_cast<int>(desc.planes.size()), 1, [&](int n)\r
                        {\r
-                               auto plane            = desc_.planes[n];\r
+                               auto plane            = desc.planes[n];\r
                                auto result           = write->image_data(n).begin();\r
                                auto decoded          = decoded_frame->data[n];\r
                                auto decoded_linesize = decoded_frame->linesize[n];\r
                                \r
                                // Copy line by line since ffmpeg sometimes pads each line.\r
-                               tbb::parallel_for(tbb::blocked_range<size_t>(0, static_cast<int>(desc_.planes[n].height)), [&](const tbb::blocked_range<size_t>& r)\r
+                               tbb::parallel_for(tbb::blocked_range<size_t>(0, static_cast<int>(desc.planes[n].height)), [&](const tbb::blocked_range<size_t>& r)\r
                                {\r
                                        for(size_t y = r.begin(); y != r.end(); ++y)\r
                                                memcpy(result + y*plane.linesize, decoded + y*decoded_linesize, plane.linesize);\r
@@ -225,22 +198,34 @@ public:
                        // Use sws_scale when provided colorspace has no hw-accel.\r
                        safe_ptr<AVFrame> av_frame(avcodec_alloc_frame(), av_free);     \r
                        avcodec_get_frame_defaults(av_frame.get());                     \r
-                       avpicture_fill(reinterpret_cast<AVPicture*>(av_frame.get()), write->image_data().begin(), PIX_FMT_BGRA, width_, height_);\r
+                       avpicture_fill(reinterpret_cast<AVPicture*>(av_frame.get()), write->image_data().begin(), PIX_FMT_BGRA, width, height);\r
                 \r
-                       sws_scale(sws_context_.get(), decoded_frame->data, decoded_frame->linesize, 0, height_, av_frame->data, av_frame->linesize);    \r
+                       sws_scale(sws_context_.get(), decoded_frame->data, decoded_frame->linesize, 0, height, av_frame->data, av_frame->linesize);     \r
 \r
                        write->commit();\r
                }       \r
 \r
-               // DVVIDEO is in lower field. Make it upper field if needed.\r
-               if(codec_context_.codec_id == CODEC_ID_DVVIDEO && frame_factory_->get_video_format_desc().mode == core::video_mode::upper)\r
-                       write->get_image_transform().set_fill_translation(0.0f, 0.5/static_cast<double>(height_));\r
+               // Fix field-order if needed. DVVIDEO is in lower field. Make it upper field if needed.\r
+               if(decoded_frame->interlaced_frame)\r
+               {\r
+                       switch(frame_factory_->get_video_format_desc().mode)\r
+                       {\r
+                       case core::video_mode::upper:\r
+                               if(!decoded_frame->top_field_first)\r
+                                       write->get_image_transform().set_fill_translation(0.0f, 0.5/static_cast<double>(height));\r
+                               break;\r
+                       case core::video_mode::lower:\r
+                               if(decoded_frame->top_field_first)\r
+                                       write->get_image_transform().set_fill_translation(0.0f, -0.5/static_cast<double>(height));\r
+                               break;\r
+                       }\r
+               }\r
 \r
                return write;\r
        }\r
 };\r
 \r
-video_decoder::video_decoder(input& input, const safe_ptr<core::frame_factory>& frame_factory) : impl_(new implementation(input, frame_factory)){}\r
+video_decoder::video_decoder(input& input, const safe_ptr<core::frame_factory>& frame_factory, const std::string& filter_str) : impl_(new implementation(input, frame_factory, filter_str)){}\r
 std::deque<std::pair<int, safe_ptr<core::write_frame>>> video_decoder::receive(){return impl_->receive();}\r
 \r
 }
\ No newline at end of file
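
Since the filter output format is only known once the first frame arrives, the pixel-format helpers were moved out of this file (now pulled in via ../../util/util.h) and make_write_frame() derives width, height and pixel format from each decoded AVFrame. The decoder also keeps the last decoded frame and, at end of file, re-feeds it filter_delay_ times to drain frames still buffered inside the filter graph. A minimal sketch of the per-frame format probe, assuming the relocated helper keeps the signature shown in the removed code:

    auto pix_fmt = static_cast<PixelFormat>(decoded_frame->format);
    auto desc    = get_pixel_format_desc(pix_fmt, decoded_frame->width, decoded_frame->height);
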
index e3bdf0d1815065c75d3d998a26b7fd61d89ac5da..8efe341e708700415d6a59c6c13c75398ecfb9e6 100644 (file)
@@ -32,7 +32,7 @@ namespace core {
 class video_decoder : boost::noncopyable\r
 {\r
 public:\r
-       explicit video_decoder(input& input, const safe_ptr<core::frame_factory>& frame_factory);\r
+       explicit video_decoder(input& input, const safe_ptr<core::frame_factory>& frame_factory, const std::string& filter_str);\r
        std::deque<std::pair<int, safe_ptr<core::write_frame>>> receive();      \r
 \r
 private:\r
index 12d121975e60b896097c6f99b65c27fc516068b8..2b5cfcf6db363a9f7d828553e11efe7b11922777 100644 (file)
@@ -29,17 +29,18 @@ namespace caspar {
        \r
 struct filter::implementation\r
 {\r
-       const std::string                                               filters_;\r
+       std::string                                                             filters_;\r
        std::shared_ptr<AVFilterGraph>                  graph_;\r
        AVFilterContext*                                                video_in_filter_;\r
        AVFilterContext*                                                video_out_filter_;\r
-       std::deque<std::shared_ptr<AVFrame>>    buffer_;\r
                \r
        implementation(const std::string& filters) \r
                : filters_(filters)\r
-       {}\r
+       {\r
+               std::transform(filters_.begin(), filters_.end(), filters_.begin(), ::tolower);\r
+       }\r
 \r
-       void push(const safe_ptr<AVFrame>& frame)\r
+       std::vector<safe_ptr<AVFrame>> execute(const safe_ptr<AVFrame>& frame)\r
        {               \r
                int errn = 0;   \r
 \r
@@ -110,10 +111,14 @@ struct filter::implementation
                                boost::errinfo_api_function("avfilter_poll_frame") << boost::errinfo_errno(AVUNERROR(errn)));\r
                }\r
 \r
-               std::generate_n(std::back_inserter(buffer_), errn, [&]{return get_frame();});\r
+               std::vector<safe_ptr<AVFrame>> result;\r
+\r
+               std::generate_n(std::back_inserter(result), errn, [&]{return get_frame();});\r
+\r
+               return result;\r
        }\r
                \r
-       std::shared_ptr<AVFrame> get_frame()\r
+       safe_ptr<AVFrame> get_frame()\r
        {               \r
                auto link = video_out_filter_->inputs[0];\r
 \r
@@ -124,9 +129,9 @@ struct filter::implementation
                                boost::errinfo_api_function("avfilter_request_frame") << boost::errinfo_errno(AVUNERROR(errn)));\r
                }\r
                \r
-               auto pic   = reinterpret_cast<AVPicture*>(link->cur_buf->buf);\r
+               auto pic = reinterpret_cast<AVPicture*>(link->cur_buf->buf);\r
                \r
-               std::shared_ptr<AVFrame> frame(avcodec_alloc_frame(), av_free);\r
+               safe_ptr<AVFrame> frame(avcodec_alloc_frame(), av_free);\r
                avcodec_get_frame_defaults(frame.get());        \r
 \r
                for(size_t n = 0; n < 4; ++n)\r
@@ -135,28 +140,19 @@ struct filter::implementation
                        frame->linesize[n]      = pic->linesize[n];\r
                }\r
 \r
-               frame->width    = link->w;\r
-               frame->height   = link->h;\r
-               frame->format   = link->format;\r
+               // FIXME\r
+               frame->width                    = link->cur_buf->video->w;\r
+               frame->height                   = link->cur_buf->video->h;\r
+               frame->format                   = link->cur_buf->format;\r
+               frame->interlaced_frame = link->cur_buf->video->interlaced;\r
+               frame->top_field_first  = link->cur_buf->video->top_field_first;\r
+               frame->key_frame                = link->cur_buf->video->key_frame;\r
 \r
                return frame;\r
        }\r
-       \r
-       bool try_pop(std::shared_ptr<AVFrame>& frame)\r
-       {\r
-               if(buffer_.empty())\r
-                       return false;\r
-\r
-               frame = buffer_.front();\r
-               buffer_.pop_front();\r
-\r
-               return true;\r
-       }\r
 };\r
 \r
 filter::filter(const std::string& filters) : impl_(new implementation(filters)){}\r
-void filter::push(const safe_ptr<AVFrame>& frame) {return impl_->push(frame);}\r
-bool filter::try_pop(std::shared_ptr<AVFrame>& frame){return impl_->try_pop(frame);}\r
-size_t filter::size() const {return impl_->buffer_.size();}\r
+std::vector<safe_ptr<AVFrame>> filter::execute(const safe_ptr<AVFrame>& frame) {return impl_->execute(frame);}\r
 \r
 }
\ No newline at end of file
index 04a49eced19207c2c966d4b593c841153820a739..c964f487a5b7618391a741d9ec384a17e8d2c5e7 100644 (file)
@@ -2,6 +2,8 @@
 \r
 #include <common/memory/safe_ptr.h>\r
 \r
+#include <vector>\r
+\r
 struct AVFrame;\r
 \r
 namespace caspar {\r
@@ -11,9 +13,7 @@ class filter
 public:\r
        filter(const std::string& filters);\r
 \r
-       void push(const safe_ptr<AVFrame>& frame);\r
-       bool try_pop(std::shared_ptr<AVFrame>& frame);\r
-       size_t size() const;\r
+       std::vector<safe_ptr<AVFrame>> execute(const safe_ptr<AVFrame>& frame);\r
 \r
 private:\r
        struct implementation;\r
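
The old push()/try_pop()/size() interface collapses into a single execute() call that returns every frame the graph yields for one input. A hedged usage sketch against the header above (the decoded_frame source is an assumption):

    caspar::filter deinterlace("yadif=1:-1");
    std::vector<safe_ptr<AVFrame>> frames = deinterlace.execute(decoded_frame);
    // A doubling filter such as yadif=1 usually yields two frames per input once
    // the graph is primed; an empty result means the graph is still buffering.
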
index 7a4d541272e0e025745f0ab2d0fb9e40902143fa..4f6e99a5d5c16a0c5c3df55a96cb19e67ee6807d 100644 (file)
@@ -1,10 +1,10 @@
 <?xml version="1.0" encoding="utf-8"?>\r
 <configuration>\r
   <paths>\r
-    <media-path>L:\\Casparcg\\_media\\</media-path>\r
-    <log-path>L:\\Casparcg\\_log\\</log-path>\r
-    <data-path>L:\\Casparcg\\_data\\</data-path>\r
-    <template-path>L:\\Casparcg\\_templates\\</template-path>\r
+    <media-path>C:\\Casparcg\\_media\\</media-path>\r
+    <log-path>C:\\Casparcg\\_log\\</log-path>\r
+    <data-path>C:\\Casparcg\\_data\\</data-path>\r
+    <template-path>C:\\Casparcg\\_templates\\</template-path>\r
     <template-host>cg.fth.18</template-host>\r
   </paths>\r
   <diagnostics>\r
   </diagnostics>\r
   <channels>\r
     <channel>\r
-      <video-mode>1080i5000</video-mode>\r
+      <video-mode>PAL</video-mode>\r
       <consumers>\r
-        <decklink>\r
-          <embedded-audio>true</embedded-audio>\r
-          <device>1</device>                      \r
-        </decklink>\r
+        <screen>                      \r
+        </screen>\r
+        <audio></audio>\r
       </consumers>\r
     </channel>\r
 </channels>\r
index b99aa7e0721692f2ee69c0e78926952ab6e60991..9d739a4640a3d326651fa2ec01c78dad9fec2d36 100644 (file)
@@ -200,6 +200,8 @@ int main(int argc, wchar_t* argv[])
                                wcmd = L"MIXER 1-0 VIDEO IS_KEY 1";\r
                        else if(wcmd.substr(0, 1) == L"3")\r
                                wcmd = L"CG 1-2 ADD 1 BBTELEFONARE 1";\r
+                       else if(wcmd.substr(0, 1) == L"4")\r
+                               wcmd = L"PLAY 1-1 DV SEEK 350 FILTER yadif=1:-1 LOOP";\r
                        else if(wcmd.substr(0, 1) == L"X")\r
                        {\r
                                int num = 0;\r