git.sesse.net Git - casparcg/commitdiff
[ffmpeg] Ported 2.0.7 ffmpeg producer to 2.1.0 while still keeping the usage of the...
author    Helge Norberg <helge.norberg@svt.se>
          Thu, 29 Sep 2016 19:12:44 +0000 (21:12 +0200)
committer Helge Norberg <helge.norberg@svt.se>
          Thu, 29 Sep 2016 19:12:44 +0000 (21:12 +0200)
31 files changed:
common/memory.h
common/param.h
core/producer/frame_producer.cpp
core/producer/framerate/framerate_producer.cpp
core/producer/framerate/framerate_producer.h
modules/CMakeLists.txt
modules/decklink/producer/decklink_producer.cpp
modules/ffmpeg/CMakeLists.txt
modules/ffmpeg/ffmpeg.cpp
modules/ffmpeg/ffmpeg.h
modules/ffmpeg/ffmpeg_pipeline.cpp [deleted file]
modules/ffmpeg/ffmpeg_pipeline.h [deleted file]
modules/ffmpeg/ffmpeg_pipeline_backend.h [deleted file]
modules/ffmpeg/ffmpeg_pipeline_backend_internal.cpp [deleted file]
modules/ffmpeg/ffmpeg_pipeline_backend_internal.h [deleted file]
modules/ffmpeg/producer/audio/audio_decoder.cpp
modules/ffmpeg/producer/audio/audio_decoder.h
modules/ffmpeg/producer/ffmpeg_producer.cpp
modules/ffmpeg/producer/filter/audio_filter.cpp
modules/ffmpeg/producer/filter/filter.cpp
modules/ffmpeg/producer/filter/filter.h
modules/ffmpeg/producer/input/input.cpp
modules/ffmpeg/producer/input/input.h
modules/ffmpeg/producer/muxer/display_mode.h
modules/ffmpeg/producer/muxer/frame_muxer.cpp
modules/ffmpeg/producer/muxer/frame_muxer.h
modules/ffmpeg/producer/util/util.cpp
modules/ffmpeg/producer/util/util.h
modules/ffmpeg/producer/video/video_decoder.cpp
modules/ffmpeg/producer/video/video_decoder.h
shell/casparcg.config

diff --git a/common/memory.h b/common/memory.h
index 35127f674a5845afb653a1581f28fe6367d12303..810db56f024ca2e491c5a8385ab5853c7dd0d5b0 100644 (file)
@@ -727,6 +727,18 @@ shared_ptr<T> make_shared(P0&& p0, P1&& p1, P2&& p2, P3&& p3, P4&& p4, P5&& p5,
        return shared_ptr<T>(std::make_shared<T>(std::forward<P0>(p0), std::forward<P1>(p1), std::forward<P2>(p2), std::forward<P3>(p3), std::forward<P4>(p4), std::forward<P5>(p5), std::forward<P6>(p6), std::forward<P7>(p7), std::forward<P8>(p8), std::forward<P9>(p9)));
 }
 
+template<typename T, typename P0, typename P1, typename P2, typename P3, typename P4, typename P5, typename P6, typename P7, typename P8, typename P9, typename P10>
+shared_ptr<T> make_shared(P0&& p0, P1&& p1, P2&& p2, P3&& p3, P4&& p4, P5&& p5, P6&& p6, P7&& p7, P8&& p8, P9&& p9, P10&& p10)
+{
+       return shared_ptr<T>(std::make_shared<T>(std::forward<P0>(p0), std::forward<P1>(p1), std::forward<P2>(p2), std::forward<P3>(p3), std::forward<P4>(p4), std::forward<P5>(p5), std::forward<P6>(p6), std::forward<P7>(p7), std::forward<P8>(p8), std::forward<P9>(p9), std::forward<P10>(p10)));
+}
+
+template<typename T, typename P0, typename P1, typename P2, typename P3, typename P4, typename P5, typename P6, typename P7, typename P8, typename P9, typename P10, typename P11>
+shared_ptr<T> make_shared(P0&& p0, P1&& p1, P2&& p2, P3&& p3, P4&& p4, P5&& p5, P6&& p6, P7&& p7, P8&& p8, P9&& p9, P10&& p10, P11&& p11)
+{
+       return shared_ptr<T>(std::make_shared<T>(std::forward<P0>(p0), std::forward<P1>(p1), std::forward<P2>(p2), std::forward<P3>(p3), std::forward<P4>(p4), std::forward<P5>(p5), std::forward<P6>(p6), std::forward<P7>(p7), std::forward<P8>(p8), std::forward<P9>(p9), std::forward<P10>(p10), std::forward<P11>(p11)));
+}
+
 template<typename T>
 shared_ptr<T>::shared_ptr() 
     : p_(make_shared<T>())
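
The context above is common/memory.h's fixed-arity emulation of std::make_shared for the spl::shared_ptr wrapper; the hunk simply extends the existing pattern with 11- and 12-argument overloads. As an aside (not part of the commit), a compiler with full C++11 variadic-template support could express all of these overloads as a single forwarding template, assuming the includes and spl::shared_ptr definitions already present in the header:

    template <typename T, typename... Args>
    shared_ptr<T> make_shared(Args&&... args)
    {
        return shared_ptr<T>(std::make_shared<T>(std::forward<Args>(args)...));
    }

The commit keeps the fixed-arity style used elsewhere in the file; the sketch only illustrates the forwarding that the overloads perform.
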
diff --git a/common/param.h b/common/param.h
index 78d23c2071ed13b5423720dccdbd8523bccbe0eb..2a0f63224b81277d30f8ad909c8744fd76e42265 100644 (file)
@@ -31,6 +31,25 @@ void replace_placeholders(const std::wstring& placeholder, const std::wstring& r
                boost::ireplace_all(param, placeholder, replacement);
 }
 
+static std::vector<std::wstring> protocol_split(const std::wstring& s)
+{
+       std::vector<std::wstring> result;
+       size_t pos;
+
+       if ((pos = s.find(L"://")) != std::wstring::npos)
+       {
+               result.push_back(s.substr(0, pos));
+               result.push_back(s.substr(pos + 3));
+       }
+       else
+       {
+               result.push_back(L"");
+               result.push_back(s);
+       }
+
+       return result;
+}
+
 template<typename T, typename C>
 typename std::enable_if<!std::is_convertible<T, std::wstring>::value, typename std::decay<T>::type>::type get_param(const std::wstring& name, C&& params, T fail_value = T())
 {      
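
The new protocol_split helper in common/param.h splits a parameter of the form protocol://rest into its two parts and returns an empty protocol when no "://" separator is present. Illustrative calls (the inputs are made up):

    auto udp  = protocol_split(L"udp://239.0.0.1:1234"); // { L"udp", L"239.0.0.1:1234" }
    auto clip = protocol_split(L"MY_CLIP");              // { L"",    L"MY_CLIP" }
    bool has_protocol = !udp.at(0).empty();              // true for the udp example
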
diff --git a/core/producer/frame_producer.cpp b/core/producer/frame_producer.cpp
index 2b78c61778998ba10c57bf77552893eb2a830224..46ca3e4162f7a8cc682e4f568ff3eaa2e263dbda 100644 (file)
@@ -39,7 +39,6 @@
 #include <boost/thread.hpp>
 
 namespace caspar { namespace core {
-
 struct frame_producer_registry::impl
 {
        std::vector<producer_factory_t>         producer_factories;
@@ -103,7 +102,7 @@ struct frame_producer_base::impl
                frame_number_ = 0;
                paused_ = false;
        }
-       
+
        draw_frame receive()
        {
                if(paused_)
@@ -148,7 +147,7 @@ draw_frame frame_producer_base::last_frame()
        return impl_->last_frame();
 }
 
-std::future<std::wstring> frame_producer_base::call(const std::vector<std::wstring>&) 
+std::future<std::wstring> frame_producer_base::call(const std::vector<std::wstring>&)
 {
        CASPAR_THROW_EXCEPTION(not_supported());
 }
@@ -176,7 +175,7 @@ const std::vector<std::wstring>& frame_producer_base::get_variables() const
        return empty;
 }
 
-const spl::shared_ptr<frame_producer>& frame_producer::empty() 
+const spl::shared_ptr<frame_producer>& frame_producer::empty()
 {
        class empty_frame_producer : public frame_producer
        {
@@ -186,7 +185,7 @@ const spl::shared_ptr<frame_producer>& frame_producer::empty()
                void paused(bool value) override{}
                uint32_t nb_frames() const override {return 0;}
                std::wstring print() const override { return L"empty";}
-               monitor::subject& monitor_output() override {static monitor::subject monitor_subject(""); return monitor_subject;}                                                                              
+               monitor::subject& monitor_output() override {static monitor::subject monitor_subject(""); return monitor_subject;}
                std::wstring name() const override {return L"empty";}
                uint32_t frame_number() const override {return 0;}
                std::future<std::wstring> call(const std::vector<std::wstring>& params) override{CASPAR_THROW_EXCEPTION(not_implemented());}
@@ -194,7 +193,7 @@ const spl::shared_ptr<frame_producer>& frame_producer::empty()
                const std::vector<std::wstring>& get_variables() const override { static std::vector<std::wstring> empty; return empty; }
                draw_frame last_frame() {return draw_frame::empty();}
                constraints& pixel_constraints() override { static constraints c; return c; }
-       
+
                boost::property_tree::wptree info() const override
                {
                        boost::property_tree::wptree info;
@@ -220,10 +219,10 @@ void destroy_producers_synchronously()
 }
 
 class destroy_producer_proxy : public frame_producer
-{      
+{
        std::shared_ptr<frame_producer> producer_;
 public:
-       destroy_producer_proxy(spl::shared_ptr<frame_producer>&& producer) 
+       destroy_producer_proxy(spl::shared_ptr<frame_producer>&& producer)
                : producer_(std::move(producer))
        {
                destroy_producers_in_separate_thread() = true;
@@ -234,13 +233,13 @@ public:
                static tbb::atomic<int> counter;
                static std::once_flag counter_init_once;
                std::call_once(counter_init_once, []{ counter = 0; });
-               
+
                if(producer_ == core::frame_producer::empty() || !destroy_producers_in_separate_thread())
                        return;
 
                ++counter;
                CASPAR_VERIFY(counter < 8);
-               
+
                auto producer = new spl::shared_ptr<frame_producer>(std::move(producer_));
                boost::thread([=]
                {
@@ -256,7 +255,7 @@ public:
                                        CASPAR_LOG(debug) << str << L" Destroying on asynchronous destruction thread.";
                        }
                        catch(...){}
-                       
+
                        try
                        {
                                pointer_guard.reset();
@@ -268,9 +267,9 @@ public:
                        }
 
                        --counter;
-               }).detach(); 
+               }).detach();
        }
-       
+
        draw_frame                                                                                      receive() override                                                                                                                                                                                                              {return producer_->receive();}
        std::wstring                                                                            print() const override                                                                                                                  {return producer_->print();}
        void                                                                                            paused(bool value) override                                                                                                             {producer_->paused(value);}
@@ -283,7 +282,7 @@ public:
        void                                                                                            leading_producer(const spl::shared_ptr<frame_producer>& producer) override              {return producer_->leading_producer(producer);}
        uint32_t                                                                                        nb_frames() const override                                                                                                              {return producer_->nb_frames();}
        draw_frame                                                                                      last_frame()                                                                                                                                    {return producer_->last_frame();}
-       monitor::subject&                                                                       monitor_output() override                                                                                                               {return producer_->monitor_output();}                                                                           
+       monitor::subject&                                                                       monitor_output() override                                                                                                               {return producer_->monitor_output();}
        bool                                                                                            collides(double x, double y) const override                                                                             {return producer_->collides(x, y);}
        void                                                                                            on_interaction(const interaction_event::ptr& event)     override                                        {return producer_->on_interaction(event);}
        constraints&                                                                            pixel_constraints() override                                                                                                    {return producer_->pixel_constraints();}
@@ -298,7 +297,7 @@ spl::shared_ptr<core::frame_producer> do_create_producer(const frame_producer_de
 {
        if(params.empty())
                CASPAR_THROW_EXCEPTION(invalid_argument() << msg_info("params cannot be empty"));
-       
+
        auto producer = frame_producer::empty();
        std::any_of(factories.begin(), factories.end(), [&](const producer_factory_t& factory) -> bool
                {
@@ -325,7 +324,7 @@ spl::shared_ptr<core::frame_producer> do_create_producer(const frame_producer_de
 
        if(producer == frame_producer::empty())
                return producer;
-               
+
        return producer;
 }
 
@@ -356,38 +355,41 @@ draw_frame frame_producer_registry::create_thumbnail(const frame_producer_depend
 
        if (key_frame == draw_frame::empty())
                key_frame = do_create_thumbnail_frame(dependencies, media_file + L"_ALPHA", thumbnail_producers);
-  
+
        if (fill_frame != draw_frame::empty() && key_frame != draw_frame::empty())
                return draw_frame::mask(fill_frame, key_frame);
-  
+
        return fill_frame;
 }
 
 spl::shared_ptr<core::frame_producer> frame_producer_registry::create_producer(const frame_producer_dependencies& dependencies, const std::vector<std::wstring>& params) const
-{      
+{
        auto& producer_factories = impl_->producer_factories;
        auto producer = do_create_producer(dependencies, params, producer_factories);
        auto key_producer = frame_producer::empty();
-       
-       try // to find a key file.
+
+       if (!params.empty() && !boost::contains(params.at(0), L"://"))
        {
-               auto params_copy = params;
-               if(params_copy.size() > 0)
+               try // to find a key file.
                {
-                       params_copy[0] += L"_A";
-                       key_producer = do_create_producer(dependencies, params_copy, producer_factories);
-                       if(key_producer == frame_producer::empty())
+                       auto params_copy = params;
+                       if (params_copy.size() > 0)
                        {
-                               params_copy[0] += L"LPHA";
+                               params_copy[0] += L"_A";
                                key_producer = do_create_producer(dependencies, params_copy, producer_factories);
+                               if (key_producer == frame_producer::empty())
+                               {
+                                       params_copy[0] += L"LPHA";
+                                       key_producer = do_create_producer(dependencies, params_copy, producer_factories);
+                               }
                        }
                }
+               catch (...) {}
        }
-       catch(...){}
 
        if(producer != frame_producer::empty() && key_producer != frame_producer::empty())
                return create_separated_producer(producer, key_producer);
-       
+
        if(producer == frame_producer::empty())
        {
                std::wstring str;
@@ -408,5 +410,4 @@ spl::shared_ptr<core::frame_producer> frame_producer_registry::create_producer(c
        std::copy(iterator(iss),  iterator(), std::back_inserter(tokens));
        return create_producer(dependencies, tokens);
 }
-
 }}
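
The substantive change in create_producer above is that automatic key-file probing is now skipped for protocol addresses: appending "_A" and then "_ALPHA" to the first parameter only makes sense for clip names, not for URLs. A sketch of the resulting behaviour (clip name and address are hypothetical):

    std::vector<std::wstring> file_params = { L"AMB" };
    // No "://" in params.at(0): the registry also tries "AMB_A" and then "AMB_ALPHA"
    // and, if a key producer is found, combines fill and key via create_separated_producer().

    std::vector<std::wstring> url_params = { L"udp://239.0.0.1:1234" };
    // Contains "://": the whole key-lookup block is skipped and the address is
    // handed to the producer factories exactly as given.
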
diff --git a/core/producer/framerate/framerate_producer.cpp b/core/producer/framerate/framerate_producer.cpp
index 7b4fb3179a755b27b09c68acb1c1ce2512571d76..d84b1f1d39e4c38482039a1679dae7c7ff8bf68f 100644 (file)
@@ -190,8 +190,11 @@ public:
 class framerate_producer : public frame_producer_base
 {
        spl::shared_ptr<frame_producer>                                         source_;
-       boost::rational<int>                                                            source_framerate_;
-       audio_channel_layout                                                            source_channel_layout_          = audio_channel_layout::invalid();
+       std::function<boost::rational<int>()>                           get_source_framerate_;
+       boost::rational<int>                                                            source_framerate_                               = -1;
+       audio_channel_layout                                                            source_channel_layout_                  = audio_channel_layout::invalid();
+       boost::rational<int>                                                            original_destination_framerate_;
+       field_mode                                                                                      original_destination_fieldmode_;
        boost::rational<int>                                                            destination_framerate_;
        field_mode                                                                                      destination_fieldmode_;
        std::vector<int>                                                                        destination_audio_cadence_;
@@ -200,73 +203,28 @@ class framerate_producer : public frame_producer_base
        std::function<draw_frame (
                        const draw_frame& source,
                        const draw_frame& destination,
-                       const boost::rational<int64_t>& distance)>      interpolator_                           = drop_and_skip;
+                       const boost::rational<int64_t>& distance)>      interpolator_                                   = drop_and_skip;
        
-       boost::rational<std::int64_t>                                           current_frame_number_           = 0;
-       draw_frame                                                                                      previous_frame_                         = draw_frame::empty();
-       draw_frame                                                                                      next_frame_                                     = draw_frame::empty();
+       boost::rational<std::int64_t>                                           current_frame_number_                   = 0;
+       draw_frame                                                                                      previous_frame_                                 = draw_frame::empty();
+       draw_frame                                                                                      next_frame_                                             = draw_frame::empty();
        mutable_audio_buffer                                                            audio_samples_;
 
-       unsigned int                                                                            output_repeat_                          = 0;
-       unsigned int                                                                            output_frame_                           = 0;
+       unsigned int                                                                            output_repeat_                                  = 0;
+       unsigned int                                                                            output_frame_                                   = 0;
 public:
        framerate_producer(
                        spl::shared_ptr<frame_producer> source,
-                       boost::rational<int> source_framerate,
+                       std::function<boost::rational<int> ()> get_source_framerate,
                        boost::rational<int> destination_framerate,
                        field_mode destination_fieldmode,
                        std::vector<int> destination_audio_cadence)
                : source_(std::move(source))
-               , source_framerate_(std::move(source_framerate))
-               , destination_framerate_(std::move(destination_framerate))
-               , destination_fieldmode_(destination_fieldmode)
+               , get_source_framerate_(std::move(get_source_framerate))
+               , original_destination_framerate_(std::move(destination_framerate))
+               , original_destination_fieldmode_(destination_fieldmode)
                , destination_audio_cadence_(std::move(destination_audio_cadence))
        {
-               // Coarse adjustment to correct fps family (23.98 - 30 vs 47.95 - 60)
-               if (destination_fieldmode_ != field_mode::progressive)  // Interlaced output
-               {
-                       auto diff_double        = boost::abs(source_framerate_ - destination_framerate_ * 2);
-                       auto diff_keep          = boost::abs(source_framerate_ - destination_framerate_);
-
-                       if (diff_double < diff_keep)                                            // Double rate interlaced
-                       {
-                               destination_framerate_ *= 2;
-                       }
-                       else                                                                                            // Progressive non interlaced
-                       {
-                               destination_fieldmode_ = field_mode::progressive;
-                       }
-               }
-               else                                                                                                    // Progressive
-               {
-                       auto diff_halve = boost::abs(source_framerate_ * 2      - destination_framerate_);
-                       auto diff_keep  = boost::abs(source_framerate_          - destination_framerate_);
-
-                       if (diff_halve < diff_keep)                                                     // Repeat every frame two times
-                       {
-                               destination_framerate_  /= 2;
-                               output_repeat_                  = 2;
-                       }
-               }
-
-               speed_ = boost::rational<int64_t>(source_framerate_ / destination_framerate_);
-
-               // drop_and_skip will only be used by default for exact framerate multiples (half, same and double)
-               // for all other framerates a frame interpolator will be chosen.
-               if (speed_ != 1 && speed_ * 2 != 1 && speed_ != 2)
-               {
-                       auto high_source_framerate              = source_framerate_ > 47;
-                       auto high_destination_framerate = destination_framerate_ > 47
-                                       || destination_fieldmode_ != field_mode::progressive;
-
-                       if (high_source_framerate && high_destination_framerate)        // The bluriness of blend_all is acceptable on high framerates.
-                               interpolator_ = blend_all();
-                       else                                                                                                            // blend_all is mostly too blurry on low framerates. blend provides a compromise.
-                               interpolator_ = &blend;
-
-                       CASPAR_LOG(warning) << source_->print() << L" Frame blending frame rate conversion required to conform to channel frame rate.";
-               }
-
                // Note: Uses 1 step rotated cadence for 1001 modes (1602, 1602, 1601, 1602, 1601)
                // This cadence fills the audio mixer most optimally.
                boost::range::rotate(destination_audio_cadence_, std::end(destination_audio_cadence_) - 1);
@@ -433,6 +391,7 @@ private:
        draw_frame pop_frame_from_source()
        {
                auto frame = source_->receive();
+               update_source_framerate();
 
                if (user_speed_.fetch() == 1)
                {
@@ -512,6 +471,63 @@ private:
                                || user_speed_.fetch() != 1
                                || audio_samples_.size() / source_channel_layout_.num_channels >= destination_audio_cadence_.at(0);
        }
+
+       void update_source_framerate()
+       {
+               auto source_framerate = get_source_framerate_();
+
+               if (source_framerate_ == source_framerate)
+                       return;
+
+               source_framerate_               = source_framerate;
+               destination_framerate_  = original_destination_framerate_;
+               destination_fieldmode_  = original_destination_fieldmode_;
+
+               // Coarse adjustment to correct fps family (23.98 - 30 vs 47.95 - 60)
+               if (destination_fieldmode_ != field_mode::progressive)  // Interlaced output
+               {
+                       auto diff_double        = boost::abs(source_framerate_ - destination_framerate_ * 2);
+                       auto diff_keep          = boost::abs(source_framerate_ - destination_framerate_);
+
+                       if (diff_double < diff_keep)                                            // Double rate interlaced
+                       {
+                               destination_framerate_ *= 2;
+                       }
+                       else                                                                                            // Progressive non interlaced
+                       {
+                               destination_fieldmode_ = field_mode::progressive;
+                       }
+               }
+               else                                                                                                    // Progressive
+               {
+                       auto diff_halve = boost::abs(source_framerate_ * 2      - destination_framerate_);
+                       auto diff_keep  = boost::abs(source_framerate_          - destination_framerate_);
+
+                       if (diff_halve < diff_keep)                                                     // Repeat every frame two times
+                       {
+                               destination_framerate_  /= 2;
+                               output_repeat_                  = 2;
+                       }
+               }
+
+               speed_ = boost::rational<int64_t>(source_framerate_ / destination_framerate_);
+
+               // drop_and_skip will only be used by default for exact framerate multiples (half, same and double)
+               // for all other framerates a frame interpolator will be chosen.
+               if (speed_ != 1 && speed_ * 2 != 1 && speed_ != 2)
+               {
+                       auto high_source_framerate              = source_framerate_ > 47;
+                       auto high_destination_framerate = destination_framerate_ > 47
+                                       || destination_fieldmode_ != field_mode::progressive;
+
+                       if (high_source_framerate && high_destination_framerate)        // The bluriness of blend_all is acceptable on high framerates.
+                               interpolator_ = blend_all();
+                       else                                                                                                            // blend_all is mostly too blurry on low framerates. blend provides a compromise.
+                               interpolator_ = &blend;
+
+                       CASPAR_LOG(warning) << source_->print() << L" Frame blending frame rate conversion required to conform to channel frame rate.";
+               }
+       }
 };
 
 void describe_framerate_producer(help_sink& sink)
@@ -528,14 +544,14 @@ void describe_framerate_producer(help_sink& sink)
 
 spl::shared_ptr<frame_producer> create_framerate_producer(
                spl::shared_ptr<frame_producer> source,
-               boost::rational<int> source_framerate,
+               std::function<boost::rational<int> ()> get_source_framerate,
                boost::rational<int> destination_framerate,
                field_mode destination_fieldmode,
                std::vector<int> destination_audio_cadence)
 {
        return spl::make_shared<framerate_producer>(
                        std::move(source),
-                       std::move(source_framerate),
+                       std::move(get_source_framerate),
                        std::move(destination_framerate),
                        destination_fieldmode,
                        std::move(destination_audio_cadence));
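
The net effect of this refactor is that the conversion parameters are no longer fixed at construction time: update_source_framerate() recomputes speed_, the destination framerate/field mode and the interpolator after every source receive(), driven by the new get_source_framerate callback. The intended call pattern is the one the decklink producer adopts later in this commit; repeated here with comments as a usage sketch (producer is that call site's spl::shared_ptr, and get_out_framerate() is the decklink proxy's own accessor, not part of this API):

    auto get_source_framerate = [=] { return producer->get_out_framerate(); };

    return core::create_destroy_proxy(core::create_framerate_producer(
            producer,
            get_source_framerate,                   // queried lazily, after receive() on the source
            dependencies.format_desc.framerate,     // destination (channel) framerate
            dependencies.format_desc.field_mode,
            dependencies.format_desc.audio_cadence));
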
diff --git a/core/producer/framerate/framerate_producer.h b/core/producer/framerate/framerate_producer.h
index f0995310d1fa97afd2da306b724144927faee59d..f742e484cccad27054c79c97d58043719775e006 100644 (file)
@@ -29,6 +29,7 @@
 
 #include <string>
 #include <vector>
+#include <functional>
 
 #include <boost/rational.hpp>
 
@@ -38,7 +39,7 @@ void describe_framerate_producer(help_sink& sink);
 
 spl::shared_ptr<frame_producer> create_framerate_producer(
                spl::shared_ptr<frame_producer> source,
-               boost::rational<int> source_framerate,
+               std::function<boost::rational<int> ()> get_source_framerate, // Will be called after first receive() on the source
                boost::rational<int> destination_framerate,
                field_mode destination_fieldmode,
                std::vector<int> destination_audio_cadence);
diff --git a/modules/CMakeLists.txt b/modules/CMakeLists.txt
index 823188659137ab15842a7ccf92e68dbd48ef0f4c..8dd4a84209b59da78955e588df63cb5f3c7294b2 100644 (file)
@@ -1,6 +1,7 @@
 cmake_minimum_required (VERSION 2.6)
 project ("modules")
 
+add_subdirectory(reroute)
 add_subdirectory(ffmpeg)
 add_subdirectory(oal)
 
@@ -19,5 +20,4 @@ if (MSVC)
 endif ()
 
 add_subdirectory(image)
-add_subdirectory(reroute)
 
diff --git a/modules/decklink/producer/decklink_producer.cpp b/modules/decklink/producer/decklink_producer.cpp
index 5200a6900c72f5c4568faad00657d7ed0d343dac..e08b3a6d5d52462f1051eabcf75f666dd3ff9780 100644 (file)
@@ -43,6 +43,7 @@
 #include <core/frame/frame_transform.h>
 #include <core/frame/frame_factory.h>
 #include <core/producer/frame_producer.h>
+#include <core/producer/framerate/framerate_producer.h>
 #include <core/monitor/monitor.h>
 #include <core/diagnostics/call_context.h>
 #include <core/mixer/audio/audio_mixer.h>
@@ -59,7 +60,7 @@
 #pragma warning (push)
 #pragma warning (disable : 4244)
 #endif
-extern "C" 
+extern "C"
 {
        #define __STDC_CONSTANT_MACROS
        #define __STDC_LIMIT_MACROS
@@ -74,7 +75,6 @@ extern "C"
 #include <functional>
 
 namespace caspar { namespace decklink {
-
 core::audio_channel_layout get_adjusted_channel_layout(core::audio_channel_layout layout)
 {
        if (layout.num_channels <= 2)
@@ -92,9 +92,9 @@ std::wstring to_string(const T& cadence)
 {
        return boost::join(cadence | boost::adaptors::transformed([](size_t i) { return boost::lexical_cast<std::wstring>(i); }), L", ");
 }
-               
+
 class decklink_producer : boost::noncopyable, public IDeckLinkInputCallback
-{      
+{
        const int                                                                               device_index_;
        core::monitor::subject                                                  monitor_subject_;
        spl::shared_ptr<diagnostics::graph>                             graph_;
@@ -103,29 +103,30 @@ class decklink_producer : boost::noncopyable, public IDeckLinkInputCallback
        com_ptr<IDeckLink>                                                              decklink_                       = get_device(device_index_);
        com_iface_ptr<IDeckLinkInput>                                   input_                          = iface_cast<IDeckLinkInput>(decklink_);
        com_iface_ptr<IDeckLinkAttributes>                              attributes_                     = iface_cast<IDeckLinkAttributes>(decklink_);
-       
+
        const std::wstring                                                              model_name_                     = get_model_name(decklink_);
        const std::wstring                                                              filter_;
-       
+
        core::video_format_desc                                                 in_format_desc_;
        core::video_format_desc                                                 out_format_desc_;
-       std::vector<int>                                                                audio_cadence_          = out_format_desc_.audio_cadence;
+       std::vector<int>                                                                audio_cadence_          = in_format_desc_.audio_cadence;
        boost::circular_buffer<size_t>                                  sync_buffer_            { audio_cadence_.size() };
        spl::shared_ptr<core::frame_factory>                    frame_factory_;
        core::audio_channel_layout                                              channel_layout_;
-       ffmpeg::frame_muxer                                                             muxer_                          { in_format_desc_.fps, frame_factory_, out_format_desc_, channel_layout_, filter_ };
-                       
+       ffmpeg::frame_muxer                                                             muxer_                          { in_format_desc_.framerate, frame_factory_, out_format_desc_, channel_layout_, filter_, ffmpeg::filter::is_deinterlacing(filter_) };
+
        core::constraints                                                               constraints_            { in_format_desc_.width, in_format_desc_.height };
 
        tbb::concurrent_bounded_queue<core::draw_frame> frame_buffer_;
+       core::draw_frame                                                                last_frame_                     = core::draw_frame::empty();
 
        std::exception_ptr                                                              exception_;
 
 public:
        decklink_producer(
-                       const core::video_format_desc& in_format_desc, 
-                       int device_index, 
-                       const spl::shared_ptr<core::frame_factory>& frame_factory, 
+                       const core::video_format_desc& in_format_desc,
+                       int device_index,
+                       const spl::shared_ptr<core::frame_factory>& frame_factory,
                        const core::video_format_desc& out_format_desc,
                        const core::audio_channel_layout& channel_layout,
                        const std::wstring& filter)
@@ -136,8 +137,8 @@ public:
                , frame_factory_(frame_factory)
                , channel_layout_(get_adjusted_channel_layout(channel_layout))
        {
-               frame_buffer_.set_capacity(2);
-               
+               frame_buffer_.set_capacity(4);
+
                graph_->set_color("tick-time", diagnostics::color(0.0f, 0.6f, 0.9f));
                graph_->set_color("late-frame", diagnostics::color(0.6f, 0.3f, 0.3f));
                graph_->set_color("frame-time", diagnostics::color(1.0f, 0.0f, 0.0f));
@@ -145,13 +146,13 @@ public:
                graph_->set_color("output-buffer", diagnostics::color(0.0f, 1.0f, 0.0f));
                graph_->set_text(print());
                diagnostics::register_graph(graph_);
-               
+
                bool will_attempt_dma;
                auto display_mode = get_display_mode(input_, in_format_desc.format, bmdFormat8BitYUV, bmdVideoInputFlagDefault, will_attempt_dma);
-               
+
                // NOTE: bmdFormat8BitARGB is currently not supported by any decklink card. (2011-05-08)
-               if(FAILED(input_->EnableVideoInput(display_mode, bmdFormat8BitYUV, 0))) 
-                       CASPAR_THROW_EXCEPTION(caspar_exception() 
+               if(FAILED(input_->EnableVideoInput(display_mode, bmdFormat8BitYUV, 0)))
+                       CASPAR_THROW_EXCEPTION(caspar_exception()
                                                                        << msg_info(print() + L" Could not enable video input.")
                                                                        << boost::errinfo_api_function("EnableVideoInput"));
 
@@ -159,14 +160,14 @@ public:
                        CASPAR_THROW_EXCEPTION(caspar_exception()
                                                                        << msg_info(print() + L" Could not enable audio input.")
                                                                        << boost::errinfo_api_function("EnableAudioInput"));
-                       
+
                if (FAILED(input_->SetCallback(this)) != S_OK)
-                       CASPAR_THROW_EXCEPTION(caspar_exception() 
+                       CASPAR_THROW_EXCEPTION(caspar_exception()
                                                                        << msg_info(print() + L" Failed to set input callback.")
                                                                        << boost::errinfo_api_function("SetCallback"));
-                       
+
                if(FAILED(input_->StartStreams()))
-                       CASPAR_THROW_EXCEPTION(caspar_exception() 
+                       CASPAR_THROW_EXCEPTION(caspar_exception()
                                                                        << msg_info(print() + L" Failed to start input stream.")
                                                                        << boost::errinfo_api_function("StartStreams"));
 
@@ -175,7 +176,7 @@ public:
 
        ~decklink_producer()
        {
-               if(input_ != nullptr) 
+               if(input_ != nullptr)
                {
                        input_->StopStreams();
                        input_->DisableVideoInput();
@@ -190,14 +191,15 @@ public:
        virtual HRESULT STDMETHODCALLTYPE       QueryInterface (REFIID, LPVOID*)        {return E_NOINTERFACE;}
        virtual ULONG STDMETHODCALLTYPE         AddRef ()                                                       {return 1;}
        virtual ULONG STDMETHODCALLTYPE         Release ()                                                      {return 1;}
-               
+
        virtual HRESULT STDMETHODCALLTYPE VideoInputFormatChanged(BMDVideoInputFormatChangedEvents /*notificationEvents*/, IDeckLinkDisplayMode* newDisplayMode, BMDDetectedVideoInputFormatFlags /*detectedSignalFlags*/)
        {
                return S_OK;
        }
 
        virtual HRESULT STDMETHODCALLTYPE VideoInputFrameArrived(IDeckLinkVideoInputFrame* video, IDeckLinkAudioInputPacket* audio)
-       {       
+       {
+               ensure_gpf_handler_installed_for_thread("decklink-VideoInputFrameArrived");
                if(!video)
                        return S_OK;
 
@@ -207,23 +209,24 @@ public:
                        tick_timer_.restart();
 
                        caspar::timer frame_timer;
-                       
+
                        // Video
 
                        void* video_bytes = nullptr;
                        if(FAILED(video->GetBytes(&video_bytes)) || !video_bytes)
                                return S_OK;
-                       
+
                        auto video_frame = ffmpeg::create_frame();
-                                               
+
                        video_frame->data[0]                    = reinterpret_cast<uint8_t*>(video_bytes);
-                       video_frame->linesize[0]                = video->GetRowBytes();                 
+                       video_frame->linesize[0]                = video->GetRowBytes();
                        video_frame->format                             = PIX_FMT_UYVY422;
                        video_frame->width                              = video->GetWidth();
                        video_frame->height                             = video->GetHeight();
                        video_frame->interlaced_frame   = in_format_desc_.field_mode != core::field_mode::progressive;
                        video_frame->top_field_first    = in_format_desc_.field_mode == core::field_mode::upper ? 1 : 0;
-                               
+                       video_frame->key_frame                  = 1;
+
                        monitor_subject_
                                        << core::monitor::message("/file/name")                                 % model_name_
                                        << core::monitor::message("/file/path")                                 % device_index_
@@ -237,67 +240,57 @@ public:
 
                        // Audio
 
-                       auto audio_frame = ffmpeg::create_frame();
-                       audio_frame->format = AV_SAMPLE_FMT_S32;
-                       core::mutable_audio_buffer audio_buf;
+                       std::shared_ptr<core::mutable_audio_buffer>     audio_buffer;
+                       void*                                                                           audio_bytes             = nullptr;
 
-                       if (audio)
+                       // It is assumed that audio is always equal or ahead of video.
+                       if (audio && SUCCEEDED(audio->GetBytes(&audio_bytes)) && audio_bytes)
                        {
-                               void* audio_bytes = nullptr;
-                               if (FAILED(audio->GetBytes(&audio_bytes)) || !audio_bytes)
-                                       return S_OK;
-
+                               auto sample_frame_count = audio->GetSampleFrameCount();
+                               auto audio_data = reinterpret_cast<int32_t*>(audio_bytes);
 
-                               audio_frame->data[0] = reinterpret_cast<uint8_t*>(audio_bytes);
-                               audio_frame->linesize[0] = audio->GetSampleFrameCount() * channel_layout_.num_channels * sizeof(int32_t);
-                               audio_frame->nb_samples = audio->GetSampleFrameCount();
+                               audio_buffer = std::make_shared<core::mutable_audio_buffer>(
+                                       audio_data,
+                                       audio_data + sample_frame_count * channel_layout_.num_channels);
                        }
                        else
-                       {
-                               audio_buf.resize(audio_cadence_.front() * channel_layout_.num_channels, 0);
-                               audio_frame->data[0] = reinterpret_cast<uint8_t*>(audio_buf.data());
-                               audio_frame->linesize[0] = audio_cadence_.front() * channel_layout_.num_channels * sizeof(int32_t);
-                               audio_frame->nb_samples = audio_cadence_.front();
-                       }
-                                               
+                               audio_buffer = std::make_shared<core::mutable_audio_buffer>(audio_cadence_.front() * channel_layout_.num_channels, 0);
+
                        // Note: Uses 1 step rotated cadence for 1001 modes (1602, 1602, 1601, 1602, 1601)
                        // This cadence fills the audio mixer most optimally.
 
-                       sync_buffer_.push_back(audio_frame->nb_samples);
+                       sync_buffer_.push_back(audio_buffer->size() / channel_layout_.num_channels);
                        if(!boost::range::equal(sync_buffer_, audio_cadence_))
                        {
                                CASPAR_LOG(trace) << print() << L" Syncing audio. Expected cadence: " << to_string(audio_cadence_) << L" Got cadence: " << to_string(sync_buffer_);
                                return S_OK;
                        }
                        boost::range::rotate(audio_cadence_, std::begin(audio_cadence_)+1);
-                       
+
                        // PUSH
 
-                       muxer_.push_video(video_frame);
-                       muxer_.push_audio(audio_frame);
-                       
+                       muxer_.push(audio_buffer);
+                       muxer_.push(static_cast<std::shared_ptr<AVFrame>>(video_frame));
+
                        // POLL
 
-                       auto frame = core::draw_frame::late();
-                       if(!muxer_.empty())
+                       for (auto frame = muxer_.poll(); frame != core::draw_frame::empty(); frame = muxer_.poll())
                        {
-                               frame = std::move(muxer_.front());
-                               muxer_.pop();
-
-                               if(!frame_buffer_.try_push(frame))
+                               if (!frame_buffer_.try_push(frame))
                                {
                                        auto dummy = core::draw_frame::empty();
                                        frame_buffer_.try_pop(dummy);
+
                                        frame_buffer_.try_push(frame);
-                                               
+
                                        graph_->set_tag(diagnostics::tag_severity::WARNING, "dropped-frame");
                                }
                        }
-                       
-                       graph_->set_value("frame-time", frame_timer.elapsed()*out_format_desc_.fps*0.5);        
+
+                       graph_->set_value("frame-time", frame_timer.elapsed()*out_format_desc_.fps*0.5);
                        monitor_subject_ << core::monitor::message("/profiler/time") % frame_timer.elapsed() % out_format_desc_.fps;
 
-                       graph_->set_value("output-buffer", static_cast<float>(frame_buffer_.size())/static_cast<float>(frame_buffer_.capacity()));      
+                       graph_->set_value("output-buffer", static_cast<float>(frame_buffer_.size())/static_cast<float>(frame_buffer_.capacity()));
                        monitor_subject_ << core::monitor::message("/buffer") % frame_buffer_.size() % frame_buffer_.capacity();
                }
                catch(...)
@@ -308,32 +301,42 @@ public:
 
                return S_OK;
        }
-       
+
        core::draw_frame get_frame()
        {
                if(exception_ != nullptr)
                        std::rethrow_exception(exception_);
-               
-               core::draw_frame frame = core::draw_frame::late();
-               if(!frame_buffer_.try_pop(frame))
+
+               core::draw_frame frame = last_frame_;
+
+               if (!frame_buffer_.try_pop(frame))
                        graph_->set_tag(diagnostics::tag_severity::WARNING, "late-frame");
-               graph_->set_value("output-buffer", static_cast<float>(frame_buffer_.size())/static_cast<float>(frame_buffer_.capacity()));      
+               else
+                       last_frame_ = frame;
+
+               graph_->set_value("output-buffer", static_cast<float>(frame_buffer_.size()) / static_cast<float>(frame_buffer_.capacity()));
+
                return frame;
        }
-       
+
        std::wstring print() const
        {
                return model_name_ + L" [" + boost::lexical_cast<std::wstring>(device_index_) + L"|" + in_format_desc_.name + L"]";
        }
 
+       boost::rational<int> get_out_framerate() const
+       {
+               return muxer_.out_framerate();
+       }
+
        core::monitor::subject& monitor_output()
        {
                return monitor_subject_;
        }
 };
-       
+
 class decklink_producer_proxy : public core::frame_producer_base
-{              
+{
        std::unique_ptr<decklink_producer>      producer_;
        const uint32_t                                          length_;
        executor                                                        executor_;
@@ -359,7 +362,7 @@ public:
        }
 
        ~decklink_producer_proxy()
-       {               
+       {
                executor_.invoke([=]
                {
                        producer_.reset();
@@ -371,11 +374,11 @@ public:
        {
                return producer_->monitor_output();
        }
-       
+
        // frame_producer
-                               
+
        core::draw_frame receive_impl() override
-       {               
+       {
                return producer_->get_frame();
        }
 
@@ -383,17 +386,17 @@ public:
        {
                return producer_->pixel_constraints();
        }
-                       
+
        uint32_t nb_frames() const override
        {
                return length_;
        }
-       
+
        std::wstring print() const override
        {
                return producer_->print();
        }
-       
+
        std::wstring name() const override
        {
                return L"decklink";
@@ -405,6 +408,11 @@ public:
                info.add(L"type", L"decklink");
                return info;
        }
+
+       boost::rational<int> get_out_framerate() const
+       {
+               return producer_->get_out_framerate();
+       }
 };
 
 void describe_producer(core::help_sink& sink, const core::help_repository& repo)
@@ -433,11 +441,11 @@ spl::shared_ptr<core::frame_producer> create_producer(const core::frame_producer
        auto device_index       = get_param(L"DEVICE", params, -1);
        if(device_index == -1)
                device_index = boost::lexical_cast<int>(params.at(1));
-       
-       auto filter_str         = get_param(L"FILTER", params);         
-       auto length                     = get_param(L"LENGTH", params, std::numeric_limits<uint32_t>::max());   
+
+       auto filter_str         = get_param(L"FILTER", params);
+       auto length                     = get_param(L"LENGTH", params, std::numeric_limits<uint32_t>::max());
        auto in_format_desc = core::video_format_desc(get_param(L"FORMAT", params, L"INVALID"));
-               
+
        if(in_format_desc.format == core::video_format::invalid)
                in_format_desc = dependencies.format_desc;
 
@@ -453,15 +461,28 @@ spl::shared_ptr<core::frame_producer> create_producer(const core::frame_producer
 
                channel_layout = *found_layout;
        }
-                       
-       return create_destroy_proxy(spl::make_shared<decklink_producer_proxy>(
+
+       boost::ireplace_all(filter_str, L"DEINTERLACE_BOB",     L"YADIF=1:-1");
+       boost::ireplace_all(filter_str, L"DEINTERLACE_LQ",      L"SEPARATEFIELDS");
+       boost::ireplace_all(filter_str, L"DEINTERLACE",         L"YADIF=0:-1");
+
+       auto producer = spl::make_shared<decklink_producer_proxy>(
                        in_format_desc,
                        dependencies.frame_factory,
                        dependencies.format_desc,
                        channel_layout,
                        device_index,
                        filter_str,
-                       length));
-}
+                       length);
+
+       auto get_source_framerate       = [=] { return producer->get_out_framerate(); };
+       auto target_framerate           = dependencies.format_desc.framerate;
 
+       return core::create_destroy_proxy(core::create_framerate_producer(
+                       producer,
+                       get_source_framerate,
+                       target_framerate,
+                       dependencies.format_desc.field_mode,
+                       dependencies.format_desc.audio_cadence));
+}
 }}
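
Two caller-visible changes in the decklink producer: the FILTER parameter now accepts the aliases DEINTERLACE_BOB, DEINTERLACE_LQ and DEINTERLACE (rewritten to YADIF=1:-1, SEPARATEFIELDS and YADIF=0:-1 before the producer is built), and the producer is wrapped in create_framerate_producer so its output is conformed to the channel framerate using the muxer's reported rate. As a hypothetical AMCP example (channel, layer and device numbers are illustrative):

    PLAY 1-10 DECKLINK DEVICE 1 FILTER DEINTERLACE_BOB

would now run the capture through YADIF=1:-1 and through the lazily configured framerate conversion shown earlier.
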
diff --git a/modules/ffmpeg/CMakeLists.txt b/modules/ffmpeg/CMakeLists.txt
index e60910bf1bdde96c58cd2bcde6bf6a7e5f873c21..6b8172acfb2854d0faaa0757a137cc72e8a19aae 100644 (file)
@@ -25,8 +25,6 @@ set(SOURCES
                audio_channel_remapper.cpp
                ffmpeg.cpp
                ffmpeg_error.cpp
-               ffmpeg_pipeline.cpp
-               ffmpeg_pipeline_backend_internal.cpp
                StdAfx.cpp
 )
 set(HEADERS
@@ -53,9 +51,6 @@ set(HEADERS
 
                ffmpeg.h
                ffmpeg_error.h
-               ffmpeg_pipeline.h
-               ffmpeg_pipeline_backend.h
-               ffmpeg_pipeline_backend_internal.h
                StdAfx.h
 )
 
@@ -90,6 +85,7 @@ if (MSVC)
                        avcodec.lib
                        avutil.lib
                        avfilter.lib
+                       avdevice.lib
                        swscale.lib
                        swresample.lib
        )
@@ -103,6 +99,7 @@ else()
                        avcodec.so
                        avutil.so
                        avfilter.so
+                       avdevice.so
                        swscale.so
                        swresample.so
                        postproc.so
diff --git a/modules/ffmpeg/ffmpeg.cpp b/modules/ffmpeg/ffmpeg.cpp
index 7d8159238c8cc0ed9f386f4c231f41ef64c556ca..ab500c3e050240558ef1f511dd7689283cfaa1dc 100644 (file)
@@ -50,7 +50,7 @@
 #pragma warning (disable : 4996)
 #endif
 
-extern "C" 
+extern "C"
 {
        #define __STDC_CONSTANT_MACROS
        #define __STDC_LIMIT_MACROS
@@ -58,45 +58,45 @@ extern "C"
        #include <libswscale/swscale.h>
        #include <libavutil/avutil.h>
        #include <libavfilter/avfilter.h>
+       #include <libavdevice/avdevice.h>
 }
 
 namespace caspar { namespace ffmpeg {
-       
-int ffmpeg_lock_callback(void **mutex, enum AVLockOp op) 
-{ 
+int ffmpeg_lock_callback(void **mutex, enum AVLockOp op)
+{
        if(!mutex)
                return 0;
 
        auto my_mutex = reinterpret_cast<tbb::recursive_mutex*>(*mutex);
-       
-       switch(op) 
-       { 
-               case AV_LOCK_CREATE: 
-               { 
-                       *mutex = new tbb::recursive_mutex(); 
-                       break; 
-               } 
-               case AV_LOCK_OBTAIN: 
-               { 
+
+       switch(op)
+       {
+               case AV_LOCK_CREATE:
+               {
+                       *mutex = new tbb::recursive_mutex();
+                       break;
+               }
+               case AV_LOCK_OBTAIN:
+               {
                        if(my_mutex)
-                               my_mutex->lock(); 
-                       break; 
-               } 
-               case AV_LOCK_RELEASE: 
-               { 
+                               my_mutex->lock();
+                       break;
+               }
+               case AV_LOCK_RELEASE:
+               {
                        if(my_mutex)
-                               my_mutex->unlock(); 
-                       break; 
-               } 
-               case AV_LOCK_DESTROY: 
-               { 
+                               my_mutex->unlock();
+                       break;
+               }
+               case AV_LOCK_DESTROY:
+               {
                        delete my_mutex;
                        *mutex = nullptr;
-                       break; 
-               } 
-       } 
-       return 0; 
-} 
+                       break;
+               }
+       }
+       return 0;
+}
 
 static void sanitize(uint8_t *line)
 {
@@ -119,15 +119,15 @@ void log_callback(void* ptr, int level, const char* fmt, va_list vl)
     if(level > av_log_get_level())
         return;
     line[0]=0;
-       
+
 #undef fprintf
-    if(print_prefix && avc) 
+    if(print_prefix && avc)
        {
-        if (avc->parent_log_context_offset) 
+        if (avc->parent_log_context_offset)
                {
             AVClass** parent= *(AVClass***)(((uint8_t*)ptr) + avc->parent_log_context_offset);
             if(parent && *parent)
-                std::sprintf(line, "[%s @ %p] ", (*parent)->item_name(parent), parent);            
+                std::sprintf(line, "[%s @ %p] ", (*parent)->item_name(parent), parent);
         }
         std::sprintf(line + strlen(line), "[%s @ %p] ", avc->item_name(ptr), ptr);
     }
@@ -135,7 +135,7 @@ void log_callback(void* ptr, int level, const char* fmt, va_list vl)
     std::vsprintf(line + strlen(line), fmt, vl);
 
     print_prefix = strlen(line) && line[strlen(line)-1] == '\n';
-       
+
     //if(print_prefix && !strcmp(line, prev)){
     //    count++;
     //    if(is_atty==1)
@@ -152,7 +152,7 @@ void log_callback(void* ptr, int level, const char* fmt, va_list vl)
        auto len = strlen(line);
        if(len > 0)
                line[len-1] = 0;
-       
+
        if(level == AV_LOG_DEBUG)
                CASPAR_LOG(debug) << L"[ffmpeg] " << line;
        else if(level == AV_LOG_INFO)
@@ -254,6 +254,11 @@ bool& get_quiet_logging_for_thread()
        return *local;
 }
 
+void enable_quiet_logging_for_thread()
+{
+       get_quiet_logging_for_thread() = true;
+}
+
 bool is_logging_quiet_for_thread()
 {
        return get_quiet_logging_for_thread();
@@ -291,9 +296,10 @@ void init(core::module_dependencies dependencies)
        av_register_all();
     avformat_network_init();
     avcodec_register_all();
+       avdevice_register_all();
 
        auto info_repo = dependencies.media_info_repo;
-       
+
        dependencies.consumer_registry->register_consumer_factory(L"FFmpeg Consumer", create_consumer, describe_consumer);
        dependencies.consumer_registry->register_consumer_factory(L"Streaming Consumer",  create_streaming_consumer, describe_streaming_consumer);
        dependencies.consumer_registry->register_preconfigured_consumer_factory(L"file", create_preconfigured_consumer);
@@ -334,5 +340,4 @@ void uninit()
     avformat_network_deinit();
        av_lockmgr_register(nullptr);
 }
-
 }}
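
Beyond the whitespace cleanup, ffmpeg.cpp gains libavdevice support (avdevice_register_all() during init(), matching the avdevice link entries added to the module's CMakeLists.txt above) and a small helper, enable_quiet_logging_for_thread(), which sets the existing thread-local quiet flag permanently rather than via the scoped temporary_enable_quiet_logging_for_thread(). A hedged usage sketch, since the calling site is not part of this excerpt (the include path depends on the including module):

    #include <cassert>
    #include "ffmpeg.h" // modules/ffmpeg/ffmpeg.h

    void worker_thread_entry()
    {
        // Silence the ffmpeg log callback for this thread only; other threads are unaffected.
        caspar::ffmpeg::enable_quiet_logging_for_thread();
        assert(caspar::ffmpeg::is_logging_quiet_for_thread());
        // ... probe or decode media here ...
    }
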
diff --git a/modules/ffmpeg/ffmpeg.h b/modules/ffmpeg/ffmpeg.h
index 08a098da45bdc8f2f0271e968379c90e42c7e5cd..f9569dc6baefb2b6671840e373a2a119d1b9b367 100644 (file)
@@ -29,6 +29,7 @@ namespace caspar { namespace ffmpeg {
 void init(core::module_dependencies dependencies);
 void uninit();
 std::shared_ptr<void> temporary_enable_quiet_logging_for_thread(bool enable);
+void enable_quiet_logging_for_thread();
 bool is_logging_quiet_for_thread();
 
 }}
diff --git a/modules/ffmpeg/ffmpeg_pipeline.cpp b/modules/ffmpeg/ffmpeg_pipeline.cpp
deleted file mode 100644 (file)
index 0d46908..0000000
--- a/modules/ffmpeg/ffmpeg_pipeline.cpp
+++ /dev/null
@@ -1,70 +0,0 @@
-/*
-* Copyright (c) 2011 Sveriges Television AB <info@casparcg.com>
-*
-* This file is part of CasparCG (www.casparcg.com).
-*
-* CasparCG is free software: you can redistribute it and/or modify
-* it under the terms of the GNU General Public License as published by
-* the Free Software Foundation, either version 3 of the License, or
-* (at your option) any later version.
-*
-* CasparCG is distributed in the hope that it will be useful,
-* but WITHOUT ANY WARRANTY; without even the implied warranty of
-* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
-* GNU General Public License for more details.
-*
-* You should have received a copy of the GNU General Public License
-* along with CasparCG. If not, see <http://www.gnu.org/licenses/>.
-*
-* Author: Helge Norberg, helge.norberg@svt.se
-*/
-
-#include "StdAfx.h"
-
-#include "ffmpeg_pipeline.h"
-#include "ffmpeg_pipeline_backend.h"
-#include "ffmpeg_pipeline_backend_internal.h"
-
-#include <core/frame/draw_frame.h>
-#include <core/video_format.h>
-
-namespace caspar { namespace ffmpeg {
-
-ffmpeg_pipeline::ffmpeg_pipeline()
-       : impl_(create_internal_pipeline())
-{
-}
-
-ffmpeg_pipeline                        ffmpeg_pipeline::graph(spl::shared_ptr<caspar::diagnostics::graph> g)                                                                                                   { impl_->graph(std::move(g)); return *this; }
-ffmpeg_pipeline                        ffmpeg_pipeline::from_file(std::string filename)                                                                                                                                                { impl_->from_file(std::move(filename)); return *this; }
-ffmpeg_pipeline                        ffmpeg_pipeline::from_memory_only_audio(int num_channels, int samplerate)                                                                                               { impl_->from_memory_only_audio(num_channels, samplerate); return *this; }
-ffmpeg_pipeline                        ffmpeg_pipeline::from_memory_only_video(int width, int height, boost::rational<int> framerate)                                                  { impl_->from_memory_only_video(width, height, std::move(framerate)); return *this; }
-ffmpeg_pipeline                        ffmpeg_pipeline::from_memory(int num_channels, int samplerate, int width, int height, boost::rational<int> framerate)   { impl_->from_memory(num_channels, samplerate, width, height, std::move(framerate)); return *this; }
-ffmpeg_pipeline                        ffmpeg_pipeline::start_frame(std::uint32_t frame)                                                                                                                                               { impl_->start_frame(frame); return *this; }
-std::uint32_t                  ffmpeg_pipeline::start_frame() const                                                                                                                                                                    { return impl_->start_frame(); }
-ffmpeg_pipeline                        ffmpeg_pipeline::length(std::uint32_t frames)                                                                                                                                                   { impl_->length(frames); return *this; }
-std::uint32_t                  ffmpeg_pipeline::length() const                                                                                                                                                                                 { return impl_->length(); }
-ffmpeg_pipeline                        ffmpeg_pipeline::seek(std::uint32_t frame)                                                                                                                                                              { impl_->seek(frame); return *this; }
-ffmpeg_pipeline                        ffmpeg_pipeline::loop(bool value)                                                                                                                                                                               { impl_->loop(value); return *this; }
-bool                                   ffmpeg_pipeline::loop() const                                                                                                                                                                                   { return impl_->loop(); }
-std::string                            ffmpeg_pipeline::source_filename() const                                                                                                                                                                { return impl_->source_filename(); }
-ffmpeg_pipeline                        ffmpeg_pipeline::vfilter(std::string filter)                                                                                                                                                    { impl_->vfilter(std::move(filter)); return *this; }
-ffmpeg_pipeline                        ffmpeg_pipeline::afilter(std::string filter)                                                                                                                                                    { impl_->afilter(std::move(filter)); return *this; }
-int                                            ffmpeg_pipeline::width() const                                                                                                                                                                                  { return impl_->width(); }
-int                                            ffmpeg_pipeline::height() const                                                                                                                                                                                 { return impl_->height(); }
-boost::rational<int>   ffmpeg_pipeline::framerate() const                                                                                                                                                                              { return impl_->framerate(); }
-bool                                   ffmpeg_pipeline::progressive() const                                                                                                                                                                    { return impl_->progressive(); }
-ffmpeg_pipeline                        ffmpeg_pipeline::to_memory(spl::shared_ptr<core::frame_factory> factory, core::video_format_desc format)                                { impl_->to_memory(std::move(factory), std::move(format)); return *this; }
-ffmpeg_pipeline                        ffmpeg_pipeline::to_file(std::string filename)                                                                                                                                                  { impl_->to_file(std::move(filename)); return *this; }
-ffmpeg_pipeline                        ffmpeg_pipeline::vcodec(std::string codec)                                                                                                                                                              { impl_->vcodec(std::move(codec)); return *this; }
-ffmpeg_pipeline                        ffmpeg_pipeline::acodec(std::string codec)                                                                                                                                                              { impl_->acodec(std::move(codec)); return *this; }
-ffmpeg_pipeline                        ffmpeg_pipeline::format(std::string fmt)                                                                                                                                                                { impl_->format(std::move(fmt)); return *this; }
-ffmpeg_pipeline                        ffmpeg_pipeline::start()                                                                                                                                                                                                { impl_->start(); return *this; }
-bool                                   ffmpeg_pipeline::try_push_audio(caspar::array<const std::int32_t> data)                                                                                                 { return impl_->try_push_audio(std::move(data)); }
-bool                                   ffmpeg_pipeline::try_push_video(caspar::array<const std::uint8_t> data)                                                                                                 { return impl_->try_push_video(std::move(data)); }
-core::draw_frame               ffmpeg_pipeline::try_pop_frame()                                                                                                                                                                                { return impl_->try_pop_frame(); }
-std::uint32_t                  ffmpeg_pipeline::last_frame() const                                                                                                                                                                             { return impl_->last_frame(); }
-bool                                   ffmpeg_pipeline::started() const                                                                                                                                                                                { return impl_->started(); }
-void                                   ffmpeg_pipeline::stop()                                                                                                                                                                                                 { impl_->stop(); }
-
-}}
diff --git a/modules/ffmpeg/ffmpeg_pipeline.h b/modules/ffmpeg/ffmpeg_pipeline.h
deleted file mode 100644 (file)
index 2508228..0000000
+++ /dev/null
@@ -1,87 +0,0 @@
-/*
-* Copyright (c) 2011 Sveriges Television AB <info@casparcg.com>
-*
-* This file is part of CasparCG (www.casparcg.com).
-*
-* CasparCG is free software: you can redistribute it and/or modify
-* it under the terms of the GNU General Public License as published by
-* the Free Software Foundation, either version 3 of the License, or
-* (at your option) any later version.
-*
-* CasparCG is distributed in the hope that it will be useful,
-* but WITHOUT ANY WARRANTY; without even the implied warranty of
-* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
-* GNU General Public License for more details.
-*
-* You should have received a copy of the GNU General Public License
-* along with CasparCG. If not, see <http://www.gnu.org/licenses/>.
-*
-* Author: Helge Norberg, helge.norberg@svt.se
-*/
-
-#pragma once
-
-#include <common/memory.h>
-#include <common/array.h>
-
-#include <core/fwd.h>
-
-#include <boost/rational.hpp>
-
-#include <string>
-#include <functional>
-#include <cstdint>
-
-FORWARD2(caspar, diagnostics, class graph);
-
-namespace caspar { namespace ffmpeg {
-
-struct ffmpeg_pipeline_backend;
-
-class ffmpeg_pipeline
-{
-public:
-       ffmpeg_pipeline();
-
-       ffmpeg_pipeline                 graph(spl::shared_ptr<caspar::diagnostics::graph> g);
-
-       ffmpeg_pipeline                 from_file(std::string filename);
-       ffmpeg_pipeline                 from_memory_only_audio(int num_channels, int samplerate);
-       ffmpeg_pipeline                 from_memory_only_video(int width, int height, boost::rational<int> framerate);
-       ffmpeg_pipeline                 from_memory(int num_channels, int samplerate, int width, int height, boost::rational<int> framerate);
-
-       ffmpeg_pipeline                 start_frame(std::uint32_t frame);
-       std::uint32_t                   start_frame() const;
-       ffmpeg_pipeline                 length(std::uint32_t frames);
-       std::uint32_t                   length() const;
-       ffmpeg_pipeline                 seek(std::uint32_t frame);
-       ffmpeg_pipeline                 loop(bool value);
-       bool                                    loop() const;
-       std::string                             source_filename() const;
-
-       ffmpeg_pipeline                 vfilter(std::string filter);
-       ffmpeg_pipeline                 afilter(std::string filter);
-       int                                             width() const;
-       int                                             height() const;
-       boost::rational<int>    framerate() const;
-       bool                                    progressive() const;
-
-       ffmpeg_pipeline                 to_memory(spl::shared_ptr<core::frame_factory> factory, core::video_format_desc format);
-       ffmpeg_pipeline                 to_file(std::string filename);
-       ffmpeg_pipeline                 vcodec(std::string codec);
-       ffmpeg_pipeline                 acodec(std::string codec);
-       ffmpeg_pipeline                 format(std::string fmt);
-
-       ffmpeg_pipeline                 start();
-       bool                                    try_push_audio(caspar::array<const std::int32_t> data);
-       bool                                    try_push_video(caspar::array<const std::uint8_t> data);
-       core::draw_frame                try_pop_frame();
-       std::uint32_t                   last_frame() const;
-       bool                                    started() const;
-       void                                    stop();
-
-private:
-       std::shared_ptr<ffmpeg_pipeline_backend> impl_;
-};
-
-}}
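
Note: the deleted ffmpeg_pipeline was a fluent wrapper; every setter returned the pipeline by value, and since its only state is the shared impl_ pointer, the returned copies all drive the same backend, which is what made chaining work. A sketch of how a caller could have driven it, based only on the header above; the clip path, frame_factory and format_desc are hypothetical placeholders assumed to be in scope:

    caspar::ffmpeg::ffmpeg_pipeline pipeline;
    pipeline
            .from_file("media/AMB.mp4")             // hypothetical clip
            .loop(true)
            .vfilter("yadif")                       // optional deinterlacing filter
            .to_memory(frame_factory, format_desc)  // hypothetical factory / format descriptor
            .start();

    while (pipeline.started())
    {
            core::draw_frame frame = pipeline.try_pop_frame();
            // ... hand the frame to the caller, or retry later if it is late()
    }
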
diff --git a/modules/ffmpeg/ffmpeg_pipeline_backend.h b/modules/ffmpeg/ffmpeg_pipeline_backend.h
deleted file mode 100644 (file)
index e56e04b..0000000
+++ /dev/null
@@ -1,75 +0,0 @@
-/*
-* Copyright (c) 2011 Sveriges Television AB <info@casparcg.com>
-*
-* This file is part of CasparCG (www.casparcg.com).
-*
-* CasparCG is free software: you can redistribute it and/or modify
-* it under the terms of the GNU General Public License as published by
-* the Free Software Foundation, either version 3 of the License, or
-* (at your option) any later version.
-*
-* CasparCG is distributed in the hope that it will be useful,
-* but WITHOUT ANY WARRANTY; without even the implied warranty of
-* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
-* GNU General Public License for more details.
-*
-* You should have received a copy of the GNU General Public License
-* along with CasparCG. If not, see <http://www.gnu.org/licenses/>.
-*
-* Author: Helge Norberg, helge.norberg@svt.se
-*/
-
-#include "StdAfx.h"
-
-#include <common/diagnostics/graph.h>
-#include <common/array.h>
-
-#include <core/frame/draw_frame.h>
-
-#include <boost/rational.hpp>
-
-namespace caspar { namespace ffmpeg {
-
-struct ffmpeg_pipeline_backend
-{
-       virtual ~ffmpeg_pipeline_backend() { }
-
-       virtual void                                    graph(spl::shared_ptr<caspar::diagnostics::graph> g) = 0;
-
-       virtual void                                    from_file(std::string filename) = 0;
-       virtual void                                    from_memory_only_audio(int num_channels, int samplerate) = 0;
-       virtual void                                    from_memory_only_video(int width, int height, boost::rational<int> framerate) = 0;
-       virtual void                                    from_memory(int num_channels, int samplerate, int width, int height, boost::rational<int> framerate) = 0;
-
-       virtual void                                    start_frame(std::uint32_t frame) = 0;
-       virtual std::uint32_t                   start_frame() const = 0;
-       virtual void                                    length(std::uint32_t frames) = 0;
-       virtual std::uint32_t                   length() const = 0;
-       virtual void                                    seek(std::uint32_t frame) = 0;
-       virtual void                                    loop(bool value) = 0;
-       virtual bool                                    loop() const = 0;
-       virtual std::string                             source_filename() const = 0;
-
-       virtual void                                    vfilter(std::string filter) = 0;
-       virtual void                                    afilter(std::string filter) = 0;
-       virtual int                                             width() const = 0;
-       virtual int                                             height() const = 0;
-       virtual boost::rational<int>    framerate() const = 0;
-       virtual bool                                    progressive() const = 0;
-       virtual std::uint32_t                   last_frame() const = 0;
-
-       virtual void                                    to_memory(spl::shared_ptr<core::frame_factory> factory, core::video_format_desc format) = 0;
-       virtual void                                    to_file(std::string filename) = 0;
-       virtual void                                    vcodec(std::string codec) = 0;
-       virtual void                                    acodec(std::string codec) = 0;
-       virtual void                                    format(std::string fmt) = 0;
-
-       virtual void                                    start() = 0;
-       virtual bool                                    try_push_audio(caspar::array<const std::int32_t> data) = 0;
-       virtual bool                                    try_push_video(caspar::array<const std::uint8_t> data) = 0;
-       virtual core::draw_frame                try_pop_frame() = 0;
-       virtual bool                                    started() const = 0;
-       virtual void                                    stop() = 0;
-};
-
-}}
diff --git a/modules/ffmpeg/ffmpeg_pipeline_backend_internal.cpp b/modules/ffmpeg/ffmpeg_pipeline_backend_internal.cpp
deleted file mode 100644 (file)
index 0329a16..0000000
+++ /dev/null
@@ -1,1339 +0,0 @@
-/*
-* Copyright (c) 2011 Sveriges Television AB <info@casparcg.com>
-*
-* This file is part of CasparCG (www.casparcg.com).
-*
-* CasparCG is free software: you can redistribute it and/or modify
-* it under the terms of the GNU General Public License as published by
-* the Free Software Foundation, either version 3 of the License, or
-* (at your option) any later version.
-*
-* CasparCG is distributed in the hope that it will be useful,
-* but WITHOUT ANY WARRANTY; without even the implied warranty of
-* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
-* GNU General Public License for more details.
-*
-* You should have received a copy of the GNU General Public License
-* along with CasparCG. If not, see <http://www.gnu.org/licenses/>.
-*
-* Author: Helge Norberg, helge.norberg@svt.se
-* Author: Robert Nagy, ronag89@gmail.com
-*/
-
-#include "StdAfx.h"
-
-#include "ffmpeg_pipeline_backend.h"
-#include "ffmpeg_pipeline_backend_internal.h"
-#include "producer/input/input.h"
-#include "producer/video/video_decoder.h"
-#include "producer/audio/audio_decoder.h"
-#include "producer/filter/audio_filter.h"
-#include "producer/filter/filter.h"
-#include "producer/util/util.h"
-#include "ffmpeg_error.h"
-#include "ffmpeg.h"
-
-#include <common/diagnostics/graph.h>
-#include <common/os/general_protection_fault.h>
-#include <common/enum_class.h>
-
-#include <core/frame/audio_channel_layout.h>
-#include <core/frame/frame.h>
-#include <core/frame/frame_factory.h>
-#include <core/video_format.h>
-
-#include <functional>
-#include <limits>
-#include <queue>
-#include <map>
-
-#include <tbb/atomic.h>
-#include <tbb/concurrent_queue.h>
-#include <tbb/spin_mutex.h>
-
-#include <boost/thread.hpp>
-#include <boost/optional.hpp>
-#include <boost/exception_ptr.hpp>
-
-namespace caspar { namespace ffmpeg {
-
-std::string to_string(const boost::rational<int>& framerate)
-{
-       return boost::lexical_cast<std::string>(framerate.numerator())
-               + "/" + boost::lexical_cast<std::string>(framerate.denominator()) + " (" + boost::lexical_cast<std::string>(static_cast<double>(framerate.numerator()) / static_cast<double>(framerate.denominator())) + ") fps";
-}
-
-std::vector<int> find_audio_cadence(const boost::rational<int>& framerate)
-{
-       static std::map<boost::rational<int>, std::vector<int>> CADENCES_BY_FRAMERATE = []
-       {
-               std::map<boost::rational<int>, std::vector<int>> result;
-
-               for (core::video_format format : enum_constants<core::video_format>())
-               {
-                       core::video_format_desc desc(format);
-                       boost::rational<int> format_rate(desc.time_scale, desc.duration);
-
-                       result.insert(std::make_pair(format_rate, desc.audio_cadence));
-               }
-
-               return result;
-       }();
-
-       auto exact_match = CADENCES_BY_FRAMERATE.find(framerate);
-
-       if (exact_match != CADENCES_BY_FRAMERATE.end())
-               return exact_match->second;
-
-       boost::rational<int> closest_framerate_diff     = std::numeric_limits<int>::max();
-       boost::rational<int> closest_framerate          = 0;
-
-       for (auto format_framerate : CADENCES_BY_FRAMERATE | boost::adaptors::map_keys)
-       {
-               auto diff = boost::abs(framerate - format_framerate);
-
-               if (diff < closest_framerate_diff)
-               {
-                       closest_framerate_diff  = diff;
-                       closest_framerate               = format_framerate;
-               }
-       }
-
-       if (is_logging_quiet_for_thread())
-               CASPAR_LOG(debug) << "No exact audio cadence match found for framerate " << to_string(framerate)
-                       << "\nClosest match is " << to_string(closest_framerate)
-                       << "\nwhich is a " << to_string(closest_framerate_diff) << " difference.";
-       else
-               CASPAR_LOG(warning) << "No exact audio cadence match found for framerate " << to_string(framerate)
-                       << "\nClosest match is " << to_string(closest_framerate)
-                       << "\nwhich is a " << to_string(closest_framerate_diff) << " difference.";
-
-       return CADENCES_BY_FRAMERATE[closest_framerate];
-}
-
-struct source
-{
-       virtual ~source() { }
-
-       virtual std::wstring                                                    print() const                                                                                   = 0;
-       virtual void                                                                    start()                                                                                                 { CASPAR_THROW_EXCEPTION(not_implemented() << msg_info(print())); }
-       virtual void                                                                    graph(spl::shared_ptr<caspar::diagnostics::graph> g)    { }
-       virtual void                                                                    stop()                                                                                                  { }
-       virtual void                                                                    start_frame(std::uint32_t frame)                                                { CASPAR_THROW_EXCEPTION(invalid_operation() << msg_info(print() + L" not seekable.")); }
-       virtual std::uint32_t                                                   start_frame() const                                                                             { CASPAR_THROW_EXCEPTION(invalid_operation() << msg_info(print() + L" not seekable.")); }
-       virtual void                                                                    loop(bool value)                                                                                { CASPAR_THROW_EXCEPTION(invalid_operation() << msg_info(print() + L" not seekable.")); }
-       virtual bool                                                                    loop() const                                                                                    { CASPAR_THROW_EXCEPTION(invalid_operation() << msg_info(print() + L" not seekable.")); }
-       virtual void                                                                    length(std::uint32_t frames)                                                    { CASPAR_THROW_EXCEPTION(invalid_operation() << msg_info(print() + L" not seekable.")); }
-       virtual std::uint32_t                                                   length() const                                                                                  { CASPAR_THROW_EXCEPTION(invalid_operation() << msg_info(print() + L" not seekable.")); }
-       virtual std::string                                                             filename() const                                                                                { CASPAR_THROW_EXCEPTION(invalid_operation() << msg_info(print())); }
-       virtual void                                                                    seek(std::uint32_t frame)                                                               { CASPAR_THROW_EXCEPTION(invalid_operation() << msg_info(print() + L" not seekable.")); }
-       virtual bool                                                                    has_audio() const                                                                               { CASPAR_THROW_EXCEPTION(not_implemented() << msg_info(print())); }
-       virtual int                                                                             samplerate() const                                                                              { CASPAR_THROW_EXCEPTION(not_implemented() << msg_info(print())); }
-       virtual bool                                                                    has_video() const                                                                               { CASPAR_THROW_EXCEPTION(not_implemented() << msg_info(print())); }
-       virtual bool                                                                    eof() const                                                                                             { CASPAR_THROW_EXCEPTION(not_implemented() << msg_info(print())); }
-       virtual boost::rational<int>                                    framerate() const                                                                               { CASPAR_THROW_EXCEPTION(not_implemented() << msg_info(print())); }
-       virtual std::uint32_t                                                   frame_number() const                                                                    { CASPAR_THROW_EXCEPTION(not_implemented() << msg_info(print())); }
-       virtual std::vector<std::shared_ptr<AVFrame>>   get_input_frames_for_streams(AVMediaType type)                  { CASPAR_THROW_EXCEPTION(not_implemented() << msg_info(print())); }
-};
-
-struct no_source_selected : public source
-{
-       std::wstring print() const override
-       {
-               return L"[no_source_selected]";
-       }
-};
-
-class file_source : public source
-{
-       std::wstring                                                            filename_;
-       spl::shared_ptr<diagnostics::graph>                     graph_;
-       std::uint32_t                                                           start_frame_    = 0;
-       std::uint32_t                                                           length_                 = std::numeric_limits<std::uint32_t>::max();
-       bool                                                                            loop_                   = false;
-       mutable boost::mutex                                            pointer_mutex_;
-       std::shared_ptr<input>                                          input_;
-       std::vector<spl::shared_ptr<audio_decoder>>     audio_decoders_;
-       std::shared_ptr<video_decoder>                          video_decoder_;
-       bool                                                                            started_                = false;
-public:
-       file_source(std::string filename)
-               : filename_(u16(filename))
-       {
-       }
-
-       std::wstring print() const override
-       {
-               return L"[file_source " + filename_ + L"]";
-       }
-
-       void graph(spl::shared_ptr<caspar::diagnostics::graph> g) override
-       {
-               graph_ = std::move(g);
-       }
-
-       void start() override
-       {
-               boost::lock_guard<boost::mutex> lock(pointer_mutex_);
-               bool thumbnail_mode = is_logging_quiet_for_thread();
-               input_.reset(new input(graph_, filename_, loop_, start_frame_, length_, thumbnail_mode));
-
-               for (int i = 0; i < input_->num_audio_streams(); ++i)
-               {
-                       try
-                       {
-                               audio_decoders_.push_back(spl::make_shared<audio_decoder>(*input_, core::video_format::invalid, i));
-                       }
-                       catch (...)
-                       {
-                               if (is_logging_quiet_for_thread())
-                               {
-                                       CASPAR_LOG_CURRENT_EXCEPTION_AT_LEVEL(debug);
-                                       CASPAR_LOG(info) << print() << " Failed to open audio-stream. Turn on log level debug to see more information.";
-                               }
-                               else
-                               {
-                                       CASPAR_LOG_CURRENT_EXCEPTION();
-                                       CASPAR_LOG(warning) << print() << " Failed to open audio-stream.";
-                               }
-                       }
-               }
-
-               if (audio_decoders_.empty())
-                       CASPAR_LOG(debug) << print() << " No audio-stream found. Running without audio.";
-
-               try
-               {
-                       video_decoder_.reset(new video_decoder(*input_, false));
-               }
-               catch (averror_stream_not_found&)
-               {
-                       CASPAR_LOG(debug) << print() << " No video-stream found. Running without video.";
-               }
-               catch (...)
-               {
-                       if (is_logging_quiet_for_thread())
-                       {
-                               CASPAR_LOG_CURRENT_EXCEPTION_AT_LEVEL(debug);
-                                       CASPAR_LOG(info) << print() << " Failed to open video-stream. Running without video. Turn on log level debug to see more information.";
-                       }
-                       else
-                       {
-                               CASPAR_LOG_CURRENT_EXCEPTION();
-                                       CASPAR_LOG(warning) << print() << " Failed to open video-stream. Running without video.";
-                       }
-               }
-
-               started_ = true;
-       }
-
-       void stop() override
-       {
-               started_ = false;
-       }
-
-       void start_frame(std::uint32_t frame) override 
-       {
-               start_frame_ = frame;
-
-               auto i = get_input();
-               if (i)
-                       i->start(frame);
-       }
-
-       std::uint32_t start_frame() const override
-       {
-               return start_frame_;
-       }
-
-       void loop(bool value) override
-       {
-               loop_ = value;
-
-               auto i = get_input();
-               if (i)
-                       i->loop(value);
-       }
-
-       bool loop() const override
-       {
-               return loop_;
-       }
-
-       void length(std::uint32_t frames) override
-       {
-               length_ = frames;
-
-               auto i = get_input();
-               if (i)
-                       i->length(frames);
-       }
-
-       std::uint32_t length() const override
-       {
-               auto v = get_video_decoder();
-
-               if (v)
-                       return v->nb_frames();
-
-               auto a = get_audio_decoders();
-
-               if (!a.empty())
-                       return a.at(0)->nb_frames(); // Should be ok.
-
-               return length_;
-       }
-
-       std::string filename() const override
-       {
-               return u8(filename_);
-       }
-
-       void seek(std::uint32_t frame) override
-       {
-               expect_started();
-               get_input()->seek(frame);
-       }
-
-       bool eof() const override
-       {
-               auto i = get_input();
-               return !i || i->eof();
-       }
-
-       bool has_audio() const override
-       {
-               return !get_audio_decoders().empty();
-       }
-
-       int samplerate() const override
-       {
-               if (get_audio_decoders().empty())
-                       return -1;
-
-               return 48000;
-       }
-
-       bool has_video() const override
-       {
-               return static_cast<bool>(get_video_decoder());
-       }
-
-       boost::rational<int> framerate() const override
-       {
-               auto decoder = get_video_decoder();
-
-               if (!decoder)
-                       return -1;
-
-               return decoder->framerate();
-       }
-
-       std::uint32_t frame_number() const override
-       {
-               auto decoder = get_video_decoder();
-
-               if (!decoder)
-                       return 0;
-
-               return decoder->file_frame_number();
-       }
-
-       std::vector<std::shared_ptr<AVFrame>> get_input_frames_for_streams(AVMediaType type) override
-       {
-               auto a_decoders = get_audio_decoders();
-               auto v_decoder  = get_video_decoder();
-               expect_started();
-
-               if (type == AVMediaType::AVMEDIA_TYPE_AUDIO && !a_decoders.empty())
-               {
-                       std::vector<std::shared_ptr<AVFrame>> frames;
-
-                       for (auto& a_decoder : a_decoders)
-                       {
-                               std::shared_ptr<AVFrame> frame;
-
-                               for (int i = 0; i < 64; ++i)
-                               {
-                                       frame = (*a_decoder)();
-
-                                       if (frame == flush() || (frame && frame->data[0]))
-                                               break;
-                                       else
-                                               frame.reset();
-                               }
-
-                               frames.push_back(std::move(frame));
-                       }
-
-                       return frames;
-               }
-               else if (type == AVMediaType::AVMEDIA_TYPE_VIDEO && v_decoder)
-               {
-                       std::shared_ptr<AVFrame> frame;
-
-                       for (int i = 0; i < 128; ++i)
-                       {
-                               frame = (*v_decoder)();
-
-                               if (frame == flush() || (frame && frame->data[0]))
-                                       return { frame };
-                       }
-               }
-               else
-                       CASPAR_THROW_EXCEPTION(caspar_exception() << msg_info(
-                               print() + L" Unhandled media type " + boost::lexical_cast<std::wstring>(type)));
-
-               return { };
-       }
-private:
-       void expect_started() const
-       {
-               if (!started_)
-                       CASPAR_THROW_EXCEPTION(invalid_operation() << msg_info(print() + L" Not started."));
-       }
-
-       std::shared_ptr<input> get_input() const
-       {
-               boost::lock_guard<boost::mutex> lock(pointer_mutex_);
-               return input_;
-       }
-
-       std::vector<spl::shared_ptr<audio_decoder>> get_audio_decoders() const
-       {
-               boost::lock_guard<boost::mutex> lock(pointer_mutex_);
-               return audio_decoders_;
-       }
-
-       std::shared_ptr<video_decoder> get_video_decoder() const
-       {
-               boost::lock_guard<boost::mutex> lock(pointer_mutex_);
-               return video_decoder_;
-       }
-};
-
-class memory_source : public source
-{
-       int                                                                                                                     samplerate_             = -1;
-       int                                                                                                                     num_channels_   = -1;
-       int                                                                                                                     width_                  = -1;
-       int                                                                                                                     height_                 = -1;
-       boost::rational<int>                                                                            framerate_              = -1;
-
-       tbb::atomic<bool>                                                                                       running_;
-       tbb::concurrent_bounded_queue<caspar::array<const int32_t>>     audio_frames_;
-       tbb::concurrent_bounded_queue<caspar::array<const uint8_t>>     video_frames_;
-       int64_t                                                                                                         audio_pts_              = 0;
-       int64_t                                                                                                         video_pts_              = 0;
-public:
-       memory_source()
-       {
-               running_ = false;
-               video_frames_.set_capacity(1);
-               audio_frames_.set_capacity(1);
-       }
-
-       ~memory_source()
-       {
-               stop();
-       }
-
-       void graph(spl::shared_ptr<caspar::diagnostics::graph> g) override
-       {
-       }
-
-       std::wstring print() const override
-       {
-               return L"[memory_source]";
-       }
-
-       void enable_audio(int samplerate, int num_channels)
-       {
-               samplerate_ = samplerate;
-               num_channels_ = num_channels;
-       }
-
-       void enable_video(int width, int height, boost::rational<int> framerate)
-       {
-               width_ = width;
-               height_ = height;
-               framerate_ = framerate;
-       }
-
-       void start() override
-       {
-               running_ = true;
-       }
-
-       void stop() override
-       {
-               running_ = false;
-               video_frames_.try_push(caspar::array<const uint8_t>());
-               audio_frames_.try_push(caspar::array<const int32_t>());
-       }
-
-       bool has_audio() const override
-       {
-               return samplerate_ != -1;
-       }
-
-       int samplerate() const override
-       {
-               return samplerate_;
-       }
-
-       bool has_video() const override
-       {
-               return width_ != -1;
-       }
-
-       bool eof() const override
-       {
-               return !running_;
-       }
-
-       boost::rational<int> framerate() const override
-       {
-               return framerate_;
-       }
-       
-       bool try_push_audio(caspar::array<const std::int32_t> data)
-       {
-               if (!has_audio())
-                       CASPAR_THROW_EXCEPTION(invalid_operation() << msg_info(print() + L" audio not enabled."));
-
-               if (data.empty() || data.size() % num_channels_ != 0)
-                       CASPAR_THROW_EXCEPTION(invalid_argument() << msg_info(print() + L" audio with incorrect number of channels submitted."));
-
-               return audio_frames_.try_push(std::move(data));
-       }
-
-       bool try_push_video(caspar::array<const std::uint8_t> data)
-       {
-               if (!has_video())
-                       CASPAR_THROW_EXCEPTION(invalid_operation() << msg_info(print() + L" video not enabled."));
-
-               if (data.size() != width_ * height_ * 4)
-                       CASPAR_THROW_EXCEPTION(invalid_argument() << msg_info(print() + L" video with incorrect size submitted."));
-
-               return video_frames_.try_push(std::move(data));
-       }
-
-       std::vector<std::shared_ptr<AVFrame>> get_input_frames_for_streams(AVMediaType type) override
-       {
-               if (!running_)
-                       CASPAR_THROW_EXCEPTION(invalid_operation() << msg_info(print() + L" not running."));
-
-               if (type == AVMediaType::AVMEDIA_TYPE_AUDIO && has_audio())
-               {
-                       caspar::array<const std::int32_t> samples;
-                       audio_frames_.pop(samples);
-
-                       if (samples.empty())
-                               return { };
-                       
-                       spl::shared_ptr<AVFrame> av_frame(av_frame_alloc(), [samples](AVFrame* p) { av_frame_free(&p); });
-
-                       av_frame->channels                      = num_channels_;
-                       av_frame->channel_layout        = av_get_default_channel_layout(num_channels_);
-                       av_frame->sample_rate           = samplerate_;
-                       av_frame->nb_samples            = static_cast<int>(samples.size()) / num_channels_;
-                       av_frame->format                        = AV_SAMPLE_FMT_S32;
-                       av_frame->pts                           = audio_pts_;
-
-                       audio_pts_ += av_frame->nb_samples;
-
-                       FF(av_samples_fill_arrays(
-                                       av_frame->extended_data,
-                                       av_frame->linesize,
-                                       reinterpret_cast<const std::uint8_t*>(&*samples.begin()),
-                                       av_frame->channels,
-                                       av_frame->nb_samples,
-                                       static_cast<AVSampleFormat>(av_frame->format),
-                                       16));
-
-                       return { av_frame };
-               }
-               else if (type == AVMediaType::AVMEDIA_TYPE_VIDEO && has_video())
-               {
-                       caspar::array<const std::uint8_t> data;
-                       video_frames_.pop(data);
-
-                       if (data.empty())
-                               return {};
-
-                       spl::shared_ptr<AVFrame> av_frame(av_frame_alloc(), [data](AVFrame* p) { av_frame_free(&p); });
-                       avcodec_get_frame_defaults(av_frame.get());             
-                       
-                       const auto sample_aspect_ratio = boost::rational<int>(width_, height_);
-
-                       av_frame->format                                  = AV_PIX_FMT_BGRA;
-                       av_frame->width                                   = width_;
-                       av_frame->height                                  = height_;
-                       av_frame->sample_aspect_ratio.num = sample_aspect_ratio.numerator();
-                       av_frame->sample_aspect_ratio.den = sample_aspect_ratio.denominator();
-                       av_frame->pts                                     = video_pts_;
-
-                       video_pts_ += 1;
-
-                       FF(av_image_fill_arrays(
-                                       av_frame->data,
-                                       av_frame->linesize,
-                                       data.begin(),
-                                       static_cast<AVPixelFormat>(av_frame->format),
-                                       width_,
-                                       height_,
-                                       1));
-
-                       return { av_frame };
-               }
-               else
-                       CASPAR_THROW_EXCEPTION(caspar_exception() << msg_info(
-                               print() + L" Unhandled media type " + boost::lexical_cast<std::wstring>(type)));
-       }
-};
-
-struct sink
-{
-       virtual ~sink() { }
-
-       virtual std::wstring                                    print() const                                                                                                                                   = 0;
-       virtual void                                                    graph(spl::shared_ptr<caspar::diagnostics::graph> g)                                                    { }
-       virtual void                                                    acodec(std::string codec)                                                                                                               { CASPAR_THROW_EXCEPTION(invalid_operation() << msg_info(print() + L" not an encoder.")); }
-       virtual void                                                    vcodec(std::string codec)                                                                                                               { CASPAR_THROW_EXCEPTION(invalid_operation() << msg_info(print() + L" not an encoder.")); }
-       virtual void                                                    format(std::string fmt)                                                                                                                 { CASPAR_THROW_EXCEPTION(invalid_operation() << msg_info(print() + L" not an encoder.")); }
-       virtual void                                                    framerate(boost::rational<int> framerate)                                                                               { CASPAR_THROW_EXCEPTION(invalid_operation() << msg_info(print() + L" not an encoder.")); }
-       virtual void                                                    start(bool has_audio, bool has_video)                                                                                   { CASPAR_THROW_EXCEPTION(not_implemented() << msg_info(print())); }
-       virtual void                                                    stop()                                                                                                                                                  { }
-       virtual void                                                    flush_all()                                                                                                                                             { }
-       virtual std::vector<AVSampleFormat>             supported_sample_formats() const                                                                                                { CASPAR_THROW_EXCEPTION(not_implemented() << msg_info(print())); }
-       virtual std::vector<int>                                supported_samplerates() const                                                                                                   { CASPAR_THROW_EXCEPTION(not_implemented() << msg_info(print())); }
-       virtual std::vector<AVPixelFormat>              supported_pixel_formats() const                                                                                                 { CASPAR_THROW_EXCEPTION(not_implemented() << msg_info(print())); }
-       virtual int                                                             wanted_num_audio_streams() const                                                                                                { CASPAR_THROW_EXCEPTION(not_implemented() << msg_info(print())); }
-       virtual boost::optional<int>                    wanted_num_channels_per_stream() const                                                                                  { CASPAR_THROW_EXCEPTION(not_implemented() << msg_info(print())); }
-       virtual boost::optional<AVMediaType>    try_push(AVMediaType type, int stream_index, spl::shared_ptr<AVFrame> frame)    { CASPAR_THROW_EXCEPTION(not_implemented() << msg_info(print())); }
-       virtual void                                                    eof()                                                                                                                                                   { CASPAR_THROW_EXCEPTION(not_implemented() << msg_info(print())); }
-};
-
-struct no_sink_selected : public sink
-{
-       std::wstring print() const override
-       {
-               return L"[no_sink_selected]";
-       }
-};
-
-class file_sink : public sink
-{
-       std::wstring                                            filename_;
-       spl::shared_ptr<diagnostics::graph>     graph_;
-public:
-       file_sink(std::string filename)
-               : filename_(u16(std::move(filename)))
-       {
-       }
-
-       std::wstring print() const override
-       {
-               return L"[file_sink " + filename_ + L"]";
-       }
-
-       void graph(spl::shared_ptr<caspar::diagnostics::graph> g) override
-       {
-               graph_ = std::move(g);
-       }
-};
-
-class memory_sink : public sink
-{
-       spl::shared_ptr<core::frame_factory>                    factory_;
-
-       bool                                                                                    has_audio_                      = false;
-       bool                                                                                    has_video_                      = false;
-       std::vector<int>                                                                audio_cadence_;
-       core::audio_channel_layout                                              channel_layout_         = core::audio_channel_layout::invalid();
-       core::mutable_audio_buffer                                              audio_samples_;
-
-       std::queue<std::shared_ptr<AVFrame>>                    video_frames_;
-       std::shared_ptr<AVFrame>                                                last_video_frame_;
-
-       tbb::concurrent_bounded_queue<core::draw_frame> output_frames_;
-       tbb::atomic<bool>                                                               running_;
-public:
-       memory_sink(spl::shared_ptr<core::frame_factory> factory, core::video_format_desc format)
-               : factory_(std::move(factory))
-               , audio_cadence_(format.audio_cadence)
-       {
-               output_frames_.set_capacity(2);
-               running_ = false;
-               // Note: Uses 1 step rotated cadence for 1001 modes (1602, 1602, 1601, 1602, 1601)
-               // This cadence fills the audio mixer most optimally.
-               boost::range::rotate(audio_cadence_, std::end(audio_cadence_) - 1);
-       }
-
-       ~memory_sink()
-       {
-               stop();
-       }
-
-       std::wstring print() const override
-       {
-               return L"[memory_sink]";
-       }
-
-       void graph(spl::shared_ptr<caspar::diagnostics::graph> g) override
-       {
-       }
-
-       void framerate(boost::rational<int> framerate) override
-       {
-               audio_cadence_ = find_audio_cadence(framerate);
-               // Note: Uses 1 step rotated cadence for 1001 modes (1602, 1602, 1601, 1602, 1601)
-               // This cadence fills the audio mixer most optimally.
-               boost::range::rotate(audio_cadence_, std::end(audio_cadence_) - 1);
-       }
-
-       void start(bool has_audio, bool has_video) override
-       {
-               has_audio_      = has_audio;
-               has_video_      = has_video;
-               running_        = true;
-       }
-
-       void stop() override
-       {
-               running_ = false;
-               output_frames_.set_capacity(4);
-       }
-
-       std::vector<AVSampleFormat> supported_sample_formats() const override
-       {
-               return { AVSampleFormat::AV_SAMPLE_FMT_S32 };
-       }
-
-       std::vector<int> supported_samplerates() const override {
-               return { 48000 };
-       }
-
-       std::vector<AVPixelFormat> supported_pixel_formats() const override
-       {
-               return {
-                       AVPixelFormat::AV_PIX_FMT_YUVA420P,
-                       AVPixelFormat::AV_PIX_FMT_YUV444P,
-                       AVPixelFormat::AV_PIX_FMT_YUV422P,
-                       AVPixelFormat::AV_PIX_FMT_YUV420P,
-                       AVPixelFormat::AV_PIX_FMT_YUV411P,
-                       AVPixelFormat::AV_PIX_FMT_BGRA,
-                       AVPixelFormat::AV_PIX_FMT_ARGB,
-                       AVPixelFormat::AV_PIX_FMT_RGBA,
-                       AVPixelFormat::AV_PIX_FMT_ABGR,
-                       AVPixelFormat::AV_PIX_FMT_GRAY8
-               };
-       }
-
-       int wanted_num_audio_streams() const override
-       {
-               return 1;
-       }
-
-       boost::optional<int> wanted_num_channels_per_stream() const override
-       {
-               return boost::none;
-       }
-
-       void flush_all() override
-       {
-               audio_samples_.clear();
-
-               while (!video_frames_.empty())
-                       video_frames_.pop();
-       }
-
-       boost::optional<AVMediaType> try_push(AVMediaType type, int stream_index, spl::shared_ptr<AVFrame> av_frame) override
-       {
-               if (!has_audio_ && !has_video_)
-                       CASPAR_THROW_EXCEPTION(invalid_operation());
-
-               if (type == AVMediaType::AVMEDIA_TYPE_AUDIO && av_frame->data[0])
-               {
-                       if (channel_layout_ == core::audio_channel_layout::invalid()) // First audio
-                       {
-                               channel_layout_ = get_audio_channel_layout(av_frame->channels, av_frame->channel_layout, L"");
-
-                               // Insert silence samples so that the audio mixer is guaranteed to be filled.
-                               auto min_num_samples_per_frame  = *boost::min_element(audio_cadence_);
-                               auto max_num_samples_per_frame  = *boost::max_element(audio_cadence_);
-                               auto cadence_safety_samples             = max_num_samples_per_frame - min_num_samples_per_frame;
-                               audio_samples_.resize(channel_layout_.num_channels * cadence_safety_samples, 0);
-                       }
-
-                       auto ptr = reinterpret_cast<int32_t*>(av_frame->data[0]);
-
-                       audio_samples_.insert(audio_samples_.end(), ptr, ptr + av_frame->linesize[0] / sizeof(int32_t));
-               }
-               else if (type == AVMediaType::AVMEDIA_TYPE_VIDEO)
-               {
-                       video_frames_.push(std::move(av_frame));
-               }
-
-               while (true)
-               {
-                       bool enough_audio =
-                               !has_audio_ ||
-                               (channel_layout_ != core::audio_channel_layout::invalid() && audio_samples_.size() >= audio_cadence_.front() * channel_layout_.num_channels);
-                       bool enough_video =
-                               !has_video_ ||
-                               !video_frames_.empty();
-
-                       if (!enough_audio)
-                               return AVMediaType::AVMEDIA_TYPE_AUDIO;
-
-                       if (!enough_video)
-                               return AVMediaType::AVMEDIA_TYPE_VIDEO;
-
-                       core::mutable_audio_buffer audio_data;
-
-                       if (has_audio_)
-                       {
-                               auto begin = audio_samples_.begin();
-                               auto end = begin + audio_cadence_.front() * channel_layout_.num_channels;
-
-                               audio_data.insert(audio_data.begin(), begin, end);
-                               audio_samples_.erase(begin, end);
-                               boost::range::rotate(audio_cadence_, std::begin(audio_cadence_) + 1);
-                       }
-
-                       if (!has_video_) // Audio only
-                       {
-                               core::mutable_frame audio_only_frame(
-                                               { },
-                                               std::move(audio_data),
-                                               this,
-                                               core::pixel_format_desc(core::pixel_format::invalid),
-                                               channel_layout_);
-
-                               output_frames_.push(core::draw_frame(std::move(audio_only_frame)));
-
-                               return AVMediaType::AVMEDIA_TYPE_AUDIO;
-                       }
-
-                       auto output_frame = make_frame(this, spl::make_shared_ptr(video_frames_.front()), *factory_, channel_layout_);
-                       last_video_frame_ = video_frames_.front();
-                       video_frames_.pop();
-                       output_frame.audio_data() = std::move(audio_data);
-
-                       output_frames_.push(core::draw_frame(std::move(output_frame)));
-               }
-       }
-
-       void eof() override
-       {
-               // Drain rest, regardless of it being enough or not.
-               while (!video_frames_.empty() || !audio_samples_.empty())
-               {
-                       core::mutable_audio_buffer audio_data;
-
-                       audio_data.swap(audio_samples_);
-
-                       if (video_frames_.empty() && !audio_data.empty() && last_video_frame_) // More audio samples than video
-                       {
-                               video_frames_.push(last_video_frame_);
-                       }
-
-                       if (!video_frames_.empty())
-                       {
-                               auto output_frame = make_frame(this, spl::make_shared_ptr(video_frames_.front()), *factory_, channel_layout_);
-                               last_video_frame_ = video_frames_.front();
-                               video_frames_.pop();
-                               output_frame.audio_data() = std::move(audio_data);
-
-                               output_frames_.push(core::draw_frame(std::move(output_frame)));
-                       }
-                       else
-                       {
-                               core::mutable_frame audio_only_frame(
-                                               {},
-                                               std::move(audio_data),
-                                               this,
-                                               core::pixel_format_desc(core::pixel_format::invalid),
-                                               channel_layout_);
-
-                               output_frames_.push(core::draw_frame(std::move(audio_only_frame)));
-                               output_frames_.push(core::draw_frame::empty());
-                       }
-               }
-       }
-
-       core::draw_frame try_pop_frame()
-       {
-               core::draw_frame frame = core::draw_frame::late();
-
-               if (!output_frames_.try_pop(frame) && !running_)
-                       return core::draw_frame::empty();
-
-               return frame;
-       }
-};
-
-struct audio_stream_info
-{
-       int                             num_channels    = 0;
-       AVSampleFormat  sampleformat    = AVSampleFormat::AV_SAMPLE_FMT_NONE;
-       uint64_t                channel_layout  = 0;
-};
-
-struct video_stream_info
-{
-       int                                     width           = 0;
-       int                                     height          = 0;
-       AVPixelFormat           pixelformat     = AVPixelFormat::AV_PIX_FMT_NONE;
-       core::field_mode        fieldmode       = core::field_mode::progressive;
-};
-
-class ffmpeg_pipeline_backend_internal : public ffmpeg_pipeline_backend
-{
-       spl::shared_ptr<diagnostics::graph>                                                             graph_;
-
-       spl::unique_ptr<source>                                                                                 source_                                 = spl::make_unique<no_source_selected>();
-       std::function<bool (caspar::array<const std::int32_t> data)>    try_push_audio_;
-       std::function<bool (caspar::array<const std::uint8_t> data)>    try_push_video_;
-
-       std::vector<audio_stream_info>                                                                  source_audio_streams_;
-       video_stream_info                                                                                               source_video_stream_;
-
-       std::string                                                                                                             afilter_;
-       std::unique_ptr<audio_filter>                                                                   audio_filter_;
-       std::string                                                                                                             vfilter_;
-       std::unique_ptr<filter>                                                                                 video_filter_;
-
-       spl::unique_ptr<sink>                                                                                   sink_                                   = spl::make_unique<no_sink_selected>();
-       std::function<core::draw_frame ()>                                                              try_pop_frame_;
-
-       tbb::atomic<bool>                                                                                               started_;
-       tbb::spin_mutex                                                                                                 exception_mutex_;
-       boost::exception_ptr                                                                                    exception_;
-       boost::thread                                                                                                   thread_;
-public:
-       ffmpeg_pipeline_backend_internal()
-       {
-               started_ = false;
-               diagnostics::register_graph(graph_);
-       }
-
-       ~ffmpeg_pipeline_backend_internal()
-       {
-               stop();
-       }
-
-       void throw_if_error()
-       {
-               boost::lock_guard<tbb::spin_mutex> lock(exception_mutex_);
-
-               if (exception_ != nullptr)
-                       boost::rethrow_exception(exception_);
-       }
-
-       void graph(spl::shared_ptr<caspar::diagnostics::graph> g) override
-       {
-               graph_ = std::move(g);
-               source_->graph(graph_);
-               sink_->graph(graph_);
-       }
-
-       // Source setup
-
-       void from_file(std::string filename) override
-       {
-               source_                 = spl::make_unique<file_source>(std::move(filename));
-               try_push_audio_ = std::function<bool (caspar::array<const std::int32_t>)>();
-               try_push_video_ = std::function<bool (caspar::array<const std::uint8_t>)>();
-               source_->graph(graph_);
-       }
-
-       void from_memory_only_audio(int num_channels, int samplerate) override
-       {
-               auto source             = spl::make_unique<memory_source>();
-               auto source_ptr = source.get();
-               try_push_audio_ = [this, source_ptr](caspar::array<const std::int32_t> data) { return source_ptr->try_push_audio(std::move(data)); };
-               source->enable_audio(samplerate, num_channels);
-
-               source_ = std::move(source);
-               source_->graph(graph_);
-       }
-
-       void from_memory_only_video(int width, int height, boost::rational<int> framerate) override
-       {
-               auto source             = spl::make_unique<memory_source>();
-               auto source_ptr = source.get();
-               try_push_video_ = [this, source_ptr](caspar::array<const std::uint8_t> data) { return source_ptr->try_push_video(std::move(data)); };
-               source->enable_video(width, height, std::move(framerate));
-
-               source_ = std::move(source);
-               source_->graph(graph_);
-       }
-
-       void from_memory(int num_channels, int samplerate, int width, int height, boost::rational<int> framerate) override
-       {
-               auto source             = spl::make_unique<memory_source>();
-               auto source_ptr = source.get();
-               try_push_audio_ = [this, source_ptr](caspar::array<const std::int32_t> data) { return source_ptr->try_push_audio(std::move(data)); };
-               try_push_video_ = [this, source_ptr](caspar::array<const std::uint8_t> data) { return source_ptr->try_push_video(std::move(data)); };
-               source->enable_audio(samplerate, num_channels);
-               source->enable_video(width, height, std::move(framerate));
-
-               source_ = std::move(source);
-               source_->graph(graph_);
-       }
-
-       void                    start_frame(std::uint32_t frame) override       { source_->start_frame(frame);          }
-       std::uint32_t   start_frame() const override                            { return source_->start_frame();        }
-       void                    length(std::uint32_t frames) override           { source_->length(frames);                      }
-       std::uint32_t   length() const override                                         { return source_->length();                     }
-       void                    seek(std::uint32_t frame) override                      { source_->seek(frame);                         }
-       void                    loop(bool value) override                                       { source_->loop(value);                         }
-       bool                    loop() const override                                           { return source_->loop();                       }
-       std::string             source_filename() const override                        { return source_->filename();           }
-
-       // Filter setup
-
-       void vfilter(std::string filter) override
-       {
-               vfilter_ = std::move(filter);
-       }
-
-       void afilter(std::string filter) override
-       {
-               afilter_ = std::move(filter);
-       }
-
-       int width() const override
-       {
-               return source_video_stream_.width;
-       }
-
-       int height() const override
-       {
-               return source_video_stream_.height;
-       }
-
-       boost::rational<int> framerate() const override
-       {
-               bool double_rate = filter::is_double_rate(u16(vfilter_));
-
-               return double_rate ? source_->framerate() * 2 : source_->framerate();
-       }
-
-       bool progressive() const override
-       {
-               return true;//TODO
-       }
-
-       // Sink setup
-
-       void to_memory(spl::shared_ptr<core::frame_factory> factory, core::video_format_desc format) override
-       {
-               auto sink               = spl::make_unique<memory_sink>(std::move(factory), std::move(format));
-               auto sink_ptr   = sink.get();
-               try_pop_frame_  = [sink_ptr] { return sink_ptr->try_pop_frame(); };
-
-               sink_ = std::move(sink);
-               sink_->graph(graph_);
-       }
-
-       void to_file(std::string filename) override
-       {
-               sink_                   = spl::make_unique<file_sink>(std::move(filename));
-               try_pop_frame_  = std::function<core::draw_frame ()>();
-               sink_->graph(graph_);
-       }
-
-       void acodec(std::string codec) override { sink_->acodec(std::move(codec)); }
-       void vcodec(std::string codec) override { sink_->vcodec(std::move(codec)); }
-       void format(std::string fmt) override   { sink_->format(std::move(fmt)); }
-
-       // Runtime control
-
-       void start() override
-       {
-               source_->start();
-               sink_->start(source_->has_audio(), source_->has_video());
-               started_ = true;
-               bool quiet = is_logging_quiet_for_thread();
-
-               thread_ = boost::thread([=] { run(quiet); });
-       }
-
-       bool try_push_audio(caspar::array<const std::int32_t> data) override
-       {
-               throw_if_error();
-
-               if (try_push_audio_)
-                       return try_push_audio_(std::move(data));
-               else
-                       return false;
-       }
-
-       bool try_push_video(caspar::array<const std::uint8_t> data) override
-       {
-               throw_if_error();
-
-               if (try_push_video_)
-                       return try_push_video_(std::move(data));
-               else
-                       return false;
-       }
-
-       core::draw_frame try_pop_frame() override
-       {
-               throw_if_error();
-
-               if (!try_pop_frame_)
-                       CASPAR_THROW_EXCEPTION(invalid_operation());
-
-               return try_pop_frame_();
-       }
-
-       std::uint32_t last_frame() const override
-       {
-               return source_->frame_number();
-       }
-
-       bool started() const override
-       {
-               return started_;
-       }
-
-       void stop() override
-       {
-               started_ = false;
-
-               sink_->stop();
-               source_->stop();
-
-               if (thread_.joinable())
-                       thread_.join();
-       }
-
-private:
-       void run(bool quiet)
-       {
-               ensure_gpf_handler_installed_for_thread(u8(L"ffmpeg-pipeline: " + source_->print() + L" -> " + sink_->print()).c_str());
-               auto quiet_logging = temporary_enable_quiet_logging_for_thread(quiet);
-
-               try
-               {
-                       boost::optional<AVMediaType> result = source_->has_audio() ? AVMediaType::AVMEDIA_TYPE_AUDIO : AVMediaType::AVMEDIA_TYPE_VIDEO;
-
-                       while (started_ && (source_->has_audio() || source_->has_video()))
-                       {
-                               auto needed                                             = *result;
-                               auto input_frames_for_streams   = source_->get_input_frames_for_streams(needed);
-                               bool flush_all                                  = !input_frames_for_streams.empty() && input_frames_for_streams.at(0) == flush();
-
-                               if (flush_all)
-                               {
-                                       sink_->flush_all();
-                                       
-                                       if (source_->has_audio() && source_->has_video())
-                                               result = needed == AVMediaType::AVMEDIA_TYPE_AUDIO ? AVMediaType::AVMEDIA_TYPE_VIDEO : AVMediaType::AVMEDIA_TYPE_AUDIO;
-
-                                       continue;
-                               }
-
-                               bool got_requested_media_type   = !input_frames_for_streams.empty() && input_frames_for_streams.at(0);
-
-                               if (got_requested_media_type)
-                               {
-                                       for (int input_stream_index = 0; input_stream_index < input_frames_for_streams.size(); ++input_stream_index)
-                                       {
-                                               if (needed == AVMediaType::AVMEDIA_TYPE_AUDIO)
-                                               {
-                                                       initialize_audio_filter_if_needed(input_frames_for_streams);
-                                                       audio_filter_->push(input_stream_index, std::move(input_frames_for_streams.at(input_stream_index)));
-
-                                                       for (int output_stream_index = 0; output_stream_index < sink_->wanted_num_audio_streams(); ++output_stream_index)
-                                                               for (auto filtered_frame : audio_filter_->poll_all(output_stream_index))
-                                                                       result = sink_->try_push(AVMediaType::AVMEDIA_TYPE_AUDIO, output_stream_index, std::move(filtered_frame));
-                                               }
-                                               else if (needed == AVMediaType::AVMEDIA_TYPE_VIDEO)
-                                               {
-                                                       initialize_video_filter_if_needed(*input_frames_for_streams.at(input_stream_index));
-                                                       video_filter_->push(std::move(input_frames_for_streams.at(input_stream_index)));
-
-                                                       for (auto filtered_frame : video_filter_->poll_all())
-                                                               result = sink_->try_push(AVMediaType::AVMEDIA_TYPE_VIDEO, 0, std::move(filtered_frame));
-                                               }
-                                               else
-                                                       CASPAR_THROW_EXCEPTION(not_supported());
-                                       }
-                               }
-                               else if (source_->eof())
-                               {
-                                       started_ = false;
-                                       sink_->eof();
-                                       break;
-                               }
-                               else
-                                       result = boost::none;
-
-                               if (!result)
-                               {
-                                       graph_->set_tag(caspar::diagnostics::tag_severity::WARNING, "dropped-frame");
-                                       result = needed; // Repeat same media type
-                               }
-                       }
-               }
-               catch (...)
-               {
-                       if (is_logging_quiet_for_thread())
-                       {
-                               CASPAR_LOG_CURRENT_EXCEPTION_AT_LEVEL(debug);
-                       }
-                       else
-                       {
-                               CASPAR_LOG_CURRENT_EXCEPTION();
-                       }
-
-                       boost::lock_guard<tbb::spin_mutex> lock(exception_mutex_);
-                       exception_ = boost::current_exception();
-               }
-
-               video_filter_.reset();
-               audio_filter_.reset();
-               source_->stop();
-               sink_->stop();
-               started_ = false;
-       }
-
-       template<typename T>
-       void set_if_changed(bool& changed, T& old_value, T new_value)
-       {
-               if (old_value != new_value)
-               {
-                       changed = true;
-                       old_value = new_value;
-               }
-       }
-
-       void initialize_audio_filter_if_needed(const std::vector<std::shared_ptr<AVFrame>>& av_frames_per_stream)
-       {
-               bool changed = av_frames_per_stream.size() != source_audio_streams_.size();
-               source_audio_streams_.resize(av_frames_per_stream.size());
-
-               for (int i = 0; i < av_frames_per_stream.size(); ++i)
-               {
-                       auto& av_frame  = *av_frames_per_stream.at(i);
-                       auto& stream    = source_audio_streams_.at(i);
-
-                       auto channel_layout = av_frame.channel_layout == 0
-                                       ? av_get_default_channel_layout(av_frame.channels)
-                                       : av_frame.channel_layout;
-
-                       set_if_changed(changed, stream.sampleformat, static_cast<AVSampleFormat>(av_frame.format));
-                       set_if_changed(changed, stream.num_channels, av_frame.channels);
-                       set_if_changed(changed, stream.channel_layout, channel_layout);
-               }
-
-               if (changed)
-                       initialize_audio_filter();
-       }
-
-       void initialize_audio_filter()
-       {
-               std::vector<audio_input_pad> input_pads;
-               std::vector<audio_output_pad> output_pads;
-
-               for (auto& source_audio_stream : source_audio_streams_)
-               {
-                       input_pads.emplace_back(
-                                       boost::rational<int>(1, source_->samplerate()),
-                                       source_->samplerate(),
-                                       source_audio_stream.sampleformat,
-                                       source_audio_stream.channel_layout);
-               }
-
-               auto total_num_channels = cpplinq::from(source_audio_streams_)
-                               .select([](const audio_stream_info& info) { return info.num_channels; })
-                               .aggregate(0, std::plus<int>());
-
-               if (total_num_channels > 1 && sink_->wanted_num_audio_streams() > 1)
-                       CASPAR_THROW_EXCEPTION(invalid_operation()
-                                       << msg_info("only one-to-many or many-to-one audio stream conversion supported."));
-
-               std::wstring amerge;
-
-               if (sink_->wanted_num_audio_streams() == 1 && !sink_->wanted_num_channels_per_stream())
-               {
-                       output_pads.emplace_back(
-                                       sink_->supported_samplerates(),
-                                       sink_->supported_sample_formats(),
-                                       std::vector<int64_t>({ av_get_default_channel_layout(total_num_channels) }));
-
-                       if (source_audio_streams_.size() > 1)
-                       {
-                               for (int i = 0; i < source_audio_streams_.size(); ++i)
-                                       amerge += L"[a:" + boost::lexical_cast<std::wstring>(i) + L"]";
-
-                               amerge += L"amerge=inputs=" + boost::lexical_cast<std::wstring>(source_audio_streams_.size());
-                       }
-               }
-
-               std::wstring afilter = u16(afilter_);
-
-               if (!amerge.empty())
-               {
-                       afilter = prepend_filter(u16(afilter), amerge);
-                       afilter += L"[aout:0]";
-               }
-
-               audio_filter_.reset(new audio_filter(input_pads, output_pads, u8(afilter)));
-       }
-
-       void initialize_video_filter_if_needed(const AVFrame& av_frame)
-       {
-               bool changed = false;
-
-               set_if_changed(changed, source_video_stream_.width, av_frame.width);
-               set_if_changed(changed, source_video_stream_.height, av_frame.height);
-               set_if_changed(changed, source_video_stream_.pixelformat, static_cast<AVPixelFormat>(av_frame.format));
-
-               core::field_mode field_mode = core::field_mode::progressive;
-
-               if (av_frame.interlaced_frame)
-                       field_mode = av_frame.top_field_first ? core::field_mode::upper : core::field_mode::lower;
-
-               set_if_changed(changed, source_video_stream_.fieldmode, field_mode);
-
-               if (changed)
-                       initialize_video_filter();
-       }
-
-       void initialize_video_filter()
-       {
-               if (source_video_stream_.fieldmode != core::field_mode::progressive && !filter::is_deinterlacing(u16(vfilter_)))
-                       vfilter_ = u8(append_filter(u16(vfilter_), L"YADIF=1:-1"));
-
-               if (source_video_stream_.height == 480) // NTSC DV
-               {
-                       auto pad_str = L"PAD=" + boost::lexical_cast<std::wstring>(source_video_stream_.width) + L":486:0:2:black";
-                       vfilter_ = u8(append_filter(u16(vfilter_), pad_str));
-               }
-
-               video_filter_.reset(new filter(
-                               source_video_stream_.width,
-                               source_video_stream_.height,
-                               1 / source_->framerate(),
-                               source_->framerate(),
-                               boost::rational<int>(1, 1), // TODO
-                               source_video_stream_.pixelformat,
-                               sink_->supported_pixel_formats(),
-                               vfilter_));
-               sink_->framerate(framerate());
-       }
-};
-
-spl::shared_ptr<struct ffmpeg_pipeline_backend> create_internal_pipeline()
-{
-       return spl::make_shared<ffmpeg_pipeline_backend_internal>();
-}
-
-}}
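The removed backend's initialize_audio_filter() merged several source audio streams into one output by generating an ffmpeg amerge filter description at runtime: one labelled input pad per stream ([a:0], [a:1], ...) followed by amerge=inputs=N. A minimal sketch of that string construction, following the same pad-label convention as the removed code; the helper name below is invented for illustration and is not part of the commit:

    #include <sstream>
    #include <string>

    // Builds the description the removed initialize_audio_filter() assembled
    // when the sink wanted a single audio stream: one labelled input pad per
    // source stream, followed by an amerge of all of them.
    std::wstring make_amerge_description(int num_input_streams)
    {
        if (num_input_streams < 2)
            return L""; // a single stream needs no merging

        std::wostringstream description;

        for (int i = 0; i < num_input_streams; ++i)
            description << L"[a:" << i << L"]";

        description << L"amerge=inputs=" << num_input_streams;

        return description.str(); // e.g. L"[a:0][a:1]amerge=inputs=2"
    }

In the removed code the resulting description was prepended to any user-supplied afilter and the combined graph terminated with an [aout:0] label before the audio_filter was constructed.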
diff --git a/modules/ffmpeg/ffmpeg_pipeline_backend_internal.h b/modules/ffmpeg/ffmpeg_pipeline_backend_internal.h
deleted file mode 100644 (file)
index d83768f..0000000
+++ /dev/null
@@ -1,36 +0,0 @@
-/*
-* Copyright (c) 2011 Sveriges Television AB <info@casparcg.com>
-*
-* This file is part of CasparCG (www.casparcg.com).
-*
-* CasparCG is free software: you can redistribute it and/or modify
-* it under the terms of the GNU General Public License as published by
-* the Free Software Foundation, either version 3 of the License, or
-* (at your option) any later version.
-*
-* CasparCG is distributed in the hope that it will be useful,
-* but WITHOUT ANY WARRANTY; without even the implied warranty of
-* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
-* GNU General Public License for more details.
-*
-* You should have received a copy of the GNU General Public License
-* along with CasparCG. If not, see <http://www.gnu.org/licenses/>.
-*
-* Author: Helge Norberg, helge.norberg@svt.se
-*/
-
-#pragma once
-
-#include <common/memory.h>
-
-#include <boost/rational.hpp>
-
-#include <string>
-#include <functional>
-#include <cstdint>
-
-namespace caspar { namespace ffmpeg {
-
-spl::shared_ptr<struct ffmpeg_pipeline_backend> create_internal_pipeline();
-
-}}
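Apart from the license boilerplate, the header deleted above contained a single factory declaration over an incomplete type, so the concrete ffmpeg_pipeline_backend_internal never leaked outside its translation unit. A small sketch of that idiom, using std::shared_ptr in place of spl::shared_ptr and placeholder names:

    #include <memory>

    // Header side: only the factory is visible; pipeline_backend stays incomplete here.
    std::shared_ptr<struct pipeline_backend> create_pipeline_backend();

    // Implementation side: the concrete type and the factory definition.
    struct pipeline_backend
    {
        // decoding state, filters, threads, ... hidden from other translation units
    };

    std::shared_ptr<struct pipeline_backend> create_pipeline_backend()
    {
        return std::make_shared<pipeline_backend>();
    }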
index e92a7c18134859e05370fc65b13e6fb18a5d720e..cd5b377926e30c4ac7ccc71865ce1eb78047a8fc 100644 (file)
@@ -1,5 +1,5 @@
 /*
-* Copyright (c) 2011 Sveriges Television AB <info@casparcg.com>
+* Copyright 2013 Sveriges Television AB http://casparcg.com/
 *
 * This file is part of CasparCG (www.casparcg.com).
 *
 * Author: Robert Nagy, ronag89@gmail.com
 */
 
-#include "../../StdAfx.h"
+#include "../../stdafx.h"
 
 #include "audio_decoder.h"
 
 #include "../util/util.h"
-#include "../input/input.h"
 #include "../../ffmpeg_error.h"
 
 #include <core/video_format.h>
-#include <core/frame/audio_channel_layout.h>
+#include <core/mixer/audio/audio_util.h>
 
-#include <common/log.h>
 #include <common/cache_aligned_vector.h>
 
 #include <queue>
@@ -51,128 +49,119 @@ extern "C"
 
 namespace caspar { namespace ffmpeg {
        
-uint64_t get_ffmpeg_channel_layout(AVCodecContext* dec)
-{
-       auto layout = (dec->channel_layout && dec->channels == av_get_channel_layout_nb_channels(dec->channel_layout)) ? dec->channel_layout : av_get_default_channel_layout(dec->channels);
-       return layout;
-}
-
-struct audio_decoder::impl : boost::noncopyable
+struct audio_decoder::implementation : boost::noncopyable
 {      
-       core::monitor::subject                                                                          monitor_subject_;
-       input&                                                                                                          input_;
-       int                                                                                                                     index_;
-       int                                                                                                                     actual_index_;
-       const core::video_format_desc                                                           format_desc_;
-       const spl::shared_ptr<AVCodecContext>                                           codec_context_          = open_codec(input_.context(), AVMEDIA_TYPE_AUDIO, actual_index_, false);
-
-       std::shared_ptr<SwrContext>                                                                     swr_                            {
-                                                                                                                                                                               swr_alloc_set_opts(
-                                                                                                                                                                                               nullptr,
-                                                                                                                                                                                               create_channel_layout_bitmask(codec_context_->channels),//get_ffmpeg_channel_layout(codec_context_.get()),
-                                                                                                                                                                                               AV_SAMPLE_FMT_S32,
-                                                                                                                                                                                               format_desc_.audio_sample_rate,
-                                                                                                                                                                                               create_channel_layout_bitmask(codec_context_->channels),//get_ffmpeg_channel_layout(codec_context_.get()),
-                                                                                                                                                                                               codec_context_->sample_fmt,
-                                                                                                                                                                                               codec_context_->sample_rate,
-                                                                                                                                                                                               0,
-                                                                                                                                                                                               nullptr),
-                                                                                                                                                                               [](SwrContext* p){swr_free(&p); }
-                                                                                                                                                                       };
-
-       cache_aligned_vector<uint8_t>                                                           buffer_;
-
-       std::shared_ptr<AVPacket>                                                                       current_packet_;
+       int                                                                             index_                          = -1;
+       const spl::shared_ptr<AVCodecContext>   codec_context_;
+       const int                                                               out_samplerate_;
        
+       cache_aligned_vector<int32_t>                   buffer_;
+
+       std::queue<spl::shared_ptr<AVPacket>>   packets_;
+
+       std::shared_ptr<SwrContext>                             swr_                            {
+                                                                                                                                       swr_alloc_set_opts(
+                                                                                                                                                       nullptr,
+                                                                                                                                                       codec_context_->channel_layout
+                                                                                                                                                                       ? codec_context_->channel_layout
+                                                                                                                                                                       : av_get_default_channel_layout(codec_context_->channels),
+                                                                                                                                                       AV_SAMPLE_FMT_S32,
+                                                                                                                                                       out_samplerate_,
+                                                                                                                                                       codec_context_->channel_layout
+                                                                                                                                                                       ? codec_context_->channel_layout
+                                                                                                                                                                       : av_get_default_channel_layout(codec_context_->channels),
+                                                                                                                                                       codec_context_->sample_fmt,
+                                                                                                                                                       codec_context_->sample_rate,
+                                                                                                                                                       0,
+                                                                                                                                                       nullptr),
+                                                                                                                                       [](SwrContext* p)
+                                                                                                                                       {
+                                                                                                                                               swr_free(&p);
+                                                                                                                                       }
+                                                                                                                               };
+
 public:
-       explicit impl(
-                       input& in,
-                       const core::video_format_desc& format_desc,
-                       int audio_stream_index)
-               : input_(in)
-               , index_(audio_stream_index)
-               , actual_index_(input_.get_actual_audio_stream_index(index_))
-               , format_desc_(format_desc)
-               , buffer_(480000 * 4)
-       {
+       explicit implementation(const spl::shared_ptr<AVFormatContext>& context, int out_samplerate)
+               : codec_context_(open_codec(*context, AVMEDIA_TYPE_AUDIO, index_, false))
+               , out_samplerate_(out_samplerate)
+               , buffer_(10 * out_samplerate_ * codec_context_->channels) // 10 seconds of audio
+       {       
                if(!swr_)
-                       CASPAR_THROW_EXCEPTION(bad_alloc());
-
+                       BOOST_THROW_EXCEPTION(bad_alloc());
+               
                THROW_ON_ERROR2(swr_init(swr_.get()), "[audio_decoder]");
+
+               codec_context_->refcounted_frames = 1;
        }
-               
-       std::shared_ptr<AVFrame> poll()
-       {               
-               if(!current_packet_ && !input_.try_pop_audio(current_packet_, index_))
+
+       void push(const std::shared_ptr<AVPacket>& packet)
+       {                       
+               if(!packet)
+                       return;
+
+               if(packet->stream_index == index_ || packet->data == nullptr)
+                       packets_.push(spl::make_shared_ptr(packet));
+       }       
+       
+       std::shared_ptr<core::mutable_audio_buffer> poll()
+       {
+               if(packets_.empty())
                        return nullptr;
-               
-               std::shared_ptr<AVFrame> audio;
+                               
+               auto packet = packets_.front();
 
-               if (!current_packet_->data)
+               if(packet->data == nullptr)
                {
-                       if(codec_context_->codec->capabilities & CODEC_CAP_DELAY)                       
-                               audio = decode(*current_packet_);
-                       
-                       if (!audio)
-                       {
-                               avcodec_flush_buffers(codec_context_.get());
-                               current_packet_.reset();
-                               audio = flush();
-                       }
+                       packets_.pop();
+                       avcodec_flush_buffers(codec_context_.get());
+                       return flush_audio();
                }
-               else
-               {
-                       audio = decode(*current_packet_);
-                       
-                       if(current_packet_->size == 0)
-                               current_packet_.reset();
-               }
-       
+
+               auto audio = decode(*packet);
+
+               if(packet->size == 0)                                   
+                       packets_.pop();
+
                return audio;
        }
 
-       std::shared_ptr<AVFrame> decode(AVPacket& pkt)
-       {               
-               auto frame = create_frame();
-               
+       std::shared_ptr<core::mutable_audio_buffer> decode(AVPacket& pkt)
+       {
+               auto decoded_frame = create_frame();
+
                int got_frame = 0;
-               auto len = THROW_ON_ERROR2(avcodec_decode_audio4(codec_context_.get(), frame.get(), &got_frame, &pkt), "[audio_decoder]");
-                                       
-               if(len == 0)
+               auto len = THROW_ON_ERROR2(avcodec_decode_audio4(codec_context_.get(), decoded_frame.get(), &got_frame, &pkt), "[audio_decoder]");
+
+               if (len == 0)
                {
                        pkt.size = 0;
                        return nullptr;
                }
 
-        pkt.data += len;
-        pkt.size -= len;
+               pkt.data += len;
+               pkt.size -= len;
 
-               if(!got_frame)
+               if (!got_frame)
                        return nullptr;
-                                                       
-               const uint8_t **in      = const_cast<const uint8_t**>(frame->extended_data);
-               uint8_t* out[]          = {buffer_.data()};
 
-               auto channel_samples = swr_convert(swr_.get(), 
-                                                                                       out, static_cast<int>(buffer_.size()) / codec_context_->channels / av_get_bytes_per_sample(AV_SAMPLE_FMT_S32),
-                                                                                       in, frame->nb_samples);
+               const uint8_t **in = const_cast<const uint8_t**>(decoded_frame->extended_data);
+               uint8_t* out[] = { reinterpret_cast<uint8_t*>(buffer_.data()) };
 
-               frame->data[0]          = buffer_.data();
-               frame->linesize[0]      = channel_samples * codec_context_->channels * av_get_bytes_per_sample(AV_SAMPLE_FMT_S32);
-               frame->nb_samples       = channel_samples;
-               frame->format           = AV_SAMPLE_FMT_S32;
+               const auto channel_samples = swr_convert(
+                               swr_.get(),
+                               out,
+                               static_cast<int>(buffer_.size()) / codec_context_->channels,
+                               in,
+                               decoded_frame->nb_samples);
 
-               monitor_subject_  << core::monitor::message("/file/audio/sample-rate")  % codec_context_->sample_rate
-                                               << core::monitor::message("/file/audio/channels")       % codec_context_->channels
-                                               << core::monitor::message("/file/audio/format")         % u8(av_get_sample_fmt_name(codec_context_->sample_fmt))
-                                               << core::monitor::message("/file/audio/codec")          % u8(codec_context_->codec->long_name);                 
-
-               return frame;
+               return std::make_shared<core::mutable_audio_buffer>(
+                               buffer_.begin(),
+                               buffer_.begin() + channel_samples * decoded_frame->channels);
        }
-       
-       uint32_t nb_frames() const
+
+       bool ready() const
        {
-               return 0;
+               return packets_.size() > 10;
        }
 
        std::wstring print() const
@@ -181,12 +170,12 @@ public:
        }
 };
 
-audio_decoder::audio_decoder(input& input, const core::video_format_desc& format_desc, int audio_stream_index) : impl_(new impl(input, format_desc, audio_stream_index)){}
-audio_decoder::audio_decoder(audio_decoder&& other) : impl_(std::move(other.impl_)){}
-audio_decoder& audio_decoder::operator=(audio_decoder&& other){impl_ = std::move(other.impl_); return *this;}
-std::shared_ptr<AVFrame> audio_decoder::operator()(){return impl_->poll();}
-uint32_t audio_decoder::nb_frames() const{return impl_->nb_frames();}
+audio_decoder::audio_decoder(const spl::shared_ptr<AVFormatContext>& context, int out_samplerate) : impl_(new implementation(context, out_samplerate)){}
+void audio_decoder::push(const std::shared_ptr<AVPacket>& packet){impl_->push(packet);}
+bool audio_decoder::ready() const{return impl_->ready();}
+std::shared_ptr<core::mutable_audio_buffer> audio_decoder::poll() { return impl_->poll(); }
+int    audio_decoder::num_channels() const { return impl_->codec_context_->channels; }
+uint64_t audio_decoder::ffmpeg_channel_layout() const { return impl_->codec_context_->channel_layout; }
 std::wstring audio_decoder::print() const{return impl_->print();}
-core::monitor::subject& audio_decoder::monitor_output() { return impl_->monitor_subject_;}
 
 }}
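The rewritten audio_decoder above hands every decoded frame to libswresample and returns interleaved signed 32-bit samples at the caller's sample rate, with buffer sizes counted in samples rather than bytes. A self-contained sketch of that conversion step follows; unlike the decoder, which keeps one SwrContext for its whole lifetime, this sketch creates and frees the context per call, and the function name and error handling are illustrative only:

    extern "C" {
    #include <libswresample/swresample.h>
    #include <libavutil/channel_layout.h>
    #include <libavutil/samplefmt.h>
    }

    #include <cstdint>
    #include <vector>

    // Convert in_samples samples per channel (any packed or planar source
    // format) to interleaved AV_SAMPLE_FMT_S32 at out_rate, as
    // audio_decoder::decode() does with swr_convert into its int32_t buffer.
    std::vector<int32_t> to_s32_interleaved(
            const uint8_t** in,
            int in_samples,
            int channels,
            AVSampleFormat in_format,
            int in_rate,
            int out_rate)
    {
        SwrContext* swr = swr_alloc_set_opts(
                nullptr,
                av_get_default_channel_layout(channels), AV_SAMPLE_FMT_S32, out_rate,
                av_get_default_channel_layout(channels), in_format,         in_rate,
                0, nullptr);

        if (!swr)
            return {};

        if (swr_init(swr) < 0)
        {
            swr_free(&swr);
            return {};
        }

        // Generous upper bound on output samples per channel after resampling.
        const int max_out_samples = in_samples * out_rate / in_rate + 256;
        std::vector<int32_t> buffer(static_cast<size_t>(max_out_samples) * channels);
        uint8_t* out[] = { reinterpret_cast<uint8_t*>(buffer.data()) };

        const int converted = swr_convert(swr, out, max_out_samples, in, in_samples);
        swr_free(&swr);

        buffer.resize(converted > 0 ? static_cast<size_t>(converted) * channels : 0);
        return buffer;
    }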
index 99f6e398be358d3342bb7c44b94b1761b28314fd..0deb8292045c7bbb7162f407fc17ef9e0cb2a096 100644 (file)
@@ -1,5 +1,5 @@
 /*
-* Copyright (c) 2011 Sveriges Television AB <info@casparcg.com>
+* Copyright 2013 Sveriges Television AB http://casparcg.com/
 *
 * This file is part of CasparCG (www.casparcg.com).
 *
 
 #pragma once
 
-#include <core/mixer/audio/audio_mixer.h>
-#include <core/monitor/monitor.h>
-
 #include <common/memory.h>
 
-#include <core/fwd.h>
+#include <core/frame/frame.h>
 
 #include <boost/noncopyable.hpp>
 
 struct AVPacket;
+struct AVFrame;
 struct AVFormatContext;
 
 namespace caspar { namespace ffmpeg {
-       
-class audio_decoder : public boost::noncopyable
+
+class audio_decoder : boost::noncopyable
 {
 public:
-       explicit audio_decoder(class input& input, const core::video_format_desc& format_desc, int audio_stream_index);
+       explicit audio_decoder(const spl::shared_ptr<AVFormatContext>& context, int out_samplerate);
        
-       audio_decoder(audio_decoder&& other);
-       audio_decoder& operator=(audio_decoder&& other);
+       bool ready() const;
+       void push(const std::shared_ptr<AVPacket>& packet);
+       std::shared_ptr<core::mutable_audio_buffer> poll();
 
-       std::shared_ptr<AVFrame> operator()();
+       int     num_channels() const;
+       uint64_t ffmpeg_channel_layout() const;
 
-       uint32_t nb_frames() const;
-       
        std::wstring print() const;
-       
-       core::monitor::subject& monitor_output();
-
 private:
-       struct impl;
-       spl::shared_ptr<impl> impl_;
+       struct implementation;
+       spl::shared_ptr<implementation> impl_;
 };
 
-}}
\ No newline at end of file
+}}
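The interface above replaces the old pull-style operator() with an explicit push/ready/poll contract: the demuxer pushes packets in, ready() reports whether enough packets are queued to keep decoding ahead, poll() returns either a converted audio buffer or nullptr when more input is needed, and a packet with null data triggers a codec flush. The skeleton below restates that contract with placeholder types only, so it compiles on its own and is not CasparCG code:

    #include <cstdint>
    #include <memory>
    #include <queue>
    #include <vector>

    // Stand-in packet type; in the real decoder this is an AVPacket.
    struct packet
    {
        std::vector<uint8_t> data;
    };

    class push_poll_decoder
    {
        std::queue<std::shared_ptr<packet>> packets_;

    public:
        void push(const std::shared_ptr<packet>& p)
        {
            if (p)
                packets_.push(p);
        }

        // "Enough buffered" threshold mirroring the decoder's packets_.size() > 10.
        bool ready() const
        {
            return packets_.size() > 10;
        }

        // nullptr means: nothing decodable yet, push more packets.
        std::shared_ptr<std::vector<int32_t>> poll()
        {
            if (packets_.empty())
                return nullptr;

            auto pkt = packets_.front();
            packets_.pop();

            // Placeholder "decode": one 32-bit sample per input byte.
            auto samples = std::make_shared<std::vector<int32_t>>();
            for (uint8_t b : pkt->data)
                samples->push_back(static_cast<int32_t>(b) << 16);

            return samples;
        }
    };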
index b2d4b0e8b7ceed1db9b443f6a4859f56ff3a38ad..bffae75ed4a9aa05ad7b40ef0d0ae3a3c92d40ce 100644 (file)
 
 #include "ffmpeg_producer.h"
 
-#include "../ffmpeg_pipeline.h"
 #include "../ffmpeg.h"
+#include "../ffmpeg_error.h"
 #include "util/util.h"
+#include "input/input.h"
+#include "audio/audio_decoder.h"
+#include "video/video_decoder.h"
+#include "muxer/frame_muxer.h"
 
 #include <common/param.h>
 #include <common/diagnostics/graph.h>
 #include <core/help/help_sink.h>
 #include <core/producer/media_info/media_info.h>
 #include <core/producer/framerate/framerate_producer.h>
+#include <core/frame/frame_factory.h>
 
 #include <future>
+#include <queue>
 
 namespace caspar { namespace ffmpeg {
-
 struct seek_out_of_range : virtual user_error {};
 
 std::wstring get_relative_or_original(
@@ -70,104 +75,319 @@ std::wstring get_relative_or_original(
 
 struct ffmpeg_producer : public core::frame_producer_base
 {
-       spl::shared_ptr<core::monitor::subject>                 monitor_subject_;
-       ffmpeg_pipeline                                                                 pipeline_;
-       const std::wstring                                                              filename_;
-       const std::wstring                                                              path_relative_to_media_ = get_relative_or_original(filename_, env::media_folder());
-       
-       const spl::shared_ptr<diagnostics::graph>               graph_;
-                                       
-       const core::video_format_desc                                   format_desc_;
-
-       core::constraints                                                               constraints_;
-       
-       core::draw_frame                                                                first_frame_                    = core::draw_frame::empty();
-       core::draw_frame                                                                last_frame_                             = core::draw_frame::empty();
-
-       boost::optional<uint32_t>                                               seek_target_;
-       
+       spl::shared_ptr<core::monitor::subject>                         monitor_subject_;
+       const std::wstring                                                                      filename_;
+       const std::wstring                                                                      path_relative_to_media_         = get_relative_or_original(filename_, env::media_folder());
+
+       FFMPEG_Resource                                                                         resource_type_;
+
+       const spl::shared_ptr<diagnostics::graph>                       graph_;
+       timer                                                                                           frame_timer_;
+
+       const spl::shared_ptr<core::frame_factory>                      frame_factory_;
+
+       std::shared_ptr<void>                                                           initial_logger_disabler_;
+
+       core::constraints                                                                       constraints_;
+
+       input                                                                                           input_;
+       std::unique_ptr<video_decoder>                                          video_decoder_;
+       std::unique_ptr<audio_decoder>                                          audio_decoder_;
+       std::unique_ptr<frame_muxer>                                            muxer_;
+
+       const boost::rational<int>                                                      framerate_;
+       const uint32_t                                                                          start_;
+       const uint32_t                                                                          length_;
+       const bool                                                                                      thumbnail_mode_;
+
+       core::draw_frame                                                                        last_frame_;
+
+       std::queue<std::pair<core::draw_frame, uint32_t>>       frame_buffer_;
+
+       int64_t                                                                                         frame_number_                           = 0;
+       uint32_t                                                                                        file_frame_number_                      = 0;
 public:
        explicit ffmpeg_producer(
-                       ffmpeg_pipeline pipeline, 
-                       const core::video_format_desc& format_desc)
-               : pipeline_(std::move(pipeline))
-               , filename_(u16(pipeline_.source_filename()))
-               , format_desc_(format_desc)
+                       const spl::shared_ptr<core::frame_factory>& frame_factory,
+                       const core::video_format_desc& format_desc,
+                       const std::wstring& filename,
+                       FFMPEG_Resource resource_type,
+                       const std::wstring& filter,
+                       bool loop,
+                       uint32_t start,
+                       uint32_t length,
+                       bool thumbnail_mode,
+                       const std::wstring& custom_channel_order,
+                       const ffmpeg_options& vid_params)
+               : filename_(filename)
+               , resource_type_(resource_type)
+               , frame_factory_(frame_factory)
+               , initial_logger_disabler_(temporary_enable_quiet_logging_for_thread(thumbnail_mode))
+               , input_(graph_, filename_, resource_type, loop, start, length, thumbnail_mode, vid_params)
+               , framerate_(read_framerate(*input_.context(), format_desc.framerate))
+               , start_(start)
+               , length_(length)
+               , thumbnail_mode_(thumbnail_mode)
+               , last_frame_(core::draw_frame::empty())
+               , frame_number_(0)
        {
                graph_->set_color("frame-time", diagnostics::color(0.1f, 1.0f, 0.1f));
-               graph_->set_color("underflow", diagnostics::color(0.6f, 0.3f, 0.9f));   
+               graph_->set_color("underflow", diagnostics::color(0.6f, 0.3f, 0.9f));
                diagnostics::register_graph(graph_);
 
-               pipeline_.graph(graph_);
-               pipeline_.start();
+               try
+               {
+                       video_decoder_.reset(new video_decoder(input_.context()));
+                       if (!thumbnail_mode_)
+                               CASPAR_LOG(info) << print() << L" " << video_decoder_->print();
 
-               while ((first_frame_ = pipeline_.try_pop_frame()) == core::draw_frame::late())
-                       boost::this_thread::sleep_for(boost::chrono::milliseconds(1));
+                       constraints_.width.set(video_decoder_->width());
+                       constraints_.height.set(video_decoder_->height());
+               }
+               catch (averror_stream_not_found&)
+               {
+                       //CASPAR_LOG(warning) << print() << " No video-stream found. Running without video.";
+               }
+               catch (...)
+               {
+                       if (!thumbnail_mode_)
+                       {
+                               CASPAR_LOG_CURRENT_EXCEPTION();
+                               CASPAR_LOG(warning) << print() << " Failed to open video-stream. Running without video.";
+                       }
+               }
 
-               constraints_.width.set(pipeline_.width());
-               constraints_.height.set(pipeline_.height());
+               auto channel_layout = core::audio_channel_layout::invalid();
 
-               if (is_logging_quiet_for_thread())
-                       CASPAR_LOG(debug) << print() << L" Initialized";
-               else
-                       CASPAR_LOG(info) << print() << L" Initialized";
+               if (!thumbnail_mode_)
+               {
+                       try
+                       {
+                               audio_decoder_.reset(new audio_decoder(input_.context(), format_desc.audio_sample_rate));
+                               channel_layout = get_audio_channel_layout(
+                                               audio_decoder_->num_channels(),
+                                               audio_decoder_->ffmpeg_channel_layout(),
+                                               custom_channel_order);
+                               CASPAR_LOG(info) << print() << L" " << audio_decoder_->print();
+                       }
+                       catch (averror_stream_not_found&)
+                       {
+                               //CASPAR_LOG(warning) << print() << " No audio-stream found. Running without audio.";
+                       }
+                       catch (...)
+                       {
+                               CASPAR_LOG_CURRENT_EXCEPTION();
+                               CASPAR_LOG(warning) << print() << " Failed to open audio-stream. Running without audio.";
+                       }
+               }
+
+               if (!video_decoder_ && !audio_decoder_)
+                       CASPAR_THROW_EXCEPTION(averror_stream_not_found() << msg_info("No streams found"));
+
+               muxer_.reset(new frame_muxer(framerate_, frame_factory, format_desc, channel_layout, filter, true));
        }
 
        // frame_producer
-       
+
        core::draw_frame receive_impl() override
-       {                               
-               auto frame = core::draw_frame::late();
-               
-               caspar::timer frame_timer;
-               
-               auto decoded_frame = first_frame_;
-
-               if (decoded_frame == core::draw_frame::empty())
-                       decoded_frame = pipeline_.try_pop_frame();
-               else
-                       first_frame_ = core::draw_frame::empty();
+       {
+               return render_frame().first;
+       }
+
+       core::draw_frame last_frame() override
+       {
+               return core::draw_frame::still(last_frame_);
+       }
+
+       core::constraints& pixel_constraints() override
+       {
+               return constraints_;
+       }
+
+       double out_fps() const
+       {
+               auto out_framerate      = muxer_->out_framerate();
+               auto fps                        = static_cast<double>(out_framerate.numerator()) / static_cast<double>(out_framerate.denominator());
+
+               return fps;
+       }
 
-               if (decoded_frame == core::draw_frame::empty())
-                       frame = core::draw_frame::still(last_frame_);
-               else if (decoded_frame != core::draw_frame::late())
-                       last_frame_ = frame = core::draw_frame(std::move(decoded_frame));
-               else if (pipeline_.started())
-                       graph_->set_tag(diagnostics::tag_severity::WARNING, "underflow");
+       std::pair<core::draw_frame, uint32_t> render_frame()
+       {
+               frame_timer_.restart();
+               auto disable_logging = temporary_enable_quiet_logging_for_thread(thumbnail_mode_);
+
+               for (int n = 0; n < 16 && frame_buffer_.size() < 2; ++n)
+                       try_decode_frame();
+
+               graph_->set_value("frame-time", frame_timer_.elapsed() * out_fps() * 0.5);
+
+               if (frame_buffer_.empty())
+               {
+                       if (input_.eof())
+                       {
+                               send_osc();
+                               return std::make_pair(last_frame(), -1);
+                       }
+                       else if (resource_type_ == FFMPEG_Resource::FFMPEG_FILE)
+                       {
+                               graph_->set_tag(diagnostics::tag_severity::WARNING, "underflow");
+                               send_osc();
+                               return std::make_pair(core::draw_frame::late(), -1);
+                       }
+                       else
+                       {
+                               send_osc();
+                               return std::make_pair(last_frame(), -1);
+                       }
+               }
+
+               auto frame = frame_buffer_.front();
+               frame_buffer_.pop();
+
+               ++frame_number_;
+               file_frame_number_ = frame.second;
 
                graph_->set_text(print());
 
-               graph_->set_value("frame-time", frame_timer.elapsed()*format_desc_.fps*0.5);
-               *monitor_subject_
-                               << core::monitor::message("/profiler/time")     % frame_timer.elapsed() % (1.0/format_desc_.fps);                       
-               *monitor_subject_
-                               << core::monitor::message("/file/frame")        % static_cast<int32_t>(pipeline_.last_frame())
-                                                                                                                       % static_cast<int32_t>(pipeline_.length())
-                               << core::monitor::message("/file/fps")          % boost::rational_cast<double>(pipeline_.framerate())
-                               << core::monitor::message("/file/path")         % path_relative_to_media_
-                               << core::monitor::message("/loop")                      % pipeline_.loop();
+               last_frame_ = frame.first;
+
+               send_osc();
 
                return frame;
        }
 
-       core::draw_frame last_frame() override
+       void send_osc()
        {
-               return core::draw_frame::still(last_frame_);
+               double fps = static_cast<double>(framerate_.numerator()) / static_cast<double>(framerate_.denominator());
+
+               *monitor_subject_       << core::monitor::message("/profiler/time")             % frame_timer_.elapsed() % (1.0/out_fps());
+
+               *monitor_subject_       << core::monitor::message("/file/time")                 % (file_frame_number()/fps)
+                                                                                                                                                       % (file_nb_frames()/fps)
+                                                       << core::monitor::message("/file/frame")                        % static_cast<int32_t>(file_frame_number())
+                                                                                                                                                       % static_cast<int32_t>(file_nb_frames())
+                                                       << core::monitor::message("/file/fps")                  % fps
+                                                       << core::monitor::message("/file/path")                 % path_relative_to_media_
+                                                       << core::monitor::message("/loop")                              % input_.loop();
        }
 
-       core::constraints& pixel_constraints() override
+       core::draw_frame render_specific_frame(uint32_t file_position)
        {
-               return constraints_;
+		// The retry logic below was arrived at by trial and error; seeking here is somewhat nondeterministic.
+               static const int NUM_RETRIES = 32;
+
+               if (file_position > 0) // Assume frames are requested in sequential order,
+                                          // therefore no seeking should be necessary for the first frame.
+               {
+                       input_.seek(file_position > 1 ? file_position - 2: file_position).get();
+                       boost::this_thread::sleep(boost::posix_time::milliseconds(40));
+               }
+
+               for (int i = 0; i < NUM_RETRIES; ++i)
+               {
+                       boost::this_thread::sleep(boost::posix_time::milliseconds(40));
+
+                       auto frame = render_frame();
+
+                       if (frame.second == std::numeric_limits<uint32_t>::max())
+                       {
+                               // Retry
+                               continue;
+                       }
+                       else if (frame.second == file_position + 1 || frame.second == file_position)
+                               return frame.first;
+                       else if (frame.second > file_position + 1)
+                       {
+                               CASPAR_LOG(trace) << print() << L" " << frame.second << L" received, wanted " << file_position + 1;
+                               int64_t adjusted_seek = file_position - (frame.second - file_position + 1);
+
+                               if (adjusted_seek > 1 && file_position > 0)
+                               {
+                                       CASPAR_LOG(trace) << print() << L" adjusting to " << adjusted_seek;
+                                       input_.seek(static_cast<uint32_t>(adjusted_seek) - 1).get();
+                                       boost::this_thread::sleep(boost::posix_time::milliseconds(40));
+                               }
+                               else
+                                       return frame.first;
+                       }
+               }
+
+		CASPAR_LOG(trace) << print() << L" Giving up finding frame at " << file_position;
+               return core::draw_frame::empty();
+       }
+
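+	// Builds a single mosaic frame for thumbnail generation: a video-grid x video-grid
+	// matrix of stills spread evenly across the clip, each cell scaled and positioned via
+	// the image fill transform. A grid of 1 simply uses the middle frame of the clip.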
+       core::draw_frame create_thumbnail_frame()
+       {
+               auto total_frames = nb_frames();
+               auto grid = env::properties().get(L"configuration.thumbnails.video-grid", 2);
+
+               if (grid < 1)
+               {
+                       CASPAR_LOG(error) << L"configuration/thumbnails/video-grid cannot be less than 1";
+                       BOOST_THROW_EXCEPTION(caspar_exception() << msg_info("configuration/thumbnails/video-grid cannot be less than 1"));
+               }
+
+               if (grid == 1)
+               {
+                       return render_specific_frame(total_frames / 2);
+               }
+
+               auto num_snapshots = grid * grid;
+
+               std::vector<core::draw_frame> frames;
+
+               for (int i = 0; i < num_snapshots; ++i)
+               {
+                       int x = i % grid;
+                       int y = i / grid;
+                       int desired_frame;
+
+                       if (i == 0)
+                               desired_frame = 0; // first
+                       else if (i == num_snapshots - 1)
+                               desired_frame = total_frames - 1; // last
+                       else
+                               // evenly distributed across the file.
+                               desired_frame = total_frames * i / (num_snapshots - 1);
+
+                       auto frame = render_specific_frame(desired_frame);
+                       frame.transform().image_transform.fill_scale[0] = 1.0 / static_cast<double>(grid);
+                       frame.transform().image_transform.fill_scale[1] = 1.0 / static_cast<double>(grid);
+                       frame.transform().image_transform.fill_translation[0] = 1.0 / static_cast<double>(grid) * x;
+                       frame.transform().image_transform.fill_translation[1] = 1.0 / static_cast<double>(grid) * y;
+
+                       frames.push_back(frame);
+               }
+
+               return core::draw_frame(frames);
+       }
+
+       uint32_t file_frame_number() const
+       {
+               return video_decoder_ ? video_decoder_->file_frame_number() : 0;
        }
 
        uint32_t nb_frames() const override
        {
-               if (pipeline_.loop())
+               if (resource_type_ == FFMPEG_Resource::FFMPEG_DEVICE || resource_type_ == FFMPEG_Resource::FFMPEG_STREAM || input_.loop())
                        return std::numeric_limits<uint32_t>::max();
 
-               return pipeline_.length();
+               uint32_t nb_frames = file_nb_frames();
+
+               nb_frames = std::min(length_, nb_frames - start_);
+               nb_frames = muxer_->calc_nb_frames(nb_frames);
+
+               return nb_frames;
+       }
+
+       uint32_t file_nb_frames() const
+       {
+               uint32_t file_nb_frames = 0;
+               file_nb_frames = std::max(file_nb_frames, video_decoder_ ? video_decoder_->nb_frames() : 0);
+               return file_nb_frames;
        }
-               
+
        std::future<std::wstring> call(const std::vector<std::wstring>& params) override
        {
                static const boost::wregex loop_exp(LR"(LOOP\s*(?<VALUE>\d?)?)", boost::regex::icase);
@@ -176,47 +396,47 @@ public:
 		static const boost::wregex start_exp(LR"(START\s+(?<VALUE>\d+)?)", boost::regex::icase);
 
                auto param = boost::algorithm::join(params, L" ");
-               
+
                std::wstring result;
-                       
+
                boost::wsmatch what;
                if(boost::regex_match(param, what, loop_exp))
                {
                        auto value = what["VALUE"].str();
-                       if(!value.empty())
-                               pipeline_.loop(boost::lexical_cast<bool>(value));
-                       result = boost::lexical_cast<std::wstring>(pipeline_.loop());
+                       if (!value.empty())
+                               input_.loop(boost::lexical_cast<bool>(value));
+                       result = boost::lexical_cast<std::wstring>(input_.loop());
                }
                else if(boost::regex_match(param, what, seek_exp))
                {
                        auto value = what["VALUE"].str();
-                       pipeline_.seek(boost::lexical_cast<uint32_t>(value));
+                       input_.seek(boost::lexical_cast<uint32_t>(value));
                }
                else if(boost::regex_match(param, what, length_exp))
                {
                        auto value = what["VALUE"].str();
                        if(!value.empty())
-                               pipeline_.length(boost::lexical_cast<uint32_t>(value));                 
-                       result = boost::lexical_cast<std::wstring>(pipeline_.length());
+                               input_.length(boost::lexical_cast<uint32_t>(value));
+                       result = boost::lexical_cast<std::wstring>(input_.length());
                }
                else if(boost::regex_match(param, what, start_exp))
                {
                        auto value = what["VALUE"].str();
                        if(!value.empty())
-                               pipeline_.start_frame(boost::lexical_cast<uint32_t>(value));
-                       result = boost::lexical_cast<std::wstring>(pipeline_.start_frame());
+                               input_.start(boost::lexical_cast<uint32_t>(value));
+                       result = boost::lexical_cast<std::wstring>(input_.start());
                }
                else
                        CASPAR_THROW_EXCEPTION(invalid_argument());
 
                return make_ready_future(std::move(result));
        }
-                               
+
        std::wstring print() const override
        {
-               return L"ffmpeg[" + boost::filesystem::path(filename_).filename().wstring() + L"|" 
-                                                 + print_mode() + L"|" 
-                                                 + boost::lexical_cast<std::wstring>(pipeline_.last_frame()) + L"/" + boost::lexical_cast<std::wstring>(pipeline_.length()) + L"]";
+               return L"ffmpeg[" + boost::filesystem::path(filename_).filename().wstring() + L"|"
+                                                 + print_mode() + L"|"
+                                                 + boost::lexical_cast<std::wstring>(file_frame_number_) + L"/" + boost::lexical_cast<std::wstring>(file_nb_frames()) + L"]";
        }
 
        std::wstring name() const override
@@ -227,20 +447,21 @@ public:
        boost::property_tree::wptree info() const override
        {
                boost::property_tree::wptree info;
-               info.add(L"type",                               L"ffmpeg");
+               info.add(L"type",                               L"ffmpeg-producer");
                info.add(L"filename",                   filename_);
-               info.add(L"width",                              pipeline_.width());
-               info.add(L"height",                             pipeline_.height());
-               info.add(L"progressive",                pipeline_.progressive());
-               info.add(L"fps",                                boost::rational_cast<double>(pipeline_.framerate()));
-               info.add(L"loop",                               pipeline_.loop());
-               info.add(L"frame-number",               frame_number());
-               info.add(L"nb-frames",                  nb_frames());
-               info.add(L"file-frame-number",  pipeline_.last_frame());
-               info.add(L"file-nb-frames",             pipeline_.length());
+               info.add(L"width",                              video_decoder_ ? video_decoder_->width() : 0);
+               info.add(L"height",                             video_decoder_ ? video_decoder_->height() : 0);
+               info.add(L"progressive",                video_decoder_ ? video_decoder_->is_progressive() : false);
+               info.add(L"fps",                                static_cast<double>(framerate_.numerator()) / static_cast<double>(framerate_.denominator()));
+               info.add(L"loop",                               input_.loop());
+               info.add(L"frame-number",               frame_number_);
+               auto nb_frames2 = nb_frames();
+		info.add(L"nb-frames",			nb_frames2 == std::numeric_limits<uint32_t>::max() ? -1 : nb_frames2);
+               info.add(L"file-frame-number",  file_frame_number_);
+               info.add(L"file-nb-frames",             file_nb_frames());
                return info;
        }
-       
+
        core::monitor::subject& monitor_output()
        {
                return *monitor_subject_;
@@ -250,18 +471,82 @@ public:
 
        std::wstring print_mode() const
        {
-               return ffmpeg::print_mode(
-                               pipeline_.width(),
-                               pipeline_.height(),
-                               boost::rational_cast<double>(pipeline_.framerate()), 
-                               !pipeline_.progressive());
+               return video_decoder_ ? ffmpeg::print_mode(
+                               video_decoder_->width(),
+                               video_decoder_->height(),
+                               static_cast<double>(framerate_.numerator()) / static_cast<double>(framerate_.denominator()),
+                               !video_decoder_->is_progressive()) : L"";
+       }
+
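+	// Feeds up to 32 input packets to whichever decoders are not yet ready, polls video
+	// and audio in parallel and pushes the results into the muxer. If one of the two
+	// streams is missing, matching empty/flush data is substituted so the muxer can still
+	// pair audio with video.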
+       void try_decode_frame()
+       {
+               std::shared_ptr<AVPacket> pkt;
+
+               for (int n = 0; n < 32 && ((video_decoder_ && !video_decoder_->ready()) || (audio_decoder_ && !audio_decoder_->ready())) && input_.try_pop(pkt); ++n)
+               {
+                       if (video_decoder_)
+                               video_decoder_->push(pkt);
+                       if (audio_decoder_)
+                               audio_decoder_->push(pkt);
+               }
+
+               std::shared_ptr<AVFrame>                                        video;
+               std::shared_ptr<core::mutable_audio_buffer>     audio;
+
+               tbb::parallel_invoke(
+               [&]
+               {
+                       if (!muxer_->video_ready() && video_decoder_)
+                               video = video_decoder_->poll();
+               },
+               [&]
+               {
+                       if (!muxer_->audio_ready() && audio_decoder_)
+                               audio = audio_decoder_->poll();
+               });
+
+               muxer_->push(video);
+               muxer_->push(audio);
+
+               if (!audio_decoder_)
+               {
+                       if(video == flush_video())
+                               muxer_->push(flush_audio());
+                       else if(!muxer_->audio_ready())
+                               muxer_->push(empty_audio());
+               }
+
+               if (!video_decoder_)
+               {
+                       if(audio == flush_audio())
+                               muxer_->push(flush_video());
+                       else if(!muxer_->video_ready())
+                               muxer_->push(empty_video());
+               }
+
+               uint32_t file_frame_number = 0;
+               file_frame_number = std::max(file_frame_number, video_decoder_ ? video_decoder_->file_frame_number() : 0);
+               //file_frame_number = std::max(file_frame_number, audio_decoder_ ? audio_decoder_->file_frame_number() : 0);
+
+               for (auto frame = muxer_->poll(); frame != core::draw_frame::empty(); frame = muxer_->poll())
+                       frame_buffer_.push(std::make_pair(frame, file_frame_number));
+       }
+
+       bool audio_only() const
+       {
+               return !video_decoder_;
+       }
+
+       boost::rational<int> get_out_framerate() const
+       {
+               return muxer_->out_framerate();
        }
 };
 
 void describe_producer(core::help_sink& sink, const core::help_repository& repo)
 {
        sink.short_description(L"A producer for playing media files supported by FFmpeg.");
-       sink.syntax(L"[clip:string] {[loop:LOOP]} {START,SEEK [start:int]} {LENGTH [start:int]} {FILTER [filter:string]} {CHANNEL_LAYOUT [channel_layout:string]}");
+       sink.syntax(L"[clip:string] {[loop:LOOP]} {SEEK [start:int]} {LENGTH [start:int]} {FILTER [filter:string]} {CHANNEL_LAYOUT [channel_layout:string]}");
        sink.para()
                ->text(L"The FFmpeg Producer can play all media that FFmpeg can play, which includes many ")
                ->text(L"QuickTime video codec such as Animation, PNG, PhotoJPEG, MotionJPEG, as well as ")
@@ -278,9 +563,9 @@ void describe_producer(core::help_sink& sink, const core::help_repository& repo)
        sink.para()->text(L"Examples:");
        sink.example(L">> PLAY 1-10 folder/clip", L"to play all frames in a clip and stop at the last frame.");
        sink.example(L">> PLAY 1-10 folder/clip LOOP", L"to loop a clip between the first frame and the last frame.");
-       sink.example(L">> PLAY 1-10 folder/clip LOOP START 10", L"to loop a clip between frame 10 and the last frame.");
-       sink.example(L">> PLAY 1-10 folder/clip LOOP START 10 LENGTH 50", L"to loop a clip between frame 10 and frame 60.");
-       sink.example(L">> PLAY 1-10 folder/clip START 10 LENGTH 50", L"to play frames 10-60 in a clip and stop.");
+       sink.example(L">> PLAY 1-10 folder/clip LOOP SEEK 10", L"to loop a clip between frame 10 and the last frame.");
+       sink.example(L">> PLAY 1-10 folder/clip LOOP SEEK 10 LENGTH 50", L"to loop a clip between frame 10 and frame 60.");
+       sink.example(L">> PLAY 1-10 folder/clip SEEK 10 LENGTH 50", L"to play frames 10-60 in a clip and stop.");
        sink.example(L">> PLAY 1-10 folder/clip FILTER yadif=1,-1", L"to deinterlace the video.");
        sink.example(L">> PLAY 1-10 folder/clip CHANNEL_LAYOUT film", L"given the defaults in casparcg.config this will specifies that the clip has 6 audio channels of the type 5.1 and that they are in the order FL FC FR BL BR LFE regardless of what ffmpeg says.");
        sink.example(L">> PLAY 1-10 folder/clip CHANNEL_LAYOUT \"5.1:LFE FL FC FR BL BR\"", L"specifies that the clip has 6 audio channels of the type 5.1 and that they are in the specified order regardless of what ffmpeg says.");
@@ -288,6 +573,7 @@ void describe_producer(core::help_sink& sink, const core::help_repository& repo)
        sink.example(L">> CALL 1-10 LOOP 1");
        sink.example(L">> CALL 1-10 START 10");
        sink.example(L">> CALL 1-10 LENGTH 50");
+       sink.example(L">> CALL 1-10 SEEK 30");
        core::describe_framerate_producer(sink);
 }
 
@@ -296,37 +582,88 @@ spl::shared_ptr<core::frame_producer> create_producer(
                const std::vector<std::wstring>& params,
                const spl::shared_ptr<core::media_info_repository>& info_repo)
 {
-       auto filename = probe_stem(env::media_folder() + L"/" + params.at(0), false);
+       // Infer the resource type from the resource_name
+       auto resource_type      = FFMPEG_Resource::FFMPEG_FILE;
+       auto tokens                     = protocol_split(params.at(0));
+       auto filename           = params.at(0);
+
+       if (!tokens[0].empty())
+       {
+               if (tokens[0] == L"dshow")
+               {
+                       // Camera
+                       resource_type   = FFMPEG_Resource::FFMPEG_DEVICE;
+                       filename                = tokens[1];
+               }
+               else
+               {
+                       // Stream
+                       resource_type   = FFMPEG_Resource::FFMPEG_STREAM;
+                       filename                = params.at(0);
+               }
+       }
+       else
+       {
+               // File
+               resource_type   = FFMPEG_Resource::FFMPEG_FILE;
+               filename                = probe_stem(env::media_folder() + L"/" + params.at(0), false);
+       }
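+	// A "dshow" prefix selects a capture device, any other protocol prefix is treated as
+	// a network stream, and a plain name is resolved against the media folder.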
 
-       if(filename.empty())
+       if (filename.empty())
                return core::frame_producer::empty();
-       
-       auto pipeline = ffmpeg_pipeline()
-                       .from_file(u8(filename))
-                       .loop(contains_param(L"LOOP", params))
-                       .start_frame(get_param(L"START", params, get_param(L"SEEK", params, static_cast<uint32_t>(0))))
-                       .length(get_param(L"LENGTH", params, std::numeric_limits<uint32_t>::max()))
-                       .vfilter(u8(get_param(L"FILTER", params, L"")))
-                       .to_memory(dependencies.frame_factory, dependencies.format_desc);
-
-       auto producer = create_destroy_proxy(spl::make_shared_ptr(std::make_shared<ffmpeg_producer>(
-                       pipeline,
-                       dependencies.format_desc)));
-
-       if (pipeline.framerate() == -1) // Audio only.
-               return producer;
-
-       auto source_framerate = pipeline.framerate();
-       auto target_framerate = boost::rational<int>(
-                       dependencies.format_desc.time_scale,
-                       dependencies.format_desc.duration);
-
-       return core::create_framerate_producer(
+
+       auto loop                                       = contains_param(L"LOOP",               params);
+       auto start                                      = get_param(L"SEEK",                    params, static_cast<uint32_t>(0));
+       auto length                                     = get_param(L"LENGTH",                  params, std::numeric_limits<uint32_t>::max());
+       auto filter_str                         = get_param(L"FILTER",                  params, L"");
+       auto custom_channel_order       = get_param(L"CHANNEL_LAYOUT",  params, L"");
+
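+	// Friendly deinterlacing aliases are rewritten into their ffmpeg filter equivalents
+	// before the string reaches libavfilter.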
+       boost::ireplace_all(filter_str, L"DEINTERLACE_BOB",     L"YADIF=1:-1");
+       boost::ireplace_all(filter_str, L"DEINTERLACE_LQ",      L"SEPARATEFIELDS");
+       boost::ireplace_all(filter_str, L"DEINTERLACE",         L"YADIF=0:-1");
+
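+	// Everything after a "--" token is collected as name/value option pairs (the leading
+	// '-' of each option name is stripped) and handed to the producer as raw ffmpeg options.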
+       ffmpeg_options vid_params;
+       bool haveFFMPEGStartIndicator = false;
+       for (size_t i = 0; i < params.size() - 1; ++i)
+       {
+               if (!haveFFMPEGStartIndicator && params[i] == L"--")
+               {
+                       haveFFMPEGStartIndicator = true;
+                       continue;
+               }
+               if (haveFFMPEGStartIndicator)
+               {
+                       auto name = u8(params.at(i++)).substr(1);
+                       auto value = u8(params.at(i));
+                       vid_params.push_back(std::make_pair(name, value));
+               }
+       }
+
+       auto producer = spl::make_shared<ffmpeg_producer>(
+                       dependencies.frame_factory,
+                       dependencies.format_desc,
+                       filename,
+                       resource_type,
+                       filter_str,
+                       loop,
+                       start,
+                       length,
+                       false,
+                       custom_channel_order,
+                       vid_params);
+
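+	// Audio-only clips are returned directly; clips with video are wrapped in a
+	// framerate_producer that converts the producer's output framerate to the channel's
+	// framerate, field mode and audio cadence.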
+       if (producer->audio_only())
+               return core::create_destroy_proxy(producer);
+
+       auto get_source_framerate       = [=] { return producer->get_out_framerate(); };
+       auto target_framerate           = dependencies.format_desc.framerate;
+
+       return core::create_destroy_proxy(core::create_framerate_producer(
                        producer,
-                       source_framerate,
+                       get_source_framerate,
                        target_framerate,
                        dependencies.format_desc.field_mode,
-                       dependencies.format_desc.audio_cadence);
+                       dependencies.format_desc.audio_cadence));
 }
 
 core::draw_frame create_thumbnail_frame(
@@ -340,67 +677,25 @@ core::draw_frame create_thumbnail_frame(
        if (filename.empty())
                return core::draw_frame::empty();
 
-       auto render_specific_frame = [&](std::int64_t frame_num)
-       {
-               auto pipeline = ffmpeg_pipeline()
-                       .from_file(u8(filename))
-                       .start_frame(static_cast<uint32_t>(frame_num))
-                       .to_memory(dependencies.frame_factory, dependencies.format_desc);
-               pipeline.start();
-
-               auto frame = core::draw_frame::empty();
-               while ((frame = pipeline.try_pop_frame()) == core::draw_frame::late())
-                       boost::this_thread::sleep_for(boost::chrono::milliseconds(1));
-               return frame;
-       };
-
-       auto info = info_repo->get(filename);
-
-       if (!info)
-               return core::draw_frame::empty();
-
-       auto total_frames = info->duration;
-       auto grid = env::properties().get(L"configuration.thumbnails.video-grid", 2);
-
-       if (grid < 1)
-       {
-               CASPAR_LOG(error) << L"configuration/thumbnails/video-grid cannot be less than 1";
-               BOOST_THROW_EXCEPTION(caspar_exception() << msg_info("configuration/thumbnails/video-grid cannot be less than 1"));
-       }
-
-       if (grid == 1)
-       {
-               return render_specific_frame(total_frames / 2);
-       }
-
-       auto num_snapshots = grid * grid;
-
-       std::vector<core::draw_frame> frames;
-
-       for (int i = 0; i < num_snapshots; ++i)
-       {
-               int x = i % grid;
-               int y = i / grid;
-               std::int64_t desired_frame;
-
-               if (i == 0)
-                       desired_frame = 0; // first
-               else if (i == num_snapshots - 1)
-                       desired_frame = total_frames - 2; // last
-               else
-                       // evenly distributed across the file.
-                       desired_frame = total_frames * i / (num_snapshots - 1);
-
-               auto frame = render_specific_frame(desired_frame);
-               frame.transform().image_transform.fill_scale[0] = 1.0 / static_cast<double>(grid);
-               frame.transform().image_transform.fill_scale[1] = 1.0 / static_cast<double>(grid);
-               frame.transform().image_transform.fill_translation[0] = 1.0 / static_cast<double>(grid) * x;
-               frame.transform().image_transform.fill_translation[1] = 1.0 / static_cast<double>(grid) * y;
-
-               frames.push_back(frame);
-       }
-
-       return core::draw_frame(frames);
+       auto loop               = false;
+       auto start              = 0;
+       auto length             = std::numeric_limits<uint32_t>::max();
+       auto filter_str = L"";
+
+       ffmpeg_options vid_params;
+       auto producer = spl::make_shared<ffmpeg_producer>(
+                       dependencies.frame_factory,
+                       dependencies.format_desc,
+                       filename,
+                       FFMPEG_Resource::FFMPEG_FILE,
+                       filter_str,
+                       loop,
+                       start,
+                       length,
+                       true,
+                       L"",
+                       vid_params);
+
+       return producer->create_thumbnail_frame();
 }
-
 }}
index e562fa377ba5fc2318a698cee0a9bfd3bc82c8c8..e07bbcdd173fb78627dd9d63ffbf78b5a78f7e38 100644 (file)
@@ -171,45 +171,36 @@ struct audio_filter::implementation
                        std::vector<AVFilterContext*>& source_contexts,
                        std::vector<AVFilterContext*>& sink_contexts)
        {
-               try
-               {
-                       AVFilterInOut* outputs  = nullptr;
-                       AVFilterInOut* inputs   = nullptr;
+               AVFilterInOut* outputs  = nullptr;
+               AVFilterInOut* inputs   = nullptr;
 
-                       FF(avfilter_graph_parse2(
-                                       &graph,
-                                       filtergraph.c_str(),
-                                       &inputs,
-                                       &outputs));
+               FF(avfilter_graph_parse2(
+                               &graph,
+                               filtergraph.c_str(),
+                               &inputs,
+                               &outputs));
 
-                       // Workaround because outputs and inputs are not filled in for some reason
-                       for (unsigned i = 0; i < graph.nb_filters; ++i)
-                       {
-                               auto filter = graph.filters[i];
+               // Workaround because outputs and inputs are not filled in for some reason
+               for (unsigned i = 0; i < graph.nb_filters; ++i)
+               {
+                       auto filter = graph.filters[i];
 
-                               if (std::string(filter->filter->name) == "abuffer")
-                                       source_contexts.push_back(filter);
+                       if (std::string(filter->filter->name) == "abuffer")
+                               source_contexts.push_back(filter);
 
-                               if (std::string(filter->filter->name) == "abuffersink")
-                                       sink_contexts.push_back(filter);
-                       }
+                       if (std::string(filter->filter->name) == "abuffersink")
+                               sink_contexts.push_back(filter);
+               }
 
-                       for (AVFilterInOut* iter = inputs; iter; iter = iter->next)
-                               source_contexts.push_back(iter->filter_ctx);
+               for (AVFilterInOut* iter = inputs; iter; iter = iter->next)
+                       source_contexts.push_back(iter->filter_ctx);
 
-                       for (AVFilterInOut* iter = outputs; iter; iter = iter->next)
-                               sink_contexts.push_back(iter->filter_ctx);
+               for (AVFilterInOut* iter = outputs; iter; iter = iter->next)
+                       sink_contexts.push_back(iter->filter_ctx);
 
-                       FF(avfilter_graph_config(
-                               &graph, 
-                               nullptr));
-               }
-               catch(...)
-               {
-                       //avfilter_inout_free(&outputs);
-                       //avfilter_inout_free(&inputs);
-                       throw;
-               }
+               FF(avfilter_graph_config(
+                       &graph, 
+                       nullptr));
        }
 
        void push(int input_pad_id, const std::shared_ptr<AVFrame>& src_av_frame)
index 537f2fb59b334cb7660230f549b853c267383a07..aa83cf36662584a583fd758a55b3850c03d0a53b 100644 (file)
@@ -25,6 +25,7 @@
 
 #include "../../ffmpeg_error.h"
 #include "../../ffmpeg.h"
+#include "../util/util.h"
 
 #include <common/assert.h>
 #include <common/except.h>
@@ -43,7 +44,7 @@
 #pragma warning (push)
 #pragma warning (disable : 4244)
 #endif
-extern "C" 
+extern "C"
 {
        #include <libavutil/avutil.h>
        #include <libavutil/imgutils.h>
@@ -58,7 +59,6 @@ extern "C"
 #endif
 
 namespace caspar { namespace ffmpeg {
-       
 struct filter::implementation
 {
        std::string                                                             filtergraph_;
@@ -68,7 +68,7 @@ struct filter::implementation
     AVFilterContext*                                           video_graph_out_;
 
        std::queue<std::shared_ptr<AVFrame>>    fast_path_;
-               
+
        implementation(
                        int in_width,
                        int in_height,
@@ -77,7 +77,8 @@ struct filter::implementation
                        boost::rational<int> in_sample_aspect_ratio,
                        AVPixelFormat in_pix_fmt,
                        std::vector<AVPixelFormat> out_pix_fmts,
-                       const std::string& filtergraph) 
+                       const std::string& filtergraph,
+                       bool multithreaded)
                : filtergraph_(boost::to_lower_copy(filtergraph))
        {
                if(out_pix_fmts.empty())
@@ -99,66 +100,73 @@ struct filter::implementation
                out_pix_fmts.push_back(AV_PIX_FMT_NONE);
 
                video_graph_.reset(
-                       avfilter_graph_alloc(), 
+                       avfilter_graph_alloc(),
                        [](AVFilterGraph* p)
                        {
                                avfilter_graph_free(&p);
                        });
-               
-               video_graph_->nb_threads  = 0;
-               video_graph_->thread_type = AVFILTER_THREAD_SLICE;
-                               
+
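+		// nb_threads = 0 lets avfilter pick the worker-thread count for slice threading;
+		// nb_threads = 1 keeps the whole graph single threaded.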
+               if (multithreaded)
+               {
+                       video_graph_->nb_threads        = 0;
+                       video_graph_->thread_type       = AVFILTER_THREAD_SLICE;
+               }
+               else
+               {
+                       video_graph_->nb_threads        = 1;
+               }
+
                const auto vsrc_options = (boost::format("video_size=%1%x%2%:pix_fmt=%3%:time_base=%4%/%5%:pixel_aspect=%6%/%7%:frame_rate=%8%/%9%")
                        % in_width % in_height
                        % in_pix_fmt
                        % in_time_base.numerator() % in_time_base.denominator()
                        % in_sample_aspect_ratio.numerator() % in_sample_aspect_ratio.denominator()
                        % in_frame_rate.numerator() % in_frame_rate.denominator()).str();
-                                       
-               AVFilterContext* filt_vsrc = nullptr;                   
+
+               AVFilterContext* filt_vsrc = nullptr;
                FF(avfilter_graph_create_filter(
                        &filt_vsrc,
-                       avfilter_get_by_name("buffer"), 
+                       avfilter_get_by_name("buffer"),
                        "filter_buffer",
-                       vsrc_options.c_str(), 
-                       nullptr, 
+                       vsrc_options.c_str(),
+                       nullptr,
                        video_graph_.get()));
-                               
+
                AVFilterContext* filt_vsink = nullptr;
                FF(avfilter_graph_create_filter(
                        &filt_vsink,
-                       avfilter_get_by_name("buffersink"), 
+                       avfilter_get_by_name("buffersink"),
                        "filter_buffersink",
-                       nullptr, 
-                       nullptr, 
+                       nullptr,
+                       nullptr,
                        video_graph_.get()));
-               
+
 #pragma warning (push)
 #pragma warning (disable : 4245)
 
                FF(av_opt_set_int_list(
-                       filt_vsink, 
-                       "pix_fmts", 
-                       out_pix_fmts.data(), 
+                       filt_vsink,
+                       "pix_fmts",
+                       out_pix_fmts.data(),
                        -1,
                        AV_OPT_SEARCH_CHILDREN));
 
 #pragma warning (pop)
-                       
+
                configure_filtergraph(
-                       *video_graph_, 
+                       *video_graph_,
                        filtergraph_,
                        *filt_vsrc,
                        *filt_vsink);
 
                video_graph_in_  = filt_vsrc;
                video_graph_out_ = filt_vsink;
-               
+
                if (is_logging_quiet_for_thread())
                        CASPAR_LOG(trace)
-                               <<      u16(std::string("\n") 
+                               <<      u16(std::string("\n")
                                        + avfilter_graph_dump(
-                                                       video_graph_.get(), 
+                                                       video_graph_.get(),
                                                        nullptr));
                else
                        CASPAR_LOG(debug)
@@ -167,61 +175,47 @@ struct filter::implementation
                                                        video_graph_.get(),
                                                        nullptr));
        }
-       
+
        void configure_filtergraph(
-               AVFilterGraph& graph, 
-               const std::string& filtergraph, 
-               AVFilterContext& source_ctx, 
+               AVFilterGraph& graph,
+               const std::string& filtergraph,
+               AVFilterContext& source_ctx,
                AVFilterContext& sink_ctx)
        {
-               AVFilterInOut* outputs = nullptr;
-               AVFilterInOut* inputs = nullptr;
-
-               try
+               if (!filtergraph.empty())
                {
-                       if(!filtergraph.empty()) 
-                       {
-                               outputs = avfilter_inout_alloc();
-                               inputs  = avfilter_inout_alloc();
+                       auto outputs = avfilter_inout_alloc();
+                       auto inputs  = avfilter_inout_alloc();
 
-                               CASPAR_VERIFY(outputs && inputs);
+                       CASPAR_VERIFY(outputs && inputs);
 
-                               outputs->name       = av_strdup("in");
-                               outputs->filter_ctx = &source_ctx;
-                               outputs->pad_idx    = 0;
-                               outputs->next       = nullptr;
+                       outputs->name       = av_strdup("in");
+                       outputs->filter_ctx = &source_ctx;
+                       outputs->pad_idx    = 0;
+                       outputs->next       = nullptr;
 
-                               inputs->name        = av_strdup("out");
-                               inputs->filter_ctx  = &sink_ctx;
-                               inputs->pad_idx     = 0;
-                               inputs->next        = nullptr;
+                       inputs->name        = av_strdup("out");
+                       inputs->filter_ctx  = &sink_ctx;
+                       inputs->pad_idx     = 0;
+                       inputs->next        = nullptr;
 
-                               FF(avfilter_graph_parse(
-                                       &graph, 
-                                       filtergraph.c_str(), 
+                       FF(avfilter_graph_parse(
+                                       &graph,
+                                       filtergraph.c_str(),
                                        inputs,
                                        outputs,
                                        nullptr));
-                       } 
-                       else 
-                       {
-                               FF(avfilter_link(
-                                       &source_ctx, 
-                                       0, 
-                                       &sink_ctx, 
-                                       0));
-                       }
-
-                       FF(avfilter_graph_config(
-                               &graph, 
-                               nullptr));
                }
-               catch(...)
+               else
                {
-                       //avfilter_inout_free(&outputs);
-                       //avfilter_inout_free(&inputs);
-                       throw;
+                       FF(avfilter_link(
+                                       &source_ctx,
+                                       0,
+                                       &sink_ctx,
+                                       0));
                }
+
+               FF(avfilter_graph_config(&graph, nullptr));
        }
 
        bool fast_path() const
@@ -235,7 +229,7 @@ struct filter::implementation
                        fast_path_.push(src_av_frame);
                else
                        FF(av_buffersrc_add_frame(
-                               video_graph_in_, 
+                               video_graph_in_,
                                src_av_frame.get()));
        }
 
@@ -251,20 +245,15 @@ struct filter::implementation
                        return result;
                }
 
-               std::shared_ptr<AVFrame> filt_frame(
-                       av_frame_alloc(), 
-                       [](AVFrame* p)
-                       {
-                               av_frame_free(&p);
-                       });
-               
+               auto filt_frame = create_frame();
+
                const auto ret = av_buffersink_get_frame(
-                       video_graph_out_, 
+                       video_graph_out_,
                        filt_frame.get());
-                               
+
                if(ret == AVERROR_EOF || ret == AVERROR(EAGAIN))
                        return nullptr;
-                                       
+
                FF_RET(ret, "poll");
 
                return filt_frame;
@@ -279,8 +268,9 @@ filter::filter(
                boost::rational<int> in_sample_aspect_ratio,
                AVPixelFormat in_pix_fmt,
                std::vector<AVPixelFormat> out_pix_fmts,
-               const std::string& filtergraph) 
-               : impl_(new implementation(
+               const std::string& filtergraph,
+               bool multithreaded)
+       : impl_(new implementation(
                        in_width,
                        in_height,
                        in_time_base,
@@ -288,18 +278,18 @@ filter::filter(
                        in_sample_aspect_ratio,
                        in_pix_fmt,
                        out_pix_fmts,
-                       filtergraph)){}
+                       filtergraph,
+                       multithreaded)){}
 filter::filter(filter&& other) : impl_(std::move(other.impl_)){}
 filter& filter::operator=(filter&& other){impl_ = std::move(other.impl_); return *this;}
 void filter::push(const std::shared_ptr<AVFrame>& frame){impl_->push(frame);}
 std::shared_ptr<AVFrame> filter::poll(){return impl_->poll();}
 std::wstring filter::filter_str() const{return u16(impl_->filtergraph_);}
 std::vector<spl::shared_ptr<AVFrame>> filter::poll_all()
-{      
+{
        std::vector<spl::shared_ptr<AVFrame>> frames;
        for(auto frame = poll(); frame; frame = poll())
                frames.push_back(spl::make_shared_ptr(frame));
        return frames;
 }
-
 }}
index e8d62314168eb0eb2a814430cd6b68fab24770a2..86cdba98f5c0ee3b535d02fea75fd570983d78db 100644 (file)
@@ -67,7 +67,8 @@ public:
                boost::rational<int> in_sample_aspect_ratio,
                AVPixelFormat in_pix_fmt,
                std::vector<AVPixelFormat> out_pix_fmts,
-               const std::string& filtergraph);
+               const std::string& filtergraph,
+               bool multithreaded = true);
        filter(filter&& other);
        filter& operator=(filter&& other);
 
@@ -79,10 +80,13 @@ public:
                        
        static bool is_double_rate(const std::wstring& filters)
        {
-               if(boost::to_upper_copy(filters).find(L"YADIF=1") != std::string::npos)
+               if (boost::to_upper_copy(filters).find(L"YADIF=1") != std::string::npos)
                        return true;
-       
-               if(boost::to_upper_copy(filters).find(L"YADIF=3") != std::string::npos)
+
+               if (boost::to_upper_copy(filters).find(L"YADIF=3") != std::string::npos)
+                       return true;
+
+               if (boost::to_upper_copy(filters).find(L"SEPARATEFIELDS") != std::string::npos)
                        return true;
 
                return false;
@@ -90,8 +94,12 @@ public:
 
        static bool is_deinterlacing(const std::wstring& filters)
        {
-               if(boost::to_upper_copy(filters).find(L"YADIF") != std::string::npos)
-                       return true;    
+               if (boost::to_upper_copy(filters).find(L"YADIF") != std::string::npos)
+                       return true;
+
+               if (boost::to_upper_copy(filters).find(L"SEPARATEFIELDS") != std::string::npos)
+                       return true;
+
                return false;
        }       
 
index c51cf152d406c503b0807e55e139664eba10a045..1e081a17a09214b589ecd3d2cd4c8dd6d91273bb 100644 (file)
@@ -1,5 +1,5 @@
 /*
-* Copyright (c) 2011 Sveriges Television AB <info@casparcg.com>
+* Copyright 2013 Sveriges Television AB http://casparcg.com/
 *
 * This file is part of CasparCG (www.casparcg.com).
 *
 * Author: Robert Nagy, ronag89@gmail.com
 */
 
-#include "../../StdAfx.h"
+#include "../../stdafx.h"
 
 #include "input.h"
 
 #include "../util/util.h"
+#include "../util/flv.h"
 #include "../../ffmpeg_error.h"
 #include "../../ffmpeg.h"
 
+#include <core/video_format.h>
+
 #include <common/diagnostics/graph.h>
 #include <common/executor.h>
-#include <common/lock.h>
-//#include <common/except.h>
+#include <common/except.h>
 #include <common/os/general_protection_fault.h>
-#include <common/log.h>
-
-#include <core/video_format.h>
 
 #include <tbb/concurrent_queue.h>
 #include <tbb/atomic.h>
 #include <tbb/recursive_mutex.h>
 
+#include <boost/rational.hpp>
+#include <boost/range/algorithm.hpp>
 #include <boost/thread/condition_variable.hpp>
 #include <boost/thread/mutex.hpp>
 #include <boost/thread/thread.hpp>
@@ -48,7 +49,7 @@
 #pragma warning (push)
 #pragma warning (disable : 4244)
 #endif
-extern "C" 
+extern "C"
 {
        #define __STDC_CONSTANT_MACROS
        #define __STDC_LIMIT_MACROS
@@ -58,212 +59,284 @@ extern "C"
 #pragma warning (pop)
 #endif
 
+static const size_t MAX_BUFFER_COUNT    = 100;
+static const size_t MAX_BUFFER_COUNT_RT = 3;
+static const size_t MIN_BUFFER_COUNT    = 50;
+static const size_t MAX_BUFFER_SIZE     = 64 * 1000000;
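+// Buffering limits for the packet queue: the COUNT values are in packets and MAX_BUFFER_SIZE
+// is in bytes of packet payload. MAX_BUFFER_COUNT_RT is presumably the tighter limit meant
+// for real-time sources.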
+
 namespace caspar { namespace ffmpeg {
+struct input::implementation : boost::noncopyable
+{
+       const spl::shared_ptr<diagnostics::graph>                                       graph_;
 
-static const int MAX_PUSH_WITHOUT_POP = 200;
-static const int MIN_FRAMES = 25;
+       const spl::shared_ptr<AVFormatContext>                                          format_context_; // Destroy this last
+       const int                                                                                                       default_stream_index_   = av_find_default_stream_index(format_context_.get());
 
-class stream
-{
-       stream(const stream&);
-       stream& operator=(const stream&);
+       const std::wstring                                                                                      filename_;
+       tbb::atomic<uint32_t>                                                                           start_;
+       tbb::atomic<uint32_t>                                                                           length_;
+       const bool                                                                                                      thumbnail_mode_;
+       tbb::atomic<bool>                                                                                       loop_;
+       uint32_t                                                                                                        frame_number_                   = 0;
+       boost::rational<int>                                                                            framerate_                              = read_framerate(*format_context_, 1);
 
-       typedef tbb::concurrent_bounded_queue<std::shared_ptr<AVPacket>>::size_type size_type;
+       tbb::concurrent_bounded_queue<std::shared_ptr<AVPacket>>        buffer_;
+       tbb::atomic<size_t>                                                                                     buffer_size_;
 
-       int                                                                                                                     index_;
-       tbb::concurrent_bounded_queue<std::shared_ptr<AVPacket>>        packets_;
-       tbb::atomic<int>                                                                                        push_since_pop_;
-public:
+       executor                                                                                                        executor_;
 
-       stream(int index) 
-               : index_(index)
+       explicit implementation(const spl::shared_ptr<diagnostics::graph> graph, const std::wstring& filename, FFMPEG_Resource resource_type, bool loop, uint32_t start, uint32_t length, bool thumbnail_mode, const ffmpeg_options& vid_params)
+               : graph_(graph)
+               , format_context_(open_input(filename, resource_type, vid_params))
+               , filename_(filename)
+               , thumbnail_mode_(thumbnail_mode)
+               , executor_(print())
        {
-               push_since_pop_ = 0;
-       }
+               if (thumbnail_mode_)
+                       executor_.invoke([]
+                       {
+                               enable_quiet_logging_for_thread();
+                       });
 
-       stream(stream&&) = default;
+               start_                  = start;
+               length_                 = length;
+               loop_                   = loop;
+               buffer_size_    = 0;
 
-       bool is_available() const
-       {
-               return index_ >= 0;
-       }
+               if(start_ > 0)
+                       queued_seek(start_);
 
-       int index() const
-       {
-               return index_;
+               graph_->set_color("seek", diagnostics::color(1.0f, 0.5f, 0.0f));
+               graph_->set_color("buffer-count", diagnostics::color(0.7f, 0.4f, 0.4f));
+               graph_->set_color("buffer-size", diagnostics::color(1.0f, 1.0f, 0.0f));
+
+               tick();
        }
-       
-       void push(const std::shared_ptr<AVPacket>& packet)
+
+       bool try_pop(std::shared_ptr<AVPacket>& packet)
        {
-               if(packet && packet->data && packet->stream_index != index_)
-                       return;
+               auto result = buffer_.try_pop(packet);
 
-               if (++push_since_pop_ > MAX_PUSH_WITHOUT_POP) // Out of memory protection for streams never being used.
+               if(result)
                {
-                       return;
+                       if(packet)
+                               buffer_size_ -= packet->size;
+                       tick();
                }
 
-               packets_.push(packet);
-       }
+               graph_->set_value("buffer-size", (static_cast<double>(buffer_size_)+0.001)/MAX_BUFFER_SIZE);
+               graph_->set_value("buffer-count", (static_cast<double>(buffer_.size()+0.001)/MAX_BUFFER_COUNT));
 
-       bool try_pop(std::shared_ptr<AVPacket>& packet)
-       {
-               push_since_pop_ = 0;
-
-               return packets_.try_pop(packet);
+               return result;
        }
 
-       void clear()
+       std::ptrdiff_t get_max_buffer_count() const
        {
-               std::shared_ptr<AVPacket> packet;
-               push_since_pop_ = 0;
-               while(packets_.try_pop(packet));
+               return thumbnail_mode_ ? 1 : MAX_BUFFER_COUNT;
        }
-               
-       size_type size() const
+
+       std::ptrdiff_t get_min_buffer_count() const
        {
-               return is_available() ? packets_.size() : std::numeric_limits<size_type>::max();
+               return thumbnail_mode_ ? 0 : MIN_BUFFER_COUNT;
        }
-};
-               
-struct input::impl : boost::noncopyable
-{              
-       const spl::shared_ptr<diagnostics::graph>       graph_;
-
-       const std::wstring                                                      filename_;
-       const spl::shared_ptr<AVFormatContext>          format_context_                 = open_input(filename_); // Destroy this last
-       const int                                                                       default_stream_index_   = av_find_default_stream_index(format_context_.get());
-
-       tbb::atomic<uint32_t>                                           start_;         
-       tbb::atomic<uint32_t>                                           length_;
-       tbb::atomic<bool>                                                       loop_;
-       tbb::atomic<bool>                                                       eof_;
-       double                                                                          fps_                                    = read_fps(*format_context_, 0.0);
-       uint32_t                                                                        frame_number_                   = 0;
-
-       stream                                                                          video_stream_                   {                                                       av_find_best_stream(format_context_.get(), AVMEDIA_TYPE_VIDEO, -1, -1, 0, 0) };
-       std::vector<stream>                                                     audio_streams_;
-
-       boost::optional<uint32_t>                                       seek_target_;
-
-       tbb::atomic<bool>                                                       is_running_;
-       boost::mutex                                                            mutex_;
-       boost::condition_variable                                       cond_;
-       boost::thread                                                           thread_;
-       
-       impl(
-                       const spl::shared_ptr<diagnostics::graph> graph,
-                       const std::wstring& filename,
-                       const bool loop,
-                       const uint32_t start,
-                       const uint32_t length,
-                       bool thumbnail_mode)
-               : graph_(graph)
-               , filename_(filename)
-       {
-               start_                  = start;
-               length_                 = length;
-               loop_                   = loop;
-               eof_                    = false;
-               is_running_             = true;
 
-               if(start_ != 0)
-                       seek_target_ = start_;
-                                                                                                               
-               graph_->set_color("seek", diagnostics::color(1.0f, 0.5f, 0.0f));
+       std::future<bool> seek(uint32_t target)
+       {
+               if (!executor_.is_running())
+                       return make_ready_future(false);
 
-               if (!thumbnail_mode)
-                       for (unsigned i = 0; i < format_context_->nb_streams; ++i)
-                               if (format_context_->streams[i]->codec->codec_type == AVMediaType::AVMEDIA_TYPE_AUDIO)
-                                       audio_streams_.emplace_back(i);
+               return executor_.begin_invoke([=]() -> bool
+               {
+                       std::shared_ptr<AVPacket> packet;
+                       while(buffer_.try_pop(packet) && packet)
+                               buffer_size_ -= packet->size;
 
-               for (int i = 0; i < audio_streams_.size(); ++i)
-                       graph_->set_color("audio-buffer" + boost::lexical_cast<std::string>(i + 1), diagnostics::color(0.7f, 0.4f, 0.4f));
+                       queued_seek(target);
 
-               if (video_stream_.is_available())
-                       graph_->set_color("video-buffer", diagnostics::color(1.0f, 1.0f, 0.0f));
-               
-               for(int n = 0; n < 8; ++n)
                        tick();
 
-               thread_ = boost::thread([this, thumbnail_mode]{run(thumbnail_mode);});
+                       return true;
+               }, task_priority::high_priority);
        }
 
-       ~impl()
+       std::wstring print() const
        {
-               is_running_ = false;
-               cond_.notify_one();
-               thread_.join();
+               return L"ffmpeg_input[" + filename_ + L")]";
        }
-       
-       bool try_pop_video(std::shared_ptr<AVPacket>& packet)
-       {
-               if (!video_stream_.is_available())
-                       return false;
-
-               bool result = video_stream_.try_pop(packet);
 
-               if(result)
-                       cond_.notify_one();
-               
-               graph_->set_value("video-buffer", std::min(1.0, static_cast<double>(video_stream_.size())/MIN_FRAMES));
-                               
-               return result;
+       bool full() const
+       {
+               return (buffer_size_ > MAX_BUFFER_SIZE || buffer_.size() > get_max_buffer_count()) && buffer_.size() > get_min_buffer_count();
        }
-       
-       bool try_pop_audio(std::shared_ptr<AVPacket>& packet, int audio_stream_index)
+
+       void tick()
        {
-               if (audio_streams_.size() < audio_stream_index + 1)
-                       return false;
+               if(!executor_.is_running())
+                       return;
 
-               auto& audio_stream = audio_streams_.at(audio_stream_index);
-               bool result = audio_stream.try_pop(packet);
-               if(result)
-                       cond_.notify_one();
+               executor_.begin_invoke([this]
+               {
+                       if(full())
+                               return;
 
-               auto buffer_nr = boost::lexical_cast<std::string>(audio_stream_index + 1);
-               graph_->set_value("audio-buffer" + buffer_nr, std::min(1.0, static_cast<double>(audio_stream.size())/MIN_FRAMES));
+                       try
+                       {
+                               auto packet = create_packet();
 
-               return result;
-       }
+                               auto ret = av_read_frame(format_context_.get(), packet.get()); // packet is only valid until next call of av_read_frame. Use av_dup_packet to extend its life.
 
-       void seek(uint32_t target)
-       {
-               {
-                       boost::lock_guard<boost::mutex> lock(mutex_);
+                               if(is_eof(ret))
+                               {
+                                       frame_number_   = 0;
+
+                                       if(loop_)
+                                       {
+                                               queued_seek(start_);
+                                               graph_->set_tag(diagnostics::tag_severity::INFO, "seek");
+                                               CASPAR_LOG(trace) << print() << " Looping.";
+                                       }
+                                       else
+                                               executor_.stop();
+                               }
+                               else
+                               {
+                                       THROW_ON_ERROR(ret, "av_read_frame", print());
 
-                       seek_target_ = target;
-                       video_stream_.clear();
+                                       if(packet->stream_index == default_stream_index_)
+                                               ++frame_number_;
 
-                       for (auto& audio_stream : audio_streams_)
-                               audio_stream.clear();
-               }
+                                       THROW_ON_ERROR2(av_dup_packet(packet.get()), print());
+
+                                       // Make sure that the packet is correctly deallocated even if size and data are modified during decoding.
+                                       auto size = packet->size;
+                                       auto data = packet->data;
 
-               cond_.notify_one();
+                                       packet = spl::shared_ptr<AVPacket>(packet.get(), [packet, size, data](AVPacket*)
+                                       {
+                                               packet->size = size;
+                                               packet->data = data;
+                                       });
+
+                                       buffer_.try_push(packet);
+                                       buffer_size_ += packet->size;
+
+                                       graph_->set_value("buffer-size", (static_cast<double>(buffer_size_)+0.001)/MAX_BUFFER_SIZE);
+                                       graph_->set_value("buffer-count", (static_cast<double>(buffer_.size())+0.001)/MAX_BUFFER_COUNT);
+                               }
+
+                               tick();
+                       }
+                       catch(...)
+                       {
+                               if (!thumbnail_mode_)
+                                       CASPAR_LOG_CURRENT_EXCEPTION();
+                               executor_.stop();
+                       }
+               });
        }
 
-       int get_actual_audio_stream_index(int audio_stream_index) const
+       spl::shared_ptr<AVFormatContext> open_input(const std::wstring resource_name, FFMPEG_Resource resource_type, const ffmpeg_options& vid_params)
        {
-               if (audio_stream_index + 1 > audio_streams_.size())
-                       CASPAR_THROW_EXCEPTION(averror_stream_not_found());
+               AVFormatContext* weak_context = nullptr;
 
-               return audio_streams_.at(audio_stream_index).index();
+               switch (resource_type) {
+               case FFMPEG_Resource::FFMPEG_FILE:
+                       THROW_ON_ERROR2(avformat_open_input(&weak_context, u8(resource_name).c_str(), nullptr, nullptr), resource_name);
+                       break;
+               case FFMPEG_Resource::FFMPEG_DEVICE:
+                       {
+                               AVDictionary* format_options = NULL;
+                               for (auto& option  : vid_params)
+                               {
+                                       av_dict_set(&format_options, option.first.c_str(), option.second.c_str(), 0);
+                               }
+                               AVInputFormat* input_format = av_find_input_format("dshow");
+                               THROW_ON_ERROR2(avformat_open_input(&weak_context, u8(resource_name).c_str(), input_format, &format_options), resource_name);
+                               if (format_options != nullptr)
+                               {
+                                       std::string unsupported_tokens = "";
+                                       AVDictionaryEntry *t = NULL;
+                                       while ((t = av_dict_get(format_options, "", t, AV_DICT_IGNORE_SUFFIX)) != nullptr)
+                                       {
+                                               if (!unsupported_tokens.empty())
+                                                       unsupported_tokens += ", ";
+                                               unsupported_tokens += t->key;
+                                       }
+                                       avformat_close_input(&weak_context);
+                                       BOOST_THROW_EXCEPTION(ffmpeg_error() << msg_info(unsupported_tokens));
+                               }
+                               av_dict_free(&format_options);
+                       }
+                       break;
+               case FFMPEG_Resource::FFMPEG_STREAM:
+                       {
+                               AVDictionary* format_options = NULL;
+                               for (auto& option : vid_params)
+                               {
+                                       av_dict_set(&format_options, option.first.c_str(), option.second.c_str(), 0);
+                               }
+                               THROW_ON_ERROR2(avformat_open_input(&weak_context, u8(resource_name).c_str(), nullptr, &format_options), resource_name);
+                               if (format_options != nullptr)
+                               {
+                                       std::string unsupported_tokens = "";
+                                       AVDictionaryEntry *t = NULL;
+                                       while ((t = av_dict_get(format_options, "", t, AV_DICT_IGNORE_SUFFIX)) != nullptr)
+                                       {
+                                               if (!unsupported_tokens.empty())
+                                                       unsupported_tokens += ", ";
+                                               unsupported_tokens += t->key;
+                                       }
+                                       avformat_close_input(&weak_context);
+                                       BOOST_THROW_EXCEPTION(ffmpeg_error() << msg_info(unsupported_tokens));
+                               }
+                               av_dict_free(&format_options);
+                       }
+                       break;
+               };
+               spl::shared_ptr<AVFormatContext> context(weak_context, [](AVFormatContext* p)
+               {
+                       avformat_close_input(&p);
+               });
+               THROW_ON_ERROR2(avformat_find_stream_info(weak_context, nullptr), resource_name);
+               fix_meta_data(*context);
+               return context;
        }
-               
-       std::wstring print() const
+
+       void fix_meta_data(AVFormatContext& context)
        {
-               return L"ffmpeg_input[" + filename_ + L")]";
+               auto video_index = av_find_best_stream(&context, AVMEDIA_TYPE_VIDEO, -1, -1, 0, 0);
+
+               if (video_index > -1)
+               {
+                       auto video_stream = context.streams[video_index];
+                       auto video_context = context.streams[video_index]->codec;
+
+                       if (boost::filesystem::path(context.filename).extension().string() == ".flv")
+                       {
+                               try
+                               {
+                                       auto meta = read_flv_meta_info(context.filename);
+                                       double fps = boost::lexical_cast<double>(meta["framerate"]);
+                                       video_stream->nb_frames = static_cast<int64_t>(boost::lexical_cast<double>(meta["duration"])*fps);
+                               }
+                               catch (...) {}
+                       }
+                       else
+                       {
+                               auto stream_time = video_stream->time_base;
+                               auto duration = video_stream->duration;
+                               auto codec_time = video_context->time_base;
+                               auto ticks = video_context->ticks_per_frame;
+
+                               if (video_stream->nb_frames == 0)
+                                       video_stream->nb_frames = (duration*stream_time.num*codec_time.den) / (stream_time.den*codec_time.num*ticks);
+                       }
+               }
        }
 
-private:
-       void internal_seek(uint32_t target)
+       void queued_seek(const uint32_t target)
        {
-               eof_ = false;
-               graph_->set_tag(diagnostics::tag_severity::INFO, "seek");
-
-               if (is_logging_quiet_for_thread())
-                       CASPAR_LOG(trace) << print() << " Seeking: " << target;
-               else
+               if (!thumbnail_mode_)
                        CASPAR_LOG(debug) << print() << " Seeking: " << target;
 
                int flags = AVSEEK_FLAG_FRAME;
@@ -278,152 +351,61 @@ private:
                                        flags = AVSEEK_FLAG_BYTE;
                        }
                }
-               
-               auto stream                             = format_context_->streams[default_stream_index_];
-               auto fps                                = read_fps(*format_context_, 0.0);
-               auto target_timestamp   = static_cast<int64_t>((target / fps * stream->time_base.den) / stream->time_base.num);
-               
+
+               auto stream = format_context_->streams[default_stream_index_];
+
+
+               auto fps = read_fps(*format_context_, 0.0);
+
                THROW_ON_ERROR2(avformat_seek_file(
-                               format_context_.get(),
-                               default_stream_index_,
-                               std::numeric_limits<int64_t>::min(),
-                               target_timestamp,
-                               std::numeric_limits<int64_t>::max(),
-                               0), print());
+                       format_context_.get(),
+                       default_stream_index_,
+                       std::numeric_limits<int64_t>::min(),
+                       static_cast<int64_t>((target / fps * stream->time_base.den) / stream->time_base.num),
+                       std::numeric_limits<int64_t>::max(),
+                       0), print());
 
                auto flush_packet       = create_packet();
                flush_packet->data      = nullptr;
                flush_packet->size      = 0;
                flush_packet->pos       = target;
-               
-               video_stream_.push(flush_packet);
 
-               for (auto& audio_stream : audio_streams_)
-                       audio_stream.push(flush_packet);
+               buffer_.push(flush_packet);
        }
 
-       void tick()
-       {
-               if(seek_target_)                                
-               {
-                       internal_seek(*seek_target_);
-                       seek_target_.reset();
-               }
-
-               auto packet = create_packet();
-               
-               auto ret = av_read_frame(format_context_.get(), packet.get()); // packet is only valid until next call of av_read_frame. Use av_dup_packet to extend its life.  
-               
-               if(is_eof(ret))                                                                                                              
-               {
-                       if (loop_)
-                               internal_seek(start_);
-                       else
-                       {
-                               eof_ = true;
-                       }
-               }
-               else
-               {               
-                       THROW_ON_ERROR(ret, "av_read_frame", print());
-                                       
-                       THROW_ON_ERROR2(av_dup_packet(packet.get()), print());
-                               
-                       // Make sure that the packet is correctly deallocated even if size and data is modified during decoding.
-                       const auto size = packet->size;
-                       const auto data = packet->data;
-                       
-                       packet = spl::shared_ptr<AVPacket>(packet.get(), [packet, size, data](AVPacket*)
-                       {
-                               packet->size = size;
-                               packet->data = data;                            
-                       });
-                                       
-                       const auto stream_time_base = format_context_->streams[packet->stream_index]->time_base;
-                       const auto packet_frame_number = static_cast<uint32_t>((static_cast<double>(packet->pts * stream_time_base.num)/stream_time_base.den)*fps_);
-
-                       if(packet->stream_index == default_stream_index_)
-                               frame_number_ = packet_frame_number;
-                                       
-                       if(packet_frame_number >= start_ && packet_frame_number < length_)
-                       {
-                               video_stream_.push(packet);
-
-                               for (auto& audio_stream : audio_streams_)
-                                       audio_stream.push(packet);
-                       }
-               }       
-
-               if (video_stream_.is_available())
-                       graph_->set_value("video-buffer", std::min(1.0, static_cast<double>(video_stream_.size())/MIN_FRAMES));
-
-               for (int i = 0; i < audio_streams_.size(); ++i)
-                       graph_->set_value(
-                                       "audio-buffer" + boost::lexical_cast<std::string>(i + 1),
-                                       std::min(1.0, static_cast<double>(audio_streams_[i].size())/MIN_FRAMES));
-       }
-                       
-       bool full() const
+       bool is_eof(int ret)
        {
-               bool video_full = video_stream_.size() >= MIN_FRAMES;
+               if(ret == AVERROR(EIO))
+                       CASPAR_LOG(trace) << print() << " Received EIO, assuming EOF. ";
+               if(ret == AVERROR_EOF)
+                       CASPAR_LOG(trace) << print() << " Received EOF. ";
 
-               if (!video_full)
-                       return false;
-
-               for (auto& audio_stream : audio_streams_)
-                       if (audio_stream.size() < MIN_FRAMES)
-                               return false;
-
-               return true;
+               return ret == AVERROR_EOF || ret == AVERROR(EIO) || frame_number_ >= length_; // av_read_frame doesn't always correctly return AVERROR_EOF;
        }
 
-       void run(bool thumbnail_mode)
+       int num_audio_streams() const
        {
-               ensure_gpf_handler_installed_for_thread(u8(print()).c_str());
-               auto quiet_logging = temporary_enable_quiet_logging_for_thread(thumbnail_mode);
-
-               while(is_running_)
-               {
-                       try
-                       {
-                               
-                               {
-                                       boost::unique_lock<boost::mutex> lock(mutex_);
-
-                                       while((eof_ || full()) && !seek_target_ && is_running_)
-                                               cond_.wait(lock);
-                                       
-                                       tick();
-                               }
-                       }
-                       catch(...)
-                       {
-                               CASPAR_LOG_CURRENT_EXCEPTION();
-                               is_running_ = false;
-                       }
-               }
+               return 0; // TODO
        }
-                       
-       bool is_eof(int ret)
+
+       boost::rational<int> framerate() const
        {
-               #pragma warning (disable : 4146)
-               return ret == AVERROR_EOF || ret == AVERROR(EIO) || frame_number_ >= length_; // av_read_frame doesn't always correctly return AVERROR_EOF;
+               return framerate_;
        }
 };
 
-input::input(const spl::shared_ptr<diagnostics::graph>& graph, const std::wstring& filename, bool loop, uint32_t start, uint32_t length, bool thumbnail_mode)
-       : impl_(new impl(graph, filename, loop, start, length, thumbnail_mode)){}
-int input::get_actual_audio_stream_index(int audio_stream_index) const { return impl_->get_actual_audio_stream_index(audio_stream_index); };
-int input::num_audio_streams() const { return static_cast<int>(impl_->audio_streams_.size()); }
-bool input::try_pop_video(std::shared_ptr<AVPacket>& packet){return impl_->try_pop_video(packet);}
-bool input::try_pop_audio(std::shared_ptr<AVPacket>& packet, int audio_stream_index){return impl_->try_pop_audio(packet, audio_stream_index);}
-AVFormatContext& input::context(){return *impl_->format_context_;}
-void input::loop(bool value){impl_->loop_ = value;}
-bool input::loop() const{return impl_->loop_;}
-void input::seek(uint32_t target){impl_->seek(target);}
+input::input(const spl::shared_ptr<diagnostics::graph>& graph, const std::wstring& filename, FFMPEG_Resource resource_type, bool loop, uint32_t start, uint32_t length, bool thumbnail_mode, const ffmpeg_options& vid_params)
+       : impl_(new implementation(graph, filename, resource_type, loop, start, length, thumbnail_mode, vid_params)){}
+bool input::eof() const {return !impl_->executor_.is_running();}
+bool input::try_pop(std::shared_ptr<AVPacket>& packet){return impl_->try_pop(packet);}
+spl::shared_ptr<AVFormatContext> input::context(){return impl_->format_context_;}
 void input::start(uint32_t value){impl_->start_ = value;}
 uint32_t input::start() const{return impl_->start_;}
 void input::length(uint32_t value){impl_->length_ = value;}
 uint32_t input::length() const{return impl_->length_;}
-bool input::eof() const { return impl_->eof_; }
+void input::loop(bool value){impl_->loop_ = value;}
+bool input::loop() const{return impl_->loop_;}
+int input::num_audio_streams() const { return impl_->num_audio_streams(); }
+boost::rational<int> input::framerate() const { return impl_->framerate(); }
+std::future<bool> input::seek(uint32_t target){return impl_->seek(target);}
 }}
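
The rewritten tick() above queues each demuxed packet behind a second shared_ptr whose deleter restores the packet's original data/size pointers before the real deleter runs, because downstream decoders may advance data and shrink size while consuming the packet. A minimal stand-alone sketch of that idiom, using std::shared_ptr in place of spl::shared_ptr (protect_packet is a hypothetical name used only for illustration):

    #include <memory>

    extern "C"
    {
        #include <libavcodec/avcodec.h>
    }

    // 'packet' already owns the AVPacket (created by create_packet() and filled via
    // av_read_frame()/av_dup_packet(), as in input::tick() above).
    std::shared_ptr<AVPacket> protect_packet(const std::shared_ptr<AVPacket>& packet)
    {
        auto size = packet->size; // remember the values as delivered by the demuxer
        auto data = packet->data;

        // Aliasing wrapper: points at the same AVPacket, but its "deleter" only restores
        // size and data. Capturing 'packet' keeps the real deleter alive until the last
        // wrapper reference is released.
        return std::shared_ptr<AVPacket>(packet.get(), [packet, size, data](AVPacket*)
        {
            packet->size = size;
            packet->data = data;
        });
    }

The trick itself is carried over unchanged from the 2.0.7 producer; what is new is the single buffer_ queue it feeds instead of the per-stream video/audio queues removed above.
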
index 5f843b9d11770c8973a151cf7a695c6baa52dd2b..28dd7f480322f7fca42382113abb7d7d5827f896 100644 (file)
@@ -1,5 +1,5 @@
 /*
-* Copyright (c) 2011 Sveriges Television AB <info@casparcg.com>
+* Copyright 2013 Sveriges Television AB http://casparcg.com/
 *
 * This file is part of CasparCG (www.casparcg.com).
 *
 
 #pragma once
 
+#include "../util/util.h"
+
 #include <common/memory.h>
 
 #include <memory>
 #include <string>
 #include <cstdint>
+#include <future>
 
 #include <boost/noncopyable.hpp>
+#include <boost/rational.hpp>
 
 struct AVFormatContext;
 struct AVPacket;
@@ -45,36 +49,27 @@ namespace ffmpeg {
 class input : boost::noncopyable
 {
 public:
-       explicit input(
-                       const spl::shared_ptr<diagnostics::graph>& graph,
-                       const std::wstring& filename,
-                       bool loop, uint32_t start,
-                       uint32_t length,
-                       bool thumbnail_mode);
-
-       int                     num_audio_streams() const;
-       int                     get_actual_audio_stream_index(int audio_stream_index) const;
-
-       bool            try_pop_video(std::shared_ptr<AVPacket>& packet);
-       bool            try_pop_audio(std::shared_ptr<AVPacket>& packet, int audio_stream_index);
-
-       void            loop(bool value);
-       bool            loop() const;
+       explicit input(const spl::shared_ptr<diagnostics::graph>& graph, const std::wstring& filename, FFMPEG_Resource resource_type, bool loop, uint32_t start, uint32_t length, bool thumbnail_mode, const ffmpeg_options& vid_params);
 
-       void            start(uint32_t value);
-       uint32_t        start() const;
+       bool                                                            try_pop(std::shared_ptr<AVPacket>& packet);
+       bool                                                            eof() const;
 
-       void            length(uint32_t value);
-       uint32_t        length() const;
+       void                                                            start(uint32_t value);
+       uint32_t                                                        start() const;
+       void                                                            length(uint32_t value);
+       uint32_t                                                        length() const;
+       void                                                            loop(bool value);
+       bool                                                            loop() const;
 
-       bool            eof() const;
+       int                                                                     num_audio_streams() const;
+       boost::rational<int>                            framerate() const;
 
-       void            seek(uint32_t target);
+       std::future<bool>                                       seek(uint32_t target);
 
-       AVFormatContext& context();
+       spl::shared_ptr<AVFormatContext>        context();
 private:
-       struct impl;
-       std::shared_ptr<impl> impl_;
+       struct implementation;
+       std::shared_ptr<implementation> impl_;
 };
 
        
index 3a469d8e122da656b08851eb5a2f2de2f0d972f5..4e3882a4bcb72aafd2e3745f821366ba8ba6e686 100644 (file)
@@ -30,13 +30,7 @@ namespace caspar { namespace ffmpeg {
 enum class display_mode
 {
        simple,
-       duplicate,
-       half,
-       interlace,
        deinterlace_bob,
-       deinterlace_bob_reinterlace,
-       deinterlace,
-       count,
        invalid
 };
 
@@ -46,65 +40,14 @@ std::basic_ostream< CharT, TraitsT >& operator<< (std::basic_ostream<CharT, Trai
        switch(value)
        {
        case display_mode::simple:                                              return o << L"simple";
-       case display_mode::duplicate:                                   return o << L"duplicate";
-       case display_mode::half:                                                return o << L"half";
-       case display_mode::interlace:                                   return o << L"interlace";
        case display_mode::deinterlace_bob:                             return o << L"deinterlace_bob";
-       case display_mode::deinterlace_bob_reinterlace: return o << L"deinterlace_bob_reinterlace";
-       case display_mode::deinterlace:                                 return o << L"deinterlace";
        default:                                                                                return o << L"invalid";
        }
 }
 
-static display_mode get_display_mode(const core::field_mode in_mode, double in_fps, const core::field_mode out_mode, double out_fps)
-{              
-       static const auto epsilon = 2.0;
-
-       if(in_fps < 20.0 || in_fps > 80.0)
-       {
-               //if(out_mode != core::field_mode::progressive && in_mode == core::field_mode::progressive)
-               //      return display_mode::interlace;
-               
-               if(out_mode == core::field_mode::progressive && in_mode != core::field_mode::progressive)
-               {
-                       if(in_fps < 35.0)
-                               return display_mode::deinterlace;
-                       else
-                               return display_mode::deinterlace_bob;
-               }
-       }
-
-       if(std::abs(in_fps - out_fps) < epsilon)
-       {
-               if(in_mode != core::field_mode::progressive && out_mode == core::field_mode::progressive)
-                       return display_mode::deinterlace;
-               //else if(in_mode == core::field_mode::progressive && out_mode != core::field_mode::progressive)
-               //      simple(); // interlace_duplicate();
-               else
-                       return display_mode::simple;
-       }
-       else if(std::abs(in_fps/2.0 - out_fps) < epsilon)
-       {
-               if(in_mode != core::field_mode::progressive)
-                       return display_mode::invalid;
-
-               if(out_mode != core::field_mode::progressive)
-                       return display_mode::interlace;
-               else
-                       return display_mode::half;
-       }
-       else if(std::abs(in_fps - out_fps/2.0) < epsilon)
-       {
-               if(out_mode != core::field_mode::progressive)
-                       return display_mode::invalid;
-
-               if(in_mode != core::field_mode::progressive)
-                       return display_mode::deinterlace_bob;
-               else
-                       return display_mode::duplicate;
-       }
-
-       return display_mode::invalid;
+static display_mode get_display_mode(const core::field_mode in_mode)
+{
+       return in_mode == core::field_mode::progressive ? display_mode::simple : display_mode::deinterlace_bob;
 }
 
 }}
\ No newline at end of file
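
With the table of hybrid modes gone, get_display_mode() reduces to a progressive-or-not check, and the one remaining non-trivial mode maps to a single avfilter string (appended inside frame_muxer::impl::update_display_mode() further down in this diff). Spelled out as a stand-alone sketch, with local enum stand-ins for core::field_mode and the enum above:

    #include <string>

    enum class field_mode   { progressive, upper, lower };
    enum class display_mode { simple, deinterlace_bob, invalid };

    inline display_mode choose_display_mode(field_mode in_mode)
    {
        return in_mode == field_mode::progressive ? display_mode::simple
                                                  : display_mode::deinterlace_bob;
    }

    // YADIF=1:-1 -> bob deinterlacing: mode 1 emits one output frame per field
    // (doubling the frame rate), parity -1 lets the filter detect the field order.
    inline std::wstring deinterlace_filter(display_mode mode)
    {
        return mode == display_mode::deinterlace_bob ? L"YADIF=1:-1" : L"";
    }
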
index 055ec9d0fd6572fa87a3112b4b3667d31ea412ae..bf4850ddc8bbfdf98da2b1b228ba0375ba7e75a7 100644 (file)
@@ -43,7 +43,7 @@
 #pragma warning (push)
 #pragma warning (disable : 4244)
 #endif
-extern "C" 
+extern "C"
 {
        #define __STDC_CONSTANT_MACROS
        #define __STDC_LIMIT_MACROS
@@ -57,6 +57,8 @@ extern "C"
 #include <common/assert.h>
 #include <boost/range/algorithm_ext/push_back.hpp>
 #include <boost/algorithm/string/predicate.hpp>
+#include <boost/thread/mutex.hpp>
+#include <boost/optional.hpp>
 
 #include <deque>
 #include <queue>
@@ -65,63 +67,86 @@ extern "C"
 using namespace caspar::core;
 
 namespace caspar { namespace ffmpeg {
-
-bool is_frame_format_changed(const AVFrame& lhs, const AVFrame& rhs)
+struct av_frame_format
 {
-       if (lhs.format != rhs.format)
-               return true;
+       int                                                                             pix_format;
+       std::array<int, AV_NUM_DATA_POINTERS>   line_sizes;
+       int                                                                             width;
+       int                                                                             height;
+
+       av_frame_format(const AVFrame& frame)
+               : pix_format(frame.format)
+               , width(frame.width)
+               , height(frame.height)
+       {
+               boost::copy(frame.linesize, line_sizes.begin());
+       }
 
-       for (int i = 0; i < AV_NUM_DATA_POINTERS; ++i)
+       bool operator==(const av_frame_format& other) const
        {
-               if (lhs.linesize[i] != rhs.linesize[i])
-                       return true;
+               return pix_format == other.pix_format
+                       && line_sizes == other.line_sizes
+                       && width == other.width
+                       && height == other.height;
        }
 
-       return false;
-}
-       
+       bool operator!=(const av_frame_format& other) const
+       {
+               return !(*this == other);
+       }
+};
+
 struct frame_muxer::impl : boost::noncopyable
-{      
-       std::queue<core::mutable_frame>                                 video_stream_;
-       core::mutable_audio_buffer                                              audio_stream_;
-       std::queue<draw_frame>                                                  frame_buffer_;
+{
+       std::queue<std::queue<core::mutable_frame>>             video_streams_;
+       std::queue<core::mutable_audio_buffer>                  audio_streams_;
+       std::queue<core::draw_frame>                                    frame_buffer_;
        display_mode                                                                    display_mode_                   = display_mode::invalid;
-       const double                                                                    in_fps_;
+       const boost::rational<int>                                              in_framerate_;
        const video_format_desc                                                 format_desc_;
-       audio_channel_layout                                                    channel_layout_;
-       
+       const audio_channel_layout                                              audio_channel_layout_;
+
        std::vector<int>                                                                audio_cadence_                  = format_desc_.audio_cadence;
-                       
+
        spl::shared_ptr<core::frame_factory>                    frame_factory_;
-       std::shared_ptr<AVFrame>                                                previous_frame_;
+       boost::optional<av_frame_format>                                previously_filtered_frame_;
 
        std::unique_ptr<filter>                                                 filter_;
        const std::wstring                                                              filter_str_;
-       bool                                                                                    force_deinterlacing_    = env::properties().get(L"configuration.force-deinterlace", true);
-               
+       const bool                                                                              multithreaded_filter_;
+       bool                                                                                    force_deinterlacing_    = env::properties().get(L"configuration.force-deinterlace", false);
+
+       mutable boost::mutex                                                    out_framerate_mutex_;
+       boost::rational<int>                                                    out_framerate_;
+
        impl(
-                       double in_fps,
+                       boost::rational<int> in_framerate,
                        const spl::shared_ptr<core::frame_factory>& frame_factory,
                        const core::video_format_desc& format_desc,
                        const core::audio_channel_layout& channel_layout,
-                       const std::wstring& filter_str)
-               : in_fps_(in_fps)
+                       const std::wstring& filter_str,
+                       bool multithreaded_filter)
+               : in_framerate_(in_framerate)
                , format_desc_(format_desc)
-               , channel_layout_(channel_layout)
+               , audio_channel_layout_(channel_layout)
                , frame_factory_(frame_factory)
                , filter_str_(filter_str)
-       {               
-               // Note: Uses 1 step rotated cadence for 1001 modes (1602, 1602, 1601, 1602, 1601)
-               // This cadence fills the audio mixer most optimally.
-               boost::range::rotate(audio_cadence_, std::end(audio_cadence_)-1);
+               , multithreaded_filter_(multithreaded_filter)
+       {
+               video_streams_.push(std::queue<core::mutable_frame>());
+               audio_streams_.push(core::mutable_audio_buffer());
+
+               set_out_framerate(in_framerate_);
        }
-       
-       void push_video(const std::shared_ptr<AVFrame>& video)
-       {               
-               if(!video)
+
+       void push(const std::shared_ptr<AVFrame>& video_frame)
+       {
+               if (!video_frame)
                        return;
 
-               if (previous_frame_ && video->data[0] && is_frame_format_changed(*previous_frame_, *video))
+               av_frame_format current_frame_format(*video_frame);
+
+               if (previously_filtered_frame_ && video_frame->data[0] && *previously_filtered_frame_ != current_frame_format)
                {
                        // Fixes bug where avfilter crashes server on some DV files (starts in YUV420p but changes to YUV411p after the first frame).
                        if (ffmpeg::is_logging_quiet_for_thread())
@@ -130,290 +155,262 @@ struct frame_muxer::impl : boost::noncopyable
                                CASPAR_LOG(info) << L"[frame_muxer] Frame format has changed. Resetting display mode.";
 
                        display_mode_ = display_mode::invalid;
+                       filter_.reset();
+                       previously_filtered_frame_ = boost::none;
                }
 
-               if(!video->data[0])
+               if (video_frame == flush_video())
                {
-                       auto empty_frame = frame_factory_->create_frame(this, core::pixel_format_desc(core::pixel_format::invalid), channel_layout_);
-                       video_stream_.push(std::move(empty_frame));
+                       video_streams_.push(std::queue<core::mutable_frame>());
+               }
+               else if (video_frame == empty_video())
+               {
+                       video_streams_.back().push(frame_factory_->create_frame(this, core::pixel_format::invalid, audio_channel_layout_));
                        display_mode_ = display_mode::simple;
                }
                else
                {
-                       if(!filter_ || display_mode_ == display_mode::invalid)
-                               update_display_mode(video);
-                               
-                       filter_->push(video);
-                       previous_frame_ = video;
-                       for (auto& av_frame : filter_->poll_all())
-                               video_stream_.push(make_frame(this, av_frame, *frame_factory_, channel_layout_));
+                       if (!filter_ || display_mode_ == display_mode::invalid)
+                               update_display_mode(video_frame);
+
+                       if (filter_)
+                       {
+                               filter_->push(video_frame);
+                               previously_filtered_frame_ = current_frame_format;
+
+                               for (auto& av_frame : filter_->poll_all())
+                                       video_streams_.back().push(make_frame(this, av_frame, *frame_factory_, audio_channel_layout_));
+                       }
                }
 
-               merge();
+               if (video_streams_.back().size() > 32)
+                       CASPAR_THROW_EXCEPTION(invalid_operation() << source_info("frame_muxer") << msg_info("video-stream overflow. This can be caused by incorrect frame-rate. Check clip meta-data."));
        }
 
-       void push_audio(const std::shared_ptr<AVFrame>& audio)
+       void push(const std::shared_ptr<core::mutable_audio_buffer>& audio)
        {
-               if(!audio)
+               if (!audio)
                        return;
 
-               if(!audio->data[0])             
+               if (audio == flush_audio())
                {
-                       if (channel_layout_ == core::audio_channel_layout::invalid())
-                               channel_layout_ = *core::audio_channel_layout_repository::get_default()->get_layout(L"stereo");
-
-                       boost::range::push_back(audio_stream_, core::mutable_audio_buffer(audio_cadence_.front() * channel_layout_.num_channels, 0));
+                       audio_streams_.push(core::mutable_audio_buffer());
+               }
+               else if (audio == empty_audio())
+               {
+                       boost::range::push_back(audio_streams_.back(), core::mutable_audio_buffer(audio_cadence_.front() * audio_channel_layout_.num_channels, 0));
                }
                else
                {
-                       auto ptr = reinterpret_cast<int32_t*>(audio->data[0]);
-                       audio_stream_.insert(audio_stream_.end(), ptr, ptr + audio->linesize[0]/sizeof(int32_t));
+                       boost::range::push_back(audio_streams_.back(), *audio);
                }
 
-               merge();
+               if (audio_streams_.back().size() > 32 * audio_cadence_.front() * audio_channel_layout_.num_channels)
+                       BOOST_THROW_EXCEPTION(invalid_operation() << source_info("frame_muxer") << msg_info("audio-stream overflow. This can be caused by incorrect frame-rate. Check clip meta-data."));
        }
-       
+
        bool video_ready() const
        {
-               switch(display_mode_)
-               {
-               case display_mode::deinterlace_bob_reinterlace:                                 
-               case display_mode::interlace:   
-               case display_mode::half:
-                       return video_stream_.size() >= 2;
-               default:                                                                                
-                       return video_stream_.size() >= 1;
-               }
+               return video_streams_.size() > 1 || (video_streams_.size() >= audio_streams_.size() && video_ready2());
        }
-       
+
        bool audio_ready() const
        {
-               switch(display_mode_)
-               {
-               case display_mode::duplicate:                                   
-                       return audio_stream_.size() >= static_cast<size_t>(audio_cadence_[0] + audio_cadence_[1 % audio_cadence_.size()]) * channel_layout_.num_channels;
-               default:                                                                                
-                       return audio_stream_.size() >= static_cast<size_t>(audio_cadence_.front()) * channel_layout_.num_channels;
-               }
+               return audio_streams_.size() > 1 || (audio_streams_.size() >= video_streams_.size() && audio_ready2());
        }
 
-       bool empty() const
+       bool video_ready2() const
        {
-               return frame_buffer_.empty();
+               return video_streams_.front().size() >= 1;
        }
 
-       core::draw_frame front() const
+       bool audio_ready2() const
        {
-               return frame_buffer_.front();
+               return audio_streams_.front().size() >= audio_cadence_.front() * audio_channel_layout_.num_channels;
        }
 
-       void pop()
-       {
-               frame_buffer_.pop();
-       }
-               
-       void merge()
+       core::draw_frame poll()
        {
-               while(video_ready() && audio_ready() && display_mode_ != display_mode::invalid)
-               {                               
-                       auto frame1                     = pop_video();
-                       frame1.audio_data()     = pop_audio();
+               if (!frame_buffer_.empty())
+               {
+                       auto frame = frame_buffer_.front();
+                       frame_buffer_.pop();
+                       return frame;
+               }
 
-                       switch(display_mode_)
-                       {
-                       case display_mode::simple:                                              
-                       case display_mode::deinterlace_bob:                             
-                       case display_mode::deinterlace: 
-                               {
-                                       frame_buffer_.push(core::draw_frame(std::move(frame1)));
-                                       break;
-                               }
-                       case display_mode::interlace:                                   
-                       case display_mode::deinterlace_bob_reinterlace: 
-                               {                               
-                                       auto frame2 = pop_video();
-
-                                       frame_buffer_.push(core::draw_frame::interlace(
-                                               core::draw_frame(std::move(frame1)),
-                                               core::draw_frame(std::move(frame2)),
-                                               format_desc_.field_mode));      
-                                       break;
-                               }
-                       case display_mode::duplicate:   
-                               {
-                                       //boost::range::push_back(frame1.audio_data(), pop_audio());
-
-                                       auto second_audio_frame = core::mutable_frame(
-                                                       std::vector<array<std::uint8_t>>(),
-                                                       pop_audio(),
-                                                       frame1.stream_tag(),
-                                                       core::pixel_format_desc(),
-                                                       channel_layout_);
-                                       auto first_frame = core::draw_frame(std::move(frame1));
-                                       auto muted_first_frame = core::draw_frame(first_frame);
-                                       muted_first_frame.transform().audio_transform.volume = 0;
-                                       auto second_frame = core::draw_frame({ core::draw_frame(std::move(second_audio_frame)), muted_first_frame });
-
-                                       // Same video but different audio.
-                                       frame_buffer_.push(first_frame);
-                                       frame_buffer_.push(second_frame);
-                                       break;
-                               }
-                       case display_mode::half:        
-                               {                               
-                                       pop_video(); // Throw away
-
-                                       frame_buffer_.push(core::draw_frame(std::move(frame1)));
-                                       break;
-                               }
-                       default:
-                               CASPAR_THROW_EXCEPTION(invalid_operation());
-                       }
+               if (video_streams_.size() > 1 && audio_streams_.size() > 1 && (!video_ready2() || !audio_ready2()))
+               {
+                       if (!video_streams_.front().empty() || !audio_streams_.front().empty())
+                               CASPAR_LOG(trace) << "Truncating: " << video_streams_.front().size() << L" video-frames, " << audio_streams_.front().size() << L" audio-samples.";
+
+                       video_streams_.pop();
+                       audio_streams_.pop();
                }
+
+               if (!video_ready2() || !audio_ready2() || display_mode_ == display_mode::invalid)
+                       return core::draw_frame::empty();
+
+               auto frame                      = pop_video();
+               frame.audio_data()      = pop_audio();
+
+               frame_buffer_.push(core::draw_frame(std::move(frame)));
+
+               return frame_buffer_.empty() ? core::draw_frame::empty() : poll();
        }
-       
+
        core::mutable_frame pop_video()
        {
-               auto frame = std::move(video_stream_.front());
-               video_stream_.pop();            
-               return std::move(frame);
+               auto frame = std::move(video_streams_.front().front());
+               video_streams_.front().pop();
+               return frame;
        }
 
        core::mutable_audio_buffer pop_audio()
        {
-               if (audio_stream_.size() < audio_cadence_.front() * channel_layout_.num_channels)
-                       CASPAR_THROW_EXCEPTION(out_of_range());
+               CASPAR_VERIFY(audio_streams_.front().size() >= audio_cadence_.front() * audio_channel_layout_.num_channels);
 
-               auto begin = audio_stream_.begin();
-               auto end   = begin + audio_cadence_.front() * channel_layout_.num_channels;
+               auto begin      = audio_streams_.front().begin();
+               auto end        = begin + (audio_cadence_.front() * audio_channel_layout_.num_channels);
 
                core::mutable_audio_buffer samples(begin, end);
-               audio_stream_.erase(begin, end);
-               
-               boost::range::rotate(audio_cadence_, std::begin(audio_cadence_)+1);
+               audio_streams_.front().erase(begin, end);
+
+               boost::range::rotate(audio_cadence_, std::begin(audio_cadence_) + 1);
 
                return samples;
        }
-                               
+
+       uint32_t calc_nb_frames(uint32_t nb_frames) const
+       {
+               uint64_t nb_frames2 = nb_frames;
+
+               if(filter_ && filter_->is_double_rate()) // Take into account transformations in filter.
+                       nb_frames2 *= 2;
+
+               return static_cast<uint32_t>(nb_frames2);
+       }
+
+       boost::rational<int> out_framerate() const
+       {
+               boost::lock_guard<boost::mutex> lock(out_framerate_mutex_);
+
+               return out_framerate_;
+       }
+private:
        void update_display_mode(const std::shared_ptr<AVFrame>& frame)
        {
-               std::wstring filter_str = filter_str_;
+               std::wstring filter_str = filter_str_;
 
                display_mode_ = display_mode::simple;
 
                auto mode = get_mode(*frame);
-               if(mode == core::field_mode::progressive && frame->height < 720 && in_fps_ < 50.0) // SD frames are interlaced. Probably incorrect meta-data. Fix it.
+               if (mode == core::field_mode::progressive && frame->height < 720 && in_framerate_ < 50) // SD frames are interlaced. Probably incorrect meta-data. Fix it.
                        mode = core::field_mode::upper;
 
-               auto fps  = in_fps_;
-
-               if(filter::is_deinterlacing(filter_str_))
-                       mode = core::field_mode::progressive;
-
-               if(filter::is_double_rate(filter_str_))
-                       fps *= 2;
-                       
-               display_mode_ = get_display_mode(mode, fps, format_desc_.field_mode, format_desc_.fps);
-                       
-               if((frame->height != 480 || format_desc_.height != 486) && // don't deinterlace for NTSC DV
-                               display_mode_ == display_mode::simple && mode != core::field_mode::progressive && format_desc_.field_mode != core::field_mode::progressive && 
-                               frame->height != format_desc_.height)
+               if (filter::is_deinterlacing(filter_str_))
+               {
+                       display_mode_ = display_mode::simple;
+               }
+               else if (mode != core::field_mode::progressive)
                {
-                       display_mode_ = display_mode::deinterlace_bob_reinterlace; // The frame will most likely be scaled, we need to deinterlace->reinterlace 
+                       if (force_deinterlacing_)
+                       {
+                               display_mode_ = display_mode::deinterlace_bob;
+                       }
+                       else
+                       {
+                               bool output_also_interlaced = format_desc_.field_mode != core::field_mode::progressive;
+                               bool interlaced_output_compatible =
+                                               output_also_interlaced
+                                               && (
+                                                               (frame->height == 480 && format_desc_.height == 486) // don't deinterlace for NTSC DV
+                                                               || frame->height == format_desc_.height
+                                               )
+                                               && in_framerate_ == format_desc_.framerate;
+
+                               display_mode_ = interlaced_output_compatible ? display_mode::simple : display_mode::deinterlace_bob;
+                       }
                }
 
-               // ALWAYS de-interlace, until we have GPU de-interlacing.
-               if(force_deinterlacing_ && frame->interlaced_frame && display_mode_ != display_mode::deinterlace_bob && display_mode_ != display_mode::deinterlace)
-                       display_mode_ = display_mode::deinterlace_bob_reinterlace;
-               
-               if(display_mode_ == display_mode::deinterlace)
-                       filter_str = append_filter(filter_str, L"YADIF=0:-1");
-               else if(display_mode_ == display_mode::deinterlace_bob || display_mode_ == display_mode::deinterlace_bob_reinterlace)
+               if (display_mode_ == display_mode::deinterlace_bob)
                        filter_str = append_filter(filter_str, L"YADIF=1:-1");
 
-               if(display_mode_ == display_mode::invalid)
-               {
-                       if (ffmpeg::is_logging_quiet_for_thread())
-                               CASPAR_LOG(debug) << L"[frame_muxer] Auto-transcode: Failed to detect display-mode.";
-                       else
-                               CASPAR_LOG(warning) << L"[frame_muxer] Auto-transcode: Failed to detect display-mode.";
+               auto out_framerate = in_framerate_;
 
-                       display_mode_ = display_mode::simple;
-               }
+               if (filter::is_double_rate(filter_str))
+                       out_framerate *= 2;
 
-               if(frame->height == 480) // NTSC DV
+               if (frame->height == 480) // NTSC DV
                {
                        auto pad_str = L"PAD=" + boost::lexical_cast<std::wstring>(frame->width) + L":486:0:2:black";
                        filter_str = append_filter(filter_str, pad_str);
                }
 
                filter_.reset (new filter(
-                       frame->width,
-                       frame->height,
-                       boost::rational<int>(1000000, static_cast<int>(in_fps_ * 1000000)),
-                       boost::rational<int>(static_cast<int>(in_fps_ * 1000000), 1000000),
-                       boost::rational<int>(frame->sample_aspect_ratio.num, frame->sample_aspect_ratio.den),
-                       static_cast<AVPixelFormat>(frame->format),
-                       std::vector<AVPixelFormat>(),
-                       u8(filter_str)));
+                               frame->width,
+                               frame->height,
+                               1 / in_framerate_,
+                               in_framerate_,
+                               boost::rational<int>(frame->sample_aspect_ratio.num, frame->sample_aspect_ratio.den),
+                               static_cast<AVPixelFormat>(frame->format),
+                               std::vector<AVPixelFormat>(),
+                               u8(filter_str)));
+
+               set_out_framerate(out_framerate);
+
+               auto in_fps = static_cast<double>(in_framerate_.numerator()) / static_cast<double>(in_framerate_.denominator());
 
                if (ffmpeg::is_logging_quiet_for_thread())
-                       CASPAR_LOG(debug) << L"[frame_muxer] " << display_mode_ << L" " << print_mode(frame->width, frame->height, in_fps_, frame->interlaced_frame > 0);
+                       CASPAR_LOG(debug) << L"[frame_muxer] " << display_mode_ << L" " << print_mode(frame->width, frame->height, in_fps, frame->interlaced_frame > 0);
                else
-                       CASPAR_LOG(info) << L"[frame_muxer] " << display_mode_ << L" " << print_mode(frame->width, frame->height, in_fps_, frame->interlaced_frame > 0);
+                       CASPAR_LOG(info) << L"[frame_muxer] " << display_mode_ << L" " << print_mode(frame->width, frame->height, in_fps, frame->interlaced_frame > 0);
        }
-       
-       uint32_t calc_nb_frames(uint32_t nb_frames) const
-       {
-               uint64_t nb_frames2 = nb_frames;
-               
-               if(filter_ && filter_->is_double_rate()) // Take into account transformations in filter.
-                       nb_frames2 *= 2;
 
-               switch(display_mode_) // Take into account transformation in run.
+       void merge()
+       {
+               while (video_ready() && audio_ready() && display_mode_ != display_mode::invalid)
                {
-               case display_mode::deinterlace_bob_reinterlace:
-               case display_mode::interlace:   
-               case display_mode::half:
-                       nb_frames2 /= 2;
-                       break;
-               case display_mode::duplicate:
-                       nb_frames2 *= 2;
-                       break;
-               }
+                       auto frame1 = pop_video();
+                       frame1.audio_data() = pop_audio();
 
-               return static_cast<uint32_t>(nb_frames2);
+                       frame_buffer_.push(core::draw_frame(std::move(frame1)));
+               }
        }
 
-       void clear()
+       void set_out_framerate(boost::rational<int> out_framerate)
        {
-               while(!video_stream_.empty())
-                       video_stream_.pop();    
+               boost::lock_guard<boost::mutex> lock(out_framerate_mutex_);
 
-               audio_stream_.clear();
+               bool changed = out_framerate != out_framerate_;
+               out_framerate_ = std::move(out_framerate);
 
-               while(!frame_buffer_.empty())
-                       frame_buffer_.pop();
-               
-               filter_.reset();
+               if (changed)
+                       update_audio_cadence();
+       }
+
+       void update_audio_cadence()
+       {
+               audio_cadence_ = find_audio_cadence(out_framerate_);
+
+               // Note: Uses 1 step rotated cadence for 1001 modes (1602, 1602, 1601, 1602, 1601)
+               // This cadence fills the audio mixer most optimally.
+               boost::range::rotate(audio_cadence_, std::end(audio_cadence_) - 1);
        }
 };
 
 frame_muxer::frame_muxer(
-               double in_fps,
+               boost::rational<int> in_framerate,
                const spl::shared_ptr<core::frame_factory>& frame_factory,
                const core::video_format_desc& format_desc,
                const core::audio_channel_layout& channel_layout,
-               const std::wstring& filter)
-       : impl_(new impl(in_fps, frame_factory, format_desc, channel_layout, filter)){}
-void frame_muxer::push_video(const std::shared_ptr<AVFrame>& frame){impl_->push_video(frame);}
-void frame_muxer::push_audio(const std::shared_ptr<AVFrame>& frame){impl_->push_audio(frame);}
-bool frame_muxer::empty() const{return impl_->empty();}
-core::draw_frame frame_muxer::front() const{return impl_->front();}
-void frame_muxer::pop(){return impl_->pop();}
-void frame_muxer::clear(){impl_->clear();}
+               const std::wstring& filter,
+               bool multithreaded_filter)
+       : impl_(new impl(in_framerate, frame_factory, format_desc, channel_layout, filter, multithreaded_filter)){}
+void frame_muxer::push(const std::shared_ptr<AVFrame>& video){impl_->push(video);}
+void frame_muxer::push(const std::shared_ptr<core::mutable_audio_buffer>& audio){impl_->push(audio);}
+core::draw_frame frame_muxer::poll(){return impl_->poll();}
 uint32_t frame_muxer::calc_nb_frames(uint32_t nb_frames) const {return impl_->calc_nb_frames(nb_frames);}
 bool frame_muxer::video_ready() const{return impl_->video_ready();}
 bool frame_muxer::audio_ready() const{return impl_->audio_ready();}
-
-}}
\ No newline at end of file
+boost::rational<int> frame_muxer::out_framerate() const { return impl_->out_framerate(); }
+}}
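
For orientation, a rough caller-side sketch of the reworked muxer interface; the decoded_video, decoded_audio, frame_factory, format_desc, channel_layout and frame_buffer objects are hypothetical placeholders and not part of this commit:

// Sketch only; parameter order follows the constructor declared in frame_muxer.h below.
caspar::ffmpeg::frame_muxer muxer(
        boost::rational<int>(30000, 1001),   // in_framerate (was: double in_fps)
        frame_factory,                       // spl::shared_ptr<core::frame_factory>
        format_desc,                         // core::video_format_desc
        channel_layout,                      // core::audio_channel_layout
        L"",                                 // filter string (empty = none)
        false);                              // multithreaded_filter

muxer.push(decoded_video);                   // std::shared_ptr<AVFrame>
muxer.push(decoded_audio);                   // std::shared_ptr<core::mutable_audio_buffer>

while (muxer.video_ready() && muxer.audio_ready())
{
        core::draw_frame frame = muxer.poll();   // produced at muxer.out_framerate()
        frame_buffer.push(frame);                // hypothetical consumer
}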
index 0a78863635f688fdefc212c37198bbc1c52f634c..651cbb4412cf4987c132133d9a87ee5c8150ece4 100644 (file)
 #include <common/forward.h>
 #include <common/memory.h>
 
+#include <core/frame/frame.h>
 #include <core/mixer/audio/audio_mixer.h>
 #include <core/fwd.h>
 
 #include <boost/noncopyable.hpp>
+#include <boost/rational.hpp>
 
 #include <vector>
 
@@ -41,23 +43,22 @@ class frame_muxer : boost::noncopyable
 {
 public:
        frame_muxer(
-                       double in_fps,
+                       boost::rational<int> in_framerate,
                        const spl::shared_ptr<core::frame_factory>& frame_factory,
                        const core::video_format_desc& format_desc,
                        const core::audio_channel_layout& channel_layout,
-                       const std::wstring& filter);
+                       const std::wstring& filter,
+                       bool multithreaded_filter);
        
-       void push_video(const std::shared_ptr<AVFrame>& frame);
-       void push_audio(const std::shared_ptr<AVFrame>& frame);
+       void push(const std::shared_ptr<AVFrame>& video_frame);
+       void push(const std::shared_ptr<core::mutable_audio_buffer>& audio_samples);
        
        bool video_ready() const;
        bool audio_ready() const;
 
-       void clear();
+       core::draw_frame poll();
 
-       bool empty() const;
-       core::draw_frame front() const;
-       void pop();
+       boost::rational<int> out_framerate() const;
 
        uint32_t calc_nb_frames(uint32_t nb_frames) const;
 private:
index eb773c6c115f5818572bdee16ddd1072e409a4ba..d57b9b60da5a4aa60568587daebad2d1da780ef4 100644 (file)
@@ -27,6 +27,7 @@
 
 #include "../tbb_avcodec.h"
 #include "../../ffmpeg_error.h"
+#include "../../ffmpeg.h"
 
 #include <tbb/concurrent_unordered_map.h>
 #include <tbb/concurrent_queue.h>
@@ -487,18 +488,31 @@ spl::shared_ptr<AVFrame> create_frame()
        {
                av_frame_free(&p);
        });
-       avcodec_get_frame_defaults(frame.get());
        return frame;
 }
 
-std::shared_ptr<AVFrame> flush()
+std::shared_ptr<core::mutable_audio_buffer> flush_audio()
 {
-       static std::shared_ptr<AVFrame> dummy(av_frame_alloc(), [](AVFrame* p)
-       {
-               av_frame_free(&p);
-       });
+       static std::shared_ptr<core::mutable_audio_buffer> audio(new core::mutable_audio_buffer());
+       return audio;
+}
+
+std::shared_ptr<core::mutable_audio_buffer> empty_audio()
+{
+       static std::shared_ptr<core::mutable_audio_buffer> audio(new core::mutable_audio_buffer());
+       return audio;
+}
+
+std::shared_ptr<AVFrame> flush_video()
+{
+       static auto video = create_frame();
+       return video;
+}
 
-       return dummy;
+std::shared_ptr<AVFrame> empty_video()
+{
+       static auto video = create_frame();
+       return video;
 }
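
flush_video()/empty_video() (and their audio counterparts) return process-wide singletons, so callers can presumably distinguish the sentinels by pointer identity rather than by inspecting the frame contents. A self-contained sketch of that pattern (fake_frame is a stand-in type, not part of the commit):

#include <iostream>
#include <memory>

// Minimal stand-in for AVFrame to keep the sketch self-contained.
struct fake_frame {};

std::shared_ptr<fake_frame> flush_video()
{
    static auto sentinel = std::make_shared<fake_frame>();
    return sentinel;
}

std::shared_ptr<fake_frame> empty_video()
{
    static auto sentinel = std::make_shared<fake_frame>();
    return sentinel;
}

int main()
{
    auto f = flush_video();

    // Sentinels are told apart by pointer identity, not by content.
    std::cout << std::boolalpha
              << (f == flush_video()) << ' '    // true
              << (f == empty_video()) << '\n';  // false
}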
 
 spl::shared_ptr<AVCodecContext> open_codec(AVFormatContext& context, enum AVMediaType type, int& index, bool single_threaded)
@@ -771,6 +785,60 @@ std::int64_t create_channel_layout_bitmask(int num_channels)
        return static_cast<std::int64_t>(result);
 }
 
+std::string to_string(const boost::rational<int>& framerate)
+{
+       return boost::lexical_cast<std::string>(framerate.numerator())
+               + "/" + boost::lexical_cast<std::string>(framerate.denominator()) + " (" + boost::lexical_cast<std::string>(static_cast<double>(framerate.numerator()) / static_cast<double>(framerate.denominator())) + ") fps";
+}
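
As a worked example, the helper above renders a rational framerate roughly as shown below (a free-standing copy for illustration; the exact number of digits depends on boost::lexical_cast's double formatting):

#include <boost/lexical_cast.hpp>
#include <boost/rational.hpp>
#include <iostream>
#include <string>

std::string to_string(const boost::rational<int>& framerate)
{
    return boost::lexical_cast<std::string>(framerate.numerator())
        + "/" + boost::lexical_cast<std::string>(framerate.denominator())
        + " (" + boost::lexical_cast<std::string>(
              static_cast<double>(framerate.numerator()) / framerate.denominator())
        + ") fps";
}

int main()
{
    // Prints e.g. "30000/1001 (29.97002997002997) fps".
    std::cout << to_string(boost::rational<int>(30000, 1001)) << '\n';
}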
+
+std::vector<int> find_audio_cadence(const boost::rational<int>& framerate)
+{
+       static std::map<boost::rational<int>, std::vector<int>> CADENCES_BY_FRAMERATE = []
+       {
+               std::map<boost::rational<int>, std::vector<int>> result;
+
+               for (core::video_format format : enum_constants<core::video_format>())
+               {
+                       core::video_format_desc desc(format);
+                       boost::rational<int> format_rate(desc.time_scale, desc.duration);
+
+                       result.insert(std::make_pair(format_rate, desc.audio_cadence));
+               }
+
+               return result;
+       }();
+
+       auto exact_match = CADENCES_BY_FRAMERATE.find(framerate);
+
+       if (exact_match != CADENCES_BY_FRAMERATE.end())
+               return exact_match->second;
+
+       boost::rational<int> closest_framerate_diff = std::numeric_limits<int>::max();
+       boost::rational<int> closest_framerate = 0;
+
+       for (auto format_framerate : CADENCES_BY_FRAMERATE | boost::adaptors::map_keys)
+       {
+               auto diff = boost::abs(framerate - format_framerate);
+
+               if (diff < closest_framerate_diff)
+               {
+                       closest_framerate_diff = diff;
+                       closest_framerate = format_framerate;
+               }
+       }
+
+       if (is_logging_quiet_for_thread())
+               CASPAR_LOG(debug) << "No exact audio cadence match found for framerate " << to_string(framerate)
+               << "\nClosest match is " << to_string(closest_framerate)
+               << "\nwhich is a " << to_string(closest_framerate_diff) << " difference.";
+       else
+               CASPAR_LOG(warning) << "No exact audio cadence match found for framerate " << to_string(framerate)
+               << "\nClosest match is " << to_string(closest_framerate)
+               << "\nwhich is a " << to_string(closest_framerate_diff) << " difference.";
+
+       return CADENCES_BY_FRAMERATE[closest_framerate];
+}
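
A self-contained sketch of the closest-match fallback above, using a hypothetical subset of the cadence table (the cadence values assume 48 kHz audio and are for illustration only):

#include <boost/rational.hpp>
#include <iostream>
#include <limits>
#include <map>
#include <vector>

int main()
{
    using rate = boost::rational<int>;

    // Hypothetical subset of the table built from the video format list.
    std::map<rate, std::vector<int>> cadences {
        { rate(25, 1),       { 1920 } },
        { rate(30000, 1001), { 1602, 1601, 1602, 1601, 1602 } },
        { rate(50, 1),       { 960 } },
    };

    rate input(24000, 1001);                        // no exact match in the table
    rate best_diff(std::numeric_limits<int>::max());
    rate best(0);

    for (const auto& entry : cadences)
    {
        auto diff = boost::abs(input - entry.first); // same comparison as above
        if (diff < best_diff)
        {
            best_diff = diff;
            best = entry.first;
        }
    }

    // 23.976 fps is closest to 25 fps in this subset.
    std::cout << best.numerator() << '/' << best.denominator() << '\n'; // 25/1
}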
+
 //
 //void av_dup_frame(AVFrame* frame)
 //{
index abd167a0180f3708a2105ec736db7a33b49c001c..be76ca5dc81e393b130c421ab6d7b270bb9e9d55 100644 (file)
 
 #include <core/video_format.h>
 #include <core/frame/pixel_format.h>
-#include <core/mixer/audio/audio_mixer.h>
+#include <core/frame/audio_channel_layout.h>
+#include <core/frame/frame.h>
 #include <core/fwd.h>
 
 #include <boost/rational.hpp>
 
 #include <array>
+#include <vector>
+#include <utility>
 
 #if defined(_MSC_VER)
 #pragma warning (push)
@@ -52,7 +55,15 @@ struct AVRational;
 struct AVCodecContext;
 
 namespace caspar { namespace ffmpeg {
-               
+
+enum class FFMPEG_Resource {
+       FFMPEG_FILE,
+       FFMPEG_DEVICE,
+       FFMPEG_STREAM
+};
+
+typedef std::vector<std::pair<std::string, std::string>> ffmpeg_options;
+
 // Utils
 
 core::field_mode                                       get_mode(const AVFrame& frame);
@@ -71,7 +82,10 @@ spl::shared_ptr<AVFormatContext> open_input(const std::wstring& filename);
 bool is_sane_fps(AVRational time_base);
 AVRational fix_time_base(AVRational time_base);
 
-std::shared_ptr<AVFrame> flush();
+std::shared_ptr<core::mutable_audio_buffer>    flush_audio();
+std::shared_ptr<core::mutable_audio_buffer>    empty_audio();
+std::shared_ptr<AVFrame>                                       flush_video();
+std::shared_ptr<AVFrame>                                       empty_video();
 
 double read_fps(AVFormatContext& context, double fail_value);
 boost::rational<int> read_framerate(AVFormatContext& context, const boost::rational<int>& fail_value);
@@ -87,4 +101,6 @@ core::audio_channel_layout get_audio_channel_layout(int num_channels, std::uint6
 // av_get_default_channel_layout does not work for layouts not predefined in ffmpeg. This is needed to support > 8 channels.
 std::int64_t create_channel_layout_bitmask(int num_channels);
 
+std::vector<int> find_audio_cadence(const boost::rational<int>& framerate);
+
 }}
index bc379a8c0b9378d1aab1517dbef8d9f9e2d1e1f8..5bc546078c5738e931fd277136a1ca2c74b230e7 100644 (file)
@@ -1,5 +1,5 @@
 /*
-* Copyright (c) 2011 Sveriges Television AB <info@casparcg.com>
+* Copyright 2013 Sveriges Television AB http://casparcg.com/
 *
 * This file is part of CasparCG (www.casparcg.com).
 *
 * Author: Robert Nagy, ronag89@gmail.com
 */
 
-#include "../../StdAfx.h"
+#include "../../stdafx.h"
 
 #include "video_decoder.h"
 
 #include "../util/util.h"
-#include "../input/input.h"
 
 #include "../../ffmpeg_error.h"
 
-#include <common/log.h>
 #include <core/frame/frame_transform.h>
 #include <core/frame/frame_factory.h>
 
+#include <boost/range/algorithm_ext/push_back.hpp>
 #include <boost/filesystem.hpp>
 
-#include <tbb/atomic.h>
-
 #include <queue>
 
 #if defined(_MSC_VER)
@@ -53,105 +50,101 @@ extern "C"
 
 namespace caspar { namespace ffmpeg {
        
-struct video_decoder::impl : boost::noncopyable
+struct video_decoder::implementation : boost::noncopyable
 {
-       core::monitor::subject                                  monitor_subject_;
-       input*                                                                  input_;
        int                                                                             index_                          = -1;
        const spl::shared_ptr<AVCodecContext>   codec_context_;
 
        std::queue<spl::shared_ptr<AVPacket>>   packets_;
        
-       const AVStream*                                                 stream_;
        const uint32_t                                                  nb_frames_;
-       const int                                                               width_;
-       const int                                                               height_;
 
-       tbb::atomic<bool>                                               is_progressive_;
+       const int                                                               width_                          = codec_context_->width;
+       const int                                                               height_                         = codec_context_->height;
+       bool                                                                    is_progressive_;
+
        tbb::atomic<uint32_t>                                   file_frame_number_;
-       boost::rational<int>                                    framerate_;
-       
-       std::shared_ptr<AVPacket>                               current_packet_;
 
 public:
-       explicit impl(input& in, bool single_threaded)
-               : input_(&in)
-               , codec_context_(open_codec(input_->context(), AVMEDIA_TYPE_VIDEO, index_, single_threaded))
-               , stream_(input_->context().streams[index_])
-               , nb_frames_(static_cast<uint32_t>(stream_->nb_frames))
-               , width_(codec_context_->width)
-               , height_(codec_context_->height)
-               , framerate_(read_framerate(input_->context(), 0))
+       explicit implementation(const spl::shared_ptr<AVFormatContext>& context)
+               : codec_context_(open_codec(*context, AVMEDIA_TYPE_VIDEO, index_, false))
+               , nb_frames_(static_cast<uint32_t>(context->streams[index_]->nb_frames))
        {
-               is_progressive_ = false;
                file_frame_number_ = 0;
+
+               codec_context_->refcounted_frames = 1;
        }
-       
+
+       void push(const std::shared_ptr<AVPacket>& packet)
+       {
+               if(!packet)
+                       return;
+
+               if(packet->stream_index == index_ || packet->data == nullptr)
+                       packets_.push(spl::make_shared_ptr(packet));
+       }
+
        std::shared_ptr<AVFrame> poll()
-       {                       
-               if(!current_packet_ && !input_->try_pop_video(current_packet_))
+       {               
+               if(packets_.empty())
                        return nullptr;
                
-               std::shared_ptr<AVFrame> frame;
-
-               if (!current_packet_->data)
-               {
-                       if(codec_context_->codec->capabilities & CODEC_CAP_DELAY)                       
-                               frame = decode(*current_packet_);
-                       
-                       if (!frame)
+               auto packet = packets_.front();
+                                       
+               if(packet->data == nullptr)
+               {                       
+                       if(codec_context_->codec->capabilities & CODEC_CAP_DELAY)
                        {
-                               file_frame_number_ = static_cast<uint32_t>(current_packet_->pos);
-                               avcodec_flush_buffers(codec_context_.get());
-                               current_packet_.reset();
-                               frame = flush();
+                               auto video = decode(packet);
+                               if(video)
+                                       return video;
                        }
+                                       
+                       packets_.pop();
+                       file_frame_number_ = static_cast<uint32_t>(packet->pos);
+                       avcodec_flush_buffers(codec_context_.get());
+                       return flush_video();   
                }
-               else
-               {
-                       frame = decode(*current_packet_);
                        
-                       if(current_packet_->size == 0)
-                               current_packet_.reset();
-               }
-                       
-               return frame;
+               packets_.pop();
+               return decode(packet);
        }
 
-       std::shared_ptr<AVFrame> decode(AVPacket& pkt)
+       std::shared_ptr<AVFrame> decode(spl::shared_ptr<AVPacket> pkt)
        {
-               auto frame = create_frame();
-
-               int got_frame = 0;
-               auto len = THROW_ON_ERROR2(avcodec_decode_video2(codec_context_.get(), frame.get(), &got_frame, &pkt), "[video_decocer]");
-                               
-               if(len == 0)
+               auto decoded_frame = std::shared_ptr<AVFrame>(av_frame_alloc(), [](AVFrame* frame)
                {
-                       pkt.size = 0;
+                       av_frame_free(&frame);
+               });
+               
+               int frame_finished = 0;
+               THROW_ON_ERROR2(avcodec_decode_video2(codec_context_.get(), decoded_frame.get(), &frame_finished, pkt.get()), "[video_decoder]");
+               
+               // If the decoder consumes less than the whole packet then something is wrong:
+               // it might just be harmless padding at the end, or a problem with the
+               // AVParser or demuxer putting more than one frame in an AVPacket.
+
+               if(frame_finished == 0) 
                        return nullptr;
-               }
 
-        pkt.data += len;
-        pkt.size -= len;
+               is_progressive_ = !decoded_frame->interlaced_frame;
 
-               if(got_frame == 0)      
-                       return nullptr;
+               if(decoded_frame->repeat_pict > 0)
+                       CASPAR_LOG(warning) << "[video_decoder] Field repeat_pict not implemented.";
                
                ++file_frame_number_;
 
-               is_progressive_ = !frame->interlaced_frame;
-               
-               if(frame->repeat_pict > 0)
-                       CASPAR_LOG(warning) << "[video_decoder] repeat_pict not implemented.";
-                               
-               monitor_subject_  << core::monitor::message("/file/video/width")        % width_
-                                               << core::monitor::message("/file/video/height") % height_
-                                               << core::monitor::message("/file/video/field")  % u8(!frame->interlaced_frame ? "progressive" : (frame->top_field_first ? "upper" : "lower"))
-                                               << core::monitor::message("/file/video/codec")  % u8(codec_context_->codec->long_name);
-               
-               return frame;
+               // This ties the lifetime of decoded_frame to the packet it came from. For the
+               // current version of ffmpeg (0.8 or c17808c) the RAW_VIDEO codec returns frame data
+               // owned by the packet.
+               return std::shared_ptr<AVFrame>(decoded_frame.get(), [decoded_frame, pkt](AVFrame*){});
        }
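
The no-op deleter that captures both owners is a common lifetime-extension idiom; a minimal self-contained sketch (packet and frame are stand-in types, not the ffmpeg structs):

#include <iostream>
#include <memory>

struct packet { int data = 42; };
struct frame  { int* payload = nullptr; };

int main()
{
    auto pkt = std::make_shared<packet>();

    auto decoded = std::make_shared<frame>();
    decoded->payload = &pkt->data;            // frame borrows memory owned by the packet

    // Keep both alive for as long as anyone holds 'tied': the empty deleter
    // captures the owning pointers, mirroring the trick used in decode() above.
    std::shared_ptr<frame> tied(decoded.get(), [decoded, pkt](frame*) {});

    pkt.reset();
    decoded.reset();                          // still safe: 'tied' keeps the captures alive

    std::cout << *tied->payload << '\n';      // prints 42
}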
        
+       bool ready() const
+       {
+               return packets_.size() >= 8;
+       }
+
        uint32_t nb_frames() const
        {
                return std::max(nb_frames_, static_cast<uint32_t>(file_frame_number_));
@@ -163,16 +156,15 @@ public:
        }
 };
 
-video_decoder::video_decoder(input& in, bool single_threaded) : impl_(new impl(in, single_threaded)){}
-video_decoder::video_decoder(video_decoder&& other) : impl_(std::move(other.impl_)){}
-video_decoder& video_decoder::operator=(video_decoder&& other){impl_ = std::move(other.impl_); return *this;}
-std::shared_ptr<AVFrame> video_decoder::operator()(){return impl_->poll();}
+video_decoder::video_decoder(const spl::shared_ptr<AVFormatContext>& context) : impl_(new implementation(context)){}
+void video_decoder::push(const std::shared_ptr<AVPacket>& packet){impl_->push(packet);}
+std::shared_ptr<AVFrame> video_decoder::poll(){return impl_->poll();}
+bool video_decoder::ready() const{return impl_->ready();}
 int video_decoder::width() const{return impl_->width_;}
 int video_decoder::height() const{return impl_->height_;}
 uint32_t video_decoder::nb_frames() const{return impl_->nb_frames();}
-uint32_t video_decoder::file_frame_number() const{return impl_->file_frame_number_;}
-boost::rational<int> video_decoder::framerate() const { return impl_->framerate_; }
-bool video_decoder::is_progressive() const{return impl_->is_progressive_;}
+uint32_t video_decoder::file_frame_number() const{return static_cast<uint32_t>(impl_->file_frame_number_);}
+bool   video_decoder::is_progressive() const{return impl_->is_progressive_;}
 std::wstring video_decoder::print() const{return impl_->print();}
-core::monitor::subject& video_decoder::monitor_output() { return impl_->monitor_subject_; }
-}}
+
+}}
\ No newline at end of file
index 5cc1aea1dcca1ab2867ee8bc5553e328003049a8..d954dc05b05610a1d8b24d26b8cd03b7a75e573a 100644 (file)
@@ -1,5 +1,5 @@
 /*
-* Copyright (c) 2011 Sveriges Television AB <info@casparcg.com>
+* Copyright 2013 Sveriges Television AB http://casparcg.com/
 *
 * This file is part of CasparCG (www.casparcg.com).
 *
 #pragma once
 
 #include <common/memory.h>
-#include <common/forward.h>
-
-#include <core/monitor/monitor.h>
 
 #include <boost/noncopyable.hpp>
-#include <boost/rational.hpp>
 
 struct AVFormatContext;
 struct AVFrame;
 struct AVPacket;
 
-namespace caspar { namespace ffmpeg {
+namespace caspar {
+
+namespace core {
+       class frame_factory;
+       class write_frame;
+}
 
-class video_decoder : public boost::noncopyable
+namespace ffmpeg {
+
+class video_decoder : boost::noncopyable
 {
 public:
-       explicit video_decoder(class input& input, bool single_threaded);
+       explicit video_decoder(const spl::shared_ptr<AVFormatContext>& context);
        
-       video_decoder(video_decoder&& other);
-       video_decoder& operator=(video_decoder&& other);
-
-       std::shared_ptr<AVFrame> operator()();
+       bool                                            ready() const;
+       void                                            push(const std::shared_ptr<AVPacket>& packet);
+       std::shared_ptr<AVFrame>        poll();
        
-       int      width() const;
-       int      height() const;
-       bool is_progressive() const;
-       uint32_t file_frame_number() const;
-       boost::rational<int> framerate() const;
+       int                                                     width() const;
+       int                                                     height() const;
 
-       uint32_t nb_frames() const;
+       uint32_t                                        nb_frames() const;
+       uint32_t                                        file_frame_number() const;
+       bool                                            is_progressive() const;
 
-       std::wstring print() const;
-               
-       core::monitor::subject& monitor_output();
+       std::wstring                            print() const;
 
 private:
-       struct impl;
-       spl::shared_ptr<impl> impl_;
+       struct implementation;
+       spl::shared_ptr<implementation> impl_;
 };
 
 }}
\ No newline at end of file
index efc5c51c2c302ae2defe080eeaa051c19a0276eb..d35cccdcac31abb79095bf21e7df6fa4fac560fe 100644 (file)
 <!--\r
 <log-level>           info  [trace|debug|info|warning|error|fatal]</log-level>\r
 <log-categories>      communication  [calltrace|communication|calltrace,communication]</log-categories>\r
-<force-deinterlace>   true  [true|false]</force-deinterlacing>\r
+<force-deinterlace>   false  [true|false]</force-deinterlace>\r
 <channel-grid>        false [true|false]</channel-grid>\r
 <mixer>\r
     <blend-modes>          false [true|false]</blend-modes>\r
     <mipmapping_default_on>false [true|false]</mipmapping_default_on>\r
     <straight-alpha>       false [true|false]</straight-alpha>\r
 </mixer>\r
-<auto-transcode>      true  [true|false]</auto-transcode>\r
 <accelerator>auto [cpu|gpu|auto]</accelerator>\r
 <template-hosts>\r
     <template-host>\r
-        <video-mode/>\r
-        <filename/>\r
-        <width/>\r
-        <height/>\r
+        <video-mode />\r
+        <filename />\r
+        <width />\r
+        <height />\r
     </template-host>\r
 </template-hosts>\r
 <flash>\r