#include "../../frame/frame_transform.h"
#include "../../frame/pixel_format.h"
#include "../../monitor/monitor.h"
+#include "../../help/help_sink.h"
#include <common/future.h>
#include <common/tweener.h>
#include <functional>
#include <queue>
#include <future>
+#include <stack>
namespace caspar { namespace core {
auto under = source;
auto over = destination;
- double float_distance = static_cast<double>(distance.numerator()) / static_cast<double>(distance.denominator());
+ double float_distance = boost::rational_cast<double>(distance);
under.transform().image_transform.is_mix = true;
under.transform().image_transform.opacity = 1 - float_distance;
middle.transform().image_transform.is_mix = true;
next_frame.transform().image_transform.is_mix = true;
- double float_distance = static_cast<double>(distance.numerator()) / static_cast<double>(distance.denominator());
+ double float_distance = boost::rational_cast<double>(distance);
previous_frame.transform().image_transform.opacity = std::max(0.0, 0.5 - float_distance * 0.5);
middle.transform().image_transform.opacity = 0.5;
next_frame.transform().image_transform.opacity = 1.0 - previous_frame.transform().image_transform.opacity - middle.transform().image_transform.opacity;
}
};
-struct audio_extractor : public frame_visitor
+// Visits a frame tree and forwards frames carrying audible audio to a callback.
+// Maintains a stack of accumulated audio transforms so that ancestor state
+// (e.g. is_still) is known when a leaf frame is visited.
+class audio_extractor : public frame_visitor
{
-	std::function<void(const const_frame& frame)>	on_frame_;
-
+	// Top of stack = combined audio transform of all ancestors (seeded with identity).
+	std::stack<core::audio_transform>				transform_stack_;
+	std::function<void(const const_frame& frame)>	on_frame_;
+public:
	audio_extractor(std::function<void(const const_frame& frame)> on_frame)
		: on_frame_(std::move(on_frame))
	{
+		transform_stack_.push(audio_transform());
+	}
+
+	void push(const frame_transform& transform) override
+	{
+		// Combine the incoming transform with the ancestors' accumulated one.
+		transform_stack_.push(transform_stack_.top() * transform.audio_transform);
+	}
+
+	void pop() override
+	{
+		transform_stack_.pop();
	}
-	void push(const frame_transform& transform) override { }
-	void pop() override { }
	void visit(const const_frame& frame) override
	{
+		// Skip frames with no audio and frames whose effective transform is
+		// "still" (e.g. paused), so they contribute no sound.
-		if (!frame.audio_data().empty())
+		if (!frame.audio_data().empty() && !transform_stack_.top().is_still)
			on_frame_(frame);
	}
};
{
if (time_ == duration_)
return dest_;
- double source = static_cast<double>(source_.numerator()) / static_cast<double>(source_.denominator());
- double delta = static_cast<double>(dest_.numerator()) / static_cast<double>(dest_.denominator()) - source;
- double result = tweener_(time_, source, delta, duration_);
+
+ double source = boost::rational_cast<double>(source_);
+ double delta = boost::rational_cast<double>(dest_) - source;
+ double result = tweener_(time_, source, delta, duration_);
return boost::rational<int64_t>(static_cast<int64_t>(result * 1000000.0), 1000000);
}
// for all other framerates a frame interpolator will be chosen.
if (speed_ != 1 && speed_ * 2 != 1 && speed_ != 2)
{
- if (source_framerate_ > 47) // The bluriness of blend_all is acceptable on high framerates.
+ auto high_source_framerate = source_framerate_ > 47;
+ auto high_destination_framerate = destination_framerate_ > 47
+ || destination_fieldmode_ != field_mode::progressive;
+
+ if (high_source_framerate && high_destination_framerate) // The bluriness of blend_all is acceptable on high framerates.
interpolator_ = blend_all();
- else // blend_all is mostly too blurry on low framerates. blend provides a compromise.
+ else // blend_all is mostly too blurry on low framerates. blend provides a compromise.
interpolator_ = &blend;
CASPAR_LOG(warning) << source_->print() << L" Frame blending frame rate conversion required to conform to channel frame rate.";
boost::property_tree::wptree info() const override
{
- return source_->info();
+ auto info = source_->info();
+
+ auto incorrect_frame_number = info.get_child_optional(L"frame-number");
+ if (incorrect_frame_number)
+ incorrect_frame_number->put_value(frame_number());
+
+ auto incorrect_nb_frames = info.get_child_optional(L"nb-frames");
+ if (incorrect_nb_frames)
+ incorrect_nb_frames->put_value(nb_frames());
+
+ return info;
+ }
+
+ uint32_t nb_frames() const override
+ {
+ return static_cast<uint32_t>(source_->nb_frames() * boost::rational_cast<double>(1 / get_speed() / (output_repeat_ != 0 ? 2 : 1)));
+ }
+
+ uint32_t frame_number() const override
+ {
+ return static_cast<uint32_t>(source_->frame_number() * boost::rational_cast<double>(1 / get_speed() / (output_repeat_ != 0 ? 2 : 1)));
}
constraints& pixel_constraints() override
for (std::int64_t i = 0; i < num_frames; ++i)
{
- previous_frame_ = std::move(next_frame_);
+ if (next_frame_ == draw_frame::empty())
+ previous_frame_ = pop_frame_from_source();
+ else
+ {
+ previous_frame_ = std::move(next_frame_);
- next_frame_ = pop_frame_from_source();
+ next_frame_ = pop_frame_from_source();
+ }
}
}
{
auto needed = destination_audio_cadence_.front();
auto got = audio_samples_.size() / source_channel_layout_.num_channels;
- CASPAR_LOG(debug) << print() << L" Too few audio samples. Needed " << needed << L" but got " << got;
+ if (got != 0) // If at end of stream we don't care
+ CASPAR_LOG(debug) << print() << L" Too few audio samples. Needed " << needed << L" but got " << got;
buffer.swap(audio_samples_);
buffer.resize(needed * source_channel_layout_.num_channels, 0);
}
}
};
+// Registers AMCP usage examples for the framerate producer with the help system.
+void describe_framerate_producer(help_sink& sink)
+{
+	sink.para()->text(L"Framerate conversion control / Slow motion examples:");
+	sink.example(L">> CALL 1-10 FRAMERATE INTERPOLATION BLEND", L"enables 2 frame blend interpolation.");
+	sink.example(L">> CALL 1-10 FRAMERATE INTERPOLATION BLEND_ALL", L"enables 3 frame blend interpolation.");
+	sink.example(L">> CALL 1-10 FRAMERATE INTERPOLATION DROP_AND_SKIP", L"disables frame interpolation.");
+	sink.example(L">> CALL 1-10 FRAMERATE SPEED 0.25", L"immediately changes the speed to 25%. Sound will be disabled.");
+	sink.example(L">> CALL 1-10 FRAMERATE SPEED 0.25 50", L"changes the speed to 25% linearly over 50 frames. Sound will be disabled.");
+	sink.example(L">> CALL 1-10 FRAMERATE SPEED 0.25 50 easeinoutsine", L"changes the speed to 25% over 50 frames using specified easing curve. Sound will be disabled.");
+	sink.example(L">> CALL 1-10 FRAMERATE SPEED 1 50", L"changes the speed to 100% linearly over 50 frames. Sound will be enabled when the destination speed of 100% has been reached.");
+}
+
spl::shared_ptr<frame_producer> create_framerate_producer(
spl::shared_ptr<frame_producer> source,
boost::rational<int> source_framerate,
namespace caspar { namespace core {
+void describe_framerate_producer(help_sink& sink);
+
spl::shared_ptr<frame_producer> create_framerate_producer(
spl::shared_ptr<frame_producer> source,
boost::rational<int> source_framerate,
audio_channel_remapper.cpp
ffmpeg.cpp
ffmpeg_error.cpp
+ ffmpeg_pipeline.cpp
+ ffmpeg_pipeline_backend_internal.cpp
StdAfx.cpp
)
set(HEADERS
ffmpeg.h
ffmpeg_error.h
+ ffmpeg_pipeline.h
+ ffmpeg_pipeline_backend.h
+ ffmpeg_pipeline_backend_internal.h
StdAfx.h
)
--- /dev/null
+/*
+* Copyright (c) 2011 Sveriges Television AB <info@casparcg.com>
+*
+* This file is part of CasparCG (www.casparcg.com).
+*
+* CasparCG is free software: you can redistribute it and/or modify
+* it under the terms of the GNU General Public License as published by
+* the Free Software Foundation, either version 3 of the License, or
+* (at your option) any later version.
+*
+* CasparCG is distributed in the hope that it will be useful,
+* but WITHOUT ANY WARRANTY; without even the implied warranty of
+* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+* GNU General Public License for more details.
+*
+* You should have received a copy of the GNU General Public License
+* along with CasparCG. If not, see <http://www.gnu.org/licenses/>.
+*
+* Author: Helge Norberg, helge.norberg@svt.se
+*/
+
+#include "StdAfx.h"
+
+#include "ffmpeg_pipeline.h"
+#include "ffmpeg_pipeline_backend.h"
+#include "ffmpeg_pipeline_backend_internal.h"
+
+#include <core/frame/draw_frame.h>
+#include <core/video_format.h>
+
+namespace caspar { namespace ffmpeg {
+
+// Pimpl facade: every fluent setter forwards to the shared backend and returns
+// a copy of this handle (sharing the same backend) so calls can be chained.
+ffmpeg_pipeline::ffmpeg_pipeline()
+	: impl_(create_internal_pipeline())
+{
+}
+
+ffmpeg_pipeline			ffmpeg_pipeline::graph(spl::shared_ptr<caspar::diagnostics::graph> g) { impl_->graph(std::move(g)); return *this; }
+ffmpeg_pipeline			ffmpeg_pipeline::from_file(std::string filename) { impl_->from_file(std::move(filename)); return *this; }
+ffmpeg_pipeline			ffmpeg_pipeline::from_memory_only_audio(int num_channels, int samplerate) { impl_->from_memory_only_audio(num_channels, samplerate); return *this; }
+ffmpeg_pipeline			ffmpeg_pipeline::from_memory_only_video(int width, int height, boost::rational<int> framerate) { impl_->from_memory_only_video(width, height, std::move(framerate)); return *this; }
+ffmpeg_pipeline			ffmpeg_pipeline::from_memory(int num_channels, int samplerate, int width, int height, boost::rational<int> framerate) { impl_->from_memory(num_channels, samplerate, width, height, std::move(framerate)); return *this; }
+ffmpeg_pipeline			ffmpeg_pipeline::start_frame(std::uint32_t frame) { impl_->start_frame(frame); return *this; }
+std::uint32_t			ffmpeg_pipeline::start_frame() const { return impl_->start_frame(); }
+ffmpeg_pipeline			ffmpeg_pipeline::length(std::uint32_t frames) { impl_->length(frames); return *this; }
+std::uint32_t			ffmpeg_pipeline::length() const { return impl_->length(); }
+ffmpeg_pipeline			ffmpeg_pipeline::seek(std::uint32_t frame) { impl_->seek(frame); return *this; }
+ffmpeg_pipeline			ffmpeg_pipeline::loop(bool value) { impl_->loop(value); return *this; }
+bool					ffmpeg_pipeline::loop() const { return impl_->loop(); }
+std::string				ffmpeg_pipeline::source_filename() const { return impl_->source_filename(); }
+ffmpeg_pipeline			ffmpeg_pipeline::vfilter(std::string filter) { impl_->vfilter(std::move(filter)); return *this; }
+ffmpeg_pipeline			ffmpeg_pipeline::afilter(std::string filter) { impl_->afilter(std::move(filter)); return *this; }
+int						ffmpeg_pipeline::width() const { return impl_->width(); }
+int						ffmpeg_pipeline::height() const { return impl_->height(); }
+boost::rational<int>	ffmpeg_pipeline::framerate() const { return impl_->framerate(); }
+bool					ffmpeg_pipeline::progressive() const { return impl_->progressive(); }
+ffmpeg_pipeline			ffmpeg_pipeline::to_memory(spl::shared_ptr<core::frame_factory> factory, core::video_format_desc format) { impl_->to_memory(std::move(factory), std::move(format)); return *this; }
+ffmpeg_pipeline			ffmpeg_pipeline::to_file(std::string filename) { impl_->to_file(std::move(filename)); return *this; }
+ffmpeg_pipeline			ffmpeg_pipeline::vcodec(std::string codec) { impl_->vcodec(std::move(codec)); return *this; }
+ffmpeg_pipeline			ffmpeg_pipeline::acodec(std::string codec) { impl_->acodec(std::move(codec)); return *this; }
+ffmpeg_pipeline			ffmpeg_pipeline::format(std::string fmt) { impl_->format(std::move(fmt)); return *this; }
+ffmpeg_pipeline			ffmpeg_pipeline::start() { impl_->start(); return *this; }
+bool					ffmpeg_pipeline::try_push_audio(caspar::array<const std::int32_t> data) { return impl_->try_push_audio(std::move(data)); }
+bool					ffmpeg_pipeline::try_push_video(caspar::array<const std::uint8_t> data) { return impl_->try_push_video(std::move(data)); }
+core::draw_frame		ffmpeg_pipeline::try_pop_frame() { return impl_->try_pop_frame(); }
+std::uint32_t			ffmpeg_pipeline::last_frame() const { return impl_->last_frame(); }
+bool					ffmpeg_pipeline::started() const { return impl_->started(); }
+void					ffmpeg_pipeline::stop() { impl_->stop(); }
+
+}}
--- /dev/null
+/*
+* Copyright (c) 2011 Sveriges Television AB <info@casparcg.com>
+*
+* This file is part of CasparCG (www.casparcg.com).
+*
+* CasparCG is free software: you can redistribute it and/or modify
+* it under the terms of the GNU General Public License as published by
+* the Free Software Foundation, either version 3 of the License, or
+* (at your option) any later version.
+*
+* CasparCG is distributed in the hope that it will be useful,
+* but WITHOUT ANY WARRANTY; without even the implied warranty of
+* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+* GNU General Public License for more details.
+*
+* You should have received a copy of the GNU General Public License
+* along with CasparCG. If not, see <http://www.gnu.org/licenses/>.
+*
+* Author: Helge Norberg, helge.norberg@svt.se
+*/
+
+#pragma once
+
+#include <common/memory.h>
+#include <common/array.h>
+
+#include <core/fwd.h>
+
+#include <boost/rational.hpp>
+
+#include <string>
+#include <functional>
+#include <cstdint>
+
+FORWARD2(caspar, diagnostics, class graph);
+
+namespace caspar { namespace ffmpeg {
+
+struct ffmpeg_pipeline_backend;
+
+/**
+ * Copyable fluent-interface handle to an ffmpeg processing pipeline.
+ *
+ * Configure a source (from_*), optional filters (vfilter/afilter) and a
+ * destination (to_*), then call start() and pump data via try_push_* /
+ * try_pop_frame(). Copies of this handle share the same backend, so each
+ * setter can return *this by value for chaining.
+ */
+class ffmpeg_pipeline
+{
+public:
+	ffmpeg_pipeline();
+
+	ffmpeg_pipeline			graph(spl::shared_ptr<caspar::diagnostics::graph> g);
+
+	// Source selection.
+	ffmpeg_pipeline			from_file(std::string filename);
+	ffmpeg_pipeline			from_memory_only_audio(int num_channels, int samplerate);
+	ffmpeg_pipeline			from_memory_only_video(int width, int height, boost::rational<int> framerate);
+	ffmpeg_pipeline			from_memory(int num_channels, int samplerate, int width, int height, boost::rational<int> framerate);
+
+	// Playback range / looping (only meaningful for seekable sources).
+	ffmpeg_pipeline			start_frame(std::uint32_t frame);
+	std::uint32_t			start_frame() const;
+	ffmpeg_pipeline			length(std::uint32_t frames);
+	std::uint32_t			length() const;
+	ffmpeg_pipeline			seek(std::uint32_t frame);
+	ffmpeg_pipeline			loop(bool value);
+	bool					loop() const;
+	std::string				source_filename() const;
+
+	// Filtering and stream properties.
+	ffmpeg_pipeline			vfilter(std::string filter);
+	ffmpeg_pipeline			afilter(std::string filter);
+	int						width() const;
+	int						height() const;
+	boost::rational<int>	framerate() const;
+	bool					progressive() const;
+
+	// Destination selection and encoding parameters.
+	ffmpeg_pipeline			to_memory(spl::shared_ptr<core::frame_factory> factory, core::video_format_desc format);
+	ffmpeg_pipeline			to_file(std::string filename);
+	ffmpeg_pipeline			vcodec(std::string codec);
+	ffmpeg_pipeline			acodec(std::string codec);
+	ffmpeg_pipeline			format(std::string fmt);
+
+	// Runtime control and data transfer.
+	ffmpeg_pipeline			start();
+	bool					try_push_audio(caspar::array<const std::int32_t> data);
+	bool					try_push_video(caspar::array<const std::uint8_t> data);
+	core::draw_frame		try_pop_frame();
+	std::uint32_t			last_frame() const;
+	bool					started() const;
+	void					stop();
+
+private:
+	// Shared so that copies of this handle all drive the same backend.
+	std::shared_ptr<ffmpeg_pipeline_backend>	impl_;
+};
+
+}}
--- /dev/null
+/*
+* Copyright (c) 2011 Sveriges Television AB <info@casparcg.com>
+*
+* This file is part of CasparCG (www.casparcg.com).
+*
+* CasparCG is free software: you can redistribute it and/or modify
+* it under the terms of the GNU General Public License as published by
+* the Free Software Foundation, either version 3 of the License, or
+* (at your option) any later version.
+*
+* CasparCG is distributed in the hope that it will be useful,
+* but WITHOUT ANY WARRANTY; without even the implied warranty of
+* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+* GNU General Public License for more details.
+*
+* You should have received a copy of the GNU General Public License
+* along with CasparCG. If not, see <http://www.gnu.org/licenses/>.
+*
+* Author: Helge Norberg, helge.norberg@svt.se
+*/
+
+#include "StdAfx.h"
+
+#include <common/diagnostics/graph.h>
+#include <common/array.h>
+
+#include <core/frame/draw_frame.h>
+
+#include <boost/rational.hpp>
+
+namespace caspar { namespace ffmpeg {
+
+// Abstract backend interface behind the ffmpeg_pipeline facade. Mirrors the
+// facade's surface one-to-one; see ffmpeg_pipeline for the usage contract.
+struct ffmpeg_pipeline_backend
+{
+	virtual ~ffmpeg_pipeline_backend() { }
+
+	virtual void					graph(spl::shared_ptr<caspar::diagnostics::graph> g) = 0;
+
+	virtual void					from_file(std::string filename) = 0;
+	virtual void					from_memory_only_audio(int num_channels, int samplerate) = 0;
+	virtual void					from_memory_only_video(int width, int height, boost::rational<int> framerate) = 0;
+	virtual void					from_memory(int num_channels, int samplerate, int width, int height, boost::rational<int> framerate) = 0;
+
+	virtual void					start_frame(std::uint32_t frame) = 0;
+	virtual std::uint32_t			start_frame() const = 0;
+	virtual void					length(std::uint32_t frames) = 0;
+	virtual std::uint32_t			length() const = 0;
+	virtual void					seek(std::uint32_t frame) = 0;
+	virtual void					loop(bool value) = 0;
+	virtual bool					loop() const = 0;
+	virtual std::string				source_filename() const = 0;
+
+	virtual void					vfilter(std::string filter) = 0;
+	virtual void					afilter(std::string filter) = 0;
+	virtual int						width() const = 0;
+	virtual int						height() const = 0;
+	virtual boost::rational<int>	framerate() const = 0;
+	virtual bool					progressive() const = 0;
+	virtual std::uint32_t			last_frame() const = 0;
+
+	virtual void					to_memory(spl::shared_ptr<core::frame_factory> factory, core::video_format_desc format) = 0;
+	virtual void					to_file(std::string filename) = 0;
+	virtual void					vcodec(std::string codec) = 0;
+	virtual void					acodec(std::string codec) = 0;
+	virtual void					format(std::string fmt) = 0;
+
+	virtual void					start() = 0;
+	virtual bool					try_push_audio(caspar::array<const std::int32_t> data) = 0;
+	virtual bool					try_push_video(caspar::array<const std::uint8_t> data) = 0;
+	virtual core::draw_frame		try_pop_frame() = 0;
+	virtual bool					started() const = 0;
+	virtual void					stop() = 0;
+};
+
+}}
--- /dev/null
+/*
+* Copyright (c) 2011 Sveriges Television AB <info@casparcg.com>
+*
+* This file is part of CasparCG (www.casparcg.com).
+*
+* CasparCG is free software: you can redistribute it and/or modify
+* it under the terms of the GNU General Public License as published by
+* the Free Software Foundation, either version 3 of the License, or
+* (at your option) any later version.
+*
+* CasparCG is distributed in the hope that it will be useful,
+* but WITHOUT ANY WARRANTY; without even the implied warranty of
+* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+* GNU General Public License for more details.
+*
+* You should have received a copy of the GNU General Public License
+* along with CasparCG. If not, see <http://www.gnu.org/licenses/>.
+*
+* Author: Helge Norberg, helge.norberg@svt.se
+*/
+
+#include "StdAfx.h"
+
+#include "ffmpeg_pipeline_backend.h"
+#include "ffmpeg_pipeline_backend_internal.h"
+#include "producer/input/input.h"
+#include "producer/video/video_decoder.h"
+#include "producer/audio/audio_decoder.h"
+#include "producer/filter/audio_filter.h"
+#include "producer/filter/filter.h"
+#include "producer/util/util.h"
+#include "ffmpeg_error.h"
+#include "ffmpeg.h"
+
+#include <common/diagnostics/graph.h>
+#include <common/os/general_protection_fault.h>
+#include <common/enum_class.h>
+
+#include <core/frame/audio_channel_layout.h>
+#include <core/frame/frame.h>
+#include <core/frame/frame_factory.h>
+#include <core/video_format.h>
+
+#include <functional>
+#include <limits>
+#include <queue>
+#include <map>
+
+#include <tbb/atomic.h>
+#include <tbb/concurrent_queue.h>
+
+#include <boost/thread.hpp>
+#include <boost/optional.hpp>
+
+namespace caspar { namespace ffmpeg {
+
+// Renders a rational framerate as "num/den (decimal) fps" for log messages.
+std::string to_string(const boost::rational<int>& framerate)
+{
+	// boost::rational_cast keeps this consistent with the other conversions in
+	// this changeset instead of hand-rolled numerator/denominator division.
+	return boost::lexical_cast<std::string>(framerate.numerator())
+		+ "/" + boost::lexical_cast<std::string>(framerate.denominator())
+		+ " (" + boost::lexical_cast<std::string>(boost::rational_cast<double>(framerate)) + ") fps";
+}
+
+// Returns the audio cadence (samples per frame, possibly alternating) for the
+// given framerate. Exact matches come from a lazily-built table of all known
+// video formats; otherwise the cadence of the closest known framerate is used
+// and a warning (or debug message in quiet/thumbnail mode) is logged.
+// NOTE(review): relies on boost::adaptors::map_keys — presumably
+// <boost/range/adaptor/map.hpp> is pulled in transitively; verify the include.
+std::vector<int> find_audio_cadence(const boost::rational<int>& framerate)
+{
+	// Framerate -> cadence table, built once from every enum video format.
+	static std::map<boost::rational<int>, std::vector<int>> CADENCES_BY_FRAMERATE = []
+	{
+		std::map<boost::rational<int>, std::vector<int>> result;
+
+		for (core::video_format format : enum_constants<core::video_format>())
+		{
+			core::video_format_desc desc(format);
+			boost::rational<int> format_rate(desc.time_scale, desc.duration);
+
+			result.insert(std::make_pair(format_rate, desc.audio_cadence));
+		}
+
+		return result;
+	}();
+
+	auto exact_match = CADENCES_BY_FRAMERATE.find(framerate);
+
+	if (exact_match != CADENCES_BY_FRAMERATE.end())
+		return exact_match->second;
+
+	// No exact match: linear scan for the framerate with the smallest
+	// absolute difference.
+	boost::rational<int> closest_framerate_diff	= std::numeric_limits<int>::max();
+	boost::rational<int> closest_framerate		= 0;
+
+	for (auto format_framerate : CADENCES_BY_FRAMERATE | boost::adaptors::map_keys)
+	{
+		auto diff = boost::abs(framerate - format_framerate);
+
+		if (diff < closest_framerate_diff)
+		{
+			closest_framerate_diff = diff;
+			closest_framerate = format_framerate;
+		}
+	}
+
+	if (is_logging_quiet_for_thread())
+		CASPAR_LOG(debug) << "No exact audio cadence match found for framerate " << to_string(framerate)
+			<< "\nClosest match is " << to_string(closest_framerate)
+			<< "\nwhich is a " << to_string(closest_framerate_diff) << " difference.";
+	else
+		CASPAR_LOG(warning) << "No exact audio cadence match found for framerate " << to_string(framerate)
+			<< "\nClosest match is " << to_string(closest_framerate)
+			<< "\nwhich is a " << to_string(closest_framerate_diff) << " difference.";
+
+	return CADENCES_BY_FRAMERATE[closest_framerate];
+}
+
+// Interface for pipeline input sources. Every operation has a throwing default
+// implementation so concrete sources only override what they actually support
+// (e.g. only file-backed sources are seekable).
+struct source
+{
+	virtual ~source() { }
+
+	virtual std::wstring			print() const											= 0;
+	virtual void					start()													{ CASPAR_THROW_EXCEPTION(not_implemented() << msg_info(print())); }
+	virtual void					graph(spl::shared_ptr<caspar::diagnostics::graph> g)	{ }
+	virtual void					stop()													{ }
+	virtual void					start_frame(std::uint32_t frame)						{ CASPAR_THROW_EXCEPTION(invalid_operation() << msg_info(print() + L" not seekable.")); }
+	virtual std::uint32_t			start_frame() const										{ CASPAR_THROW_EXCEPTION(invalid_operation() << msg_info(print() + L" not seekable.")); }
+	virtual void					loop(bool value)										{ CASPAR_THROW_EXCEPTION(invalid_operation() << msg_info(print() + L" not seekable.")); }
+	virtual bool					loop() const											{ CASPAR_THROW_EXCEPTION(invalid_operation() << msg_info(print() + L" not seekable.")); }
+	virtual void					length(std::uint32_t frames)							{ CASPAR_THROW_EXCEPTION(invalid_operation() << msg_info(print() + L" not seekable.")); }
+	virtual std::uint32_t			length() const											{ CASPAR_THROW_EXCEPTION(invalid_operation() << msg_info(print() + L" not seekable.")); }
+	virtual std::string				filename() const										{ CASPAR_THROW_EXCEPTION(invalid_operation() << msg_info(print())); }
+	virtual void					seek(std::uint32_t frame)								{ CASPAR_THROW_EXCEPTION(invalid_operation() << msg_info(print() + L" not seekable.")); }
+	virtual bool					has_audio() const										{ CASPAR_THROW_EXCEPTION(not_implemented() << msg_info(print())); }
+	virtual int						samplerate() const										{ CASPAR_THROW_EXCEPTION(not_implemented() << msg_info(print())); }
+	virtual bool					has_video() const										{ CASPAR_THROW_EXCEPTION(not_implemented() << msg_info(print())); }
+	virtual bool					eof() const												{ CASPAR_THROW_EXCEPTION(not_implemented() << msg_info(print())); }
+	virtual boost::rational<int>	framerate() const										{ CASPAR_THROW_EXCEPTION(not_implemented() << msg_info(print())); }
+	virtual std::uint32_t			frame_number() const									{ CASPAR_THROW_EXCEPTION(not_implemented() << msg_info(print())); }
+	virtual std::shared_ptr<AVFrame>	get_input_frame(AVMediaType type)					{ CASPAR_THROW_EXCEPTION(not_implemented() << msg_info(print())); }
+};
+
+// Null-object source used before any from_*() call; every operation other
+// than print() throws via the base-class defaults.
+struct no_source_selected : public source
+{
+	std::wstring print() const override
+	{
+		return L"[no_source_selected]";
+	}
+};
+
+// Seekable source backed by a media file, built on the ffmpeg input /
+// audio_decoder / video_decoder helpers. The input and decoder pointers are
+// created in start(), so every accessor that may run concurrently goes
+// through pointer_mutex_-guarded getters.
+class file_source : public source
+{
+	std::wstring							filename_;
+	spl::shared_ptr<diagnostics::graph>		graph_;
+	std::uint32_t							start_frame_	= 0;
+	std::uint32_t							length_			= std::numeric_limits<std::uint32_t>::max();
+	bool									loop_			= false;
+	mutable boost::mutex					pointer_mutex_;
+	std::shared_ptr<input>					input_;
+	std::shared_ptr<audio_decoder>			audio_decoder_;
+	std::shared_ptr<video_decoder>			video_decoder_;
+	bool									started_		= false;
+public:
+	file_source(std::string filename)
+		: filename_(u16(filename))
+	{
+	}
+
+	std::wstring print() const override
+	{
+		return L"[file_source " + filename_ + L"]";
+	}
+
+	void graph(spl::shared_ptr<caspar::diagnostics::graph> g) override
+	{
+		graph_ = std::move(g);
+	}
+
+	// Opens the input and tries to create both decoders. A missing or broken
+	// stream is not fatal: the source simply runs without that stream.
+	void start() override
+	{
+		boost::lock_guard<boost::mutex> lock(pointer_mutex_);
+		bool thumbnail_mode = is_logging_quiet_for_thread();
+		input_.reset(new input(graph_, filename_, loop_, start_frame_, length_, thumbnail_mode));
+
+		try
+		{
+			audio_decoder_.reset(new audio_decoder(*input_, core::video_format_desc(), L""));
+		}
+		catch (averror_stream_not_found&)
+		{
+			CASPAR_LOG(debug) << print() << " No audio-stream found. Running without audio.";
+		}
+		catch (...)
+		{
+			if (is_logging_quiet_for_thread())
+			{
+				CASPAR_LOG_CURRENT_EXCEPTION_AT_LEVEL(debug);
+				CASPAR_LOG(info) << print() << " Failed to open audio-stream. Running without audio. Turn on log level debug to see more information.";
+			}
+			else
+			{
+				CASPAR_LOG_CURRENT_EXCEPTION();
+				CASPAR_LOG(warning) << print() << " Failed to open audio-stream. Running without audio.";
+			}
+		}
+
+		try
+		{
+			video_decoder_.reset(new video_decoder(*input_, false));
+		}
+		catch (averror_stream_not_found&)
+		{
+			CASPAR_LOG(debug) << print() << " No video-stream found. Running without video.";
+		}
+		catch (...)
+		{
+			if (is_logging_quiet_for_thread())
+			{
+				CASPAR_LOG_CURRENT_EXCEPTION_AT_LEVEL(debug);
+				// Fixed copy-paste error: these messages concern the video stream.
+				CASPAR_LOG(info) << print() << " Failed to open video-stream. Running without video. Turn on log level debug to see more information.";
+			}
+			else
+			{
+				CASPAR_LOG_CURRENT_EXCEPTION();
+				CASPAR_LOG(warning) << print() << " Failed to open video-stream. Running without video.";
+			}
+		}
+
+		started_ = true;
+	}
+
+	void stop() override
+	{
+		started_ = false;
+	}
+
+	void start_frame(std::uint32_t frame) override
+	{
+		start_frame_ = frame;
+
+		// Forward to the input if already started; otherwise the value is
+		// picked up when start() constructs the input.
+		auto i = get_input();
+		if (i)
+			i->start(frame);
+	}
+
+	std::uint32_t start_frame() const override
+	{
+		return start_frame_;
+	}
+
+	void loop(bool value) override
+	{
+		loop_ = value;
+
+		auto i = get_input();
+		if (i)
+			i->loop(value);
+	}
+
+	bool loop() const override
+	{
+		return loop_;
+	}
+
+	void length(std::uint32_t frames) override
+	{
+		length_ = frames;
+
+		auto i = get_input();
+		if (i)
+			i->length(frames);
+	}
+
+	// Prefers the decoder-reported frame count (video first, then audio) and
+	// falls back to the explicitly configured length.
+	std::uint32_t length() const override
+	{
+		auto v = get_video_decoder();
+
+		if (v)
+			return v->nb_frames();
+
+		auto a = get_audio_decoder();
+
+		if (a)
+			return a->nb_frames();
+
+		return length_;
+	}
+
+	std::string filename() const override
+	{
+		return u8(filename_);
+	}
+
+	void seek(std::uint32_t frame) override
+	{
+		expect_started();
+		get_input()->seek(frame);
+	}
+
+	bool eof() const override
+	{
+		auto i = get_input();
+		return !i || i->eof();
+	}
+
+	bool has_audio() const override
+	{
+		return static_cast<bool>(get_audio_decoder());
+	}
+
+	int samplerate() const override
+	{
+		auto decoder = get_audio_decoder();
+
+		if (!decoder)
+			return -1;
+
+		// NOTE(review): hard-coded — presumably the audio decoder always
+		// resamples to 48 kHz; confirm against audio_decoder.
+		return 48000;
+	}
+
+	bool has_video() const override
+	{
+		return static_cast<bool>(get_video_decoder());
+	}
+
+	boost::rational<int> framerate() const override
+	{
+		auto decoder = get_video_decoder();
+
+		if (!decoder)
+			return -1; // Sentinel: no video stream.
+
+		return decoder->framerate();
+	}
+
+	std::uint32_t frame_number() const override
+	{
+		auto decoder = get_video_decoder();
+
+		if (!decoder)
+			return 0;
+
+		return decoder->file_frame_number();
+	}
+
+	// Pulls the next decoded frame of the requested type, retrying a bounded
+	// number of times to skip over packets that yield no data.
+	std::shared_ptr<AVFrame> get_input_frame(AVMediaType type) override
+	{
+		auto a_decoder = get_audio_decoder();
+		auto v_decoder = get_video_decoder();
+		expect_started();
+
+		if (type == AVMediaType::AVMEDIA_TYPE_AUDIO && a_decoder)
+		{
+			std::shared_ptr<AVFrame> frame;
+
+			for (int i = 0; i < 64; ++i)
+			{
+				frame = (*a_decoder)();
+
+				if (frame && frame->data[0])
+					return spl::make_shared_ptr(frame);
+			}
+		}
+		else if (type == AVMediaType::AVMEDIA_TYPE_VIDEO && v_decoder)
+		{
+			std::shared_ptr<AVFrame> frame;
+
+			for (int i = 0; i < 128; ++i)
+			{
+				frame = (*v_decoder)();
+
+				if (frame && frame->data[0])
+					return spl::make_shared_ptr(frame);
+			}
+		}
+		else
+			CASPAR_THROW_EXCEPTION(caspar_exception() << msg_info(
+				print() + L" Unhandled media type " + boost::lexical_cast<std::wstring>(type)));
+
+		return nullptr; // Retries exhausted without a decodable frame.
+	}
+private:
+	void expect_started() const
+	{
+		if (!started_)
+			CASPAR_THROW_EXCEPTION(invalid_operation() << msg_info(print() + L" Not started."));
+	}
+
+	// Thread-safe snapshots of the pointers created in start().
+	std::shared_ptr<input> get_input() const
+	{
+		boost::lock_guard<boost::mutex> lock(pointer_mutex_);
+		return input_;
+	}
+
+	std::shared_ptr<audio_decoder> get_audio_decoder() const
+	{
+		boost::lock_guard<boost::mutex> lock(pointer_mutex_);
+		return audio_decoder_;
+	}
+
+	std::shared_ptr<video_decoder> get_video_decoder() const
+	{
+		boost::lock_guard<boost::mutex> lock(pointer_mutex_);
+		return video_decoder_;
+	}
+};
+
+// Source fed from memory via bounded queues: callers push raw BGRA video
+// and/or interleaved S32 audio, and the pipeline pulls AVFrames. -1 values
+// mean "stream not enabled".
+class memory_source : public source
+{
+	int											samplerate_		= -1;
+	int											num_channels_	= -1;
+	int											width_			= -1;
+	int											height_			= -1;
+	boost::rational<int>						framerate_		= -1;
+
+	tbb::atomic<bool>							running_;
+	tbb::concurrent_bounded_queue<caspar::array<const int32_t>>	audio_frames_;
+	tbb::concurrent_bounded_queue<caspar::array<const uint8_t>>	video_frames_;
+	int64_t										audio_pts_		= 0;
+	int64_t										video_pts_		= 0;
+public:
+	memory_source()
+	{
+		running_ = false;
+		video_frames_.set_capacity(1);
+		audio_frames_.set_capacity(1);
+	}
+
+	~memory_source()
+	{
+		stop();
+	}
+
+	void graph(spl::shared_ptr<caspar::diagnostics::graph> g) override
+	{
+	}
+
+	std::wstring print() const override
+	{
+		return L"[memory_source]";
+	}
+
+	void enable_audio(int samplerate, int num_channels)
+	{
+		samplerate_ = samplerate;
+		num_channels_ = num_channels;
+	}
+
+	void enable_video(int width, int height, boost::rational<int> framerate)
+	{
+		width_ = width;
+		height_ = height;
+		// Fixed: the framerate parameter was previously ignored, leaving
+		// framerate() at the -1 sentinel even with video enabled.
+		framerate_ = std::move(framerate);
+	}
+
+	void start() override
+	{
+		running_ = true;
+	}
+
+	// Pushes empty sentinels so any pop() blocked in get_input_frame() wakes
+	// up and observes end-of-stream.
+	void stop() override
+	{
+		running_ = false;
+		video_frames_.try_push(caspar::array<const uint8_t>());
+		audio_frames_.try_push(caspar::array<const int32_t>());
+	}
+
+	bool has_audio() const override
+	{
+		return samplerate_ != -1;
+	}
+
+	int samplerate() const override
+	{
+		return samplerate_;
+	}
+
+	bool has_video() const override
+	{
+		return width_ != -1;
+	}
+
+	bool eof() const override
+	{
+		return !running_;
+	}
+
+	boost::rational<int> framerate() const override
+	{
+		return framerate_;
+	}
+
+	// Returns false if the bounded queue is full (caller should retry).
+	bool try_push_audio(caspar::array<const std::int32_t> data)
+	{
+		if (!has_audio())
+			CASPAR_THROW_EXCEPTION(invalid_operation() << msg_info(print() + L" audio not enabled."));
+
+		if (data.empty() || data.size() % num_channels_ != 0)
+			CASPAR_THROW_EXCEPTION(invalid_argument() << msg_info(print() + L" audio with incorrect number of channels submitted."));
+
+		return audio_frames_.try_push(std::move(data));
+	}
+
+	// Returns false if the bounded queue is full (caller should retry).
+	bool try_push_video(caspar::array<const std::uint8_t> data)
+	{
+		if (!has_video())
+			CASPAR_THROW_EXCEPTION(invalid_operation() << msg_info(print() + L" video not enabled."));
+
+		if (data.size() != width_ * height_ * 4) // BGRA: 4 bytes per pixel.
+			CASPAR_THROW_EXCEPTION(invalid_argument() << msg_info(print() + L" video with incorrect size submitted."));
+
+		return video_frames_.try_push(std::move(data));
+	}
+
+	// Blocks until data (or the stop() sentinel) is available, then wraps it
+	// in an AVFrame whose deleter keeps the source buffer alive via capture.
+	std::shared_ptr<AVFrame> get_input_frame(AVMediaType type) override
+	{
+		if (!running_)
+			CASPAR_THROW_EXCEPTION(invalid_operation() << msg_info(print() + L" not running."));
+
+		if (type == AVMediaType::AVMEDIA_TYPE_AUDIO && has_audio())
+		{
+			caspar::array<const std::int32_t> samples;
+			audio_frames_.pop(samples);
+
+			if (samples.empty()) // stop() sentinel => end of stream.
+				return nullptr;
+
+			spl::shared_ptr<AVFrame> av_frame(av_frame_alloc(), [samples](AVFrame* p) { av_frame_free(&p); });
+
+			av_frame->channels			= num_channels_;
+			av_frame->channel_layout	= av_get_default_channel_layout(num_channels_);
+			av_frame->sample_rate		= samplerate_;
+			av_frame->nb_samples		= static_cast<int>(samples.size()) / num_channels_;
+			av_frame->format			= AV_SAMPLE_FMT_S32;
+			av_frame->pts				= audio_pts_;
+
+			audio_pts_ += av_frame->nb_samples;
+
+			FF(av_samples_fill_arrays(
+				av_frame->extended_data,
+				av_frame->linesize,
+				reinterpret_cast<const std::uint8_t*>(&*samples.begin()),
+				av_frame->channels,
+				av_frame->nb_samples,
+				static_cast<AVSampleFormat>(av_frame->format),
+				16));
+
+			return av_frame;
+		}
+		else if (type == AVMediaType::AVMEDIA_TYPE_VIDEO && has_video())
+		{
+			caspar::array<const std::uint8_t> data;
+			video_frames_.pop(data);
+
+			if (data.empty()) // stop() sentinel => end of stream.
+				return nullptr;
+
+			spl::shared_ptr<AVFrame> av_frame(av_frame_alloc(), [data](AVFrame* p) { av_frame_free(&p); });
+			avcodec_get_frame_defaults(av_frame.get());
+
+			const auto sample_aspect_ratio = boost::rational<int>(width_, height_);
+
+			av_frame->format					= AV_PIX_FMT_BGRA;
+			av_frame->width						= width_;
+			av_frame->height					= height_;
+			av_frame->sample_aspect_ratio.num	= sample_aspect_ratio.numerator();
+			av_frame->sample_aspect_ratio.den	= sample_aspect_ratio.denominator();
+			av_frame->pts						= video_pts_;
+
+			video_pts_ += 1;
+
+			FF(av_image_fill_arrays(
+				av_frame->data,
+				av_frame->linesize,
+				data.begin(),
+				static_cast<AVPixelFormat>(av_frame->format),
+				width_,
+				height_,
+				1));
+
+			return av_frame;
+		}
+		else
+			CASPAR_THROW_EXCEPTION(caspar_exception() << msg_info(
+				print() + L" Unhandled media type " + boost::lexical_cast<std::wstring>(type)));
+	}
+};
+
+// Abstract consumer side of the ffmpeg pipeline (file encoder or in-memory
+// frame producer). All configuration methods have throwing defaults so that
+// a concrete sink only overrides what it actually supports; callers invoking
+// an unsupported operation get an explicit error instead of silence.
+struct sink
+{
+    virtual ~sink() { }
+
+    // Human-readable identifier used in log/error messages.
+    virtual std::wstring print() const = 0;
+    // Attach a diagnostics graph; optional, default is a no-op.
+    virtual void graph(spl::shared_ptr<caspar::diagnostics::graph> g) { }
+    // Encoder-only configuration; non-encoders reject these.
+    virtual void acodec(std::string codec) { CASPAR_THROW_EXCEPTION(invalid_operation() << msg_info(print() + L" not an encoder.")); }
+    virtual void vcodec(std::string codec) { CASPAR_THROW_EXCEPTION(invalid_operation() << msg_info(print() + L" not an encoder.")); }
+    virtual void format(std::string fmt) { CASPAR_THROW_EXCEPTION(invalid_operation() << msg_info(print() + L" not an encoder.")); }
+    virtual void framerate(boost::rational<int> framerate) { CASPAR_THROW_EXCEPTION(invalid_operation() << msg_info(print() + L" not an encoder.")); }
+    // Lifecycle: start() tells the sink which streams to expect; stop() is a no-op by default.
+    virtual void start(bool has_audio, bool has_video) { CASPAR_THROW_EXCEPTION(not_implemented() << msg_info(print())); }
+    virtual void stop() { }
+    // Format negotiation: what the sink can accept from the filters.
+    virtual std::vector<AVSampleFormat> supported_sample_formats() const { CASPAR_THROW_EXCEPTION(not_implemented() << msg_info(print())); }
+    virtual std::vector<int> supported_samplerates() const { CASPAR_THROW_EXCEPTION(not_implemented() << msg_info(print())); }
+    virtual std::vector<AVPixelFormat> supported_pixel_formats() const { CASPAR_THROW_EXCEPTION(not_implemented() << msg_info(print())); }
+    // Offer a frame; the return value is the media type the sink wants next,
+    // or boost::none if the frame could not be consumed.
+    virtual boost::optional<AVMediaType> try_push(AVMediaType type, spl::shared_ptr<AVFrame> frame) { CASPAR_THROW_EXCEPTION(not_implemented() << msg_info(print())); }
+    // Flush any buffered data at end of stream.
+    virtual void eof() { CASPAR_THROW_EXCEPTION(not_implemented() << msg_info(print())); }
+};
+
+// Null-object sink used before to_file()/to_memory() is called; every
+// operation inherits the throwing defaults from sink, so misuse fails loudly.
+struct no_sink_selected : public sink
+{
+    std::wstring print() const override
+    {
+        return L"[no_sink_selected]";
+    }
+};
+
+// Sink that is meant to encode to a file. Currently only stores the filename
+// and diagnostics graph; all encoding operations (start, try_push, eof, ...)
+// still fall through to sink's throwing defaults, so this is a placeholder
+// implementation — TODO confirm the encoder wiring lands elsewhere.
+class file_sink : public sink
+{
+    std::wstring                        filename_;
+    spl::shared_ptr<diagnostics::graph> graph_;
+public:
+    // Filename arrives as UTF-8 and is widened for logging/printing.
+    file_sink(std::string filename)
+        : filename_(u16(std::move(filename)))
+    {
+    }
+
+    std::wstring print() const override
+    {
+        return L"[file_sink " + filename_ + L"]";
+    }
+
+    void graph(spl::shared_ptr<caspar::diagnostics::graph> g) override
+    {
+        graph_ = std::move(g);
+    }
+};
+
+// Sink that assembles decoded/filtered AVFrames into core::draw_frames and
+// hands them to the playout engine via a bounded queue (capacity 2). The
+// pipeline thread blocks in output_frames_.push() when the consumer is slow;
+// stop() unblocks it by popping up to two frames.
+class memory_sink : public sink
+{
+    spl::shared_ptr<core::frame_factory> factory_;
+
+    bool has_audio_ = false;
+    bool has_video_ = false;
+    // Per-output-frame sample counts (e.g. 1602,1601,... for 1001 modes).
+    std::vector<int> audio_cadence_;
+    core::audio_channel_layout channel_layout_ = core::audio_channel_layout::invalid();
+    // Interleaved int32 samples buffered until a full cadence step is available.
+    core::mutable_audio_buffer audio_samples_;
+
+    std::queue<std::shared_ptr<AVFrame>> video_frames_;
+
+    tbb::concurrent_bounded_queue<core::draw_frame> output_frames_;
+    tbb::atomic<bool> running_;
+public:
+    memory_sink(spl::shared_ptr<core::frame_factory> factory, core::video_format_desc format)
+        : factory_(std::move(factory))
+        , audio_cadence_(format.audio_cadence)
+    {
+        output_frames_.set_capacity(2);
+        running_ = false;
+        // Note: Uses 1 step rotated cadence for 1001 modes (1602, 1602, 1601, 1602, 1601)
+        // This cadence fills the audio mixer most optimally.
+        boost::range::rotate(audio_cadence_, std::end(audio_cadence_) - 1);
+    }
+
+    ~memory_sink()
+    {
+        stop();
+    }
+
+    std::wstring print() const override
+    {
+        return L"[memory_sink]";
+    }
+
+    // Diagnostics graph not used by this sink.
+    void graph(spl::shared_ptr<caspar::diagnostics::graph> g) override
+    {
+    }
+
+    // Called when the source framerate becomes known/changes; recomputes the
+    // audio cadence for that rate (same 1-step rotation as the constructor).
+    void framerate(boost::rational<int> framerate) override
+    {
+        audio_cadence_ = find_audio_cadence(framerate);
+        // Note: Uses 1 step rotated cadence for 1001 modes (1602, 1602, 1601, 1602, 1601)
+        // This cadence fills the audio mixer most optimally.
+        boost::range::rotate(audio_cadence_, std::end(audio_cadence_) - 1);
+    }
+
+    void start(bool has_audio, bool has_video) override
+    {
+        has_audio_ = has_audio;
+        has_video_ = has_video;
+        running_   = true;
+    }
+
+    void stop() override
+    {
+        running_ = false;
+        // Drain up to the queue's capacity (2) so a pipeline thread blocked
+        // in try_push()'s output_frames_.push() can make progress and exit.
+        try_pop_frame();
+        try_pop_frame();
+    }
+
+    std::vector<AVSampleFormat> supported_sample_formats() const override
+    {
+        return { AVSampleFormat::AV_SAMPLE_FMT_S32 };
+    }
+
+    std::vector<int> supported_samplerates() const override {
+        return { 48000 };
+    }
+
+    std::vector<AVPixelFormat> supported_pixel_formats() const override
+    {
+        return {
+            AVPixelFormat::AV_PIX_FMT_YUVA420P,
+            AVPixelFormat::AV_PIX_FMT_YUV444P,
+            AVPixelFormat::AV_PIX_FMT_YUV422P,
+            AVPixelFormat::AV_PIX_FMT_YUV420P,
+            AVPixelFormat::AV_PIX_FMT_YUV411P,
+            AVPixelFormat::AV_PIX_FMT_BGRA,
+            AVPixelFormat::AV_PIX_FMT_ARGB,
+            AVPixelFormat::AV_PIX_FMT_RGBA,
+            AVPixelFormat::AV_PIX_FMT_ABGR,
+            AVPixelFormat::AV_PIX_FMT_GRAY8
+        };
+    }
+
+    // Buffers the incoming frame, then emits as many complete output frames
+    // as the buffered audio/video allows. Returns the media type needed next
+    // (the starved stream), which drives the pipeline's demand loop.
+    boost::optional<AVMediaType> try_push(AVMediaType type, spl::shared_ptr<AVFrame> av_frame) override
+    {
+        if (!has_audio_ && !has_video_)
+            CASPAR_THROW_EXCEPTION(invalid_operation());
+
+        if (type == AVMediaType::AVMEDIA_TYPE_AUDIO && av_frame->data[0])
+        {
+            if (channel_layout_ == core::audio_channel_layout::invalid()) // First audio
+            {
+                channel_layout_ = get_audio_channel_layout(av_frame->channels, av_frame->channel_layout, L"");
+
+                // Insert silence samples so that the audio mixer is guaranteed to be filled.
+                auto min_num_samples_per_frame = *boost::min_element(audio_cadence_);
+                auto max_num_samples_per_frame = *boost::max_element(audio_cadence_);
+                auto cadence_safety_samples    = max_num_samples_per_frame - min_num_samples_per_frame;
+                audio_samples_.resize(channel_layout_.num_channels * cadence_safety_samples, 0);
+            }
+
+            auto ptr = reinterpret_cast<int32_t*>(av_frame->data[0]);
+
+            // NOTE(review): sample count is derived from linesize[0]; assumes
+            // the buffer carries no alignment padding beyond the samples —
+            // confirm against how the audio frames are produced upstream.
+            audio_samples_.insert(audio_samples_.end(), ptr, ptr + av_frame->linesize[0] / sizeof(int32_t));
+        }
+        else if (type == AVMediaType::AVMEDIA_TYPE_VIDEO)
+        {
+            video_frames_.push(std::move(av_frame));
+        }
+
+        while (true)
+        {
+            // "Enough" means a stream is either disabled or has a full
+            // cadence step (audio) / at least one frame (video) buffered.
+            bool enough_audio =
+                !has_audio_ ||
+                (channel_layout_ != core::audio_channel_layout::invalid() && audio_samples_.size() >= audio_cadence_.front() * channel_layout_.num_channels);
+            bool enough_video =
+                !has_video_ ||
+                !video_frames_.empty();
+
+            if (!enough_audio)
+                return AVMediaType::AVMEDIA_TYPE_AUDIO;
+
+            if (!enough_video)
+                return AVMediaType::AVMEDIA_TYPE_VIDEO;
+
+            core::mutable_audio_buffer audio_data;
+
+            if (has_audio_)
+            {
+                // Consume exactly one cadence step and advance the cadence.
+                auto begin = audio_samples_.begin();
+                auto end   = begin + audio_cadence_.front() * channel_layout_.num_channels;
+
+                audio_data.insert(audio_data.begin(), begin, end);
+                audio_samples_.erase(begin, end);
+                boost::range::rotate(audio_cadence_, std::begin(audio_cadence_) + 1);
+            }
+
+            if (!has_video_) // Audio only
+            {
+                core::mutable_frame audio_only_frame(
+                    { },
+                    std::move(audio_data),
+                    this,
+                    core::pixel_format_desc(core::pixel_format::invalid),
+                    channel_layout_);
+
+                // May block until the consumer pops (capacity 2); stop() unblocks.
+                output_frames_.push(core::draw_frame(std::move(audio_only_frame)));
+
+                return AVMediaType::AVMEDIA_TYPE_AUDIO;
+            }
+
+            auto output_frame = make_frame(this, spl::make_shared_ptr(video_frames_.front()), *factory_, channel_layout_);
+            video_frames_.pop();
+            output_frame.audio_data() = std::move(audio_data);
+
+            output_frames_.push(core::draw_frame(std::move(output_frame)));
+        }
+    }
+
+    void eof() override
+    {
+        // Drain rest, regardless of it being enough or not.
+        while (!video_frames_.empty() || !audio_samples_.empty())
+        {
+            core::mutable_audio_buffer audio_data;
+
+            // Attach all remaining audio to the next emitted frame.
+            audio_data.swap(audio_samples_);
+
+            if (!video_frames_.empty())
+            {
+                auto output_frame = make_frame(this, spl::make_shared_ptr(video_frames_.front()), *factory_, channel_layout_);
+                video_frames_.pop();
+                output_frame.audio_data() = std::move(audio_data);
+
+                output_frames_.push(core::draw_frame(std::move(output_frame)));
+            }
+            else
+            {
+                core::mutable_frame audio_only_frame(
+                    {},
+                    std::move(audio_data),
+                    this,
+                    core::pixel_format_desc(core::pixel_format::invalid),
+                    channel_layout_);
+
+                output_frames_.push(core::draw_frame(std::move(audio_only_frame)));
+                // Trailing empty frame marks end-of-stream to the consumer.
+                output_frames_.push(core::draw_frame::empty());
+            }
+        }
+    }
+
+    // Consumer side: late() while the pipeline is still producing but nothing
+    // is ready yet; empty() once the pipeline has stopped and the queue is dry.
+    core::draw_frame try_pop_frame()
+    {
+        core::draw_frame frame = core::draw_frame::late();
+
+        if (!output_frames_.try_pop(frame) && !running_)
+            return core::draw_frame::empty();
+
+        return frame;
+    }
+};
+
+// Concrete pipeline backend: wires one source (file or memory) through
+// optional audio/video filters into one sink (file or memory) and pumps
+// frames between them on a dedicated thread. The sink's try_push() return
+// value tells the loop which media type to request from the source next.
+class ffmpeg_pipeline_backend_internal : public ffmpeg_pipeline_backend
+{
+    spl::shared_ptr<diagnostics::graph> graph_;
+
+    // Null-object defaults so accessors are always safe to call.
+    spl::unique_ptr<source> source_ = spl::make_unique<no_source_selected>();
+    // Bound to the concrete memory_source (if any); empty for file sources.
+    std::function<bool (caspar::array<const std::int32_t> data)> try_push_audio_;
+    std::function<bool (caspar::array<const std::uint8_t> data)> try_push_video_;
+
+    // Last-seen input properties; filters are (re)built when these change.
+    int source_num_channels_ = 0;
+    AVSampleFormat source_sampleformat_ = AVSampleFormat::AV_SAMPLE_FMT_NONE;
+    int source_width_ = 0;
+    int source_height_ = 0;
+    AVPixelFormat source_pixelformat_ = AVPixelFormat::AV_PIX_FMT_NONE;
+    core::field_mode source_fieldmode_ = core::field_mode::progressive;
+
+    std::string afilter_;
+    std::unique_ptr<audio_filter> audio_filter_;
+    std::string vfilter_;
+    std::unique_ptr<filter> video_filter_;
+
+    spl::unique_ptr<sink> sink_ = spl::make_unique<no_sink_selected>();
+    // Bound to the concrete memory_sink (if any); empty for file sinks.
+    std::function<core::draw_frame ()> try_pop_frame_;
+
+    tbb::atomic<bool> started_;
+    boost::thread thread_;
+public:
+    ffmpeg_pipeline_backend_internal()
+    {
+        started_ = false;
+        diagnostics::register_graph(graph_);
+    }
+
+    ~ffmpeg_pipeline_backend_internal()
+    {
+        stop();
+    }
+
+    void graph(spl::shared_ptr<caspar::diagnostics::graph> g) override
+    {
+        graph_ = std::move(g);
+        source_->graph(graph_);
+        sink_->graph(graph_);
+    }
+
+    // Source setup
+
+    // Each from_*() replaces the current source and rebinds/clears the
+    // push callbacks to match what the new source supports.
+    void from_file(std::string filename) override
+    {
+        source_ = spl::make_unique<file_source>(std::move(filename));
+        try_push_audio_ = std::function<bool (caspar::array<const std::int32_t>)>();
+        try_push_video_ = std::function<bool (caspar::array<const std::uint8_t>)>();
+        source_->graph(graph_);
+    }
+
+    void from_memory_only_audio(int num_channels, int samplerate) override
+    {
+        auto source = spl::make_unique<memory_source>();
+        auto source_ptr = source.get();
+        // Raw pointer capture is safe: the lambda's lifetime is tied to this
+        // backend, which owns the source it points into.
+        try_push_audio_ = [this, source_ptr](caspar::array<const std::int32_t> data) { return source_ptr->try_push_audio(std::move(data)); };
+        source->enable_audio(samplerate, num_channels);
+
+        source_ = std::move(source);
+        source_->graph(graph_);
+    }
+
+    void from_memory_only_video(int width, int height, boost::rational<int> framerate) override
+    {
+        auto source = spl::make_unique<memory_source>();
+        auto source_ptr = source.get();
+        try_push_video_ = [this, source_ptr](caspar::array<const std::uint8_t> data) { return source_ptr->try_push_video(std::move(data)); };
+        source->enable_video(width, height, std::move(framerate));
+
+        source_ = std::move(source);
+        source_->graph(graph_);
+    }
+
+    void from_memory(int num_channels, int samplerate, int width, int height, boost::rational<int> framerate) override
+    {
+        auto source = spl::make_unique<memory_source>();
+        auto source_ptr = source.get();
+        try_push_audio_ = [this, source_ptr](caspar::array<const std::int32_t> data) { return source_ptr->try_push_audio(std::move(data)); };
+        try_push_video_ = [this, source_ptr](caspar::array<const std::uint8_t> data) { return source_ptr->try_push_video(std::move(data)); };
+        source->enable_audio(samplerate, num_channels);
+        source->enable_video(width, height, std::move(framerate));
+
+        source_ = std::move(source);
+        source_->graph(graph_);
+    }
+
+    // Thin forwards to the current source.
+    void start_frame(std::uint32_t frame) override            { source_->start_frame(frame); }
+    std::uint32_t start_frame() const override                { return source_->start_frame(); }
+    void length(std::uint32_t frames) override                { source_->length(frames); }
+    std::uint32_t length() const override                     { return source_->length(); }
+    void seek(std::uint32_t frame) override                   { source_->seek(frame); }
+    void loop(bool value) override                            { source_->loop(value); }
+    bool loop() const override                                { return source_->loop(); }
+    std::string source_filename() const override              { return source_->filename(); }
+
+    // Filter setup
+
+    void vfilter(std::string filter) override
+    {
+        vfilter_ = std::move(filter);
+    }
+
+    void afilter(std::string filter) override
+    {
+        afilter_ = std::move(filter);
+    }
+
+    // Reported dimensions are those of the last video frame seen (0 until then).
+    int width() const override
+    {
+        return source_width_;
+    }
+
+    int height() const override
+    {
+        return source_height_;
+    }
+
+    boost::rational<int> framerate() const override
+    {
+        // A frame-doubling filter (e.g. yadif=1) doubles the output rate.
+        bool double_rate = filter::is_double_rate(u16(vfilter_));
+
+        return double_rate ? source_->framerate() * 2 : source_->framerate();
+    }
+
+    bool progressive() const override
+    {
+        return true;//TODO
+    }
+
+    // Sink setup
+
+    void to_memory(spl::shared_ptr<core::frame_factory> factory, core::video_format_desc format) override
+    {
+        auto sink = spl::make_unique<memory_sink>(std::move(factory), std::move(format));
+        auto sink_ptr = sink.get();
+        try_pop_frame_ = [sink_ptr] { return sink_ptr->try_pop_frame(); };
+
+        sink_ = std::move(sink);
+        sink_->graph(graph_);
+    }
+
+    void to_file(std::string filename) override
+    {
+        sink_ = spl::make_unique<file_sink>(std::move(filename));
+        try_pop_frame_ = std::function<core::draw_frame ()>();
+        sink_->graph(graph_);
+    }
+
+    void acodec(std::string codec) override { sink_->acodec(std::move(codec)); }
+    void vcodec(std::string codec) override { sink_->vcodec(std::move(codec)); }
+    void format(std::string fmt) override   { sink_->format(std::move(fmt)); }
+
+    // Runtime control
+
+    void start() override
+    {
+        source_->start();
+        sink_->start(source_->has_audio(), source_->has_video());
+        started_ = true;
+        // Propagate the caller's quiet-logging mode onto the worker thread.
+        bool quiet = is_logging_quiet_for_thread();
+
+        thread_ = boost::thread([=] { run(quiet); });
+    }
+
+    bool try_push_audio(caspar::array<const std::int32_t> data) override
+    {
+        if (try_push_audio_)
+            return try_push_audio_(std::move(data));
+        else
+            return false; // no memory source with audio configured
+    }
+
+    bool try_push_video(caspar::array<const std::uint8_t> data) override
+    {
+        if (try_push_video_)
+            return try_push_video_(std::move(data));
+        else
+            return false; // no memory source with video configured
+    }
+
+    core::draw_frame try_pop_frame() override
+    {
+        if (!try_pop_frame_)
+            CASPAR_THROW_EXCEPTION(invalid_operation()); // sink is not a memory sink
+
+        return try_pop_frame_();
+    }
+
+    std::uint32_t last_frame() const override
+    {
+        return source_->frame_number();
+    }
+
+    bool started() const override
+    {
+        return started_;
+    }
+
+    void stop() override
+    {
+        started_ = false;
+
+        // Stop both ends first so the worker thread unblocks, then join.
+        sink_->stop();
+        source_->stop();
+
+        if (thread_.joinable())
+            thread_.join();
+    }
+
+private:
+    // Worker thread body: demand-driven pump. The sink reports which media
+    // type it is starved for; the source is asked for exactly that.
+    void run(bool quiet)
+    {
+        ensure_gpf_handler_installed_for_thread(u8(L"ffmpeg-pipeline: " + source_->print() + L" -> " + sink_->print()).c_str());
+        auto quiet_logging = temporary_enable_quiet_logging_for_thread(quiet);
+
+        try
+        {
+            boost::optional<AVMediaType> result = source_->has_audio() ? AVMediaType::AVMEDIA_TYPE_AUDIO : AVMediaType::AVMEDIA_TYPE_VIDEO;
+
+            while (started_ && (source_->has_audio() || source_->has_video()))
+            {
+                auto needed      = *result;
+                auto input_frame = source_->get_input_frame(needed);
+
+                if (input_frame)
+                {
+                    if (needed == AVMediaType::AVMEDIA_TYPE_AUDIO)
+                    {
+                        // NOTE(review): audio frames are pushed straight to the
+                        // sink; audio_filter_ / initialize_audio_filter_if_needed
+                        // are never invoked on this path, so afilter_ appears to
+                        // be unused here — confirm whether that is intentional.
+                        result = sink_->try_push(AVMediaType::AVMEDIA_TYPE_AUDIO, spl::make_shared_ptr(std::move(input_frame)));
+                    }
+                    else if (needed == AVMediaType::AVMEDIA_TYPE_VIDEO)
+                    {
+                        initialize_video_filter_if_needed(*input_frame);
+                        video_filter_->push(std::move(input_frame));
+
+                        // A single input may yield 0..n filtered frames (e.g. yadif=1).
+                        for (auto filtered_frame : video_filter_->poll_all())
+                        {
+                            result = sink_->try_push(AVMediaType::AVMEDIA_TYPE_VIDEO, std::move(filtered_frame));
+                        }
+                    }
+                    else
+                        CASPAR_THROW_EXCEPTION(not_supported());
+                }
+                else if (source_->eof())
+                {
+                    started_ = false;
+                    sink_->eof();
+                    break;
+                }
+                else
+                    result = boost::none; // source temporarily starved
+
+                if (!result)
+                {
+                    graph_->set_tag(caspar::diagnostics::tag_severity::WARNING, "dropped-frame");
+                    result = needed; // Repeat same media type
+                }
+            }
+        }
+        catch (...)
+        {
+            if (is_logging_quiet_for_thread())
+            {
+                CASPAR_LOG_CURRENT_EXCEPTION_AT_LEVEL(debug);
+            }
+            else
+            {
+                CASPAR_LOG_CURRENT_EXCEPTION();
+            }
+        }
+
+        // Tear down in-thread state and signal both ends, whether we exited
+        // normally or via an exception.
+        video_filter_.reset();
+        audio_filter_.reset();
+        source_->stop();
+        sink_->stop();
+        started_ = false;
+    }
+
+    // Sets old_value = new_value and flags 'changed' when they differ;
+    // used to detect input-format changes that require filter rebuilds.
+    template<typename T>
+    void set_if_changed(bool& changed, T& old_value, T new_value)
+    {
+        if (old_value != new_value)
+        {
+            changed   = true;
+            old_value = new_value;
+        }
+    }
+
+    void initialize_audio_filter_if_needed(const AVFrame& av_frame)
+    {
+        bool changed = false;
+
+        set_if_changed(changed, source_sampleformat_, static_cast<AVSampleFormat>(av_frame.format));
+        set_if_changed(changed, source_num_channels_, av_frame.channels);
+
+        if (changed)
+            initialize_audio_filter();
+    }
+
+    void initialize_audio_filter()
+    {
+        audio_filter_.reset(new audio_filter(
+            boost::rational<int>(1, source_->samplerate()),
+            source_->samplerate(),
+            source_sampleformat_,
+            av_get_default_channel_layout(source_num_channels_),
+            sink_->supported_samplerates(),
+            sink_->supported_sample_formats(),
+            {},
+            afilter_));
+    }
+
+    void initialize_video_filter_if_needed(const AVFrame& av_frame)
+    {
+        bool changed = false;
+
+        set_if_changed(changed, source_width_, av_frame.width);
+        set_if_changed(changed, source_height_, av_frame.height);
+        set_if_changed(changed, source_pixelformat_, static_cast<AVPixelFormat>(av_frame.format));
+
+        core::field_mode field_mode = core::field_mode::progressive;
+
+        if (av_frame.interlaced_frame)
+            field_mode = av_frame.top_field_first ? core::field_mode::upper : core::field_mode::lower;
+
+        set_if_changed(changed, source_fieldmode_, field_mode);
+
+        if (changed)
+            initialize_video_filter();
+    }
+
+    void initialize_video_filter()
+    {
+        // Auto-deinterlace (frame-doubling yadif) unless the user filter
+        // already deinterlaces.
+        if (source_fieldmode_ != core::field_mode::progressive && !filter::is_deinterlacing(u16(vfilter_)))
+            vfilter_ = u8(append_filter(u16(vfilter_), L"YADIF=1:-1"));
+
+        if (source_height_ == 480) // NTSC DV
+        {
+            // Pad 480 lines up to 486 with a 2-line top offset.
+            auto pad_str = L"PAD=" + boost::lexical_cast<std::wstring>(source_width_) + L":486:0:2:black";
+            vfilter_ = u8(append_filter(u16(vfilter_), pad_str));
+        }
+
+        video_filter_.reset(new filter(
+            source_width_,
+            source_height_,
+            1 / source_->framerate(),
+            source_->framerate(),
+            boost::rational<int>(1, 1), // TODO
+            source_pixelformat_,
+            sink_->supported_pixel_formats(),
+            vfilter_));
+        // Tell the sink the (possibly doubled) output framerate so it can
+        // recompute its audio cadence.
+        sink_->framerate(framerate());
+    }
+};
+
+// Factory for the internal (in-process FFmpeg) pipeline backend; returned
+// through the abstract ffmpeg_pipeline_backend interface.
+spl::shared_ptr<struct ffmpeg_pipeline_backend> create_internal_pipeline()
+{
+    return spl::make_shared<ffmpeg_pipeline_backend_internal>();
+}
+
+}}
--- /dev/null
+/*
+* Copyright (c) 2011 Sveriges Television AB <info@casparcg.com>
+*
+* This file is part of CasparCG (www.casparcg.com).
+*
+* CasparCG is free software: you can redistribute it and/or modify
+* it under the terms of the GNU General Public License as published by
+* the Free Software Foundation, either version 3 of the License, or
+* (at your option) any later version.
+*
+* CasparCG is distributed in the hope that it will be useful,
+* but WITHOUT ANY WARRANTY; without even the implied warranty of
+* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+* GNU General Public License for more details.
+*
+* You should have received a copy of the GNU General Public License
+* along with CasparCG. If not, see <http://www.gnu.org/licenses/>.
+*
+* Author: Helge Norberg, helge.norberg@svt.se
+*/
+
+#pragma once
+
+#include <common/memory.h>
+
+#include <boost/rational.hpp>
+
+#include <string>
+#include <functional>
+#include <cstdint>
+
+namespace caspar { namespace ffmpeg {
+
+spl::shared_ptr<struct ffmpeg_pipeline_backend> create_internal_pipeline();
+
+}}
#include "ffmpeg_producer.h"
-#include "../ffmpeg_error.h"
+#include "../ffmpeg_pipeline.h"
#include "../ffmpeg.h"
-
-#include "muxer/frame_muxer.h"
-#include "input/input.h"
#include "util/util.h"
-#include "audio/audio_decoder.h"
-#include "video/video_decoder.h"
-#include <common/env.h>
-#include <common/log.h>
#include <common/param.h>
#include <common/diagnostics/graph.h>
#include <common/future.h>
-#include <common/timer.h>
-#include <common/assert.h>
-#include <core/video_format.h>
-#include <core/producer/frame_producer.h>
-#include <core/frame/audio_channel_layout.h>
-#include <core/frame/frame_factory.h>
#include <core/frame/draw_frame.h>
-#include <core/frame/frame_transform.h>
-#include <core/monitor/monitor.h>
#include <core/help/help_repository.h>
#include <core/help/help_sink.h>
-#include <core/producer/media_info/media_info_repository.h>
#include <core/producer/media_info/media_info.h>
+#include <core/producer/framerate/framerate_producer.h>
-#include <boost/algorithm/string.hpp>
-#include <boost/filesystem.hpp>
-#include <boost/property_tree/ptree.hpp>
-#include <boost/regex.hpp>
-#include <boost/thread/future.hpp>
-
-#include <tbb/parallel_invoke.h>
-
-#include <limits>
-#include <memory>
-#include <queue>
+#include <future>
namespace caspar { namespace ffmpeg {
struct ffmpeg_producer : public core::frame_producer_base
{
spl::shared_ptr<core::monitor::subject> monitor_subject_;
+ ffmpeg_pipeline pipeline_;
const std::wstring filename_;
const std::wstring path_relative_to_media_ = get_relative_or_original(filename_, env::media_folder());
const spl::shared_ptr<diagnostics::graph> graph_;
- const spl::shared_ptr<core::frame_factory> frame_factory_;
const core::video_format_desc format_desc_;
- input input_;
-
- const double fps_ = read_fps(input_.context(), format_desc_.fps);
- const uint32_t start_;
- const bool thumbnail_mode_;
- const boost::optional<core::media_info> info_;
-
- std::unique_ptr<video_decoder> video_decoder_;
- std::unique_ptr<audio_decoder> audio_decoder_;
- std::unique_ptr<frame_muxer> muxer_;
core::constraints constraints_;
+ core::draw_frame first_frame_ = core::draw_frame::empty();
core::draw_frame last_frame_ = core::draw_frame::empty();
boost::optional<uint32_t> seek_target_;
public:
explicit ffmpeg_producer(
- const spl::shared_ptr<core::frame_factory>& frame_factory,
- const core::video_format_desc& format_desc,
- const std::wstring& channel_layout_spec,
- const std::wstring& filename,
- const std::wstring& filter,
- bool loop,
- uint32_t start,
- uint32_t length,
- bool thumbnail_mode,
- boost::optional<core::media_info> info)
- : filename_(filename)
- , frame_factory_(frame_factory)
+ ffmpeg_pipeline pipeline,
+ const core::video_format_desc& format_desc)
+ : pipeline_(std::move(pipeline))
+ , filename_(u16(pipeline_.source_filename()))
, format_desc_(format_desc)
- , input_(graph_, filename_, loop, start, length, thumbnail_mode)
- , start_(start)
- , thumbnail_mode_(thumbnail_mode)
- , info_(info)
{
graph_->set_color("frame-time", diagnostics::color(0.1f, 1.0f, 0.1f));
graph_->set_color("underflow", diagnostics::color(0.6f, 0.3f, 0.9f));
diagnostics::register_graph(graph_);
-
- try
- {
- video_decoder_.reset(new video_decoder(input_, thumbnail_mode));
- video_decoder_->monitor_output().attach_parent(monitor_subject_);
- constraints_.width.set(video_decoder_->width());
- constraints_.height.set(video_decoder_->height());
-
- if (is_logging_quiet_for_thread())
- CASPAR_LOG(debug) << print() << L" " << video_decoder_->print();
- else
- CASPAR_LOG(info) << print() << L" " << video_decoder_->print();
- }
- catch(averror_stream_not_found&)
- {
- CASPAR_LOG(debug) << print() << " No video-stream found. Running without video.";
- }
- catch(...)
- {
- CASPAR_LOG_CURRENT_EXCEPTION();
- CASPAR_LOG(warning) << print() << "Failed to open video-stream. Running without video.";
- }
+ pipeline_.graph(graph_);
+ pipeline_.start();
- auto channel_layout = core::audio_channel_layout::invalid();
+ while ((first_frame_ = pipeline_.try_pop_frame()) == core::draw_frame::late())
+ boost::this_thread::sleep_for(boost::chrono::milliseconds(1));
- if (!thumbnail_mode)
- {
- try
- {
- audio_decoder_.reset(new audio_decoder(input_, format_desc_, channel_layout_spec));
- audio_decoder_->monitor_output().attach_parent(monitor_subject_);
-
- channel_layout = audio_decoder_->channel_layout();
-
- CASPAR_LOG(info) << print() << L" " << audio_decoder_->print();
- }
- catch (averror_stream_not_found&)
- {
- CASPAR_LOG(debug) << print() << " No audio-stream found. Running without audio.";
- }
- catch (...)
- {
- CASPAR_LOG_CURRENT_EXCEPTION();
- CASPAR_LOG(warning) << print() << " Failed to open audio-stream. Running without audio.";
- }
- }
-
- if (start_ > file_nb_frames())
- CASPAR_THROW_EXCEPTION(seek_out_of_range() << msg_info("SEEK out of range"));
-
- muxer_.reset(new frame_muxer(fps_, frame_factory, format_desc_, channel_layout, filter));
-
- decode_next_frame();
+ constraints_.width.set(pipeline_.width());
+ constraints_.height.set(pipeline_.height());
if (is_logging_quiet_for_thread())
CASPAR_LOG(debug) << print() << L" Initialized";
core::draw_frame receive_impl() override
{
- auto frame = core::draw_frame::late();
+ auto frame = core::draw_frame::late();
caspar::timer frame_timer;
- end_seek();
-
- decode_next_frame();
-
- if(!muxer_->empty())
- {
- last_frame_ = frame = std::move(muxer_->front());
- muxer_->pop();
- }
- else if (!input_.eof())
+ auto decoded_frame = first_frame_;
+
+ if (decoded_frame == core::draw_frame::empty())
+ decoded_frame = pipeline_.try_pop_frame();
+ else
+ first_frame_ = core::draw_frame::empty();
+
+ if (decoded_frame == core::draw_frame::empty())
+ frame = core::draw_frame::still(last_frame_);
+ else if (decoded_frame != core::draw_frame::late())
+ last_frame_ = frame = core::draw_frame(std::move(decoded_frame));
+ else if (pipeline_.started())
graph_->set_tag(diagnostics::tag_severity::WARNING, "underflow");
+ graph_->set_text(print());
+
graph_->set_value("frame-time", frame_timer.elapsed()*format_desc_.fps*0.5);
*monitor_subject_
<< core::monitor::message("/profiler/time") % frame_timer.elapsed() % (1.0/format_desc_.fps);
*monitor_subject_
- << core::monitor::message("/file/frame") % static_cast<int32_t>(file_frame_number())
- % static_cast<int32_t>(file_nb_frames())
- << core::monitor::message("/file/fps") % fps_
+ << core::monitor::message("/file/frame") % static_cast<int32_t>(pipeline_.last_frame())
+ % static_cast<int32_t>(pipeline_.length())
+ << core::monitor::message("/file/fps") % boost::rational_cast<double>(pipeline_.framerate())
<< core::monitor::message("/file/path") % path_relative_to_media_
- << core::monitor::message("/loop") % input_.loop();
-
+ << core::monitor::message("/loop") % pipeline_.loop();
+
return frame;
}
core::draw_frame last_frame() override
{
- end_seek();
return core::draw_frame::still(last_frame_);
}
uint32_t nb_frames() const override
{
- if(input_.loop())
+ if (pipeline_.loop())
return std::numeric_limits<uint32_t>::max();
- uint32_t nb_frames = file_nb_frames();
-
- nb_frames = std::min(input_.length(), nb_frames);
- nb_frames = muxer_->calc_nb_frames(nb_frames);
-
- return nb_frames > start_ ? nb_frames - start_ : 0;
- }
-
- uint32_t file_nb_frames() const
- {
- uint32_t file_nb_frames = 0;
-
- if (info_)
- file_nb_frames = static_cast<uint32_t>(info_->duration);
-
- file_nb_frames = std::max(file_nb_frames, video_decoder_ ? video_decoder_->nb_frames() : 0);
- file_nb_frames = std::max(file_nb_frames, audio_decoder_ ? audio_decoder_->nb_frames() : 0);
- return file_nb_frames;
- }
-
- uint32_t file_frame_number() const
- {
- return video_decoder_ ? video_decoder_->file_frame_number() : 0;
+ return pipeline_.length();
}
std::future<std::wstring> call(const std::vector<std::wstring>& params) override
{
auto value = what["VALUE"].str();
if(!value.empty())
- input_.loop(boost::lexical_cast<bool>(value));
- result = boost::lexical_cast<std::wstring>(loop());
+ pipeline_.loop(boost::lexical_cast<bool>(value));
+ result = boost::lexical_cast<std::wstring>(pipeline_.loop());
}
else if(boost::regex_match(param, what, seek_exp))
{
auto value = what["VALUE"].str();
- seek(boost::lexical_cast<uint32_t>(value));
+ pipeline_.seek(boost::lexical_cast<uint32_t>(value));
}
else if(boost::regex_match(param, what, length_exp))
{
auto value = what["VALUE"].str();
if(!value.empty())
- length(boost::lexical_cast<uint32_t>(value));
- result = boost::lexical_cast<std::wstring>(length());
+ pipeline_.length(boost::lexical_cast<uint32_t>(value));
+ result = boost::lexical_cast<std::wstring>(pipeline_.length());
}
else if(boost::regex_match(param, what, start_exp))
{
auto value = what["VALUE"].str();
if(!value.empty())
- start(boost::lexical_cast<uint32_t>(value));
- result = boost::lexical_cast<std::wstring>(start());
+ pipeline_.start_frame(boost::lexical_cast<uint32_t>(value));
+ result = boost::lexical_cast<std::wstring>(pipeline_.start_frame());
}
else
CASPAR_THROW_EXCEPTION(invalid_argument());
{
return L"ffmpeg[" + boost::filesystem::path(filename_).filename().wstring() + L"|"
+ print_mode() + L"|"
- + boost::lexical_cast<std::wstring>(file_frame_number()) + L"/" + boost::lexical_cast<std::wstring>(file_nb_frames()) + L"]";
+ + boost::lexical_cast<std::wstring>(pipeline_.last_frame()) + L"/" + boost::lexical_cast<std::wstring>(pipeline_.length()) + L"]";
}
std::wstring name() const override
boost::property_tree::wptree info;
info.add(L"type", L"ffmpeg");
info.add(L"filename", filename_);
- info.add(L"width", video_decoder_ ? video_decoder_->width() : 0);
- info.add(L"height", video_decoder_ ? video_decoder_->height() : 0);
- info.add(L"progressive", video_decoder_ ? video_decoder_->is_progressive() : 0);
- info.add(L"fps", fps_);
- info.add(L"loop", input_.loop());
+ info.add(L"width", pipeline_.width());
+ info.add(L"height", pipeline_.height());
+ info.add(L"progressive", pipeline_.progressive());
+ info.add(L"fps", boost::rational_cast<double>(pipeline_.framerate()));
+ info.add(L"loop", pipeline_.loop());
info.add(L"frame-number", frame_number());
- auto nb_frames2 = nb_frames();
- info.add(L"nb-frames", nb_frames2 == std::numeric_limits<int64_t>::max() ? -1 : nb_frames2);
- info.add(L"file-frame-number", file_frame_number());
- info.add(L"file-nb-frames", file_nb_frames());
+ info.add(L"nb-frames", nb_frames());
+ info.add(L"file-frame-number", pipeline_.last_frame());
+ info.add(L"file-nb-frames", pipeline_.length());
return info;
}
}
// ffmpeg_producer
-
- void end_seek()
- {
- for(int n = 0; n < 8 && (last_frame_ == core::draw_frame::empty() || (seek_target_ && file_frame_number() != *seek_target_+2)); ++n)
- {
- decode_next_frame();
- if(!muxer_->empty())
- {
- last_frame_ = muxer_->front();
- seek_target_.reset();
- }
- }
- }
-
- void loop(bool value)
- {
- input_.loop(value);
- }
-
- bool loop() const
- {
- return input_.loop();
- }
-
- void length(uint32_t value)
- {
- input_.length(value);
- }
-
- uint32_t length()
- {
- return input_.length();
- }
-
- void start(uint32_t value)
- {
- input_.start(value);
- }
-
- uint32_t start()
- {
- return input_.start();
- }
-
- void seek(uint32_t target)
- {
- if (target > file_nb_frames())
- CASPAR_THROW_EXCEPTION(seek_out_of_range() << msg_info("SEEK out of range"));
-
- seek_target_ = target;
-
- input_.seek(*seek_target_);
- muxer_->clear();
- }
std::wstring print_mode() const
{
- return ffmpeg::print_mode(video_decoder_ ? video_decoder_->width() : 0,
- video_decoder_ ? video_decoder_->height() : 0,
- fps_,
- video_decoder_ ? !video_decoder_->is_progressive() : false);
- }
-
- void decode_next_frame()
- {
- for(int n = 0; n < 32 && muxer_->empty(); ++n)
- {
- std::shared_ptr<AVFrame> video;
- std::shared_ptr<AVFrame> audio;
- bool needs_video = !muxer_->video_ready();
- bool needs_audio = !muxer_->audio_ready();
-
- tbb::parallel_invoke(
- [&]
- {
- if (needs_video)
- video = video_decoder_ ? (*video_decoder_)() : create_frame();
- },
- [&]
- {
- if (needs_audio)
- audio = audio_decoder_ ? (*audio_decoder_)() : create_frame();
- });
-
- muxer_->push_video(video);
- muxer_->push_audio(audio);
- }
-
- graph_->set_text(print());
+ return ffmpeg::print_mode(
+ pipeline_.width(),
+ pipeline_.height(),
+ boost::rational_cast<double>(pipeline_.framerate()),
+ !pipeline_.progressive());
}
};
sink.example(L">> PLAY 1-10 folder/clip FILTER yadif=1,-1", L"to deinterlace the video.");
sink.example(L">> PLAY 1-10 folder/clip CHANNEL_LAYOUT film", L"given the defaults in casparcg.config this will specifies that the clip has 6 audio channels of the type 5.1 and that they are in the order FL FC FR BL BR LFE regardless of what ffmpeg says.");
sink.example(L">> PLAY 1-10 folder/clip CHANNEL_LAYOUT \"5.1:LFE FL FC FR BL BR\"", L"specifies that the clip has 6 audio channels of the type 5.1 and that they are in the specified order regardless of what ffmpeg says.");
- sink.para()->text(L"The FFmpeg producer also supports changing some of the settings via CALL:");
+ sink.para()->text(L"The FFmpeg producer also supports changing some of the settings via ")->code(L"CALL")->text(L":");
sink.example(L">> CALL 1-10 LOOP 1");
sink.example(L">> CALL 1-10 START 10");
sink.example(L">> CALL 1-10 LENGTH 50");
+ core::describe_framerate_producer(sink);
}
spl::shared_ptr<core::frame_producer> create_producer(
if(filename.empty())
return core::frame_producer::empty();
- bool loop = contains_param(L"LOOP", params);
- auto start = get_param(L"START", params, get_param(L"SEEK", params, static_cast<uint32_t>(0)));
- auto length = get_param(L"LENGTH", params, std::numeric_limits<uint32_t>::max());
- auto filter_str = get_param(L"FILTER", params, L"");
- auto channel_layout = get_param(L"CHANNEL_LAYOUT", params, L"");
- bool thumbnail_mode = false;
- auto info = info_repo->get(filename);
-
- return create_destroy_proxy(spl::make_shared_ptr(std::make_shared<ffmpeg_producer>(
- dependencies.frame_factory,
- dependencies.format_desc,
- channel_layout,
- filename,
- filter_str,
- loop,
- start,
- length,
- thumbnail_mode,
- info)));
+ auto pipeline = ffmpeg_pipeline()
+ .from_file(u8(filename))
+ .loop(contains_param(L"LOOP", params))
+ .start_frame(get_param(L"START", params, get_param(L"SEEK", params, static_cast<uint32_t>(0))))
+ .length(get_param(L"LENGTH", params, std::numeric_limits<uint32_t>::max()))
+ .vfilter(u8(get_param(L"FILTER", params, L"")))
+ .to_memory(dependencies.frame_factory, dependencies.format_desc);
+
+ auto producer = create_destroy_proxy(spl::make_shared_ptr(std::make_shared<ffmpeg_producer>(
+ pipeline,
+ dependencies.format_desc)));
+
+ if (pipeline.framerate() == -1) // Audio only.
+ return producer;
+
+ auto source_framerate = pipeline.framerate();
+ auto target_framerate = boost::rational<int>(
+ dependencies.format_desc.time_scale,
+ dependencies.format_desc.duration);
+
+ return core::create_framerate_producer(
+ producer,
+ source_framerate,
+ target_framerate,
+ dependencies.format_desc.field_mode,
+ dependencies.format_desc.audio_cadence);
}
core::draw_frame create_thumbnail_frame(
- const core::frame_producer_dependencies& dependencies,
- const std::wstring& media_file,
- const spl::shared_ptr<core::media_info_repository>& info_repo)
+ const core::frame_producer_dependencies& dependencies,
+ const std::wstring& media_file,
+ const spl::shared_ptr<core::media_info_repository>& info_repo)
{
auto quiet_logging = temporary_enable_quiet_logging_for_thread(true);
auto filename = probe_stem(env::media_folder() + L"/" + media_file, true);
auto render_specific_frame = [&](std::int64_t frame_num)
{
- spl::shared_ptr<core::frame_producer> producer = spl::make_shared<ffmpeg_producer>(
- dependencies.frame_factory,
- dependencies.format_desc,
- L"",
- filename,
- L"",
- false,
- static_cast<uint32_t>(frame_num),
- std::numeric_limits<uint32_t>::max(),
- true,
- info_repo->get(filename));
- return producer->receive();
+ auto pipeline = ffmpeg_pipeline()
+ .from_file(u8(filename))
+ .start_frame(static_cast<uint32_t>(frame_num))
+ .to_memory(dependencies.frame_factory, dependencies.format_desc);
+ pipeline.start();
+
+ auto frame = core::draw_frame::empty();
+ while ((frame = pipeline.try_pop_frame()) == core::draw_frame::late())
+ boost::this_thread::sleep_for(boost::chrono::milliseconds(1));
+ return frame;
};
auto info = info_repo->get(filename);
if (i == 0)
desired_frame = 0; // first
else if (i == num_snapshots - 1)
- desired_frame = total_frames - 30; // last
+ desired_frame = total_frames - 2; // last
else
// evenly distributed across the file.
desired_frame = total_frames * i / (num_snapshots - 1);
tbb::atomic<uint32_t> length_;
tbb::atomic<bool> loop_;
tbb::atomic<bool> eof_;
- bool thumbnail_mode_;
double fps_ = read_fps(*format_context_, 0.0);
uint32_t frame_number_ = 0;
bool thumbnail_mode)
: graph_(graph)
, filename_(filename)
- , thumbnail_mode_(thumbnail_mode)
{
start_ = start;
length_ = length;
graph_->set_color("seek", diagnostics::color(1.0f, 0.5f, 0.0f));
- if (!thumbnail_mode_)
+ if (!thumbnail_mode)
for (unsigned i = 0; i < format_context_->nb_streams; ++i)
if (format_context_->streams[i]->codec->codec_type == AVMediaType::AVMEDIA_TYPE_AUDIO)
audio_streams_.emplace_back(i);
for(int n = 0; n < 8; ++n)
tick();
- if (!thumbnail_mode)
- thread_ = boost::thread([this]{run();});
+ thread_ = boost::thread([this, thumbnail_mode]{run(thumbnail_mode);});
}
~impl()
{
is_running_ = false;
cond_.notify_one();
-
- if (!thumbnail_mode_)
- thread_.join();
+ thread_.join();
}
bool try_pop_video(std::shared_ptr<AVPacket>& packet)
if (!video_stream_.is_available())
return false;
- if (thumbnail_mode_)
- {
- int ticks = 0;
- while (!video_stream_.try_pop(packet))
- {
- tick();
- if (++ticks > 32) // Infinite loop should not be possible
- return false;
-
- // Play nice
- boost::this_thread::sleep_for(boost::chrono::milliseconds(5));
- }
-
- return true;
- }
-
bool result = video_stream_.try_pop(packet);
if(result)
return true;
}
- void run()
+ void run(bool thumbnail_mode)
{
ensure_gpf_handler_installed_for_thread(u8(print()).c_str());
+ auto quiet_logging = temporary_enable_quiet_logging_for_thread(thumbnail_mode);
while(is_running_)
{