#include "../../frame/frame_transform.h"
#include "../../frame/pixel_format.h"
#include "../../monitor/monitor.h"
+#include "../../help/help_sink.h"
#include <common/future.h>
+#include <common/tweener.h>
#include <functional>
#include <queue>
#include <future>
+#include <stack>
namespace caspar { namespace core {
auto under = source;
auto over = destination;
- double float_distance = static_cast<double>(distance.numerator()) / static_cast<double>(distance.denominator());
+ double float_distance = boost::rational_cast<double>(distance);
under.transform().image_transform.is_mix = true;
under.transform().image_transform.opacity = 1 - float_distance;
middle.transform().image_transform.is_mix = true;
next_frame.transform().image_transform.is_mix = true;
- double float_distance = static_cast<double>(distance.numerator()) / static_cast<double>(distance.denominator());
+ double float_distance = boost::rational_cast<double>(distance);
previous_frame.transform().image_transform.opacity = std::max(0.0, 0.5 - float_distance * 0.5);
middle.transform().image_transform.opacity = 0.5;
next_frame.transform().image_transform.opacity = 1.0 - previous_frame.transform().image_transform.opacity - middle.transform().image_transform.opacity;
}
};
+// frame_visitor that collects audio out of a frame tree. It maintains a stack
+// of accumulated audio transforms while descending, so frames sitting under a
+// "still" transform can be muted, and forwards every audible frame to the
+// supplied callback.
-struct audio_extractor : public frame_visitor
+class audio_extractor : public frame_visitor
{
-	std::function<void(const const_frame& frame)>	on_frame_;
-
+	std::stack<core::audio_transform>				transform_stack_;
+	std::function<void(const const_frame& frame)>	on_frame_;
+public:
	audio_extractor(std::function<void(const const_frame& frame)> on_frame)
		: on_frame_(std::move(on_frame))
	{
+		// Seed with an identity transform so transform_stack_.top() is always valid.
+		transform_stack_.push(audio_transform());
+	}
+
+	void push(const frame_transform& transform) override
+	{
+		// Combine the enclosing (top) transform with the one being entered.
+		transform_stack_.push(transform_stack_.top() * transform.audio_transform);
+	}
+
+	void pop() override
+	{
+		transform_stack_.pop();
	}
-	void push(const frame_transform& transform) override { }
-	void pop() override { }
	void visit(const const_frame& frame) override
	{
+		// Skip frames without audio data and frames under a still transform.
-		if (!frame.audio_data().empty())
+		if (!frame.audio_data().empty() && !transform_stack_.top().is_still)
			on_frame_(frame);
	}
};
+// Like tweened_transform but for framerates: eases a playback-speed rational
+// from a source value to a destination value over a fixed number of frames
+// using a tweener curve.
+class speed_tweener
+{
+	boost::rational<int64_t>	source_		= 1LL;
+	boost::rational<int64_t>	dest_		= 1LL;
+	int							duration_	= 0;
+	int							time_		= 0;
+	tweener						tweener_;
+public:
+	speed_tweener() = default;
+	speed_tweener(
+			const boost::rational<int64_t>& source,
+			const boost::rational<int64_t>& dest,
+			int duration,
+			const tweener& tween)
+		: source_(source)
+		, dest_(dest)
+		, duration_(duration)
+		, time_(0)
+		, tweener_(tween)
+	{
+	}
+
+	// The final speed this tween is heading towards.
+	const boost::rational<int64_t>& dest() const
+	{
+		return dest_;
+	}
+
+	// Current tweened speed without advancing time.
+	boost::rational<int64_t> fetch() const
+	{
+		// Return dest_ exactly (not the double approximation) once finished,
+		// so comparisons like fetch() == 1 hold at the end of the tween.
+		if (time_ == duration_)
+			return dest_;
+
+		double source	= boost::rational_cast<double>(source_);
+		double delta	= boost::rational_cast<double>(dest_) - source;
+		double result	= tweener_(time_, source, delta, duration_);
+		
+		// Convert the double back to a rational with 1/1000000 resolution.
+		return boost::rational<int64_t>(static_cast<int64_t>(result * 1000000.0), 1000000);
+	}
+
+	// Advance one frame (clamped at duration_) and return the new speed.
+	boost::rational<int64_t> fetch_and_tick()
+	{
+		time_ = std::min(time_ + 1, duration_);
+		return fetch();
+	}
+};
+
class framerate_producer : public frame_producer_base
{
spl::shared_ptr<frame_producer> source_;
field_mode destination_fieldmode_;
std::vector<int> destination_audio_cadence_;
boost::rational<std::int64_t> speed_;
+ speed_tweener user_speed_;
std::function<draw_frame (
const draw_frame& source,
const draw_frame& destination,
// for all other framerates a frame interpolator will be chosen.
if (speed_ != 1 && speed_ * 2 != 1 && speed_ != 2)
{
- if (source_framerate_ > 47) // The bluriness of blend_all is acceptable on high framerates.
+ auto high_source_framerate = source_framerate_ > 47;
+ auto high_destination_framerate = destination_framerate_ > 47
+ || destination_fieldmode_ != field_mode::progressive;
+
+ if (high_source_framerate && high_destination_framerate) // The bluriness of blend_all is acceptable on high framerates.
interpolator_ = blend_all();
- else // blend_all is mostly too blurry on low framerates. blend provides a compromise.
+ else // blend_all is mostly too blurry on low framerates. blend provides a compromise.
interpolator_ = &blend;
CASPAR_LOG(warning) << source_->print() << L" Frame blending frame rate conversion required to conform to channel frame rate.";
std::future<std::wstring> call(const std::vector<std::wstring>& params) override
{
- if (!boost::iequals(params.at(0), L"framerate") || params.size() != 3)
+ if (!boost::iequals(params.at(0), L"framerate"))
return source_->call(params);
- if (boost::iequals(params.at(1), L"interpolation"))
+ if (boost::iequals(params.at(1), L"speed"))
+ {
+ auto destination_user_speed = boost::rational<std::int64_t>(
+ static_cast<std::int64_t>(boost::lexical_cast<double>(params.at(2)) * 1000000.0),
+ 1000000);
+ auto frames = params.size() > 3 ? boost::lexical_cast<int>(params.at(3)) : 0;
+ auto easing = params.size() > 4 ? params.at(4) : L"linear";
+
+ user_speed_ = speed_tweener(user_speed_.fetch(), destination_user_speed, frames, tweener(easing));
+ }
+ else if (boost::iequals(params.at(1), L"interpolation"))
{
if (boost::iequals(params.at(2), L"blend"))
interpolator_ = &blend;
	boost::property_tree::wptree info() const override
	{
+		// Forward the source's info tree, but overwrite the frame counters:
+		// the source reports them in its own framerate, which does not match
+		// the converted output framerate/speed of this producer.
-		return source_->info();
+		auto info = source_->info();
+
+		auto incorrect_frame_number = info.get_child_optional(L"frame-number");
+		if (incorrect_frame_number)
+			incorrect_frame_number->put_value(frame_number());
+
+		auto incorrect_nb_frames = info.get_child_optional(L"nb-frames");
+		if (incorrect_nb_frames)
+			incorrect_nb_frames->put_value(nb_frames());
+
+		return info;
+	}
+
+	// Total frame count scaled from the source's count by the inverse speed
+	// (doubled when output_repeat_ is active, i.e. each frame is emitted twice).
+	// NOTE(review): assumes get_speed() != 0 — a zero speed would make
+	// 1 / get_speed() divide by zero; confirm callers clamp the speed.
+	uint32_t nb_frames() const override
+	{
+		auto source_nb_frames = source_->nb_frames();
+		auto multiple = boost::rational_cast<double>(1 / get_speed() * (output_repeat_ != 0 ? 2 : 1));
+
+		return static_cast<uint32_t>(source_nb_frames * multiple);
+	}
+
+	// Current frame number scaled the same way as nb_frames() so the two stay
+	// consistent in the info tree.
+	// NOTE(review): assumes get_speed() != 0 — confirm callers clamp the speed.
+	uint32_t frame_number() const override
+	{
+		auto source_frame_number = source_->frame_number() - 1; // next frame already received
+		auto multiple = boost::rational_cast<double>(1 / get_speed() * (output_repeat_ != 0 ? 2 : 1));
+
+		return static_cast<uint32_t>(source_frame_number * multiple);
+	}
constraints& pixel_constraints() override
private:
draw_frame do_render_progressive_frame(bool sound)
{
- if (output_repeat_ && ++output_frame_ % output_repeat_)
+ user_speed_.fetch_and_tick();
+
+ if (output_repeat_ && output_frame_++ % output_repeat_)
{
auto frame = draw_frame::still(last_frame());
for (std::int64_t i = 0; i < num_frames; ++i)
{
- previous_frame_ = std::move(next_frame_);
+ if (next_frame_ == draw_frame::empty())
+ previous_frame_ = pop_frame_from_source();
+ else
+ {
+ previous_frame_ = std::move(next_frame_);
- next_frame_ = pop_frame_from_source();
+ next_frame_ = pop_frame_from_source();
+ }
}
}
	boost::rational<std::int64_t> get_speed() const
	{
+		// Effective speed = framerate-conversion ratio * user-requested (tweened) speed.
-		return speed_;
+		return speed_ * user_speed_.fetch();
	}
draw_frame pop_frame_from_source()
{
auto frame = source_->receive();
- audio_extractor extractor([this](const const_frame& frame)
+ if (user_speed_.fetch() == 1)
{
- if (source_channel_layout_ != frame.audio_channel_layout())
+ audio_extractor extractor([this](const const_frame& frame)
{
- source_channel_layout_ = frame.audio_channel_layout();
-
- // Insert silence samples so that the audio mixer is guaranteed to be filled.
- auto min_num_samples_per_frame = *boost::min_element(destination_audio_cadence_);
- auto max_num_samples_per_frame = *boost::max_element(destination_audio_cadence_);
- auto cadence_safety_samples = max_num_samples_per_frame - min_num_samples_per_frame;
- audio_samples_.resize(source_channel_layout_.num_channels * cadence_safety_samples, 0);
- }
-
- auto& buffer = frame.audio_data();
- audio_samples_.insert(audio_samples_.end(), buffer.begin(), buffer.end());
- });
+ if (source_channel_layout_ != frame.audio_channel_layout())
+ {
+ source_channel_layout_ = frame.audio_channel_layout();
+
+ // Insert silence samples so that the audio mixer is guaranteed to be filled.
+ auto min_num_samples_per_frame = *boost::min_element(destination_audio_cadence_);
+ auto max_num_samples_per_frame = *boost::max_element(destination_audio_cadence_);
+ auto cadence_safety_samples = max_num_samples_per_frame - min_num_samples_per_frame;
+ audio_samples_.resize(source_channel_layout_.num_channels * cadence_safety_samples, 0);
+ }
+
+ auto& buffer = frame.audio_data();
+ audio_samples_.insert(audio_samples_.end(), buffer.begin(), buffer.end());
+ });
+
+ frame.accept(extractor);
+ }
+ else
+ {
+ source_channel_layout_ = audio_channel_layout::invalid();
+ audio_samples_.clear();
+ }
- frame.accept(extractor);
frame.transform().audio_transform.volume = 0.0;
return frame;
draw_frame attach_sound(draw_frame frame)
{
- if (source_channel_layout_ == audio_channel_layout::invalid())
+ if (user_speed_.fetch() != 1 || source_channel_layout_ == audio_channel_layout::invalid())
return frame;
mutable_audio_buffer buffer;
{
auto needed = destination_audio_cadence_.front();
auto got = audio_samples_.size() / source_channel_layout_.num_channels;
- CASPAR_LOG(debug) << print() << L" Too few audio samples. Needed " << needed << L" but got " << got;
+ if (got != 0) // If at end of stream we don't care
+ CASPAR_LOG(debug) << print() << L" Too few audio samples. Needed " << needed << L" but got " << got;
buffer.swap(audio_samples_);
buffer.resize(needed * source_channel_layout_.num_channels, 0);
}
	bool enough_sound() const
	{
+		// Trivially satisfied when audio is bypassed (unknown channel layout or
+		// non-native user speed); otherwise require at least one output
+		// cadence's worth of buffered samples.
		return source_channel_layout_ == core::audio_channel_layout::invalid()
+			|| user_speed_.fetch() != 1
			|| audio_samples_.size() / source_channel_layout_.num_channels >= destination_audio_cadence_.at(0);
	}
};
+// Registers AMCP CALL syntax help (interpolation modes and speed control
+// examples) for the framerate producer with the given help sink.
+void describe_framerate_producer(help_sink& sink)
+{
+	sink.para()->text(L"Framerate conversion control / Slow motion examples:");
+	sink.example(L">> CALL 1-10 FRAMERATE INTERPOLATION BLEND", L"enables 2 frame blend interpolation.");
+	sink.example(L">> CALL 1-10 FRAMERATE INTERPOLATION BLEND_ALL", L"enables 3 frame blend interpolation.");
+	sink.example(L">> CALL 1-10 FRAMERATE INTERPOLATION DROP_AND_SKIP", L"disables frame interpolation.");
+	sink.example(L">> CALL 1-10 FRAMERATE SPEED 0.25", L"immediately changes the speed to 25%. Sound will be disabled.");
+	sink.example(L">> CALL 1-10 FRAMERATE SPEED 0.25 50", L"changes the speed to 25% linearly over 50 frames. Sound will be disabled.");
+	sink.example(L">> CALL 1-10 FRAMERATE SPEED 0.25 50 easeinoutsine", L"changes the speed to 25% over 50 frames using specified easing curve. Sound will be disabled.");
+	sink.example(L">> CALL 1-10 FRAMERATE SPEED 1 50", L"changes the speed to 100% linearly over 50 frames. Sound will be enabled when the destination speed of 100% has been reached.");
+}
+
spl::shared_ptr<frame_producer> create_framerate_producer(
spl::shared_ptr<frame_producer> source,
boost::rational<int> source_framerate,