return boost::join(cadence | boost::adaptors::transformed([](size_t i) { return boost::lexical_cast<std::wstring>(i); }), L", ");
}
-ffmpeg::audio_input_pad create_input_pad(const core::video_format_desc& in_format, int num_channels)
-{
- return ffmpeg::audio_input_pad(
- boost::rational<int>(1, in_format.audio_sample_rate),
- in_format.audio_sample_rate,
- AVSampleFormat::AV_SAMPLE_FMT_S32,
- av_get_default_channel_layout(num_channels));
-}
-
class decklink_producer : boost::noncopyable, public IDeckLinkInputCallback
{
const int device_index_;
core::audio_channel_layout channel_layout_;
ffmpeg::frame_muxer muxer_ {
in_format_desc_.framerate,
- { create_input_pad(in_format_desc_, channel_layout_.num_channels) },
+ { ffmpeg::create_input_pad(in_format_desc_, channel_layout_.num_channels) },
frame_factory_,
out_format_desc_,
channel_layout_,
return frames;
}
+// Describes the channel's audio stream to the ffmpeg filter graph: a time
+// base of 1/sample-rate, signed 32-bit samples, and the default layout for
+// the requested channel count.
+ffmpeg::audio_input_pad create_input_pad(const core::video_format_desc& in_format, int num_channels)
+{
+	const auto sample_rate = in_format.audio_sample_rate;
+	const auto time_base   = boost::rational<int>(1, sample_rate);
+
+	return ffmpeg::audio_input_pad(
+			time_base,
+			sample_rate,
+			AVSampleFormat::AV_SAMPLE_FMT_S32,
+			av_get_default_channel_layout(num_channels));
+}
+
}}
#include <common/memory.h>
+#include <core/fwd.h>
+
#include <boost/rational.hpp>
#include <boost/noncopyable.hpp>
#include <boost/range/iterator_range.hpp>
spl::shared_ptr<implementation> impl_;
};
+audio_input_pad create_input_pad(const core::video_format_desc& in_format, int num_channels);
+
}}
const std::wstring filter_str_;
std::unique_ptr<audio_filter> audio_filter_;
const bool multithreaded_filter_;
- bool force_deinterlacing_ = env::properties().get(L"configuration.force-deinterlace", false);
+ bool force_deinterlacing_;
mutable boost::mutex out_framerate_mutex_;
boost::rational<int> out_framerate_;
const core::video_format_desc& format_desc,
const core::audio_channel_layout& channel_layout,
const std::wstring& filter_str,
- bool multithreaded_filter)
+ bool multithreaded_filter,
+ bool force_deinterlacing)
: in_framerate_(in_framerate)
, format_desc_(format_desc)
, audio_channel_layout_(channel_layout)
, frame_factory_(frame_factory)
, filter_str_(filter_str)
, multithreaded_filter_(multithreaded_filter)
+ , force_deinterlacing_(force_deinterlacing)
{
video_streams_.push(std::queue<core::mutable_frame>());
audio_streams_.push(core::mutable_audio_buffer());
if (display_mode_ == display_mode::deinterlace_bob)
filter_str = append_filter(filter_str, L"YADIF=1:-1");
+ else
+ {
+ if (mode == core::field_mode::lower && format_desc_.field_mode == core::field_mode::upper)
+ {
+ filter_str = append_filter(filter_str, L"CROP=h=" + boost::lexical_cast<std::wstring>(frame->height - 1) + L":y=0");
+ filter_str = append_filter(filter_str, L"PAD=0:" + boost::lexical_cast<std::wstring>(frame->height) + L":0:1:black");
+ filter_str = append_filter(filter_str, L"SETFIELD=tff");
+ }
+ else if (mode == core::field_mode::upper && format_desc_.field_mode == core::field_mode::lower)
+ {
+ filter_str = append_filter(filter_str, L"PAD=0:0:0:-1:black");
+ filter_str = append_filter(filter_str, L"SETFIELD=bff");
+ }
+ }
auto out_framerate = in_framerate_;
const core::video_format_desc& format_desc,
const core::audio_channel_layout& channel_layout,
const std::wstring& filter,
- bool multithreaded_filter)
- : impl_(new impl(std::move(in_framerate), std::move(audio_input_pads), frame_factory, format_desc, channel_layout, filter, multithreaded_filter)){}
+ bool multithreaded_filter,
+ bool force_deinterlacing)
+ : impl_(new impl(std::move(in_framerate), std::move(audio_input_pads), frame_factory, format_desc, channel_layout, filter, multithreaded_filter, force_deinterlacing)){}
void frame_muxer::push(const std::shared_ptr<AVFrame>& video){impl_->push(video);}
void frame_muxer::push(const std::vector<std::shared_ptr<core::mutable_audio_buffer>>& audio_samples_per_stream){impl_->push(audio_samples_per_stream);}
core::draw_frame frame_muxer::poll(){return impl_->poll();}
#include <common/forward.h>
#include <common/memory.h>
+#include <common/env.h>
#include <core/frame/frame.h>
#include <core/mixer/audio/audio_mixer.h>
const core::video_format_desc& format_desc,
const core::audio_channel_layout& channel_layout,
const std::wstring& filter,
- bool multithreaded_filter);
+ bool multithreaded_filter,
+ bool force_deinterlacing = env::properties().get(L"configuration.force-deinterlace", false));
void push(const std::shared_ptr<AVFrame>& video_frame);
void push(const std::vector<std::shared_ptr<core::mutable_audio_buffer>>& audio_samples_per_stream);
include_directories(${RXCPP_INCLUDE_PATH})
include_directories(${TBB_INCLUDE_PATH})
include_directories(${ASMLIB_INCLUDE_PATH})
+include_directories(${FFMPEG_INCLUDE_PATH})
set_target_properties(reroute PROPERTIES FOLDER modules)
source_group(sources\\producer producer/*)
source_group(sources ./*)
-target_link_libraries(reroute common core)
+target_link_libraries(reroute common core ffmpeg)
casparcg_add_include_statement("modules/reroute/reroute.h")
casparcg_add_init_statement("reroute::init" "reroute")
#include <tbb/concurrent_queue.h>
+#if defined(_MSC_VER)
+#pragma warning (push)
+#pragma warning (disable : 4244)
+#endif
+extern "C"
+{
+#define __STDC_CONSTANT_MACROS
+#define __STDC_LIMIT_MACROS
+#include <libavcodec/avcodec.h>
+#include <libavformat/avformat.h>
+}
+#if defined(_MSC_VER)
+#pragma warning (pop)
+#endif
+
+#include <modules/ffmpeg/producer/muxer/frame_muxer.h>
+#include <modules/ffmpeg/producer/util/util.h>
+
#include <asmlib.h>
#include <queue>
}
};
+// Derives a progressive variant of the given format description. Progressive
+// inputs (field_count == 1) are returned unchanged; interlaced inputs get
+// their frame/time rates doubled and the audio cadence recomputed for the new
+// framerate. The argument is taken by value and mutated locally.
+core::video_format_desc get_progressive_format(core::video_format_desc format_desc)
+{
+	if (format_desc.field_count != 1)
+	{
+		format_desc.framerate		*= 2;
+		format_desc.fps				*= 2.0;
+		format_desc.time_scale		*= 2;
+		format_desc.audio_cadence	= core::find_audio_cadence(format_desc.framerate);
+		format_desc.field_count		= 1;
+	}
+
+	return format_desc;
+}
+
class channel_producer : public core::frame_producer_base
{
core::monitor::subject monitor_subject_;
const core::video_format_desc output_format_desc_;
const spl::shared_ptr<channel_consumer> consumer_;
core::constraints pixel_constraints_;
+ ffmpeg::frame_muxer muxer_;
std::queue<core::draw_frame> frame_buffer_;
: frame_factory_(dependecies.frame_factory)
, output_format_desc_(dependecies.format_desc)
, consumer_(spl::make_shared<channel_consumer>(frames_delay))
+ , muxer_(
+ channel->video_format_desc().framerate,
+ { ffmpeg::create_input_pad(channel->video_format_desc(), channel->audio_channel_layout().num_channels) },
+ dependecies.frame_factory,
+ get_progressive_format(channel->video_format_desc()),
+ channel->audio_channel_layout(),
+ L"",
+ false,
+ true)
{
pixel_constraints_.width.set(output_format_desc_.width);
pixel_constraints_.height.set(output_format_desc_.height);
core::draw_frame receive_impl() override
{
+		// Pull frames from the channel consumer and feed them through the
+		// frame muxer until it can emit one output frame.
-		auto format_desc = consumer_->get_video_format_desc();
-
-		if(frame_buffer_.size() > 0)
+		if (!muxer_.video_ready() || !muxer_.audio_ready())
{
-			auto frame = frame_buffer_.front();
-			frame_buffer_.pop();
-			return frame;
+			auto read_frame = consumer_->receive();
+
+			// Nothing available from the channel yet — report late and retry
+			// on the next tick.
+			if (read_frame == core::const_frame::empty() || read_frame.image_data().empty())
+				return core::draw_frame::late();
+
+			auto video_frame = ffmpeg::create_frame();
+
+			// NOTE(review): the AVFrame borrows read_frame's BGRA bytes (no
+			// copy); assumes frame_muxer::push copies or retains the data
+			// before read_frame is destroyed at the end of this call — TODO
+			// confirm against frame_muxer.
+			video_frame->data[0] = const_cast<uint8_t*>(read_frame.image_data().begin());
+			video_frame->linesize[0] = static_cast<int>(read_frame.width()) * 4;
+			video_frame->format = AVPixelFormat::AV_PIX_FMT_BGRA;
+			video_frame->width = static_cast<int>(read_frame.width());
+			video_frame->height = static_cast<int>(read_frame.height());
+			// Interlacing flags are taken from the consumer's current format
+			// so the muxer can deinterlace when required.
+			video_frame->interlaced_frame = consumer_->get_video_format_desc().field_mode != core::field_mode::progressive;
+			video_frame->top_field_first = consumer_->get_video_format_desc().field_mode == core::field_mode::upper ? 1 : 0;
+			video_frame->key_frame = 1;
+
+			muxer_.push(video_frame);
+			// Audio is pushed as a single stream: one buffer copied from the
+			// consumed frame's audio data.
+			muxer_.push(
+			{
+				std::make_shared<core::mutable_audio_buffer>(
+						read_frame.audio_data().begin(),
+						read_frame.audio_data().end())
+			});
}
-		auto read_frame = consumer_->receive();
-		if(read_frame == core::const_frame::empty() || read_frame.image_data().empty())
-			return core::draw_frame::late();
-
-		core::pixel_format_desc desc;
-		desc.format = core::pixel_format::bgra;
-		desc.planes.push_back(core::pixel_format_desc::plane(format_desc.width, format_desc.height, 4));
-		auto frame = frame_factory_->create_frame(this, desc, consumer_->get_audio_channel_layout());
-
-		frame.audio_data().reserve(read_frame.audio_data().size());
-		boost::copy(read_frame.audio_data(), std::back_inserter(frame.audio_data()));
+		auto frame = muxer_.poll();
-		fast_memcpy(frame.image_data().begin(), read_frame.image_data().begin(), read_frame.image_data().size());
-
-		frame_buffer_.push(core::draw_frame(std::move(frame)));
+		// The muxer may still need more input (e.g. while buffering fields
+		// for deinterlacing) — report late rather than block.
+		if (frame == core::draw_frame::empty())
+			return core::draw_frame::late();
-		return receive_impl();
+		return frame;
}
std::wstring name() const override
{
return monitor_subject_;
}
+
+	// Framerate of the frames this producer currently emits, as reported by
+	// the internal frame muxer.
+	boost::rational<int> current_framerate() const { return muxer_.out_framerate(); }
};
spl::shared_ptr<core::frame_producer> create_channel_producer(
const spl::shared_ptr<core::video_channel>& channel,
int frames_delay)
{
+ auto producer = spl::make_shared<channel_producer>(dependencies, channel, frames_delay);
+
return core::create_framerate_producer(
- spl::make_shared<channel_producer>(dependencies, channel, frames_delay),
- [channel] { return channel->video_format_desc().framerate; } ,
+ producer,
+ [producer] { return producer->current_framerate(); },
dependencies.format_desc.framerate,
dependencies.format_desc.field_mode,
dependencies.format_desc.audio_cadence);