#include "../ffmpeg_error.h"
#include "../producer/util/util.h"
+#include "../producer/filter/filter.h"
+#include "../producer/filter/audio_filter.h"
#include <common/except.h>
#include <common/executor.h>
namespace caspar { namespace ffmpeg { namespace {
-class ffmpeg_consumer
+// Constrain |sink| (an avfilter buffersink) to emit exactly one pixel format.
+void set_pixel_format(AVFilterContext* sink, AVPixelFormat pix_fmt)
{
+// The av_opt_set_int_list macro expansion triggers MSVC warning C4245
+// (signed/unsigned conversion); suppress it locally around the call.
+#pragma warning (push)
+#pragma warning (disable : 4245)
+
+ // av_opt_set_int_list expects a list terminated with AV_PIX_FMT_NONE.
+ FF(av_opt_set_int_list(
+ sink,
+ "pix_fmts",
+ std::vector<AVPixelFormat>({ pix_fmt, AVPixelFormat::AV_PIX_FMT_NONE }).data(),
+ -1,
+ AV_OPT_SEARCH_CHILDREN));
+
+#pragma warning (pop)
+}
+
+// Amend the video filtergraph string and the sink pixel format so the output
+// satisfies codec-specific constraints. Currently only DV (AV_CODEC_ID_DVVIDEO)
+// needs adjusting; for all other codecs this is a no-op. |filter| is modified
+// in place.
+void adjust_video_filter(const AVCodec& codec, const core::video_format_desc& in_format, AVFilterContext* sink, std::string& filter)
+{
+ switch (codec.id)
+ {
+ case AV_CODEC_ID_DVVIDEO:
+ // Crop NTSC input down to the 720x480 active raster expected by DV.
+ if (in_format.format == core::video_format::ntsc)
+ filter = u8(append_filter(u16(filter), L"crop=720:480:0:2"));
+
+ // Pixel format selection: 4:1:1 for NTSC, 4:2:0 for PAL, 4:2:2 for
+ // everything else (matching DV chroma-subsampling conventions).
+ if (in_format.format == core::video_format::ntsc)
+ set_pixel_format(sink, AVPixelFormat::AV_PIX_FMT_YUV411P);
+ else if (in_format.format == core::video_format::pal)
+ set_pixel_format(sink, AVPixelFormat::AV_PIX_FMT_YUV420P);
+ else
+ set_pixel_format(sink, AVPixelFormat::AV_PIX_FMT_YUV422P);
+
+ // Scale HD formats to the narrower rasters DV encodes
+ // (1280/1440-wide for 1080, 960-wide for 720).
+ if (in_format.height == 1080)
+ filter = u8(append_filter(u16(filter), in_format.duration == 1001
+ ? L"scale=1280:1080"
+ : L"scale=1440:1080"));
+ else if (in_format.height == 720)
+ filter = u8(append_filter(u16(filter), L"scale=960:720"));
+
+ break;
+ }
+}
+
+// Install per-codec encoder defaults (bit rates, x264 private options) for
+// codecs whose stock FFmpeg defaults are unsuitable here. Codecs not listed
+// keep whatever the caller/FFmpeg already configured.
+void setup_codec_defaults(AVCodecContext& encoder)
+{
+ static const int MEGABIT = 1000000;
+
+ switch (encoder.codec_id)
+ {
+ case AV_CODEC_ID_DNXHD:
+ // Fixed 220 Mbit/s target for DNxHD.
+ encoder.bit_rate = 220 * MEGABIT;
+
+ break;
+ case AV_CODEC_ID_PRORES:
+ // Lower rate for sub-HD rasters, full rate at 1280 wide and above.
+ encoder.bit_rate = encoder.width < 1280
+ ? 63 * MEGABIT
+ : 220 * MEGABIT;
+
+ break;
+ case AV_CODEC_ID_H264:
+ // x264 private options: prioritize encode speed and decode speed;
+ // crf 5 yields near-lossless quality at the cost of large files.
+ av_opt_set(encoder.priv_data, "preset", "ultrafast", 0);
+ av_opt_set(encoder.priv_data, "tune", "fastdecode", 0);
+ av_opt_set(encoder.priv_data, "crf", "5", 0);
+
+ break;
+ }
+}
+
+// Whether the output container is assumed unable to carry pcm_s24le audio.
+// Used when picking a default audio codec: containers returning true fall
+// back to the muxer's own default audio codec instead of 24-bit PCM.
+bool is_pcm_s24le_not_supported(const AVFormatContext& container)
+{
+ auto name = std::string(container.oformat->name);
+
+ // NOTE(review): mp4 and dv are treated as PCM-incapable here — confirm
+ // against the muxer capabilities if more containers need excluding.
+ if (name == "mp4" || name == "dv")
+ return true;
+
+ return false;
+}
+
+// Copy a sentinel-terminated C array (the convention used by FFmpeg for e.g.
+// AVCodec::sample_fmts and AVCodec::channel_layouts) into a std::vector,
+// static_cast-ing each element to Out. A null pointer yields an empty vector;
+// the terminator itself is not included in the result.
+template<typename Out, typename In>
+std::vector<Out> from_terminated_array(const In* array, In terminator)
+{
+ std::vector<Out> result;
+
+ while (array != nullptr && *array != terminator)
+ {
+ In val = *array;
+ Out casted = static_cast<Out>(val);
+
+ result.push_back(casted);
+ ++array;
+ }
+
+ return result;
+}
+
+class ffmpeg_consumer
+{
private:
const spl::shared_ptr<diagnostics::graph> graph_;
core::monitor::subject subject_;
- boost::filesystem::path path_;
+ std::string path_;
+ boost::filesystem::path full_path_;
std::map<std::string, std::string> options_;
+ bool mono_streams_;
core::video_format_desc in_video_format_;
core::audio_channel_layout in_channel_layout_ = core::audio_channel_layout::invalid();
tbb::atomic<bool> abort_request_;
std::shared_ptr<AVStream> video_st_;
- std::shared_ptr<AVStream> audio_st_;
+ std::vector<std::shared_ptr<AVStream>> audio_sts_;
- std::int64_t video_pts_;
- std::int64_t audio_pts_;
+ std::int64_t video_pts_ = 0;
+ std::int64_t audio_pts_ = 0;
- AVFilterContext* audio_graph_in_;
- AVFilterContext* audio_graph_out_;
- std::shared_ptr<AVFilterGraph> audio_graph_;
+ std::unique_ptr<audio_filter> audio_filter_;
+ // TODO: use the existing avfilter abstraction for video as well
AVFilterContext* video_graph_in_;
AVFilterContext* video_graph_out_;
std::shared_ptr<AVFilterGraph> video_graph_;
ffmpeg_consumer(
std::string path,
- std::string options)
+ std::string options,
+ bool mono_streams)
: path_(path)
- , video_pts_(0)
- , audio_pts_(0)
+ , full_path_(path)
+ , mono_streams_(mono_streams)
, audio_encoder_executor_(print() + L" audio_encoder")
, video_encoder_executor_(print() + L" video_encoder")
, write_executor_(print() + L" io")
audio_encoder_executor_.join();
video_graph_.reset();
- audio_graph_.reset();
+ audio_filter_.reset();
video_st_.reset();
- audio_st_.reset();
+ audio_sts_.clear();
write_packet(nullptr, nullptr);
static boost::regex prot_exp("^.+:.*" );
if(!boost::regex_match(
- path_.string(),
+ path_,
prot_exp))
{
- if(!path_.is_complete())
+ if(!full_path_.is_complete())
{
- path_ =
+ full_path_ =
u8(
env::media_folder()) +
- path_.string();
+ path_;
}
- if(boost::filesystem::exists(path_))
- boost::filesystem::remove(path_);
+ if(boost::filesystem::exists(full_path_))
+ boost::filesystem::remove(full_path_);
+
+ boost::filesystem::create_directories(full_path_.parent_path());
}
graph_->set_color("frame-time", diagnostics::color(0.1f, 1.0f, 0.1f));
&oc,
nullptr,
oformat_name && !oformat_name->empty() ? oformat_name->c_str() : nullptr,
- path_.string().c_str()));
+ full_path_.string().c_str()));
oc_.reset(
oc,
const auto audio_codec =
audio_codec_name
? avcodec_find_encoder_by_name(audio_codec_name->c_str())
- : avcodec_find_encoder(oc_->oformat->audio_codec);
+ : (is_pcm_s24le_not_supported(*oc_)
+ ? avcodec_find_encoder(oc_->oformat->audio_codec)
+ : avcodec_find_encoder_by_name("pcm_s24le"));
if (!video_codec)
CASPAR_THROW_EXCEPTION(user_error() << msg_info(
video_st_ = open_encoder(
*video_codec,
- video_options);
+ video_options,
+ 0);
- audio_st_ = open_encoder(
- *audio_codec,
- audio_options);
+ for (int i = 0; i < audio_filter_->get_num_output_pads(); ++i)
+ audio_sts_.push_back(open_encoder(
+ *audio_codec,
+ audio_options,
+ i));
auto it = options_.begin();
while(it != options_.end())
{
FF(avio_open2(
&oc_->pb,
- path_.string().c_str(),
+ full_path_.string().c_str(),
AVIO_FLAG_WRITE,
&oc_->interrupt_callback,
&av_opts));
catch(...)
{
video_st_.reset();
- audio_st_.reset();
+ audio_sts_.clear();
oc_.reset();
throw;
}
std::wstring print() const
{
- return L"ffmpeg_consumer[" + u16(path_.string()) + L"]";
+ return L"ffmpeg_consumer[" + u16(path_) + L"]";
}
int64_t presentation_frame_age_millis() const
std::shared_ptr<AVStream> open_encoder(
const AVCodec& codec,
std::map<std::string,
- std::string>& options)
+ std::string>& options,
+ int stream_number_for_media_type)
{
auto st =
avformat_new_stream(
&codec);
if (!st)
- CASPAR_THROW_EXCEPTION(caspar_exception() << msg_info("Could not allocate video-stream.") << boost::errinfo_api_function("av_new_stream"));
+ CASPAR_THROW_EXCEPTION(caspar_exception() << msg_info("Could not allocate video-stream.") << boost::errinfo_api_function("avformat_new_stream"));
auto enc = st->codec;
}
case AVMEDIA_TYPE_AUDIO:
{
- enc->time_base = audio_graph_out_->inputs[0]->time_base;
- enc->sample_fmt = static_cast<AVSampleFormat>(audio_graph_out_->inputs[0]->format);
- enc->sample_rate = audio_graph_out_->inputs[0]->sample_rate;
- enc->channel_layout = audio_graph_out_->inputs[0]->channel_layout;
- enc->channels = audio_graph_out_->inputs[0]->channels;
+ enc->time_base = audio_filter_->get_output_pad_info(stream_number_for_media_type).time_base;
+ enc->sample_fmt = static_cast<AVSampleFormat>(audio_filter_->get_output_pad_info(stream_number_for_media_type).format);
+ enc->sample_rate = audio_filter_->get_output_pad_info(stream_number_for_media_type).sample_rate;
+ enc->channel_layout = audio_filter_->get_output_pad_info(stream_number_for_media_type).channel_layout;
+ enc->channels = audio_filter_->get_output_pad_info(stream_number_for_media_type).channels;
break;
}
}
+ setup_codec_defaults(*enc);
+
if(oc_->oformat->flags & AVFMT_GLOBALHEADER)
enc->flags |= CODEC_FLAG_GLOBAL_HEADER;
if(enc->codec_type == AVMEDIA_TYPE_AUDIO && !(codec.capabilities & CODEC_CAP_VARIABLE_FRAME_SIZE))
{
CASPAR_ASSERT(enc->frame_size > 0);
- av_buffersink_set_frame_size(audio_graph_out_,
- enc->frame_size);
+ audio_filter_->set_guaranteed_output_num_samples_per_frame(
+ stream_number_for_media_type,
+ enc->frame_size);
}
return std::shared_ptr<AVStream>(st, [this](AVStream* st)
void configure_video_filters(
const AVCodec& codec,
- const std::string& filtergraph)
+ std::string filtergraph)
{
video_graph_.reset(
avfilter_graph_alloc(),
const auto vsrc_options = (boost::format("video_size=%1%x%2%:pix_fmt=%3%:time_base=%4%/%5%:pixel_aspect=%6%/%7%:frame_rate=%8%/%9%")
% in_video_format_.width % in_video_format_.height
- % AV_PIX_FMT_BGRA
+ % AVPixelFormat::AV_PIX_FMT_BGRA
% in_video_format_.duration % in_video_format_.time_scale
% sample_aspect_ratio.numerator() % sample_aspect_ratio.denominator()
% in_video_format_.time_scale % in_video_format_.duration).str();
#pragma warning (pop)
+ adjust_video_filter(codec, in_video_format_, filt_vsink, filtergraph);
+
configure_filtergraph(
*video_graph_,
filtergraph,
void configure_audio_filters(
const AVCodec& codec,
- const std::string& filtergraph)
+ std::string filtergraph)
{
- audio_graph_.reset(
- avfilter_graph_alloc(),
- [](AVFilterGraph* p)
- {
- avfilter_graph_free(&p);
- });
-
- audio_graph_->nb_threads = boost::thread::hardware_concurrency()/2;
- audio_graph_->thread_type = AVFILTER_THREAD_SLICE;
-
- const auto asrc_options = (boost::format("sample_rate=%1%:sample_fmt=%2%:channels=%3%:time_base=%4%/%5%:channel_layout=%6%")
- % in_video_format_.audio_sample_rate
- % av_get_sample_fmt_name(AV_SAMPLE_FMT_S32)
- % in_channel_layout_.num_channels
- % 1 % in_video_format_.audio_sample_rate
- % boost::io::group(
- std::hex,
- std::showbase,
- av_get_default_channel_layout(in_channel_layout_.num_channels))).str();
-
- AVFilterContext* filt_asrc = nullptr;
- FF(avfilter_graph_create_filter(
- &filt_asrc,
- avfilter_get_by_name("abuffer"),
- "ffmpeg_consumer_abuffer",
- asrc_options.c_str(),
- nullptr,
- audio_graph_.get()));
-
- AVFilterContext* filt_asink = nullptr;
- FF(avfilter_graph_create_filter(
- &filt_asink,
- avfilter_get_by_name("abuffersink"),
- "ffmpeg_consumer_abuffersink",
- nullptr,
- nullptr,
- audio_graph_.get()));
-
-#pragma warning (push)
-#pragma warning (disable : 4245)
+ int num_output_pads = 1;
- FF(av_opt_set_int(
- filt_asink,
- "all_channel_counts",
- 1,
- AV_OPT_SEARCH_CHILDREN));
-
- FF(av_opt_set_int_list(
- filt_asink,
- "sample_fmts",
- codec.sample_fmts,
- -1,
- AV_OPT_SEARCH_CHILDREN));
+ if (mono_streams_)
+ {
+ num_output_pads = in_channel_layout_.num_channels;
+ }
- FF(av_opt_set_int_list(
- filt_asink,
- "channel_layouts",
- codec.channel_layouts,
- -1,
- AV_OPT_SEARCH_CHILDREN));
+ if (num_output_pads > 1)
+ {
+ std::string splitfilter = "[a:0]channelsplit=channel_layout=";
- FF(av_opt_set_int_list(
- filt_asink,
- "sample_rates" ,
- codec.supported_samplerates,
- -1,
- AV_OPT_SEARCH_CHILDREN));
+ splitfilter += (boost::format("0x%|1$x|") % create_channel_layout_bitmask(in_channel_layout_.num_channels)).str();
-#pragma warning (pop)
+ for (int i = 0; i < num_output_pads; ++i)
+ splitfilter += "[aout:" + boost::lexical_cast<std::string>(i) + "]";
- configure_filtergraph(
- *audio_graph_,
- filtergraph,
- *filt_asrc,
- *filt_asink);
-
- audio_graph_in_ = filt_asrc;
- audio_graph_out_ = filt_asink;
+ filtergraph = u8(append_filter(u16(filtergraph), u16(splitfilter)));
+ }
- CASPAR_LOG(info)
- << u16(std::string("\n")
- + avfilter_graph_dump(
- audio_graph_.get(),
- nullptr));
+ std::vector<audio_output_pad> output_pads(
+ num_output_pads,
+ audio_output_pad(
+ from_terminated_array<int>( codec.supported_samplerates, 0),
+ from_terminated_array<AVSampleFormat>( codec.sample_fmts, AVSampleFormat::AV_SAMPLE_FMT_NONE),
+ from_terminated_array<uint64_t>( codec.channel_layouts, 0ull)));
+
+ audio_filter_.reset(new audio_filter(
+ { audio_input_pad(
+ boost::rational<int>(1, in_video_format_.audio_sample_rate),
+ in_video_format_.audio_sample_rate,
+ AVSampleFormat::AV_SAMPLE_FMT_S32,
+ create_channel_layout_bitmask(in_channel_layout_.num_channels)) },
+ output_pads,
+ filtergraph));
}
void configure_filtergraph(
AVFilterInOut* outputs = nullptr;
AVFilterInOut* inputs = nullptr;
- try
+ if(!filtergraph.empty())
{
- if(!filtergraph.empty())
- {
- outputs = avfilter_inout_alloc();
- inputs = avfilter_inout_alloc();
+ outputs = avfilter_inout_alloc();
+ inputs = avfilter_inout_alloc();
+ try
+ {
CASPAR_VERIFY(outputs && inputs);
- outputs->name = av_strdup("in");
- outputs->filter_ctx = &source_ctx;
- outputs->pad_idx = 0;
- outputs->next = nullptr;
+ outputs->name = av_strdup("in");
+ outputs->filter_ctx = &source_ctx;
+ outputs->pad_idx = 0;
+ outputs->next = nullptr;
- inputs->name = av_strdup("out");
- inputs->filter_ctx = &sink_ctx;
- inputs->pad_idx = 0;
- inputs->next = nullptr;
+ inputs->name = av_strdup("out");
+ inputs->filter_ctx = &sink_ctx;
+ inputs->pad_idx = 0;
+ inputs->next = nullptr;
+ }
+ catch (...)
+ {
+ avfilter_inout_free(&outputs);
+ avfilter_inout_free(&inputs);
+ throw;
+ }
- FF(avfilter_graph_parse(
+ FF(avfilter_graph_parse(
&graph,
filtergraph.c_str(),
inputs,
outputs,
nullptr));
- }
- else
- {
- FF(avfilter_link(
+ }
+ else
+ {
+ FF(avfilter_link(
&source_ctx,
0,
&sink_ctx,
0));
- }
+ }
- FF(avfilter_graph_config(
+ FF(avfilter_graph_config(
&graph,
nullptr));
- }
- catch(...)
- {
- avfilter_inout_free(&outputs);
- avfilter_inout_free(&inputs);
- throw;
- }
}
void encode_video(core::const_frame frame_ptr, std::shared_ptr<void> token)
in_video_format_.width,
in_video_format_.height);
- src_av_frame->format = AV_PIX_FMT_BGRA;
- src_av_frame->width = in_video_format_.width;
- src_av_frame->height = in_video_format_.height;
- src_av_frame->sample_aspect_ratio.num = sample_aspect_ratio.numerator();
- src_av_frame->sample_aspect_ratio.den = sample_aspect_ratio.denominator();
- src_av_frame->pts = video_pts_;
+ src_av_frame->format = AVPixelFormat::AV_PIX_FMT_BGRA;
+ src_av_frame->width = in_video_format_.width;
+ src_av_frame->height = in_video_format_.height;
+ src_av_frame->sample_aspect_ratio.num = sample_aspect_ratio.numerator();
+ src_av_frame->sample_aspect_ratio.den = sample_aspect_ratio.denominator();
+ src_av_frame->pts = video_pts_;
video_pts_ += 1;
+ subject_
+ << core::monitor::message("/frame") % video_pts_
+ << core::monitor::message("/path") % path_
+ << core::monitor::message("/fps") % in_video_format_.fps;
+
FF(av_image_fill_arrays(
src_av_frame->data,
src_av_frame->linesize,
void encode_audio(core::const_frame frame_ptr, std::shared_ptr<void> token)
{
- if(!audio_st_)
+ if(audio_sts_.empty())
return;
- auto enc = audio_st_->codec;
-
if(frame_ptr != core::const_frame::empty())
{
auto src_av_frame = create_frame();
- src_av_frame->channels = in_channel_layout_.num_channels;
- src_av_frame->channel_layout = av_get_default_channel_layout(in_channel_layout_.num_channels);
- src_av_frame->sample_rate = in_video_format_.audio_sample_rate;
- src_av_frame->nb_samples = static_cast<int>(frame_ptr.audio_data().size()) / src_av_frame->channels;
- src_av_frame->format = AV_SAMPLE_FMT_S32;
- src_av_frame->pts = audio_pts_;
+ src_av_frame->channels = in_channel_layout_.num_channels;
+ src_av_frame->channel_layout = create_channel_layout_bitmask(in_channel_layout_.num_channels);
+ src_av_frame->sample_rate = in_video_format_.audio_sample_rate;
+ src_av_frame->nb_samples = static_cast<int>(frame_ptr.audio_data().size()) / src_av_frame->channels;
+ src_av_frame->format = AV_SAMPLE_FMT_S32;
+ src_av_frame->pts = audio_pts_;
audio_pts_ += src_av_frame->nb_samples;
static_cast<AVSampleFormat>(src_av_frame->format),
16));
- FF(av_buffersrc_add_frame(
- audio_graph_in_,
- src_av_frame.get()));
+ audio_filter_->push(0, src_av_frame);
}
- int ret = 0;
-
- while(ret >= 0)
+ for (int pad_id = 0; pad_id < audio_filter_->get_num_output_pads(); ++pad_id)
{
- auto filt_frame = create_frame();
+ for (auto filt_frame : audio_filter_->poll_all(pad_id))
+ {
+ audio_encoder_executor_.begin_invoke([=]
+ {
+ encode_av_frame(
+ *audio_sts_.at(pad_id),
+ avcodec_encode_audio2,
+ filt_frame,
+ token);
- ret = av_buffersink_get_frame(
- audio_graph_out_,
- filt_frame.get());
+ boost::this_thread::yield(); // TODO:
+ });
+ }
+ }
+ bool eof = frame_ptr == core::const_frame::empty();
+
+ if (eof)
+ {
audio_encoder_executor_.begin_invoke([=]
{
- if(ret == AVERROR_EOF)
+ for (int pad_id = 0; pad_id < audio_filter_->get_num_output_pads(); ++pad_id)
{
- if(enc->codec->capabilities & CODEC_CAP_DELAY)
+ auto enc = audio_sts_.at(pad_id)->codec;
+
+ if (enc->codec->capabilities & CODEC_CAP_DELAY)
{
- while(encode_av_frame(
- *audio_st_,
+ while (encode_av_frame(
+ *audio_sts_.at(pad_id),
avcodec_encode_audio2,
nullptr,
token))
}
}
}
- else if(ret != AVERROR(EAGAIN))
- {
- FF_RET(
- ret,
- "av_buffersink_get_frame");
-
- encode_av_frame(
- *audio_st_,
- avcodec_encode_audio2,
- filt_frame,
- token);
-
- boost::this_thread::yield(); // TODO:
- }
});
}
}
const std::string path_;
const std::string options_;
const bool separate_key_;
+ const bool mono_streams_;
const bool compatibility_mode_;
int consumer_index_offset_;
public:
- ffmpeg_consumer_proxy(const std::string& path, const std::string& options, bool separate_key, bool compatibility_mode)
+ ffmpeg_consumer_proxy(const std::string& path, const std::string& options, bool separate_key, bool mono_streams, bool compatibility_mode)
: path_(path)
, options_(options)
, separate_key_(separate_key)
+ , mono_streams_(mono_streams)
, compatibility_mode_(compatibility_mode)
, consumer_index_offset_(crc16(path))
{
if (consumer_)
CASPAR_THROW_EXCEPTION(invalid_operation() << msg_info("Cannot reinitialize ffmpeg-consumer."));
- consumer_.reset(new ffmpeg_consumer(path_, options_));
+ consumer_.reset(new ffmpeg_consumer(path_, options_, mono_streams_));
consumer_->initialize(format_desc, channel_layout);
if (separate_key_)
auto without_extension = u16(fill_file.parent_path().string() + "/" + fill_file.stem().string());
auto key_file = without_extension + L"_A" + u16(fill_file.extension().string());
- key_only_consumer_.reset(new ffmpeg_consumer(u8(key_file), options_));
+ key_only_consumer_.reset(new ffmpeg_consumer(u8(key_file), options_, mono_streams_));
key_only_consumer_->initialize(format_desc, channel_layout);
}
}
boost::property_tree::wptree info() const override
{
boost::property_tree::wptree info;
- info.add(L"type", L"ffmpeg");
- info.add(L"path", u16(path_));
- info.add(L"separate_key", separate_key_);
+
+ info.add(L"type", L"ffmpeg");
+ info.add(L"path", u16(path_));
+ info.add(L"separate_key", separate_key_);
+ info.add(L"mono_streams", mono_streams_);
+
return info;
}
return compatibility_mode_ ? 200 : 100000 + consumer_index_offset_;
}
- core::monitor::subject& monitor_output()
+ core::monitor::subject& monitor_output() override
{
return consumer_->monitor_output();
}
void describe_streaming_consumer(core::help_sink& sink, const core::help_repository& repo)
{
sink.short_description(L"For streaming/recording the contents of a channel using FFmpeg.");
- sink.syntax(L"FILE,STREAM [filename:string],[url:string] {-[ffmpeg_param1:string] [value1:string] {-[ffmpeg_param2:string] [value2:string] {...}}}");
+ sink.syntax(L"FILE,STREAM [filename:string],[url:string] {-[ffmpeg_param1:string] [value1:string] {-[ffmpeg_param2:string] [value2:string] {...}}} {[separate_key:SEPARATE_KEY]} {[mono_streams:MONO_STREAMS]}");
sink.para()->text(L"For recording or streaming the contents of a channel using FFmpeg");
sink.definitions()
- ->item(L"filename", L"The filename under the media folder including the extension (decides which kind of container format that will be used).")
- ->item(L"url", L"If the filename is given in the form of an URL a network stream will be created instead of a file on disk.")
- ->item(L"ffmpeg_paramX", L"A parameter supported by FFmpeg. For example vcodec or acodec etc.");
+ ->item(L"filename", L"The filename under the media folder including the extension (decides which kind of container format that will be used).")
+ ->item(L"url", L"If the filename is given in the form of an URL a network stream will be created instead of a file on disk.")
+ ->item(L"ffmpeg_paramX", L"A parameter supported by FFmpeg. For example vcodec or acodec etc.")
+ ->item(L"separate_key", L"If defined will create two files simultaneously -- One for fill and one for key (_A will be appended).")
+ ->item(L"mono_streams", L"If defined every audio channel will be written to its own audio stream.");
sink.para()->text(L"Examples:");
sink.example(L">> ADD 1 FILE output.mov -vcodec dnxhd");
sink.example(L">> ADD 1 FILE output.mov -vcodec prores");
sink.example(L">> ADD 1 FILE output.mov -vcodec dvvideo");
sink.example(L">> ADD 1 FILE output.mov -vcodec libx264 -preset ultrafast -tune fastdecode -crf 25");
sink.example(L">> ADD 1 FILE output.mov -vcodec dnxhd SEPARATE_KEY", L"for creating output.mov with fill and output_A.mov with key/alpha");
+ sink.example(L">> ADD 1 FILE output.mxf -vcodec dnxhd MONO_STREAMS", L"for creating output.mxf with every audio channel encoded in its own mono stream.");
sink.example(L">> ADD 1 STREAM udp://<client_ip_address>:9250 -format mpegts -vcodec libx264 -crf 25 -tune zerolatency -preset ultrafast",
L"for streaming over UDP instead of creating a local file.");
}
return core::frame_consumer::empty();
auto params2 = params;
- auto separate_key_it = std::find_if(params2.begin(), params2.end(), param_comparer(L"SEPARATE_KEY"));
- bool separate_key = false;
-
- if (separate_key_it != params2.end())
- {
- separate_key = true;
- params2.erase(separate_key_it);
- }
-
+ bool separate_key = get_and_consume_flag(L"SEPARATE_KEY", params2);
+ bool mono_streams = get_and_consume_flag(L"MONO_STREAMS", params2);
auto compatibility_mode = boost::iequals(params.at(0), L"FILE");
auto path = u8(params2.size() > 1 ? params2.at(1) : L"");
auto args = u8(boost::join(params2, L" "));
- return spl::make_shared<ffmpeg_consumer_proxy>(path, args, separate_key, compatibility_mode);
+ return spl::make_shared<ffmpeg_consumer_proxy>(path, args, separate_key, mono_streams, compatibility_mode);
}
spl::shared_ptr<core::frame_consumer> create_preconfigured_streaming_consumer(
u8(ptree_get<std::wstring>(ptree, L"path")),
u8(ptree.get<std::wstring>(L"args", L"")),
ptree.get<bool>(L"separate-key", false),
+ ptree.get<bool>(L"mono-streams", false),
false);
}