#include "audio/audio_decoder.h"
#include "video/video_decoder.h"
#include "muxer/frame_muxer.h"
+#include "filter/audio_filter.h"
#include <common/param.h>
#include <common/diagnostics/graph.h>
const std::wstring filename_;
const std::wstring path_relative_to_media_ = get_relative_or_original(filename_, env::media_folder());
- FFMPEG_Resource resource_type_;
-
const spl::shared_ptr<diagnostics::graph> graph_;
timer frame_timer_;
input input_;
std::unique_ptr<video_decoder> video_decoder_;
- std::unique_ptr<audio_decoder> audio_decoder_;
+ std::vector<std::unique_ptr<audio_decoder>> audio_decoders_;
std::unique_ptr<frame_muxer> muxer_;
const boost::rational<int> framerate_;
- const uint32_t start_;
- const uint32_t length_;
const bool thumbnail_mode_;
core::draw_frame last_frame_;
explicit ffmpeg_producer(
const spl::shared_ptr<core::frame_factory>& frame_factory,
const core::video_format_desc& format_desc,
- const std::wstring& filename,
- FFMPEG_Resource resource_type,
+ const std::wstring& url_or_file,
const std::wstring& filter,
bool loop,
- uint32_t start,
- uint32_t length,
+ uint32_t in,
+ uint32_t out,
bool thumbnail_mode,
const std::wstring& custom_channel_order,
const ffmpeg_options& vid_params)
- : filename_(filename)
- , resource_type_(resource_type)
+ : filename_(url_or_file)
, frame_factory_(frame_factory)
, initial_logger_disabler_(temporary_enable_quiet_logging_for_thread(thumbnail_mode))
- , input_(graph_, filename_, resource_type, loop, start, length, thumbnail_mode, vid_params)
+ , input_(graph_, url_or_file, loop, in, out, thumbnail_mode, vid_params)
, framerate_(read_framerate(*input_.context(), format_desc.framerate))
- , start_(start)
- , length_(length)
, thumbnail_mode_(thumbnail_mode)
, last_frame_(core::draw_frame::empty())
- , frame_number_(0)
{
graph_->set_color("frame-time", diagnostics::color(0.1f, 1.0f, 0.1f));
graph_->set_color("underflow", diagnostics::color(0.6f, 0.3f, 0.9f));
}
auto channel_layout = core::audio_channel_layout::invalid();
+ std::vector<audio_input_pad> audio_input_pads;
if (!thumbnail_mode_)
{
- try
+ for (unsigned stream_index = 0; stream_index < input_.context()->nb_streams; ++stream_index)
{
- audio_decoder_.reset(new audio_decoder(input_.context(), format_desc.audio_sample_rate));
- channel_layout = get_audio_channel_layout(
- audio_decoder_->num_channels(),
- audio_decoder_->ffmpeg_channel_layout(),
- custom_channel_order);
- CASPAR_LOG(info) << print() << L" " << audio_decoder_->print();
+ auto stream = input_.context()->streams[stream_index];
+
+ if (stream->codec->codec_type != AVMediaType::AVMEDIA_TYPE_AUDIO)
+ continue;
+
+ try
+ {
+ audio_decoders_.push_back(std::unique_ptr<audio_decoder>(new audio_decoder(stream_index, input_.context(), format_desc.audio_sample_rate)));
+ audio_input_pads.emplace_back(
+ boost::rational<int>(1, format_desc.audio_sample_rate),
+ format_desc.audio_sample_rate,
+ AVSampleFormat::AV_SAMPLE_FMT_S32,
+ audio_decoders_.back()->ffmpeg_channel_layout());
+ CASPAR_LOG(info) << print() << L" " << audio_decoders_.back()->print();
+ }
+ catch (averror_stream_not_found&)
+ {
+ //CASPAR_LOG(warning) << print() << " No audio-stream found. Running without audio.";
+ }
+ catch (...)
+ {
+ CASPAR_LOG_CURRENT_EXCEPTION();
+ CASPAR_LOG(warning) << print() << " Failed to open audio-stream. Running without audio.";
+ }
}
- catch (averror_stream_not_found&)
+
+ if (audio_decoders_.size() == 1)
{
- //CASPAR_LOG(warning) << print() << " No audio-stream found. Running without audio.";
+ channel_layout = get_audio_channel_layout(
+ audio_decoders_.at(0)->num_channels(),
+ audio_decoders_.at(0)->ffmpeg_channel_layout(),
+ custom_channel_order);
}
- catch (...)
+ else if (audio_decoders_.size() > 1)
{
- CASPAR_LOG_CURRENT_EXCEPTION();
- CASPAR_LOG(warning) << print() << " Failed to open audio-stream. Running without audio.";
+ auto num_channels = cpplinq::from(audio_decoders_)
+ .select(std::mem_fn(&audio_decoder::num_channels))
+ .aggregate(0, std::plus<int>());
+ auto ffmpeg_channel_layout = av_get_default_channel_layout(num_channels);
+
+ channel_layout = get_audio_channel_layout(
+ num_channels,
+ ffmpeg_channel_layout,
+ custom_channel_order);
}
}
- if (!video_decoder_ && !audio_decoder_)
+ if (!video_decoder_ && audio_decoders_.empty())
CASPAR_THROW_EXCEPTION(averror_stream_not_found() << msg_info("No streams found"));
- muxer_.reset(new frame_muxer(framerate_, frame_factory, format_desc, channel_layout, filter, true));
+ muxer_.reset(new frame_muxer(framerate_, std::move(audio_input_pads), frame_factory, format_desc, channel_layout, filter, true));
+
+ if (auto nb_frames = file_nb_frames())
+ {
+ out = std::min(out, nb_frames);
+ input_.out(out);
+ }
}
// frame_producer
send_osc();
return std::make_pair(last_frame(), -1);
}
- else if (resource_type_ == FFMPEG_Resource::FFMPEG_FILE)
+ else if (!is_url())
{
graph_->set_tag(diagnostics::tag_severity::WARNING, "underflow");
send_osc();
- return std::make_pair(core::draw_frame::late(), -1);
+ return std::make_pair(last_frame_, -1);
}
else
{
send_osc();
- return std::make_pair(last_frame(), -1);
+ return std::make_pair(last_frame_, -1);
}
}
return frame;
}
+ bool is_url() const
+ {
+ return boost::contains(filename_, L"://");
+ }
+
void send_osc()
{
double fps = static_cast<double>(framerate_.numerator()) / static_cast<double>(framerate_.denominator());
// therefore no seeking should be necessary for the first frame.
{
input_.seek(file_position > 1 ? file_position - 2: file_position).get();
- boost::this_thread::sleep(boost::posix_time::milliseconds(40));
+ boost::this_thread::sleep_for(boost::chrono::milliseconds(40));
}
for (int i = 0; i < NUM_RETRIES; ++i)
{
- boost::this_thread::sleep(boost::posix_time::milliseconds(40));
+ boost::this_thread::sleep_for(boost::chrono::milliseconds(40));
auto frame = render_frame();
{
CASPAR_LOG(trace) << print() << L" adjusting to " << adjusted_seek;
input_.seek(static_cast<uint32_t>(adjusted_seek) - 1).get();
- boost::this_thread::sleep(boost::posix_time::milliseconds(40));
+ boost::this_thread::sleep_for(boost::chrono::milliseconds(40));
}
else
return frame.first;
if (grid < 1)
{
CASPAR_LOG(error) << L"configuration/thumbnails/video-grid cannot be less than 1";
- BOOST_THROW_EXCEPTION(caspar_exception() << msg_info("configuration/thumbnails/video-grid cannot be less than 1"));
+ CASPAR_THROW_EXCEPTION(caspar_exception() << msg_info("configuration/thumbnails/video-grid cannot be less than 1"));
}
if (grid == 1)
uint32_t nb_frames() const override
{
- if (resource_type_ == FFMPEG_Resource::FFMPEG_DEVICE || resource_type_ == FFMPEG_Resource::FFMPEG_STREAM || input_.loop())
+ if (is_url() || input_.loop())
return std::numeric_limits<uint32_t>::max();
- uint32_t nb_frames = file_nb_frames();
-
- nb_frames = std::min(length_, nb_frames - start_);
- nb_frames = muxer_->calc_nb_frames(nb_frames);
+ auto nb_frames = std::min(input_.out(), file_nb_frames());
+ if (nb_frames >= input_.in())
+ nb_frames -= input_.in();
+ else
+ nb_frames = 0;
- return nb_frames;
+ return muxer_->calc_nb_frames(nb_frames);
}
uint32_t file_nb_frames() const
{
    // The video stream alone determines the file's frame count;
    // audio-only files report zero here.
    if (!video_decoder_)
        return 0;

    return video_decoder_->nb_frames();
}
std::future<std::wstring> call(const std::vector<std::wstring>& params) override
{
- static const boost::wregex loop_exp(LR"(LOOP\s*(?<VALUE>\d?)?)", boost::regex::icase);
- static const boost::wregex seek_exp(LR"(SEEK\s+(?<VALUE>\d+))", boost::regex::icase);
- static const boost::wregex length_exp(LR"(LENGTH\s+(?<VALUE>\d+)?)", boost::regex::icase);
- static const boost::wregex start_exp(LR"(START\\s+(?<VALUE>\\d+)?)", boost::regex::icase);
-
- auto param = boost::algorithm::join(params, L" ");
-
std::wstring result;
- boost::wsmatch what;
- if(boost::regex_match(param, what, loop_exp))
+ std::wstring cmd = params.at(0);
+ std::wstring value;
+ if (params.size() > 1)
+ value = params.at(1);
+
+ if (boost::iequals(cmd, L"loop"))
{
- auto value = what["VALUE"].str();
if (!value.empty())
input_.loop(boost::lexical_cast<bool>(value));
result = boost::lexical_cast<std::wstring>(input_.loop());
}
- else if(boost::regex_match(param, what, seek_exp))
+ else if (boost::iequals(cmd, L"in") || boost::iequals(cmd, L"start"))
{
- auto value = what["VALUE"].str();
- input_.seek(boost::lexical_cast<uint32_t>(value));
+ if (!value.empty())
+ input_.in(boost::lexical_cast<uint32_t>(value));
+ result = boost::lexical_cast<std::wstring>(input_.in());
+ }
+ else if (boost::iequals(cmd, L"out"))
+ {
+ if (!value.empty())
+ input_.out(boost::lexical_cast<uint32_t>(value));
+ result = boost::lexical_cast<std::wstring>(input_.out());
}
- else if(boost::regex_match(param, what, length_exp))
+ else if (boost::iequals(cmd, L"length"))
{
- auto value = what["VALUE"].str();
- if(!value.empty())
+ if (!value.empty())
input_.length(boost::lexical_cast<uint32_t>(value));
result = boost::lexical_cast<std::wstring>(input_.length());
}
- else if(boost::regex_match(param, what, start_exp))
+ else if (boost::iequals(cmd, L"seek") && !value.empty())
{
- auto value = what["VALUE"].str();
- if(!value.empty())
- input_.start(boost::lexical_cast<uint32_t>(value));
- result = boost::lexical_cast<std::wstring>(input_.start());
+ auto nb_frames = file_nb_frames();
+
+ int64_t seek;
+ if (boost::iequals(value, L"rel"))
+ seek = file_frame_number();
+ else if (boost::iequals(value, L"in"))
+ seek = input_.in();
+ else if (boost::iequals(value, L"out"))
+ seek = input_.out();
+ else if (boost::iequals(value, L"end"))
+ seek = nb_frames;
+ else
+ seek = boost::lexical_cast<int64_t>(value);
+
+ if (params.size() > 2)
+ seek += boost::lexical_cast<int64_t>(params.at(2));
+
+ if (seek < 0)
+ seek = 0;
+ else if (seek >= nb_frames)
+ seek = nb_frames - 1;
+
+ input_.seek(static_cast<uint32_t>(seek));
}
else
CASPAR_THROW_EXCEPTION(invalid_argument());
std::wstring print() const override
{
- return L"ffmpeg[" + boost::filesystem::path(filename_).filename().wstring() + L"|"
+ return L"ffmpeg[" + (is_url() ? filename_ : boost::filesystem::path(filename_).filename().wstring()) + L"|"
+ print_mode() + L"|"
+ boost::lexical_cast<std::wstring>(file_frame_number_) + L"/" + boost::lexical_cast<std::wstring>(file_nb_frames()) + L"]";
}
!video_decoder_->is_progressive()) : L"";
}
+ bool all_audio_decoders_ready() const
+ {
+ for (auto& audio_decoder : audio_decoders_)
+ if (!audio_decoder->ready())
+ return false;
+
+ return true;
+ }
+
void try_decode_frame()
{
std::shared_ptr<AVPacket> pkt;
- for (int n = 0; n < 32 && ((video_decoder_ && !video_decoder_->ready()) || (audio_decoder_ && !audio_decoder_->ready())) && input_.try_pop(pkt); ++n)
+ for (int n = 0; n < 32 && ((video_decoder_ && !video_decoder_->ready()) || !all_audio_decoders_ready()) && input_.try_pop(pkt); ++n)
{
if (video_decoder_)
video_decoder_->push(pkt);
- if (audio_decoder_)
- audio_decoder_->push(pkt);
+
+ for (auto& audio_decoder : audio_decoders_)
+ audio_decoder->push(pkt);
}
- std::shared_ptr<AVFrame> video;
- std::shared_ptr<core::mutable_audio_buffer> audio;
+ std::shared_ptr<AVFrame> video;
+ std::vector<std::shared_ptr<core::mutable_audio_buffer>> audio;
tbb::parallel_invoke(
[&]
{
- if (!muxer_->video_ready() && video_decoder_)
- video = video_decoder_->poll();
+ do
+ {
+ if (!muxer_->video_ready() && video_decoder_)
+ {
+ video = video_decoder_->poll();
+ if (video)
+ break;
+ }
+ else
+ break;
+ } while (!video_decoder_->empty());
},
[&]
{
- if (!muxer_->audio_ready() && audio_decoder_)
- audio = audio_decoder_->poll();
+ if (!muxer_->audio_ready())
+ {
+ for (auto& audio_decoder : audio_decoders_)
+ {
+ auto audio_for_stream = audio_decoder->poll();
+
+ if (audio_for_stream)
+ audio.push_back(audio_for_stream);
+ }
+ }
});
muxer_->push(video);
muxer_->push(audio);
- if (!audio_decoder_)
+ if (audio_decoders_.empty())
{
- if(video == flush_video())
- muxer_->push(flush_audio());
- else if(!muxer_->audio_ready())
- muxer_->push(empty_audio());
+ if (video == flush_video())
+ muxer_->push({ flush_audio() });
+ else if (!muxer_->audio_ready())
+ muxer_->push({ empty_audio() });
}
if (!video_decoder_)
{
- if(audio == flush_audio())
+ if (boost::count_if(audio, [](std::shared_ptr<core::mutable_audio_buffer> a) { return a == flush_audio(); }) > 0)
muxer_->push(flush_video());
- else if(!muxer_->video_ready())
+ else if (!muxer_->video_ready())
muxer_->push(empty_video());
}
uint32_t file_frame_number = 0;
file_frame_number = std::max(file_frame_number, video_decoder_ ? video_decoder_->file_frame_number() : 0);
- //file_frame_number = std::max(file_frame_number, audio_decoder_ ? audio_decoder_->file_frame_number() : 0);
for (auto frame = muxer_->poll(); frame != core::draw_frame::empty(); frame = muxer_->poll())
- frame_buffer_.push(std::make_pair(frame, file_frame_number));
+ if (frame != core::draw_frame::empty())
+ frame_buffer_.push(std::make_pair(frame, file_frame_number));
}
bool audio_only() const
void describe_producer(core::help_sink& sink, const core::help_repository& repo)
{
sink.short_description(L"A producer for playing media files supported by FFmpeg.");
- sink.syntax(L"[clip:string] {[loop:LOOP]} {SEEK [start:int]} {LENGTH [start:int]} {FILTER [filter:string]} {CHANNEL_LAYOUT [channel_layout:string]}");
+ sink.syntax(L"[clip,url:string] {[loop:LOOP]} {IN,SEEK [in:int]} {OUT [out:int] | LENGTH [length:int]} {FILTER [filter:string]} {CHANNEL_LAYOUT [channel_layout:string]}");
sink.para()
->text(L"The FFmpeg Producer can play all media that FFmpeg can play, which includes many ")
->text(L"QuickTime video codec such as Animation, PNG, PhotoJPEG, MotionJPEG, as well as ")
->text(L"H.264, FLV, WMV and several audio codecs as well as uncompressed audio.");
sink.definitions()
->item(L"clip", L"The file without the file extension to play. It should reside under the media folder.")
- ->item(L"loop", L"Will cause the media file to loop between start and start + length")
- ->item(L"start", L"Optionally sets the start frame. 0 by default. If loop is specified this will be the frame where it starts over again.")
- ->item(L"length", L"Optionally sets the length of the clip. If not specified the clip will be played to the end. If loop is specified the file will jump to start position once this number of frames has been played.")
+ ->item(L"url", L"If clip contains :// it is instead treated as the URL parameter. The URL can either be any streaming protocol supported by FFmpeg, dshow://video={webcam_name} or v4l2://{video device}.")
+ ->item(L"loop", L"Will cause the media file to loop between in and out.")
+ ->item(L"in", L"Optionally sets the first frame. 0 by default. If loop is specified, this will be the frame where it starts over again.")
+ ->item(L"out", L"Optionally sets the last frame. If not specified the clip will be played to the end. If loop is specified, the file will jump to start position once it reaches the last frame.")
+ ->item(L"length", L"Optionally sets the length of the clip. Equivalent to OUT in + length.")
->item(L"filter", L"If specified, will be used as an FFmpeg video filter.")
->item(L"channel_layout",
L"Optionally override the automatically deduced audio channel layout. "
L"Either a named layout as specified in casparcg.config or in the format [type:string]:[channel_order:string] for a custom layout.");
sink.para()->text(L"Examples:");
sink.example(L">> PLAY 1-10 folder/clip", L"to play all frames in a clip and stop at the last frame.");
sink.example(L">> PLAY 1-10 folder/clip LOOP", L"to loop a clip between the first frame and the last frame.");
- sink.example(L">> PLAY 1-10 folder/clip LOOP SEEK 10", L"to loop a clip between frame 10 and the last frame.");
- sink.example(L">> PLAY 1-10 folder/clip LOOP SEEK 10 LENGTH 50", L"to loop a clip between frame 10 and frame 60.");
- sink.example(L">> PLAY 1-10 folder/clip SEEK 10 LENGTH 50", L"to play frames 10-60 in a clip and stop.");
+ sink.example(L">> PLAY 1-10 folder/clip LOOP IN 10", L"to loop a clip between frame 10 and the last frame.");
+ sink.example(L">> PLAY 1-10 folder/clip LOOP IN 10 LENGTH 50", L"to loop a clip between frame 10 and frame 60.");
+ sink.example(L">> PLAY 1-10 folder/clip IN 10 OUT 60", L"to play frames 10-60 in a clip and stop.");
sink.example(L">> PLAY 1-10 folder/clip FILTER yadif=1,-1", L"to deinterlace the video.");
sink.example(L">> PLAY 1-10 folder/clip CHANNEL_LAYOUT film", L"given the defaults in casparcg.config this will specifies that the clip has 6 audio channels of the type 5.1 and that they are in the order FL FC FR BL BR LFE regardless of what ffmpeg says.");
sink.example(L">> PLAY 1-10 folder/clip CHANNEL_LAYOUT \"5.1:LFE FL FC FR BL BR\"", L"specifies that the clip has 6 audio channels of the type 5.1 and that they are in the specified order regardless of what ffmpeg says.");
+ sink.example(L">> PLAY 1-10 rtmp://example.com/live/stream", L"to play an RTMP stream.");
+ sink.example(L">> PLAY 1-10 \"dshow://video=Live! Cam Chat HD VF0790\"", L"to use a web camera as video input on Windows.");
+ sink.example(L">> PLAY 1-10 v4l2:///dev/video0", L"to use a web camera as video input on Linux.");
sink.para()->text(L"The FFmpeg producer also supports changing some of the settings via ")->code(L"CALL")->text(L":");
sink.example(L">> CALL 1-10 LOOP 1");
- sink.example(L">> CALL 1-10 START 10");
+ sink.example(L">> CALL 1-10 IN 10");
+ sink.example(L">> CALL 1-10 OUT 60");
sink.example(L">> CALL 1-10 LENGTH 50");
sink.example(L">> CALL 1-10 SEEK 30");
core::describe_framerate_producer(sink);
const std::vector<std::wstring>& params,
const spl::shared_ptr<core::media_info_repository>& info_repo)
{
- // Infer the resource type from the resource_name
- auto resource_type = FFMPEG_Resource::FFMPEG_FILE;
- auto tokens = protocol_split(params.at(0));
- auto filename = params.at(0);
+ auto file_or_url = params.at(0);
- if (!tokens[0].empty())
- {
- if (tokens[0] == L"dshow")
- {
- // Camera
- resource_type = FFMPEG_Resource::FFMPEG_DEVICE;
- filename = tokens[1];
- }
- else
- {
- // Stream
- resource_type = FFMPEG_Resource::FFMPEG_STREAM;
- filename = params.at(0);
- }
- }
- else
+ if (!boost::contains(file_or_url, L"://"))
{
// File
- resource_type = FFMPEG_Resource::FFMPEG_FILE;
- filename = probe_stem(env::media_folder() + L"/" + params.at(0), false);
+ file_or_url = probe_stem(env::media_folder() + L"/" + file_or_url, false);
}
- if (filename.empty())
+ if (file_or_url.empty())
return core::frame_producer::empty();
+ constexpr auto uint32_max = std::numeric_limits<uint32_t>::max();
+
auto loop = contains_param(L"LOOP", params);
- auto start = get_param(L"SEEK", params, static_cast<uint32_t>(0));
- auto length = get_param(L"LENGTH", params, std::numeric_limits<uint32_t>::max());
+
+ auto in = get_param(L"SEEK", params, static_cast<uint32_t>(0)); // compatibility
+ in = get_param(L"IN", params, in);
+
+ auto out = get_param(L"LENGTH", params, uint32_max);
+ if (out < uint32_max - in)
+ out += in;
+ else
+ out = uint32_max;
+ out = get_param(L"OUT", params, out);
+
auto filter_str = get_param(L"FILTER", params, L"");
auto custom_channel_order = get_param(L"CHANNEL_LAYOUT", params, L"");
auto producer = spl::make_shared<ffmpeg_producer>(
dependencies.frame_factory,
dependencies.format_desc,
- filename,
- resource_type,
+ file_or_url,
filter_str,
loop,
- start,
- length,
+ in,
+ out,
false,
custom_channel_order,
vid_params);
return core::draw_frame::empty();
auto loop = false;
- auto start = 0;
- auto length = std::numeric_limits<uint32_t>::max();
+ auto in = 0;
+ auto out = std::numeric_limits<uint32_t>::max();
auto filter_str = L"";
ffmpeg_options vid_params;
dependencies.frame_factory,
dependencies.format_desc,
filename,
- FFMPEG_Resource::FFMPEG_FILE,
filter_str,
loop,
- start,
- length,
+ in,
+ out,
true,
L"",
vid_params);